// SPDX-License-Identifier: GPL-2.0
/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 */
#include "sched.h"

/*
 * Targeted preemption latency for CPU-bound tasks:
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 *
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
static unsigned int normalized_sysctl_sched_latency = 6000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable
 *
 * Options are:
 *
 *   SCHED_TUNABLESCALING_NONE - unscaled, always *1
 *   SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 *   SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
 *
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
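
/*
 * Illustrative arithmetic (added explanation, not from the original source):
 * with the default LOG scaling on a machine with 8 or more online CPUs, the
 * factor computed by get_update_sysctl_factor() below is 1 + ilog2(8) = 4
 * (the CPU count is clamped to 8), so the effective tunables become:
 *
 *   sysctl_sched_latency            = 4 * 6ms    = 24ms
 *   sysctl_sched_min_granularity    = 4 * 0.75ms = 3ms
 *   sysctl_sched_wakeup_granularity = 4 * 1ms    = 4ms
 *
 * LINEAR scaling would multiply by the (clamped) CPU count instead, and
 * NONE leaves the normalized defaults untouched.
 */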

/*
 * Minimal preemption granularity for CPU-bound tasks:
 *
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 750000ULL;
static unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;

/*
 * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 8;

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * SCHED_OTHER wake-up granularity.
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 *
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

int sched_thermal_decay_shift;
static int __init setup_sched_thermal_decay_shift(char *str)
{
	int _shift = 0;

	if (kstrtoint(str, 0, &_shift))
		pr_warn("Unable to set scheduler thermal pressure decay shift parameter\n");

	sched_thermal_decay_shift = clamp(_shift, 0, 10);
	return 1;
}
__setup("sched_thermal_decay_shift=", setup_sched_thermal_decay_shift);

#ifdef CONFIG_SMP
/*
 * For asym packing, by default the lower numbered CPU has higher priority.
 */
int __weak arch_asym_cpu_priority(int cpu)
{
	return -cpu;
}

/*
 * The margin used when comparing utilization with CPU capacity.
 *
 * (default: ~20%)
 */
#define fits_capacity(cap, max)	((cap) * 1280 < (max) * 1024)

#endif

#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
 * each time a cfs_rq requests quota.
 *
 * Note: in the case that the slice exceeds the runtime remaining (either due
 * to consumption or the quota being specified to be smaller than the slice)
 * we will always only issue the remaining available time.
 *
 * (default: 5 msec, units: microseconds)
 */
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
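
/*
 * For example (illustrative numbers): a task group with a 20ms quota per
 * period would have its per-CPU cfs_rq's pull runtime from the global pool
 * in up to four 5ms slices; if only 3ms of quota remains, just that 3ms is
 * handed out, per the note above.
 */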
#endif

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
	lw->weight = w;
	lw->inv_weight = 0;
}

/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs.
 *
 * This idea comes from the SD scheduler of Con Kolivas:
 */
static unsigned int get_update_sysctl_factor(void)
{
	unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
	unsigned int factor;

	switch (sysctl_sched_tunable_scaling) {
	case SCHED_TUNABLESCALING_NONE:
		factor = 1;
		break;
	case SCHED_TUNABLESCALING_LINEAR:
		factor = cpus;
		break;
	case SCHED_TUNABLESCALING_LOG:
	default:
		factor = 1 + ilog2(cpus);
		break;
	}

	return factor;
}

static void update_sysctl(void)
{
	unsigned int factor = get_update_sysctl_factor();

#define SET_SYSCTL(name) \
	(sysctl_##name = (factor) * normalized_sysctl_##name)
	SET_SYSCTL(sched_min_granularity);
	SET_SYSCTL(sched_latency);
	SET_SYSCTL(sched_wakeup_granularity);
#undef SET_SYSCTL
}

void __init sched_init_granularity(void)
{
	update_sysctl();
}

#define WMULT_CONST	(~0U)
#define WMULT_SHIFT	32

static void __update_inv_weight(struct load_weight *lw)
{
	unsigned long w;

	if (likely(lw->inv_weight))
		return;

	w = scale_load_down(lw->weight);

	if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
		lw->inv_weight = 1;
	else if (unlikely(!w))
		lw->inv_weight = WMULT_CONST;
	else
		lw->inv_weight = WMULT_CONST / w;
}

/*
 * delta_exec * weight / lw.weight
 *   OR
 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
 *
 * Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case
 * we're guaranteed shift stays positive because inv_weight is guaranteed to
 * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
 *
 * Or, weight <= lw.weight (because lw.weight is the runqueue weight), thus
 * weight/lw.weight <= 1, and therefore our shift will also be positive.
 */
static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
{
	u64 fact = scale_load_down(weight);
	int shift = WMULT_SHIFT;

	__update_inv_weight(lw);

	if (unlikely(fact >> 32)) {
		while (fact >> 32) {
			fact >>= 1;
			shift--;
		}
	}

	fact = mul_u32_u32(fact, lw->inv_weight);

	while (fact >> 32) {
		fact >>= 1;
		shift--;
	}

	return mul_u64_u32_shr(delta_exec, fact, shift);
}
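
/*
 * A worked instance of the fixed-point math above (illustrative numbers,
 * added explanation): with a scaled-down lw->weight of 2048, inv_weight =
 * 0xFFFFFFFF / 2048 = 2,097,151. For weight = NICE_0_LOAD (scaled down to
 * 1024), fact = 1024 * 2,097,151 = 2^31 - 1024, so __calc_delta() returns
 * roughly delta_exec / 2 (499,999 ns for a 1 ms delta) - exactly what
 * delta_exec * weight / lw.weight = delta_exec * 1024 / 2048 would give,
 * minus the rounding of the reciprocal.
 */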


const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED
static inline struct task_struct *task_of(struct sched_entity *se)
{
	SCHED_WARN_ON(!entity_is_task(se));
	return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len)
{
	if (!path)
		return;

	if (cfs_rq && task_group_is_autogroup(cfs_rq->tg))
		autogroup_path(cfs_rq->tg, path, len);
	else if (cfs_rq && cfs_rq->tg->css.cgroup)
		cgroup_path(cfs_rq->tg->css.cgroup, path, len);
	else
		strlcpy(path, "(null)", len);
}

static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	struct rq *rq = rq_of(cfs_rq);
	int cpu = cpu_of(rq);

	if (cfs_rq->on_list)
		return rq->tmp_alone_branch == &rq->leaf_cfs_rq_list;

	cfs_rq->on_list = 1;

	/*
	 * Ensure we either appear before our parent (if already
	 * enqueued) or force our parent to appear after us when it is
	 * enqueued. The fact that we always enqueue bottom-up
	 * reduces this to two cases and a special case for the root
	 * cfs_rq. Furthermore, it also means that we will always reset
	 * tmp_alone_branch either when the branch is connected
	 * to a tree or when we reach the top of the tree.
	 */
	if (cfs_rq->tg->parent &&
	    cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
		/*
		 * If the parent is already on the list, we add the child
		 * just before. Thanks to the circular linked list property,
		 * this means putting the child at the tail of the list that
		 * starts at the parent.
		 */
		list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
			&(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
		/*
		 * The branch is now connected to its tree so we can
		 * reset tmp_alone_branch to the beginning of the
		 * list.
		 */
		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
		return true;
	}

	if (!cfs_rq->tg->parent) {
		/*
		 * A cfs_rq without a parent should be put
		 * at the tail of the list.
		 */
		list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
			&rq->leaf_cfs_rq_list);
		/*
		 * We have reached the top of the tree so we can reset
		 * tmp_alone_branch to the beginning of the list.
		 */
		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
		return true;
	}

	/*
	 * The parent has not been added yet, so we want to
	 * make sure that it will be put after us.
	 * tmp_alone_branch points to the beginning of the branch
	 * where we will add the parent.
	 */
	list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch);
	/*
	 * Update tmp_alone_branch to point to the new beginning
	 * of the branch.
	 */
	rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
	return false;
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->on_list) {
		struct rq *rq = rq_of(cfs_rq);

		/*
		 * With cfs_rq being unthrottled/throttled during an enqueue,
		 * it can happen that tmp_alone_branch points to the leaf we
		 * ultimately want to delete. In this case, tmp_alone_branch
		 * moves to the prev element, but it will point to
		 * rq->leaf_cfs_rq_list at the end of the enqueue.
		 */
		if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list)
			rq->tmp_alone_branch = cfs_rq->leaf_cfs_rq_list.prev;

		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
		cfs_rq->on_list = 0;
	}
}

static inline void assert_list_leaf_cfs_rq(struct rq *rq)
{
	SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list);
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)			\
	list_for_each_entry_safe(cfs_rq, pos, &rq->leaf_cfs_rq_list,	\
				 leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group? */
static inline struct cfs_rq *
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return se->cfs_rq;

	return NULL;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	int se_depth, pse_depth;

	/*
	 * The preemption test can only be made between sibling entities that
	 * are in the same cfs_rq, i.e. that have a common parent. Walk up the
	 * hierarchy of both tasks until we find their ancestors that are
	 * siblings of a common parent.
	 */

	/* First walk up until both entities are at same depth */
	se_depth = (*se)->depth;
	pse_depth = (*pse)->depth;

	while (se_depth > pse_depth) {
		se_depth--;
		*se = parent_entity(*se);
	}

	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = parent_entity(*pse);
	}

	while (!is_same_group(*se, *pse)) {
		*se = parent_entity(*se);
		*pse = parent_entity(*pse);
	}
}

#else	/* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline void cfs_rq_tg_path(struct cfs_rq *cfs_rq, char *path, int len)
{
	if (path)
		strlcpy(path, "(null)", len);
}

static inline bool list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	return true;
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

static inline void assert_list_leaf_cfs_rq(struct rq *rq)
{
}

#define for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos)	\
		for (cfs_rq = &rq->cfs, pos = NULL; cfs_rq; cfs_rq = pos)

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */

static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - max_vruntime);
	if (delta > 0)
		max_vruntime = vruntime;

	return max_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline int entity_before(struct sched_entity *a,
				struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}
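
/*
 * Note on the signed-difference comparisons above (added explanation):
 * using (s64)(a - b) rather than a direct u64 compare keeps the ordering
 * correct even after vruntime wraps around. For example, if max_vruntime is
 * U64_MAX - 100 and a newly computed vruntime has wrapped to 50, then
 * (s64)(50 - (U64_MAX - 100)) = 151 > 0, so the wrapped value is correctly
 * treated as the larger one.
 */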

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	struct rb_node *leftmost = rb_first_cached(&cfs_rq->tasks_timeline);

	u64 vruntime = cfs_rq->min_vruntime;

	if (curr) {
		if (curr->on_rq)
			vruntime = curr->vruntime;
		else
			curr = NULL;
	}

	if (leftmost) { /* non-empty tree */
		struct sched_entity *se;
		se = rb_entry(leftmost, struct sched_entity, run_node);

		if (!curr)
			vruntime = se->vruntime;
		else
			vruntime = min_vruntime(vruntime, se->vruntime);
	}

	/* ensure we never gain time by being placed backwards. */
	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
#ifndef CONFIG_64BIT
	smp_wmb();
	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	bool leftmost = true;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (entity_before(se, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color_cached(&se->run_node,
			       &cfs_rq->tasks_timeline, leftmost);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	rb_erase_cached(&se->run_node, &cfs_rq->tasks_timeline);
}

struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *left = rb_first_cached(&cfs_rq->tasks_timeline);

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_next_entity(struct sched_entity *se)
{
	struct rb_node *next = rb_next(&se->run_node);

	if (!next)
		return NULL;

	return rb_entry(next, struct sched_entity, run_node);
}

#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline.rb_root);

	if (!last)
		return NULL;

	return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

int sched_proc_update_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	unsigned int factor = get_update_sysctl_factor();

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
	(normalized_sysctl_##name = sysctl_##name / (factor))
	WRT_SYSCTL(sched_min_granularity);
	WRT_SYSCTL(sched_latency);
	WRT_SYSCTL(sched_wakeup_granularity);
#undef WRT_SYSCTL

	return 0;
}
#endif

/*
 * delta /= w
 */
static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
{
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = __calc_delta(delta, NICE_0_LOAD, &se->load);

	return delta;
}

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (more than sched_nr_latency) we have to
 * stretch this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	if (unlikely(nr_running > sched_nr_latency))
		return nr_running * sysctl_sched_min_granularity;
	else
		return sysctl_sched_latency;
}
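
/*
 * For example, with the unscaled defaults (latency = 6ms, min granularity =
 * 0.75ms, sched_nr_latency = 8): 5 runnable tasks fit in the 6ms period,
 * but 16 runnable tasks stretch it to 16 * 0.75ms = 12ms so that each task
 * still gets at least the minimum granularity. (Illustrative arithmetic,
 * not from the original source.)
 */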

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	unsigned int nr_running = cfs_rq->nr_running;
	u64 slice;

	if (sched_feat(ALT_PERIOD))
		nr_running = rq_of(cfs_rq)->cfs.h_nr_running;

	slice = __sched_period(nr_running + !se->on_rq);

	for_each_sched_entity(se) {
		struct load_weight *load;
		struct load_weight lw;

		cfs_rq = cfs_rq_of(se);
		load = &cfs_rq->load;

		if (unlikely(!se->on_rq)) {
			lw = cfs_rq->load;

			update_load_add(&lw, se->load.weight);
			load = &lw;
		}
		slice = __calc_delta(slice, se->load.weight, load);
	}

	if (sched_feat(BASE_SLICE))
		slice = max(slice, (u64)sysctl_sched_min_granularity);

	return slice;
}
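
/*
 * Worked example (illustrative, assuming the unscaled 6ms period and the
 * standard sched_prio_to_weight[] table): two runnable nice-0 tasks
 * (weight 1024 each) split the period evenly, 3ms apiece. A nice-0 task
 * running next to a nice-5 task (weight 335) instead gets
 * 6ms * 1024 / (1024 + 335) ~= 4.5ms, and the nice-5 task ~= 1.5ms.
 */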

/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

#include "pelt.h"
#ifdef CONFIG_SMP

static int select_idle_sibling(struct task_struct *p, int prev_cpu, int cpu);
static unsigned long task_h_load(struct task_struct *p);
static unsigned long capacity_of(int cpu);

/* Give a new sched_entity its starting runnable values so it is seen as heavy in its infant time */
void init_entity_runnable_average(struct sched_entity *se)
{
	struct sched_avg *sa = &se->avg;

	memset(sa, 0, sizeof(*sa));

	/*
	 * Tasks are initialized with full load to be seen as heavy tasks until
	 * they get a chance to stabilize to their real load level.
	 * Group entities are initialized with zero load to reflect the fact that
	 * nothing has been attached to the task group yet.
	 */
	if (entity_is_task(se))
		sa->load_avg = scale_load_down(se->load.weight);

	/* when this task is enqueued, it will contribute to its cfs_rq's load_avg */
}

static void attach_entity_cfs_rq(struct sched_entity *se);

/*
 * With new tasks being created, their initial util_avgs are extrapolated
 * based on the cfs_rq's current util_avg:
 *
 *   util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
 *
 * However, in many cases, the above util_avg does not give a desired
 * value. Moreover, the sum of the util_avgs may be divergent, such
 * as when the series is a harmonic series.
 *
 * To solve this problem, we also cap the util_avg of successive tasks to
 * only 1/2 of the left utilization budget:
 *
 *   util_avg_cap = (cpu_scale - cfs_rq->avg.util_avg) / 2^n
 *
 * where n denotes the nth task and cpu_scale the CPU capacity.
 *
 * For example, for a CPU with 1024 of capacity, a simplest series from
 * the beginning would be like:
 *
 *  task  util_avg: 512, 256, 128,  64,  32,   16,    8, ...
 * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
 *
 * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
 * if util_avg > util_avg_cap.
 */
void post_init_entity_util_avg(struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);
	struct sched_avg *sa = &se->avg;
	long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq)));
	long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;

	if (cap > 0) {
		if (cfs_rq->avg.util_avg != 0) {
			sa->util_avg = cfs_rq->avg.util_avg * se->load.weight;
			sa->util_avg /= (cfs_rq->avg.load_avg + 1);

			if (sa->util_avg > cap)
				sa->util_avg = cap;
		} else {
			sa->util_avg = cap;
		}
	}

	sa->runnable_avg = sa->util_avg;

	if (p->sched_class != &fair_sched_class) {
		/*
		 * For !fair tasks do:
		 *
		update_cfs_rq_load_avg(now, cfs_rq);
		attach_entity_load_avg(cfs_rq, se);
		switched_from_fair(rq, p);
		 *
		 * such that the next switched_to_fair() has the
		 * expected state.
		 */
		se->avg.last_update_time = cfs_rq_clock_pelt(cfs_rq);
		return;
	}

	attach_entity_cfs_rq(se);
}

#else /* !CONFIG_SMP */
void init_entity_runnable_average(struct sched_entity *se)
{
}
void post_init_entity_util_avg(struct task_struct *p)
{
}
static void update_tg_load_avg(struct cfs_rq *cfs_rq)
{
}
#endif /* CONFIG_SMP */

/*
 * Update the current task's runtime statistics.
 */
static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_clock_task(rq_of(cfs_rq));
	u64 delta_exec;

	if (unlikely(!curr))
		return;

	delta_exec = now - curr->exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

	curr->exec_start = now;

	schedstat_set(curr->statistics.exec_max,
		      max(delta_exec, curr->statistics.exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq->exec_clock, delta_exec);

	curr->vruntime += calc_delta_fair(delta_exec, curr);
	update_min_vruntime(cfs_rq);

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
		cgroup_account_cputime(curtask, delta_exec);
		account_group_exec_runtime(curtask, delta_exec);
	}

	account_cfs_rq_runtime(cfs_rq, delta_exec);
}

static void update_curr_fair(struct rq *rq)
{
	update_curr(cfs_rq_of(&rq->curr->se));
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 wait_start, prev_wait_start;

	if (!schedstat_enabled())
		return;

	wait_start = rq_clock(rq_of(cfs_rq));
	prev_wait_start = schedstat_val(se->statistics.wait_start);

	if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
	    likely(wait_start > prev_wait_start))
		wait_start -= prev_wait_start;

	__schedstat_set(se->statistics.wait_start, wait_start);
}

static inline void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct task_struct *p;
	u64 delta;

	if (!schedstat_enabled())
		return;

	delta = rq_clock(rq_of(cfs_rq)) - schedstat_val(se->statistics.wait_start);

	if (entity_is_task(se)) {
		p = task_of(se);
		if (task_on_rq_migrating(p)) {
			/*
			 * Preserve migrating task's wait time so wait_start
			 * time stamp can be adjusted to accumulate wait time
			 * prior to migration.
			 */
			__schedstat_set(se->statistics.wait_start, delta);
			return;
		}
		trace_sched_stat_wait(p, delta);
	}

	__schedstat_set(se->statistics.wait_max,
			max(schedstat_val(se->statistics.wait_max), delta));
	__schedstat_inc(se->statistics.wait_count);
	__schedstat_add(se->statistics.wait_sum, delta);
	__schedstat_set(se->statistics.wait_start, 0);
}

static inline void
update_stats_enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct task_struct *tsk = NULL;
	u64 sleep_start, block_start;

	if (!schedstat_enabled())
		return;

	sleep_start = schedstat_val(se->statistics.sleep_start);
	block_start = schedstat_val(se->statistics.block_start);

	if (entity_is_task(se))
		tsk = task_of(se);

	if (sleep_start) {
		u64 delta = rq_clock(rq_of(cfs_rq)) - sleep_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > schedstat_val(se->statistics.sleep_max)))
			__schedstat_set(se->statistics.sleep_max, delta);

		__schedstat_set(se->statistics.sleep_start, 0);
		__schedstat_add(se->statistics.sum_sleep_runtime, delta);

		if (tsk) {
			account_scheduler_latency(tsk, delta >> 10, 1);
			trace_sched_stat_sleep(tsk, delta);
		}
	}
	if (block_start) {
		u64 delta = rq_clock(rq_of(cfs_rq)) - block_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > schedstat_val(se->statistics.block_max)))
			__schedstat_set(se->statistics.block_max, delta);

		__schedstat_set(se->statistics.block_start, 0);
		__schedstat_add(se->statistics.sum_sleep_runtime, delta);

		if (tsk) {
			if (tsk->in_iowait) {
				__schedstat_add(se->statistics.iowait_sum, delta);
				__schedstat_inc(se->statistics.iowait_count);
				trace_sched_stat_iowait(tsk, delta);
			}

			trace_sched_stat_blocked(tsk, delta);

			/*
			 * Blocking time is in units of nanosecs, so shift by
			 * 20 to get a milliseconds-range estimation of the
			 * amount of time that the task spent sleeping:
			 */
			if (unlikely(prof_on == SLEEP_PROFILING)) {
				profile_hits(SLEEP_PROFILING,
						(void *)get_wchan(tsk),
						delta >> 20);
			}
			account_scheduler_latency(tsk, delta >> 10, 0);
		}
	}
}

/*
 * Task is being enqueued - update stats:
 */
static inline void
update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
	if (!schedstat_enabled())
		return;

	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);

	if (flags & ENQUEUE_WAKEUP)
		update_stats_enqueue_sleeper(cfs_rq, se);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
	if (!schedstat_enabled())
		return;

	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);

	if ((flags & DEQUEUE_SLEEP) && entity_is_task(se)) {
		struct task_struct *tsk = task_of(se);

		if (tsk->state & TASK_INTERRUPTIBLE)
			__schedstat_set(se->statistics.sleep_start,
					rq_clock(rq_of(cfs_rq)));
		if (tsk->state & TASK_UNINTERRUPTIBLE)
			__schedstat_set(se->statistics.block_start,
					rq_clock(rq_of(cfs_rq)));
	}
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_clock_task(rq_of(cfs_rq));
}

/**************************************************
 * Scheduling class queueing methods:
 */

#ifdef CONFIG_NUMA_BALANCING
/*
 * Approximate time to scan a full NUMA task in ms. The task scan period is
 * calculated based on the task's virtual memory size and
 * numa_balancing_scan_size.
 */
unsigned int sysctl_numa_balancing_scan_period_min = 1000;
unsigned int sysctl_numa_balancing_scan_period_max = 60000;

/* Portion of address space to scan in MB */
unsigned int sysctl_numa_balancing_scan_size = 256;

/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
unsigned int sysctl_numa_balancing_scan_delay = 1000;

struct numa_group {
	refcount_t refcount;

	spinlock_t lock; /* nr_tasks, tasks */
	int nr_tasks;
	pid_t gid;
	int active_nodes;

	struct rcu_head rcu;
	unsigned long total_faults;
	unsigned long max_faults_cpu;
	/*
	 * Faults_cpu is used to decide whether memory should move
	 * towards the CPU. As a consequence, these stats are weighted
	 * more by CPU use than by memory faults.
	 */
	unsigned long *faults_cpu;
	unsigned long faults[];
};

/*
 * For functions that can be called in multiple contexts that permit reading
 * ->numa_group (see struct task_struct for locking rules).
 */
static struct numa_group *deref_task_numa_group(struct task_struct *p)
{
	return rcu_dereference_check(p->numa_group, p == current ||
		(lockdep_is_held(&task_rq(p)->lock) && !READ_ONCE(p->on_cpu)));
}

static struct numa_group *deref_curr_numa_group(struct task_struct *p)
{
	return rcu_dereference_protected(p->numa_group, p == current);
}

static inline unsigned long group_faults_priv(struct numa_group *ng);
static inline unsigned long group_faults_shared(struct numa_group *ng);

static unsigned int task_nr_scan_windows(struct task_struct *p)
{
	unsigned long rss = 0;
	unsigned long nr_scan_pages;

	/*
	 * Calculations are based on RSS, as non-present and empty pages are
	 * skipped by the PTE scanner and NUMA hinting faults should be trapped
	 * based on resident pages.
	 */
	nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
	rss = get_mm_rss(p->mm);
	if (!rss)
		rss = nr_scan_pages;

	rss = round_up(rss, nr_scan_pages);
	return rss / nr_scan_pages;
}
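
/*
 * Example (illustrative, assuming 4KB pages): with the default scan_size of
 * 256MB, nr_scan_pages = 256 << (20 - 12) = 65,536 pages. A task with 1GiB
 * resident (262,144 pages) therefore needs 262,144 / 65,536 = 4 scan
 * windows to cover its address space.
 */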

/* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
#define MAX_SCAN_WINDOW 2560

static unsigned int task_scan_min(struct task_struct *p)
{
	unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
	unsigned int scan, floor;
	unsigned int windows = 1;

	if (scan_size < MAX_SCAN_WINDOW)
		windows = MAX_SCAN_WINDOW / scan_size;
	floor = 1000 / windows;

	scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
	return max_t(unsigned int, floor, scan);
}
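
/*
 * Continuing the example above (illustrative numbers): scan_size = 256
 * gives windows = 2560 / 256 = 10 and a floor of 1000 / 10 = 100ms. For
 * the 1GiB task with 4 scan windows, scan = 1000ms / 4 = 250ms, so
 * task_scan_min() returns max(100ms, 250ms) = 250ms.
 */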

static unsigned int task_scan_start(struct task_struct *p)
{
	unsigned long smin = task_scan_min(p);
	unsigned long period = smin;
	struct numa_group *ng;

	/* Scale the maximum scan period with the amount of shared memory. */
	rcu_read_lock();
	ng = rcu_dereference(p->numa_group);
	if (ng) {
		unsigned long shared = group_faults_shared(ng);
		unsigned long private = group_faults_priv(ng);

		period *= refcount_read(&ng->refcount);
		period *= shared + 1;
		period /= private + shared + 1;
	}
	rcu_read_unlock();

	return max(smin, period);
}

static unsigned int task_scan_max(struct task_struct *p)
{
	unsigned long smin = task_scan_min(p);
	unsigned long smax;
	struct numa_group *ng;

	/* Watch for min being lower than max due to floor calculations */
	smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);

	/* Scale the maximum scan period with the amount of shared memory. */
	ng = deref_curr_numa_group(p);
	if (ng) {
		unsigned long shared = group_faults_shared(ng);
		unsigned long private = group_faults_priv(ng);
		unsigned long period = smax;

		period *= refcount_read(&ng->refcount);
		period *= shared + 1;
		period /= private + shared + 1;

		smax = max(smax, period);
	}

	return max(smin, smax);
}

static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
{
	rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE);
	rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
}

static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
{
	rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE);
	rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
}

/* Shared or private faults. */
#define NR_NUMA_HINT_FAULT_TYPES 2

/* Memory and CPU locality */
#define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)

/* Averaged statistics, and temporary buffers. */
#define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)

pid_t task_numa_group_id(struct task_struct *p)
{
	struct numa_group *ng;
	pid_t gid = 0;

	rcu_read_lock();
	ng = rcu_dereference(p->numa_group);
	if (ng)
		gid = ng->gid;
	rcu_read_unlock();

	return gid;
}

/*
 * The averaged statistics, shared & private, memory & CPU,
 * occupy the first half of the array. The second half of the
 * array is for current counters, which are averaged into the
 * first set by task_numa_placement.
 */
static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
{
	return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
}

static inline unsigned long task_faults(struct task_struct *p, int nid)
{
	if (!p->numa_faults)
		return 0;

	return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
		p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
}

static inline unsigned long group_faults(struct task_struct *p, int nid)
{
	struct numa_group *ng = deref_task_numa_group(p);

	if (!ng)
		return 0;

	return ng->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
		ng->faults[task_faults_idx(NUMA_MEM, nid, 1)];
}

static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
{
	return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] +
		group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)];
}

static inline unsigned long group_faults_priv(struct numa_group *ng)
{
	unsigned long faults = 0;
	int node;

	for_each_online_node(node) {
		faults += ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
	}

	return faults;
}

static inline unsigned long group_faults_shared(struct numa_group *ng)
{
	unsigned long faults = 0;
	int node;

	for_each_online_node(node) {
		faults += ng->faults[task_faults_idx(NUMA_MEM, node, 0)];
	}

	return faults;
}

/*
 * A node triggering more than 1/3 as many NUMA faults as the maximum is
 * considered part of a numa group's pseudo-interleaving set. Migrations
 * between these nodes are slowed down, to allow things to settle down.
 */
#define ACTIVE_NODE_FRACTION 3

static bool numa_is_active_node(int nid, struct numa_group *ng)
{
	return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
}
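
/*
 * For instance (illustrative numbers): if the busiest node in a group has
 * max_faults_cpu = 300, any node with more than 300 / 3 = 100 CPU-local
 * faults counts as active and joins the pseudo-interleaving set.
 */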

/* Handle placement on systems where not all nodes are directly connected. */
static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
					int maxdist, bool task)
{
	unsigned long score = 0;
	int node;

	/*
	 * All nodes are directly connected, and the same distance
	 * from each other. No need for fancy placement algorithms.
	 */
	if (sched_numa_topology_type == NUMA_DIRECT)
		return 0;

	/*
	 * This code is called for each node, introducing N^2 complexity,
	 * which should be ok given the number of nodes rarely exceeds 8.
	 */
	for_each_online_node(node) {
		unsigned long faults;
		int dist = node_distance(nid, node);

		/*
		 * The furthest away nodes in the system are not interesting
		 * for placement; nid was already counted.
		 */
		if (dist == sched_max_numa_distance || node == nid)
			continue;

		/*
		 * On systems with a backplane NUMA topology, compare groups
		 * of nodes, and move tasks towards the group with the most
		 * memory accesses. When comparing two nodes at distance
		 * "hoplimit", only nodes closer by than "hoplimit" are part
		 * of each group. Skip other nodes.
		 */
		if (sched_numa_topology_type == NUMA_BACKPLANE &&
		    dist >= maxdist)
			continue;

		/* Add up the faults from nearby nodes. */
		if (task)
			faults = task_faults(p, node);
		else
			faults = group_faults(p, node);

		/*
		 * On systems with a glueless mesh NUMA topology, there are
		 * no fixed "groups of nodes". Instead, nodes that are not
		 * directly connected bounce traffic through intermediate
		 * nodes; a numa_group can occupy any set of nodes.
		 * The further away a node is, the less the faults count.
		 * This seems to result in good task placement.
		 */
		if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
			faults *= (sched_max_numa_distance - dist);
			faults /= (sched_max_numa_distance - LOCAL_DISTANCE);
		}

		score += faults;
	}

	return score;
}

/*
 * These return the fraction of accesses done by a particular task, or
 * task group, on a particular numa node. The group weight is given a
 * larger multiplier, in order to group tasks together that are almost
 * evenly spread out between numa nodes.
 */
static inline unsigned long task_weight(struct task_struct *p, int nid,
					int dist)
{
	unsigned long faults, total_faults;

	if (!p->numa_faults)
		return 0;

	total_faults = p->total_numa_faults;

	if (!total_faults)
		return 0;

	faults = task_faults(p, nid);
	faults += score_nearby_nodes(p, nid, dist, true);

	return 1000 * faults / total_faults;
}
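
/*
 * E.g. (illustrative): on a NUMA_DIRECT system, where score_nearby_nodes()
 * contributes nothing, a task with 1000 total faults, 250 of them on @nid,
 * gets task_weight() = 1000 * 250 / 1000 = 250, i.e. 25.0% expressed in
 * units of 0.1%.
 */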
| 1406 | |
| 1407 | static inline unsigned long group_weight(struct task_struct *p, int nid, |
| 1408 | int dist) |
| 1409 | { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1410 | struct numa_group *ng = deref_task_numa_group(p); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1411 | unsigned long faults, total_faults; |
| 1412 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1413 | if (!ng) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1414 | return 0; |
| 1415 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 1416 | total_faults = ng->total_faults; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1417 | |
| 1418 | if (!total_faults) |
| 1419 | return 0; |
| 1420 | |
| 1421 | faults = group_faults(p, nid); |
| 1422 | faults += score_nearby_nodes(p, nid, dist, false); |
| 1423 | |
| 1424 | return 1000 * faults / total_faults; |
| 1425 | } |
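
/*
 * Worked example for the weights above, with made-up numbers: a task
 * with total_numa_faults = 400, task_faults(p, nid) = 120 and another
 * 60 faults contributed by score_nearby_nodes() gets
 * task_weight() = 1000 * (120 + 60) / 400 = 450. Weights are thus
 * per-mille values in 0..1000, which is what the "1998 (2*999)"
 * bound on NUMA importance further down relies on.
 */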

bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
				int src_nid, int dst_cpu)
{
	struct numa_group *ng = deref_curr_numa_group(p);
	int dst_nid = cpu_to_node(dst_cpu);
	int last_cpupid, this_cpupid;

	this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
	last_cpupid = page_cpupid_xchg_last(page, this_cpupid);

	/*
	 * Allow first faults or private faults to migrate immediately early in
	 * the lifetime of a task. The magic number 4 is based on waiting for
	 * two full passes of the "multi-stage node selection" test that is
	 * executed below.
	 */
	if ((p->numa_preferred_nid == NUMA_NO_NODE || p->numa_scan_seq <= 4) &&
	    (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid)))
		return true;

	/*
	 * Multi-stage node selection is used in conjunction with a periodic
	 * migration fault to build a temporal task<->page relation. By using
	 * a two-stage filter we remove short/unlikely relations.
	 *
	 * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
	 * a task's usage of a particular page (n_p) per total usage of this
	 * page (n_t) (in a given time-span) to a probability.
	 *
	 * Our periodic faults will sample this probability and getting the
	 * same result twice in a row, given these samples are fully
	 * independent, is then given by P(n)^2, provided our sample period
	 * is sufficiently short compared to the usage pattern.
	 *
	 * This quadratic squishes small probabilities, making it less likely
	 * we act on an unlikely task<->page relation.
	 */
	if (!cpupid_pid_unset(last_cpupid) &&
	    cpupid_to_nid(last_cpupid) != dst_nid)
		return false;

	/* Always allow migrate on private faults */
	if (cpupid_match_pid(p, last_cpupid))
		return true;

	/* A shared fault, but p->numa_group has not been set up yet. */
	if (!ng)
		return true;

	/*
	 * Destination node is much more heavily used than the source
	 * node? Allow migration.
	 */
	if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) *
					    ACTIVE_NODE_FRACTION)
		return true;

	/*
	 * Distribute memory according to CPU & memory use on each node,
	 * with 3/4 hysteresis to avoid unnecessary memory migrations:
	 *
	 *	faults_cpu(dst)   3   faults_cpu(src)
	 *	--------------- * - > ---------------
	 *	faults_mem(dst)   4   faults_mem(src)
	 */
	return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 >
	       group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
}
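
/*
 * Worked example of the 3/4 hysteresis rule above, with made-up
 * numbers: faults_cpu(dst) = 40, faults_mem(src) = 100,
 * faults_cpu(src) = 60, faults_mem(dst) = 50. Then
 * 40 * 100 * 3 = 12000 is not greater than 60 * 50 * 4 = 12000, so
 * the page stays put; once faults_cpu(dst) reaches 60, the left side
 * becomes 18000 > 12000 and the page may migrate.
 */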

/*
 * 'numa_type' describes the node at the moment of load balancing.
 */
enum numa_type {
	/* The node has spare capacity that can be used to run more tasks. */
	node_has_spare = 0,
	/*
	 * The node is fully used and the tasks don't compete for more CPU
	 * cycles. Nevertheless, some tasks might wait before running.
	 */
	node_fully_busy,
	/*
	 * The node is overloaded and can't provide expected CPU cycles to all
	 * tasks.
	 */
	node_overloaded
};

/* Cached statistics for all CPUs within a node */
struct numa_stats {
	unsigned long load;
	unsigned long runnable;
	unsigned long util;
	/* Total compute capacity of CPUs on a node */
	unsigned long compute_capacity;
	unsigned int nr_running;
	unsigned int weight;
	enum numa_type node_type;
	int idle_cpu;
};

static inline bool is_core_idle(int cpu)
{
#ifdef CONFIG_SCHED_SMT
	int sibling;

	for_each_cpu(sibling, cpu_smt_mask(cpu)) {
		if (cpu == sibling)
			continue;

		if (!idle_cpu(sibling))
			return false;
	}
#endif

	return true;
}
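
/*
 * For illustration: on a 2-way SMT machine where CPUs 4 and 5 are
 * siblings, is_core_idle(4) returns true only if both CPU 4 and CPU 5
 * pass idle_cpu(). Without CONFIG_SCHED_SMT every CPU is its own core
 * and the function trivially returns true.
 */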
| 1543 | |
| 1544 | struct task_numa_env { |
| 1545 | struct task_struct *p; |
| 1546 | |
| 1547 | int src_cpu, src_nid; |
| 1548 | int dst_cpu, dst_nid; |
| 1549 | |
| 1550 | struct numa_stats src_stats, dst_stats; |
| 1551 | |
| 1552 | int imbalance_pct; |
| 1553 | int dist; |
| 1554 | |
| 1555 | struct task_struct *best_task; |
| 1556 | long best_imp; |
| 1557 | int best_cpu; |
| 1558 | }; |
| 1559 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 1560 | static unsigned long cpu_load(struct rq *rq); |
| 1561 | static unsigned long cpu_runnable(struct rq *rq); |
| 1562 | static unsigned long cpu_util(int cpu); |
| 1563 | static inline long adjust_numa_imbalance(int imbalance, int nr_running); |
| 1564 | |
| 1565 | static inline enum |
| 1566 | numa_type numa_classify(unsigned int imbalance_pct, |
| 1567 | struct numa_stats *ns) |
| 1568 | { |
| 1569 | if ((ns->nr_running > ns->weight) && |
| 1570 | (((ns->compute_capacity * 100) < (ns->util * imbalance_pct)) || |
| 1571 | ((ns->compute_capacity * imbalance_pct) < (ns->runnable * 100)))) |
| 1572 | return node_overloaded; |
| 1573 | |
| 1574 | if ((ns->nr_running < ns->weight) || |
| 1575 | (((ns->compute_capacity * 100) > (ns->util * imbalance_pct)) && |
| 1576 | ((ns->compute_capacity * imbalance_pct) > (ns->runnable * 100)))) |
| 1577 | return node_has_spare; |
| 1578 | |
| 1579 | return node_fully_busy; |
| 1580 | } |
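
/*
 * Worked example for numa_classify(), with made-up numbers: a node
 * with weight = 4 CPUs, nr_running = 6, compute_capacity = 4096,
 * util = 4300 and imbalance_pct = 112 is node_overloaded, since
 * 6 > 4 and 4096 * 100 = 409600 < 4300 * 112 = 481600. With
 * nr_running = 3 the same node would be node_has_spare, whatever its
 * utilization.
 */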

#ifdef CONFIG_SCHED_SMT
/* Forward declarations of select_idle_sibling helpers */
static inline bool test_idle_cores(int cpu, bool def);
static inline int numa_idle_core(int idle_core, int cpu)
{
	if (!static_branch_likely(&sched_smt_present) ||
	    idle_core >= 0 || !test_idle_cores(cpu, false))
		return idle_core;

	/*
	 * Prefer cores instead of packing HT siblings
	 * and triggering future load balancing.
	 */
	if (is_core_idle(cpu))
		idle_core = cpu;

	return idle_core;
}
#else
static inline int numa_idle_core(int idle_core, int cpu)
{
	return idle_core;
}
#endif

/*
 * Gather all necessary information to make NUMA balancing placement
 * decisions that are compatible with the standard load balancer. This
 * borrows code and logic from update_sg_lb_stats but sharing a
 * common implementation is impractical.
 */
static void update_numa_stats(struct task_numa_env *env,
			      struct numa_stats *ns, int nid,
			      bool find_idle)
{
	int cpu, idle_core = -1;

	memset(ns, 0, sizeof(*ns));
	ns->idle_cpu = -1;

	rcu_read_lock();
	for_each_cpu(cpu, cpumask_of_node(nid)) {
		struct rq *rq = cpu_rq(cpu);

		ns->load += cpu_load(rq);
		ns->runnable += cpu_runnable(rq);
		ns->util += cpu_util(cpu);
		ns->nr_running += rq->cfs.h_nr_running;
		ns->compute_capacity += capacity_of(cpu);

		if (find_idle && !rq->nr_running && idle_cpu(cpu)) {
			if (READ_ONCE(rq->numa_migrate_on) ||
			    !cpumask_test_cpu(cpu, env->p->cpus_ptr))
				continue;

			if (ns->idle_cpu == -1)
				ns->idle_cpu = cpu;

			idle_core = numa_idle_core(idle_core, cpu);
		}
	}
	rcu_read_unlock();

	ns->weight = cpumask_weight(cpumask_of_node(nid));

	ns->node_type = numa_classify(env->imbalance_pct, ns);

	if (idle_core >= 0)
		ns->idle_cpu = idle_core;
}

static void task_numa_assign(struct task_numa_env *env,
			     struct task_struct *p, long imp)
{
	struct rq *rq = cpu_rq(env->dst_cpu);

	/* Check if the run-queue is part of an active NUMA balance. */
	if (env->best_cpu != env->dst_cpu && xchg(&rq->numa_migrate_on, 1)) {
		int cpu;
		int start = env->dst_cpu;

		/* Find an alternative idle CPU. */
		for_each_cpu_wrap(cpu, cpumask_of_node(env->dst_nid), start) {
			if (cpu == env->best_cpu || !idle_cpu(cpu) ||
			    !cpumask_test_cpu(cpu, env->p->cpus_ptr)) {
				continue;
			}

			env->dst_cpu = cpu;
			rq = cpu_rq(env->dst_cpu);
			if (!xchg(&rq->numa_migrate_on, 1))
				goto assign;
		}

		/* Failed to find an alternative idle CPU */
		return;
	}

assign:
	/*
	 * Clear the previous best_cpu/rq numa-migrate flag, since the task
	 * has now found a better CPU to move/swap to.
	 */
	if (env->best_cpu != -1 && env->best_cpu != env->dst_cpu) {
		rq = cpu_rq(env->best_cpu);
		WRITE_ONCE(rq->numa_migrate_on, 0);
	}

	if (env->best_task)
		put_task_struct(env->best_task);
	if (p)
		get_task_struct(p);

	env->best_task = p;
	env->best_imp = imp;
	env->best_cpu = env->dst_cpu;
}
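
/*
 * Note on the claim protocol above: rq->numa_migrate_on acts as a
 * per-runqueue claim bit. xchg() sets it and returns the old value,
 * so exactly one concurrent NUMA balance can claim a destination
 * runqueue; a loser probes the other idle CPUs on the node or gives
 * up, and the bit is released with WRITE_ONCE(..., 0) once a better
 * CPU is found or the migration finishes.
 */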

static bool load_too_imbalanced(long src_load, long dst_load,
				struct task_numa_env *env)
{
	long imb, old_imb;
	long orig_src_load, orig_dst_load;
	long src_capacity, dst_capacity;

	/*
	 * The load is corrected for the CPU capacity available on each node.
	 *
	 * src_load        dst_load
	 * ------------ vs ---------
	 * src_capacity    dst_capacity
	 */
	src_capacity = env->src_stats.compute_capacity;
	dst_capacity = env->dst_stats.compute_capacity;

	imb = abs(dst_load * src_capacity - src_load * dst_capacity);

	orig_src_load = env->src_stats.load;
	orig_dst_load = env->dst_stats.load;

	old_imb = abs(orig_dst_load * src_capacity - orig_src_load * dst_capacity);

	/* Would this change make things worse? */
	return (imb > old_imb);
}
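
/*
 * Worked example, with made-up numbers: src_capacity = dst_capacity =
 * 1024, orig_src_load = 600, orig_dst_load = 200. Moving a task of
 * load 100 is evaluated with src_load = 500 and dst_load = 300:
 *
 *	imb     = abs(300 * 1024 - 500 * 1024) = 204800
 *	old_imb = abs(200 * 1024 - 600 * 1024) = 409600
 *
 * imb < old_imb, so the move narrows the imbalance and is allowed.
 */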

/*
 * Maximum NUMA importance can be 1998 (2*999);
 * SMALLIMP @ 30 would be close to 1998/64.
 * Used to deter task migration.
 */
#define SMALLIMP	30
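
/*
 * How the 1998 bound arises: task_weight()/group_weight() are
 * per-mille values, so the differential between two nodes is at most
 * roughly 999 for the task and 999 for the group, giving
 * 2 * 999 = 1998 in total; 1998 / 64 is about 31, hence the
 * SMALLIMP = 30 cut-off below which a migration is not worth it.
 */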

/*
 * This checks if the overall compute and NUMA accesses of the system
 * would be improved if the source task was migrated to the target
 * dst_cpu, taking into account that it might be best to exchange the
 * task running on dst_cpu with the source task.
 */
static bool task_numa_compare(struct task_numa_env *env,
			      long taskimp, long groupimp, bool maymove)
{
	struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p);
	struct rq *dst_rq = cpu_rq(env->dst_cpu);
	long imp = p_ng ? groupimp : taskimp;
	struct task_struct *cur;
	long src_load, dst_load;
	int dist = env->dist;
	long moveimp = imp;
	long load;
	bool stopsearch = false;

	if (READ_ONCE(dst_rq->numa_migrate_on))
		return false;

	rcu_read_lock();
	cur = rcu_dereference(dst_rq->curr);
	if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
		cur = NULL;

	/*
	 * Because we have preemption enabled we can get migrated around and
	 * end up trying to select ourselves (current == env->p) as a swap
	 * candidate.
	 */
	if (cur == env->p) {
		stopsearch = true;
		goto unlock;
	}

	if (!cur) {
		if (maymove && moveimp >= env->best_imp)
			goto assign;
		else
			goto unlock;
	}

	/* Skip this swap candidate if it cannot move to the source CPU. */
	if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr))
		goto unlock;

	/*
	 * Skip this swap candidate if it is not moving to its preferred
	 * node and the best task is.
	 */
	if (env->best_task &&
	    env->best_task->numa_preferred_nid == env->src_nid &&
	    cur->numa_preferred_nid != env->src_nid) {
		goto unlock;
	}

	/*
	 * "imp" is the fault differential for the source task between the
	 * source and destination node. Calculate the total differential for
	 * the source task and potential destination task. The more negative
	 * the value is, the more remote accesses that would be expected to
	 * be incurred if the tasks were swapped.
	 *
	 * If dst and source tasks are in the same NUMA group, or not
	 * in any group then look only at task weights.
	 */
	cur_ng = rcu_dereference(cur->numa_group);
	if (cur_ng == p_ng) {
		imp = taskimp + task_weight(cur, env->src_nid, dist) -
		      task_weight(cur, env->dst_nid, dist);
		/*
		 * Add some hysteresis to prevent swapping the
		 * tasks within a group over tiny differences.
		 */
		if (cur_ng)
			imp -= imp / 16;
	} else {
		/*
		 * Compare the group weights. If a task is all by itself
		 * (not part of a group), use the task weight instead.
		 */
		if (cur_ng && p_ng)
			imp += group_weight(cur, env->src_nid, dist) -
			       group_weight(cur, env->dst_nid, dist);
		else
			imp += task_weight(cur, env->src_nid, dist) -
			       task_weight(cur, env->dst_nid, dist);
	}

	/* Discourage picking a task already on its preferred node */
	if (cur->numa_preferred_nid == env->dst_nid)
		imp -= imp / 16;

	/*
	 * Encourage picking a task that moves to its preferred node.
	 * This potentially makes imp larger than its maximum of
	 * 1998 (see SMALLIMP and task_weight for why) but in this
	 * case, it does not matter.
	 */
	if (cur->numa_preferred_nid == env->src_nid)
		imp += imp / 8;

	if (maymove && moveimp > imp && moveimp > env->best_imp) {
		imp = moveimp;
		cur = NULL;
		goto assign;
	}

	/*
	 * Prefer swapping with a task moving to its preferred node over a
	 * task that is not.
	 */
	if (env->best_task && cur->numa_preferred_nid == env->src_nid &&
	    env->best_task->numa_preferred_nid != env->src_nid) {
		goto assign;
	}

	/*
	 * If the NUMA importance is less than SMALLIMP,
	 * task migration might only result in ping pong
	 * of tasks and also hurt performance due to cache
	 * misses.
	 */
	if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 2)
		goto unlock;

	/*
	 * In the overloaded case, try and keep the load balanced.
	 */
	load = task_h_load(env->p) - task_h_load(cur);
	if (!load)
		goto assign;

	dst_load = env->dst_stats.load + load;
	src_load = env->src_stats.load - load;

	if (load_too_imbalanced(src_load, dst_load, env))
		goto unlock;

assign:
	/* Evaluate an idle CPU for a task numa move. */
	if (!cur) {
		int cpu = env->dst_stats.idle_cpu;

		/* Nothing cached, so the current CPU went idle since the search. */
		if (cpu < 0)
			cpu = env->dst_cpu;

		/*
		 * If the CPU is no longer truly idle and the previous best CPU
		 * is, keep using it.
		 */
		if (!idle_cpu(cpu) && env->best_cpu >= 0 &&
		    idle_cpu(env->best_cpu)) {
			cpu = env->best_cpu;
		}

		env->dst_cpu = cpu;
	}

	task_numa_assign(env, cur, imp);

	/*
	 * If a move to idle is allowed because there is capacity or load
	 * balance improves then stop the search. While a better swap
	 * candidate may exist, a search is not free.
	 */
	if (maymove && !cur && env->best_cpu >= 0 && idle_cpu(env->best_cpu))
		stopsearch = true;

	/*
	 * If a swap candidate must be identified and the current best task
	 * moves to its preferred node then stop the search.
	 */
	if (!maymove && env->best_task &&
	    env->best_task->numa_preferred_nid == env->src_nid) {
		stopsearch = true;
	}
unlock:
	rcu_read_unlock();

	return stopsearch;
}

static void task_numa_find_cpu(struct task_numa_env *env,
			       long taskimp, long groupimp)
{
	bool maymove = false;
	int cpu;

	/*
	 * If the dst node has spare capacity, then check if there is an
	 * imbalance that would be overruled by the load balancer.
	 */
	if (env->dst_stats.node_type == node_has_spare) {
		unsigned int imbalance;
		int src_running, dst_running;

		/*
		 * Would movement cause an imbalance? Note that if src has
		 * more running tasks, the imbalance is ignored as the
		 * move improves the imbalance from the perspective of the
		 * CPU load balancer.
		 */
		src_running = env->src_stats.nr_running - 1;
		dst_running = env->dst_stats.nr_running + 1;
		imbalance = max(0, dst_running - src_running);
		imbalance = adjust_numa_imbalance(imbalance, dst_running);

		/* Use an idle CPU if there is no imbalance */
		if (!imbalance) {
			maymove = true;
			if (env->dst_stats.idle_cpu >= 0) {
				env->dst_cpu = env->dst_stats.idle_cpu;
				task_numa_assign(env, NULL, 0);
				return;
			}
		}
	} else {
		long src_load, dst_load, load;
		/*
		 * If the improvement from just moving env->p is better
		 * than swapping tasks around, check if a move is possible.
		 */
		load = task_h_load(env->p);
		dst_load = env->dst_stats.load + load;
		src_load = env->src_stats.load - load;
		maymove = !load_too_imbalanced(src_load, dst_load, env);
	}

	for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
		/* Skip this CPU if the source task cannot migrate */
		if (!cpumask_test_cpu(cpu, env->p->cpus_ptr))
			continue;

		env->dst_cpu = cpu;
		if (task_numa_compare(env, taskimp, groupimp, maymove))
			break;
	}
}
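
/*
 * Worked example for the spare-capacity path above, with made-up
 * counts: src node running 5 tasks, dst node running 3. Moving env->p
 * is evaluated as src_running = 5 - 1 = 4 and dst_running = 3 + 1 = 4,
 * so imbalance = max(0, 4 - 4) = 0 and an idle destination CPU may be
 * used directly; any residual imbalance is left for
 * adjust_numa_imbalance() (defined later in this file) to judge.
 */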

static int task_numa_migrate(struct task_struct *p)
{
	struct task_numa_env env = {
		.p = p,

		.src_cpu = task_cpu(p),
		.src_nid = task_node(p),

		.imbalance_pct = 112,

		.best_task = NULL,
		.best_imp = 0,
		.best_cpu = -1,
	};
	unsigned long taskweight, groupweight;
	struct sched_domain *sd;
	long taskimp, groupimp;
	struct numa_group *ng;
	struct rq *best_rq;
	int nid, ret, dist;

	/*
	 * Pick the lowest SD_NUMA domain, as that would have the smallest
	 * imbalance and would be the first to start moving tasks about.
	 *
	 * And we want to avoid any moving of tasks about, as that would
	 * create random movement of tasks -- countering the numa conditions
	 * we're trying to satisfy here.
	 */
	rcu_read_lock();
	sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
	if (sd)
		env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
	rcu_read_unlock();

	/*
	 * Cpusets can break the scheduler domain tree into smaller
	 * balance domains, some of which do not cross NUMA boundaries.
	 * Tasks that are "trapped" in such domains cannot be migrated
	 * elsewhere, so there is no point in (re)trying.
	 */
	if (unlikely(!sd)) {
		sched_setnuma(p, task_node(p));
		return -EINVAL;
	}

	env.dst_nid = p->numa_preferred_nid;
	dist = env.dist = node_distance(env.src_nid, env.dst_nid);
	taskweight = task_weight(p, env.src_nid, dist);
	groupweight = group_weight(p, env.src_nid, dist);
	update_numa_stats(&env, &env.src_stats, env.src_nid, false);
	taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
	groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
	update_numa_stats(&env, &env.dst_stats, env.dst_nid, true);

	/* Try to find a spot on the preferred nid. */
	task_numa_find_cpu(&env, taskimp, groupimp);

	/*
	 * Look at other nodes in these cases:
	 * - there is no space available on the preferred_nid
	 * - the task is part of a numa_group that is interleaved across
	 *   multiple NUMA nodes; in order to better consolidate the group,
	 *   we need to check other locations.
	 */
	ng = deref_curr_numa_group(p);
	if (env.best_cpu == -1 || (ng && ng->active_nodes > 1)) {
		for_each_online_node(nid) {
			if (nid == env.src_nid || nid == p->numa_preferred_nid)
				continue;

			dist = node_distance(env.src_nid, env.dst_nid);
			if (sched_numa_topology_type == NUMA_BACKPLANE &&
			    dist != env.dist) {
				taskweight = task_weight(p, env.src_nid, dist);
				groupweight = group_weight(p, env.src_nid, dist);
			}

			/* Only consider nodes where both task and group benefit */
			taskimp = task_weight(p, nid, dist) - taskweight;
			groupimp = group_weight(p, nid, dist) - groupweight;
			if (taskimp < 0 && groupimp < 0)
				continue;

			env.dist = dist;
			env.dst_nid = nid;
			update_numa_stats(&env, &env.dst_stats, env.dst_nid, true);
			task_numa_find_cpu(&env, taskimp, groupimp);
		}
	}

	/*
	 * If the task is part of a workload that spans multiple NUMA nodes,
	 * and is migrating into one of the workload's active nodes, remember
	 * this node as the task's preferred numa node, so the workload can
	 * settle down.
	 * A task that migrated to a second choice node will be better off
	 * trying for a better one later. Do not set the preferred node here.
	 */
	if (ng) {
		if (env.best_cpu == -1)
			nid = env.src_nid;
		else
			nid = cpu_to_node(env.best_cpu);

		if (nid != p->numa_preferred_nid)
			sched_setnuma(p, nid);
	}

	/* No better CPU than the current one was found. */
	if (env.best_cpu == -1) {
		trace_sched_stick_numa(p, env.src_cpu, NULL, -1);
		return -EAGAIN;
	}

	best_rq = cpu_rq(env.best_cpu);
	if (env.best_task == NULL) {
		ret = migrate_task_to(p, env.best_cpu);
		WRITE_ONCE(best_rq->numa_migrate_on, 0);
		if (ret != 0)
			trace_sched_stick_numa(p, env.src_cpu, NULL, env.best_cpu);
		return ret;
	}

	ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu);
	WRITE_ONCE(best_rq->numa_migrate_on, 0);

	if (ret != 0)
		trace_sched_stick_numa(p, env.src_cpu, env.best_task, env.best_cpu);
	put_task_struct(env.best_task);
	return ret;
}

/* Attempt to migrate a task to a CPU on the preferred node. */
static void numa_migrate_preferred(struct task_struct *p)
{
	unsigned long interval = HZ;

	/* This task has no NUMA fault statistics yet */
	if (unlikely(p->numa_preferred_nid == NUMA_NO_NODE || !p->numa_faults))
		return;

	/* Periodically retry migrating the task to the preferred node */
	interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
	p->numa_migrate_retry = jiffies + interval;

	/* Success if the task is already running on the preferred CPU */
	if (task_node(p) == p->numa_preferred_nid)
		return;

	/* Otherwise, try to migrate to a CPU on the preferred node */
	task_numa_migrate(p);
}
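
/*
 * Worked example for the retry interval, assuming HZ = 250 and a
 * numa_scan_period of 1000ms: interval starts at HZ (one second) and
 * is capped at msecs_to_jiffies(1000) / 16 = 250 / 16 = 15 jiffies,
 * i.e. a retry roughly every 60ms instead of every second.
 */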

/*
 * Find out how many nodes the workload is actively running on. Do this by
 * tracking the nodes from which NUMA hinting faults are triggered. This can
 * be different from the set of nodes where the workload's memory is currently
 * located.
 */
static void numa_group_count_active_nodes(struct numa_group *numa_group)
{
	unsigned long faults, max_faults = 0;
	int nid, active_nodes = 0;

	for_each_online_node(nid) {
		faults = group_faults_cpu(numa_group, nid);
		if (faults > max_faults)
			max_faults = faults;
	}

	for_each_online_node(nid) {
		faults = group_faults_cpu(numa_group, nid);
		if (faults * ACTIVE_NODE_FRACTION > max_faults)
			active_nodes++;
	}

	numa_group->max_faults_cpu = max_faults;
	numa_group->active_nodes = active_nodes;
}
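
/*
 * Worked example, assuming ACTIVE_NODE_FRACTION = 3 (its value
 * elsewhere in this file) and made-up per-node CPU faults of
 * {120, 50, 30, 10}: max_faults = 120 and a node counts as active
 * when faults * 3 > 120, i.e. faults > 40, so the first two nodes
 * qualify and active_nodes = 2.
 */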

/*
 * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
 * increments. The more local the fault statistics are, the higher the scan
 * period will be for the next scan window. If local/(local+remote) ratio is
 * below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS)
 * the scan period will decrease. Aim for 70% local accesses.
 */
#define NUMA_PERIOD_SLOTS 10
#define NUMA_PERIOD_THRESHOLD 7

/*
 * Increase the scan period (slow down scanning) if the majority of
 * our memory is already on our local node, or if the majority of
 * the page accesses are shared with other processes.
 * Otherwise, decrease the scan period.
 */
static void update_task_scan_period(struct task_struct *p,
				    unsigned long shared, unsigned long private)
{
	unsigned int period_slot;
	int lr_ratio, ps_ratio;
	int diff;

	unsigned long remote = p->numa_faults_locality[0];
	unsigned long local = p->numa_faults_locality[1];

	/*
	 * If there were no recorded hinting faults then either the task is
	 * completely idle or all activity is in areas that are not of interest
	 * to automatic numa balancing. Related to that, if there were failed
	 * migrations then it implies we are migrating too quickly or the local
	 * node is overloaded. In either case, scan slower.
	 */
	if (local + shared == 0 || p->numa_faults_locality[2]) {
		p->numa_scan_period = min(p->numa_scan_period_max,
			p->numa_scan_period << 1);

		p->mm->numa_next_scan = jiffies +
			msecs_to_jiffies(p->numa_scan_period);

		return;
	}

	/*
	 * Prepare to scale scan period relative to the current period.
	 *	 == NUMA_PERIOD_THRESHOLD scan period stays the same
	 *	 <  NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
	 *	 >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
	 */
	period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
	lr_ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
	ps_ratio = (private * NUMA_PERIOD_SLOTS) / (private + shared);

	if (ps_ratio >= NUMA_PERIOD_THRESHOLD) {
		/*
		 * Most memory accesses are local. There is no need to
		 * do fast NUMA scanning, since memory is already local.
		 */
		int slot = ps_ratio - NUMA_PERIOD_THRESHOLD;
		if (!slot)
			slot = 1;
		diff = slot * period_slot;
	} else if (lr_ratio >= NUMA_PERIOD_THRESHOLD) {
		/*
		 * Most memory accesses are shared with other tasks.
		 * There is no point in continuing fast NUMA scanning,
		 * since other tasks may just move the memory elsewhere.
		 */
		int slot = lr_ratio - NUMA_PERIOD_THRESHOLD;
		if (!slot)
			slot = 1;
		diff = slot * period_slot;
	} else {
		/*
		 * Private memory faults exceed (SLOTS-THRESHOLD)/SLOTS,
		 * yet they are not on the local NUMA node. Speed up
		 * NUMA scanning to get the memory moved over.
		 */
		int ratio = max(lr_ratio, ps_ratio);
		diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
	}

	p->numa_scan_period = clamp(p->numa_scan_period + diff,
				    task_scan_min(p), task_scan_max(p));
	memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
}
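
/*
 * Worked example, with made-up counters: with a current scan period
 * of 1000, period_slot = DIV_ROUND_UP(1000, 10) = 100. Local/remote
 * faults of 3/7 give lr_ratio = 3 and private/shared faults of 2/8
 * give ps_ratio = 2; both are below NUMA_PERIOD_THRESHOLD, so
 * diff = -(7 - 3) * 100 = -400 and the period shrinks to 600 (subject
 * to task_scan_min/max): mostly-remote, mostly-private faults speed
 * scanning up.
 */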

/*
 * Get the fraction of time the task has been running since the last
 * NUMA placement cycle. The scheduler keeps similar statistics, but
 * decays those on a 32ms period, which is orders of magnitude off
 * from the dozens-of-seconds NUMA balancing period. Use the scheduler
 * stats only if the task is so new there are no NUMA statistics yet.
 */
static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
{
	u64 runtime, delta, now;
	/* Use the start of this time slice to avoid calculations. */
	now = p->se.exec_start;
	runtime = p->se.sum_exec_runtime;

	if (p->last_task_numa_placement) {
		delta = runtime - p->last_sum_exec_runtime;
		*period = now - p->last_task_numa_placement;

		/* Avoid time going backwards, prevent potential divide error: */
		if (unlikely((s64)*period < 0))
			*period = 0;
	} else {
		delta = p->se.avg.load_sum;
		*period = LOAD_AVG_MAX;
	}

	p->last_sum_exec_runtime = runtime;
	p->last_task_numa_placement = now;

	return delta;
}

/*
 * Determine the preferred nid for a task in a numa_group. This needs to
 * be done in a way that produces consistent results with group_weight,
 * otherwise workloads might not converge.
 */
static int preferred_group_nid(struct task_struct *p, int nid)
{
	nodemask_t nodes;
	int dist;

	/* Direct connections between all NUMA nodes. */
	if (sched_numa_topology_type == NUMA_DIRECT)
		return nid;

	/*
	 * On a system with glueless mesh NUMA topology, group_weight
	 * scores nodes according to the number of NUMA hinting faults on
	 * both the node itself, and on nearby nodes.
	 */
	if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
		unsigned long score, max_score = 0;
		int node, max_node = nid;

		dist = sched_max_numa_distance;

		for_each_online_node(node) {
			score = group_weight(p, node, dist);
			if (score > max_score) {
				max_score = score;
				max_node = node;
			}
		}
		return max_node;
	}

	/*
	 * Finding the preferred nid in a system with NUMA backplane
	 * interconnect topology is more involved. The goal is to locate
	 * tasks from numa_groups near each other in the system, and
	 * untangle workloads from different sides of the system. This requires
	 * searching down the hierarchy of node groups, recursively searching
	 * inside the highest scoring group of nodes. The nodemask tricks
	 * keep the complexity of the search down.
	 */
	nodes = node_online_map;
	for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
		unsigned long max_faults = 0;
		nodemask_t max_group = NODE_MASK_NONE;
		int a, b;

		/* Are there nodes at this distance from each other? */
		if (!find_numa_distance(dist))
			continue;

		for_each_node_mask(a, nodes) {
			unsigned long faults = 0;
			nodemask_t this_group;
			nodes_clear(this_group);

			/* Sum group's NUMA faults; includes a==b case. */
			for_each_node_mask(b, nodes) {
				if (node_distance(a, b) < dist) {
					faults += group_faults(p, b);
					node_set(b, this_group);
					node_clear(b, nodes);
				}
			}

			/* Remember the top group. */
			if (faults > max_faults) {
				max_faults = faults;
				max_group = this_group;
				/*
				 * subtle: at the smallest distance there is
				 * just one node left in each "group", the
				 * winner is the preferred nid.
				 */
				nid = a;
			}
		}
		/* Next round, evaluate the nodes within max_group. */
		if (!max_faults)
			break;
		nodes = max_group;
	}
	return nid;
}
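
/*
 * Sketch of the backplane search above, with a made-up topology of
 * four nodes where node_distance() is 10 on-node, 20 within a pair
 * and 40 across pairs: the dist = 40 pass groups {0,1} against {2,3}
 * and keeps the pair with more group faults; the dist = 20 pass then
 * splits the surviving pair into single nodes, and the single node
 * with the most faults becomes the preferred nid.
 */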

static void task_numa_placement(struct task_struct *p)
{
	int seq, nid, max_nid = NUMA_NO_NODE;
	unsigned long max_faults = 0;
	unsigned long fault_types[2] = { 0, 0 };
	unsigned long total_faults;
	u64 runtime, period;
	spinlock_t *group_lock = NULL;
	struct numa_group *ng;

	/*
	 * The p->mm->numa_scan_seq field gets updated without
	 * exclusive access. Use READ_ONCE() here to ensure
	 * that the field is read in a single access:
	 */
	seq = READ_ONCE(p->mm->numa_scan_seq);
	if (p->numa_scan_seq == seq)
		return;
	p->numa_scan_seq = seq;
	p->numa_scan_period_max = task_scan_max(p);

	total_faults = p->numa_faults_locality[0] +
		       p->numa_faults_locality[1];
	runtime = numa_get_avg_runtime(p, &period);

	/* If the task is part of a group, prevent parallel updates to group stats */
	ng = deref_curr_numa_group(p);
	if (ng) {
		group_lock = &ng->lock;
		spin_lock_irq(group_lock);
	}

	/* Find the node with the highest number of faults */
	for_each_online_node(nid) {
		/* Keep track of the offsets in numa_faults array */
		int mem_idx, membuf_idx, cpu_idx, cpubuf_idx;
		unsigned long faults = 0, group_faults = 0;
		int priv;

		for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
			long diff, f_diff, f_weight;

			mem_idx = task_faults_idx(NUMA_MEM, nid, priv);
			membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv);
			cpu_idx = task_faults_idx(NUMA_CPU, nid, priv);
			cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv);

			/* Decay existing window, copy faults since last scan */
			diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2;
			fault_types[priv] += p->numa_faults[membuf_idx];
			p->numa_faults[membuf_idx] = 0;

			/*
			 * Normalize the faults_from, so all tasks in a group
			 * count according to CPU use, instead of by the raw
			 * number of faults. Tasks with little runtime have
			 * little over-all impact on throughput, and thus their
			 * faults are less important.
			 */
			f_weight = div64_u64(runtime << 16, period + 1);
			f_weight = (f_weight * p->numa_faults[cpubuf_idx]) /
				   (total_faults + 1);
			f_diff = f_weight - p->numa_faults[cpu_idx] / 2;
			p->numa_faults[cpubuf_idx] = 0;

			p->numa_faults[mem_idx] += diff;
			p->numa_faults[cpu_idx] += f_diff;
			faults += p->numa_faults[mem_idx];
			p->total_numa_faults += diff;
			if (ng) {
				/*
				 * safe because we can only change our own group
				 *
				 * mem_idx represents the offset for a given
				 * nid and priv in a specific region because it
				 * is at the beginning of the numa_faults array.
				 */
				ng->faults[mem_idx] += diff;
				ng->faults_cpu[mem_idx] += f_diff;
				ng->total_faults += diff;
				group_faults += ng->faults[mem_idx];
			}
		}

		if (!ng) {
			if (faults > max_faults) {
				max_faults = faults;
				max_nid = nid;
			}
		} else if (group_faults > max_faults) {
			max_faults = group_faults;
			max_nid = nid;
		}
	}

	if (ng) {
		numa_group_count_active_nodes(ng);
		spin_unlock_irq(group_lock);
		max_nid = preferred_group_nid(p, max_nid);
	}

	if (max_faults) {
		/* Set the new preferred node */
		if (max_nid != p->numa_preferred_nid)
			sched_setnuma(p, max_nid);
	}

	update_task_scan_period(p, fault_types[0], fault_types[1]);
}
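
/*
 * Worked example of the fault decay above, with made-up numbers: if
 * the stable counter numa_faults[mem_idx] holds 80 and the scan
 * buffer numa_faults[membuf_idx] collected 20 new faults, then
 * diff = 20 - 80 / 2 = -20 and the counter settles at 60. Each scan
 * window halves the old history and folds in the new samples -- a
 * simple exponential decay.
 */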

static inline int get_numa_group(struct numa_group *grp)
{
	return refcount_inc_not_zero(&grp->refcount);
}

static inline void put_numa_group(struct numa_group *grp)
{
	if (refcount_dec_and_test(&grp->refcount))
		kfree_rcu(grp, rcu);
}

static void task_numa_group(struct task_struct *p, int cpupid, int flags,
			    int *priv)
{
	struct numa_group *grp, *my_grp;
	struct task_struct *tsk;
	bool join = false;
	int cpu = cpupid_to_cpu(cpupid);
	int i;

	if (unlikely(!deref_curr_numa_group(p))) {
		unsigned int size = sizeof(struct numa_group) +
				    4*nr_node_ids*sizeof(unsigned long);

		grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
		if (!grp)
			return;

		refcount_set(&grp->refcount, 1);
		grp->active_nodes = 1;
		grp->max_faults_cpu = 0;
		spin_lock_init(&grp->lock);
		grp->gid = p->pid;
		/* Second half of the array tracks nids where faults happen */
		grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
						nr_node_ids;

		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
			grp->faults[i] = p->numa_faults[i];

		grp->total_faults = p->total_numa_faults;

		grp->nr_tasks++;
		rcu_assign_pointer(p->numa_group, grp);
	}

	rcu_read_lock();
	tsk = READ_ONCE(cpu_rq(cpu)->curr);

	if (!cpupid_match_pid(tsk, cpupid))
		goto no_join;

	grp = rcu_dereference(tsk->numa_group);
	if (!grp)
		goto no_join;

	my_grp = deref_curr_numa_group(p);
	if (grp == my_grp)
		goto no_join;

	/*
	 * Only join the other group if it's bigger; if we're the bigger group,
	 * the other task will join us.
	 */
	if (my_grp->nr_tasks > grp->nr_tasks)
		goto no_join;

	/*
	 * Tie-break on the grp address.
	 */
	if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
		goto no_join;

	/* Always join threads in the same process. */
	if (tsk->mm == current->mm)
		join = true;

	/* Simple filter to avoid false positives due to PID collisions */
	if (flags & TNF_SHARED)
		join = true;

	/* Update priv based on whether false sharing was detected */
	*priv = !join;

	if (join && !get_numa_group(grp))
		goto no_join;

	rcu_read_unlock();

	if (!join)
		return;

	BUG_ON(irqs_disabled());
	double_lock_irq(&my_grp->lock, &grp->lock);

	for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
		my_grp->faults[i] -= p->numa_faults[i];
		grp->faults[i] += p->numa_faults[i];
	}
	my_grp->total_faults -= p->total_numa_faults;
	grp->total_faults += p->total_numa_faults;

	my_grp->nr_tasks--;
	grp->nr_tasks++;

	spin_unlock(&my_grp->lock);
	spin_unlock_irq(&grp->lock);

	rcu_assign_pointer(p->numa_group, grp);

	put_numa_group(my_grp);
	return;

no_join:
	rcu_read_unlock();
	return;
}

/*
 * Get rid of NUMA statistics associated with a task (either current or dead).
 * If @final is set, the task is dead and has reached refcount zero, so we can
 * safely free all relevant data structures. Otherwise, there might be
 * concurrent reads from places like load balancing and procfs, and we should
 * reset the data back to default state without freeing ->numa_faults.
 */
void task_numa_free(struct task_struct *p, bool final)
{
	/* safe: p either is current or is being freed by current */
	struct numa_group *grp = rcu_dereference_raw(p->numa_group);
	unsigned long *numa_faults = p->numa_faults;
	unsigned long flags;
	int i;

	if (!numa_faults)
		return;

	if (grp) {
		spin_lock_irqsave(&grp->lock, flags);
		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
			grp->faults[i] -= p->numa_faults[i];
		grp->total_faults -= p->total_numa_faults;

		grp->nr_tasks--;
		spin_unlock_irqrestore(&grp->lock, flags);
		RCU_INIT_POINTER(p->numa_group, NULL);
		put_numa_group(grp);
	}

	if (final) {
		p->numa_faults = NULL;
		kfree(numa_faults);
	} else {
		p->total_numa_faults = 0;
		for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
			numa_faults[i] = 0;
	}
}
| 2632 | |
| 2633 | /* |
| 2634 | * Got a PROT_NONE fault for a page on @node. |
| 2635 | */ |
| 2636 | void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags) |
| 2637 | { |
| 2638 | struct task_struct *p = current; |
| 2639 | bool migrated = flags & TNF_MIGRATED; |
| 2640 | int cpu_node = task_node(current); |
| 2641 | int local = !!(flags & TNF_FAULT_LOCAL); |
| 2642 | struct numa_group *ng; |
| 2643 | int priv; |
| 2644 | |
| 2645 | if (!static_branch_likely(&sched_numa_balancing)) |
| 2646 | return; |
| 2647 | |
| 2648 | /* for example, ksmd faulting in a user's mm */ |
| 2649 | if (!p->mm) |
| 2650 | return; |
| 2651 | |
| 2652 | /* Allocate buffer to track faults on a per-node basis */ |
| 2653 | if (unlikely(!p->numa_faults)) { |
| 2654 | int size = sizeof(*p->numa_faults) * |
| 2655 | NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids; |
| 2656 | |
| 2657 | p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN); |
| 2658 | if (!p->numa_faults) |
| 2659 | return; |
| 2660 | |
| 2661 | p->total_numa_faults = 0; |
| 2662 | memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality)); |
| 2663 | } |
| 2664 | |
| 2665 | /* |
| 2666 | * First accesses are treated as private, otherwise consider accesses |
| 2667 | * to be private if the accessing pid has not changed |
| 2668 | */ |
| 2669 | if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) { |
| 2670 | priv = 1; |
| 2671 | } else { |
| 2672 | priv = cpupid_match_pid(p, last_cpupid); |
| 2673 | if (!priv && !(flags & TNF_NO_GROUP)) |
| 2674 | task_numa_group(p, last_cpupid, flags, &priv); |
| 2675 | } |
| 2676 | |
| 2677 | /* |
| 2678 | * If a workload spans multiple NUMA nodes, a shared fault that |
| 2679 | * occurs wholly within the set of nodes that the workload is |
| 2680 | * actively using should be counted as local. This allows the |
| 2681 | * scan rate to slow down when a workload has settled down. |
| 2682 | */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2683 | ng = deref_curr_numa_group(p); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2684 | if (!priv && !local && ng && ng->active_nodes > 1 && |
| 2685 | numa_is_active_node(cpu_node, ng) && |
| 2686 | numa_is_active_node(mem_node, ng)) |
| 2687 | local = 1; |
| 2688 | |
| 2689 | /* |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2690 | 	 * Periodically retry migrating the task to its preferred node, in case a |
|  | 2691 | 	 * previous attempt failed or the scheduler moved us. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2692 | */ |
| 2693 | if (time_after(jiffies, p->numa_migrate_retry)) { |
| 2694 | task_numa_placement(p); |
| 2695 | numa_migrate_preferred(p); |
| 2696 | } |
| 2697 | |
| 2698 | if (migrated) |
| 2699 | p->numa_pages_migrated += pages; |
| 2700 | if (flags & TNF_MIGRATE_FAIL) |
| 2701 | p->numa_faults_locality[2] += pages; |
| 2702 | |
| 2703 | p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages; |
| 2704 | p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages; |
| 2705 | p->numa_faults_locality[local] += pages; |
| 2706 | } |
| 2707 | |
| 2708 | static void reset_ptenuma_scan(struct task_struct *p) |
| 2709 | { |
| 2710 | /* |
|  | 2711 | 	 * We only did a read acquisition of the mmap lock, so |
| 2712 | * p->mm->numa_scan_seq is written to without exclusive access |
| 2713 | * and the update is not guaranteed to be atomic. That's not |
| 2714 | * much of an issue though, since this is just used for |
|  | 2715 | 	 * statistical sampling. Use READ_ONCE()/WRITE_ONCE(), which are not |
|  | 2716 | 	 * expensive, to avoid load/store tearing and similar compiler optimizations: |
| 2717 | */ |
| 2718 | WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1); |
| 2719 | p->mm->numa_scan_offset = 0; |
| 2720 | } |
| 2721 | |
| 2722 | /* |
| 2723 | * The expensive part of numa migration is done from task_work context. |
| 2724 | * Triggered from task_tick_numa(). |
| 2725 | */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2726 | static void task_numa_work(struct callback_head *work) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2727 | { |
| 2728 | unsigned long migrate, next_scan, now = jiffies; |
| 2729 | struct task_struct *p = current; |
| 2730 | struct mm_struct *mm = p->mm; |
| 2731 | u64 runtime = p->se.sum_exec_runtime; |
| 2732 | struct vm_area_struct *vma; |
| 2733 | unsigned long start, end; |
| 2734 | unsigned long nr_pte_updates = 0; |
| 2735 | long pages, virtpages; |
| 2736 | |
| 2737 | SCHED_WARN_ON(p != container_of(work, struct task_struct, numa_work)); |
| 2738 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2739 | work->next = work; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2740 | /* |
| 2741 | * Who cares about NUMA placement when they're dying. |
| 2742 | * |
| 2743 | * NOTE: make sure not to dereference p->mm before this check, |
| 2744 | * exit_task_work() happens _after_ exit_mm() so we could be called |
| 2745 | * without p->mm even though we still had it when we enqueued this |
| 2746 | * work. |
| 2747 | */ |
| 2748 | if (p->flags & PF_EXITING) |
| 2749 | return; |
| 2750 | |
| 2751 | if (!mm->numa_next_scan) { |
| 2752 | mm->numa_next_scan = now + |
| 2753 | msecs_to_jiffies(sysctl_numa_balancing_scan_delay); |
| 2754 | } |
| 2755 | |
| 2756 | /* |
| 2757 | * Enforce maximal scan/migration frequency.. |
| 2758 | */ |
| 2759 | migrate = mm->numa_next_scan; |
| 2760 | if (time_before(now, migrate)) |
| 2761 | return; |
| 2762 | |
| 2763 | if (p->numa_scan_period == 0) { |
| 2764 | p->numa_scan_period_max = task_scan_max(p); |
| 2765 | p->numa_scan_period = task_scan_start(p); |
| 2766 | } |
| 2767 | |
| 2768 | next_scan = now + msecs_to_jiffies(p->numa_scan_period); |
| 2769 | if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate) |
| 2770 | return; |
| 2771 | |
| 2772 | /* |
| 2773 | * Delay this task enough that another task of this mm will likely win |
| 2774 | * the next time around. |
| 2775 | */ |
| 2776 | p->node_stamp += 2 * TICK_NSEC; |
| 2777 | |
| 2778 | start = mm->numa_scan_offset; |
| 2779 | pages = sysctl_numa_balancing_scan_size; |
| 2780 | pages <<= 20 - PAGE_SHIFT; /* MB in pages */ |
| 2781 | virtpages = pages * 8; /* Scan up to this much virtual space */ |
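/*
 * Worked example (editor's sketch, assuming the default
 * sysctl_numa_balancing_scan_size of 256MB and 4KiB pages):
 * pages = 256 << (20 - 12) = 65536 pages get their protections
 * changed per pass, while up to virtpages = 8 * 65536 = 524288
 * pages of virtual address space may be walked to find them.
 */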
| 2782 | if (!pages) |
| 2783 | return; |
| 2784 | |
| 2785 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 2786 | if (!mmap_read_trylock(mm)) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2787 | return; |
| 2788 | vma = find_vma(mm, start); |
| 2789 | if (!vma) { |
| 2790 | reset_ptenuma_scan(p); |
| 2791 | start = 0; |
| 2792 | vma = mm->mmap; |
| 2793 | } |
| 2794 | for (; vma; vma = vma->vm_next) { |
| 2795 | if (!vma_migratable(vma) || !vma_policy_mof(vma) || |
| 2796 | is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) { |
| 2797 | continue; |
| 2798 | } |
| 2799 | |
| 2800 | /* |
| 2801 | * Shared library pages mapped by multiple processes are not |
| 2802 | * migrated as it is expected they are cache replicated. Avoid |
| 2803 | * hinting faults in read-only file-backed mappings or the vdso |
| 2804 | * as migrating the pages will be of marginal benefit. |
| 2805 | */ |
| 2806 | if (!vma->vm_mm || |
| 2807 | (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ))) |
| 2808 | continue; |
| 2809 | |
| 2810 | /* |
| 2811 | * Skip inaccessible VMAs to avoid any confusion between |
| 2812 | * PROT_NONE and NUMA hinting ptes |
| 2813 | */ |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 2814 | if (!vma_is_accessible(vma)) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2815 | continue; |
| 2816 | |
| 2817 | do { |
| 2818 | start = max(start, vma->vm_start); |
| 2819 | end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE); |
| 2820 | end = min(end, vma->vm_end); |
| 2821 | nr_pte_updates = change_prot_numa(vma, start, end); |
| 2822 | |
| 2823 | /* |
|  | 2824 | 			 * Try to scan sysctl_numa_balancing_scan_size worth of |
| 2825 | * hpages that have at least one present PTE that |
| 2826 | * is not already pte-numa. If the VMA contains |
| 2827 | * areas that are unused or already full of prot_numa |
| 2828 | * PTEs, scan up to virtpages, to skip through those |
| 2829 | * areas faster. |
| 2830 | */ |
| 2831 | if (nr_pte_updates) |
| 2832 | pages -= (end - start) >> PAGE_SHIFT; |
| 2833 | virtpages -= (end - start) >> PAGE_SHIFT; |
| 2834 | |
| 2835 | start = end; |
| 2836 | if (pages <= 0 || virtpages <= 0) |
| 2837 | goto out; |
| 2838 | |
| 2839 | cond_resched(); |
| 2840 | } while (end != vma->vm_end); |
| 2841 | } |
| 2842 | |
| 2843 | out: |
| 2844 | /* |
| 2845 | * It is possible to reach the end of the VMA list but the last few |
|  | 2846 | 	 * VMAs are not guaranteed to be vma_migratable(). If they are not, we |
| 2847 | * would find the !migratable VMA on the next scan but not reset the |
| 2848 | * scanner to the start so check it now. |
| 2849 | */ |
| 2850 | if (vma) |
| 2851 | mm->numa_scan_offset = start; |
| 2852 | else |
| 2853 | reset_ptenuma_scan(p); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 2854 | mmap_read_unlock(mm); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2855 | |
| 2856 | /* |
| 2857 | * Make sure tasks use at least 32x as much time to run other code |
| 2858 | * than they used here, to limit NUMA PTE scanning overhead to 3% max. |
| 2859 | * Usually update_task_scan_period slows down scanning enough; on an |
| 2860 | * overloaded system we need to limit overhead on a per task basis. |
| 2861 | */ |
| 2862 | if (unlikely(p->se.sum_exec_runtime != runtime)) { |
| 2863 | u64 diff = p->se.sum_exec_runtime - runtime; |
| 2864 | p->node_stamp += 32 * diff; |
| 2865 | } |
| 2866 | } |
| 2867 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2868 | void init_numa_balancing(unsigned long clone_flags, struct task_struct *p) |
| 2869 | { |
| 2870 | int mm_users = 0; |
| 2871 | struct mm_struct *mm = p->mm; |
| 2872 | |
| 2873 | if (mm) { |
| 2874 | mm_users = atomic_read(&mm->mm_users); |
| 2875 | if (mm_users == 1) { |
| 2876 | mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay); |
| 2877 | mm->numa_scan_seq = 0; |
| 2878 | } |
| 2879 | } |
| 2880 | p->node_stamp = 0; |
| 2881 | p->numa_scan_seq = mm ? mm->numa_scan_seq : 0; |
| 2882 | p->numa_scan_period = sysctl_numa_balancing_scan_delay; |
| 2883 | /* Protect against double add, see task_tick_numa and task_numa_work */ |
| 2884 | p->numa_work.next = &p->numa_work; |
| 2885 | p->numa_faults = NULL; |
| 2886 | RCU_INIT_POINTER(p->numa_group, NULL); |
| 2887 | p->last_task_numa_placement = 0; |
| 2888 | p->last_sum_exec_runtime = 0; |
| 2889 | |
| 2890 | init_task_work(&p->numa_work, task_numa_work); |
| 2891 | |
| 2892 | /* New address space, reset the preferred nid */ |
| 2893 | if (!(clone_flags & CLONE_VM)) { |
| 2894 | p->numa_preferred_nid = NUMA_NO_NODE; |
| 2895 | return; |
| 2896 | } |
| 2897 | |
| 2898 | /* |
| 2899 | * New thread, keep existing numa_preferred_nid which should be copied |
|  | 2900 | 	 * already by arch_dup_task_struct(), but stagger when the scans start. |
| 2901 | */ |
| 2902 | if (mm) { |
| 2903 | unsigned int delay; |
| 2904 | |
| 2905 | delay = min_t(unsigned int, task_scan_max(current), |
| 2906 | current->numa_scan_period * mm_users * NSEC_PER_MSEC); |
| 2907 | delay += 2 * TICK_NSEC; |
| 2908 | p->node_stamp = delay; |
| 2909 | } |
| 2910 | } |
| 2911 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2912 | /* |
| 2913 | * Drive the periodic memory faults.. |
| 2914 | */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2915 | static void task_tick_numa(struct rq *rq, struct task_struct *curr) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2916 | { |
| 2917 | struct callback_head *work = &curr->numa_work; |
| 2918 | u64 period, now; |
| 2919 | |
| 2920 | /* |
| 2921 | * We don't care about NUMA placement if we don't have memory. |
| 2922 | */ |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 2923 | if ((curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2924 | return; |
| 2925 | |
| 2926 | /* |
| 2927 | * Using runtime rather than walltime has the dual advantage that |
| 2928 | * we (mostly) drive the selection from busy threads and that the |
| 2929 | * task needs to have done some actual work before we bother with |
| 2930 | * NUMA placement. |
| 2931 | */ |
| 2932 | now = curr->se.sum_exec_runtime; |
| 2933 | period = (u64)curr->numa_scan_period * NSEC_PER_MSEC; |
| 2934 | |
| 2935 | if (now > curr->node_stamp + period) { |
| 2936 | if (!curr->node_stamp) |
| 2937 | curr->numa_scan_period = task_scan_start(curr); |
| 2938 | curr->node_stamp += period; |
| 2939 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2940 | if (!time_before(jiffies, curr->mm->numa_next_scan)) |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 2941 | task_work_add(curr, work, TWA_RESUME); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2942 | } |
| 2943 | } |
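/*
 * Timing sketch (editor's example): with numa_scan_period = 1000ms,
 * period is 1e9ns of *runtime*, so a thread that runs 10% of the
 * time queues task_numa_work() only about every 10s of wall clock:
 * busy threads drive the scanning, mostly-idle ones barely do.
 */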
| 2944 | |
| 2945 | static void update_scan_period(struct task_struct *p, int new_cpu) |
| 2946 | { |
| 2947 | int src_nid = cpu_to_node(task_cpu(p)); |
| 2948 | int dst_nid = cpu_to_node(new_cpu); |
| 2949 | |
| 2950 | if (!static_branch_likely(&sched_numa_balancing)) |
| 2951 | return; |
| 2952 | |
| 2953 | if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING)) |
| 2954 | return; |
| 2955 | |
| 2956 | if (src_nid == dst_nid) |
| 2957 | return; |
| 2958 | |
| 2959 | /* |
| 2960 | * Allow resets if faults have been trapped before one scan |
| 2961 | * has completed. This is most likely due to a new task that |
| 2962 | * is pulled cross-node due to wakeups or load balancing. |
| 2963 | */ |
| 2964 | if (p->numa_scan_seq) { |
| 2965 | /* |
| 2966 | * Avoid scan adjustments if moving to the preferred |
| 2967 | * node or if the task was not previously running on |
| 2968 | * the preferred node. |
| 2969 | */ |
| 2970 | if (dst_nid == p->numa_preferred_nid || |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 2971 | (p->numa_preferred_nid != NUMA_NO_NODE && |
| 2972 | src_nid != p->numa_preferred_nid)) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 2973 | return; |
| 2974 | } |
| 2975 | |
| 2976 | p->numa_scan_period = task_scan_start(p); |
| 2977 | } |
| 2978 | |
| 2979 | #else |
| 2980 | static void task_tick_numa(struct rq *rq, struct task_struct *curr) |
| 2981 | { |
| 2982 | } |
| 2983 | |
| 2984 | static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p) |
| 2985 | { |
| 2986 | } |
| 2987 | |
| 2988 | static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p) |
| 2989 | { |
| 2990 | } |
| 2991 | |
| 2992 | static inline void update_scan_period(struct task_struct *p, int new_cpu) |
| 2993 | { |
| 2994 | } |
| 2995 | |
| 2996 | #endif /* CONFIG_NUMA_BALANCING */ |
| 2997 | |
| 2998 | static void |
| 2999 | account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) |
| 3000 | { |
| 3001 | update_load_add(&cfs_rq->load, se->load.weight); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3002 | #ifdef CONFIG_SMP |
| 3003 | if (entity_is_task(se)) { |
| 3004 | struct rq *rq = rq_of(cfs_rq); |
| 3005 | |
| 3006 | account_numa_enqueue(rq, task_of(se)); |
| 3007 | list_add(&se->group_node, &rq->cfs_tasks); |
| 3008 | } |
| 3009 | #endif |
| 3010 | cfs_rq->nr_running++; |
| 3011 | } |
| 3012 | |
| 3013 | static void |
| 3014 | account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) |
| 3015 | { |
| 3016 | update_load_sub(&cfs_rq->load, se->load.weight); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3017 | #ifdef CONFIG_SMP |
| 3018 | if (entity_is_task(se)) { |
| 3019 | account_numa_dequeue(rq_of(cfs_rq), task_of(se)); |
| 3020 | list_del_init(&se->group_node); |
| 3021 | } |
| 3022 | #endif |
| 3023 | cfs_rq->nr_running--; |
| 3024 | } |
| 3025 | |
| 3026 | /* |
| 3027 | * Signed add and clamp on underflow. |
| 3028 | * |
| 3029 | * Explicitly do a load-store to ensure the intermediate value never hits |
| 3030 | * memory. This allows lockless observations without ever seeing the negative |
| 3031 | * values. |
| 3032 | */ |
| 3033 | #define add_positive(_ptr, _val) do { \ |
| 3034 | typeof(_ptr) ptr = (_ptr); \ |
| 3035 | typeof(_val) val = (_val); \ |
| 3036 | typeof(*ptr) res, var = READ_ONCE(*ptr); \ |
| 3037 | \ |
| 3038 | res = var + val; \ |
| 3039 | \ |
| 3040 | if (val < 0 && res > var) \ |
| 3041 | res = 0; \ |
| 3042 | \ |
| 3043 | WRITE_ONCE(*ptr, res); \ |
| 3044 | } while (0) |
| 3045 | |
| 3046 | /* |
| 3047 | * Unsigned subtract and clamp on underflow. |
| 3048 | * |
| 3049 | * Explicitly do a load-store to ensure the intermediate value never hits |
| 3050 | * memory. This allows lockless observations without ever seeing the negative |
| 3051 | * values. |
| 3052 | */ |
| 3053 | #define sub_positive(_ptr, _val) do { \ |
| 3054 | typeof(_ptr) ptr = (_ptr); \ |
| 3055 | typeof(*ptr) val = (_val); \ |
| 3056 | typeof(*ptr) res, var = READ_ONCE(*ptr); \ |
| 3057 | res = var - val; \ |
| 3058 | if (res > var) \ |
| 3059 | res = 0; \ |
| 3060 | WRITE_ONCE(*ptr, res); \ |
| 3061 | } while (0) |
| 3062 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3063 | /* |
| 3064 | * Remove and clamp on negative, from a local variable. |
| 3065 | * |
| 3066 | * A variant of sub_positive(), which does not use explicit load-store |
| 3067 | * and is thus optimized for local variable updates. |
| 3068 | */ |
| 3069 | #define lsub_positive(_ptr, _val) do { \ |
| 3070 | typeof(_ptr) ptr = (_ptr); \ |
| 3071 | *ptr -= min_t(typeof(*ptr), *ptr, _val); \ |
| 3072 | } while (0) |
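/*
 * Behaviour sketch for the helpers above (editor's example): with
 * a->load_avg == 5, sub_positive(&a->load_avg, 8) stores 0 instead
 * of wrapping to a huge unsigned value; since the subtraction is
 * computed in a local and published with WRITE_ONCE(), a lockless
 * reader observes either 5 or 0, never an underflowed intermediate.
 */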
| 3073 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3074 | #ifdef CONFIG_SMP |
| 3075 | static inline void |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3076 | enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) |
| 3077 | { |
| 3078 | cfs_rq->avg.load_avg += se->avg.load_avg; |
| 3079 | cfs_rq->avg.load_sum += se_weight(se) * se->avg.load_sum; |
| 3080 | } |
| 3081 | |
| 3082 | static inline void |
| 3083 | dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) |
| 3084 | { |
| 3085 | sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); |
| 3086 | sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum); |
| 3087 | } |
| 3088 | #else |
| 3089 | static inline void |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3090 | enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } |
| 3091 | static inline void |
| 3092 | dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { } |
| 3093 | #endif |
| 3094 | |
| 3095 | static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3096 | unsigned long weight) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3097 | { |
| 3098 | if (se->on_rq) { |
| 3099 | /* commit outstanding execution time */ |
| 3100 | if (cfs_rq->curr == se) |
| 3101 | update_curr(cfs_rq); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3102 | update_load_sub(&cfs_rq->load, se->load.weight); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3103 | } |
| 3104 | dequeue_load_avg(cfs_rq, se); |
| 3105 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3106 | update_load_set(&se->load, weight); |
| 3107 | |
| 3108 | #ifdef CONFIG_SMP |
| 3109 | do { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3110 | u32 divider = get_pelt_divider(&se->avg); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3111 | |
| 3112 | se->avg.load_avg = div_u64(se_weight(se) * se->avg.load_sum, divider); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3113 | } while (0); |
| 3114 | #endif |
| 3115 | |
| 3116 | enqueue_load_avg(cfs_rq, se); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3117 | if (se->on_rq) |
| 3118 | update_load_add(&cfs_rq->load, se->load.weight); |
| 3119 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3120 | } |
| 3121 | |
| 3122 | void reweight_task(struct task_struct *p, int prio) |
| 3123 | { |
| 3124 | struct sched_entity *se = &p->se; |
| 3125 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
| 3126 | struct load_weight *load = &se->load; |
| 3127 | unsigned long weight = scale_load(sched_prio_to_weight[prio]); |
| 3128 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3129 | reweight_entity(cfs_rq, se, weight); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3130 | load->inv_weight = sched_prio_to_wmult[prio]; |
| 3131 | } |
| 3132 | |
| 3133 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 3134 | #ifdef CONFIG_SMP |
| 3135 | /* |
| 3136 | * All this does is approximate the hierarchical proportion which includes that |
| 3137 | * global sum we all love to hate. |
| 3138 | * |
|  | 3139 |  * That is, the weight of a group entity is the proportional share of the |
| 3140 | * group weight based on the group runqueue weights. That is: |
| 3141 | * |
| 3142 | * tg->weight * grq->load.weight |
| 3143 | * ge->load.weight = ----------------------------- (1) |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 3144 | * \Sum grq->load.weight |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3145 | * |
|  | 3146 |  * Now, because that sum is prohibitively expensive to compute (been |
| 3147 | * there, done that) we approximate it with this average stuff. The average |
| 3148 | * moves slower and therefore the approximation is cheaper and more stable. |
| 3149 | * |
| 3150 | * So instead of the above, we substitute: |
| 3151 | * |
| 3152 | * grq->load.weight -> grq->avg.load_avg (2) |
| 3153 | * |
| 3154 | * which yields the following: |
| 3155 | * |
| 3156 | * tg->weight * grq->avg.load_avg |
| 3157 | * ge->load.weight = ------------------------------ (3) |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 3158 | * tg->load_avg |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3159 | * |
| 3160 | * Where: tg->load_avg ~= \Sum grq->avg.load_avg |
| 3161 | * |
| 3162 | * That is shares_avg, and it is right (given the approximation (2)). |
| 3163 | * |
| 3164 | * The problem with it is that because the average is slow -- it was designed |
| 3165 | * to be exactly that of course -- this leads to transients in boundary |
|  | 3166 |  * conditions. In particular, the case where the group was idle and we start |
| 3167 | * one task. It takes time for our CPU's grq->avg.load_avg to build up, |
| 3168 | * yielding bad latency etc.. |
| 3169 | * |
| 3170 | * Now, in that special case (1) reduces to: |
| 3171 | * |
| 3172 | * tg->weight * grq->load.weight |
| 3173 | * ge->load.weight = ----------------------------- = tg->weight (4) |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 3174 | * grp->load.weight |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3175 | * |
| 3176 | * That is, the sum collapses because all other CPUs are idle; the UP scenario. |
| 3177 | * |
| 3178 | * So what we do is modify our approximation (3) to approach (4) in the (near) |
| 3179 | * UP case, like: |
| 3180 | * |
| 3181 | * ge->load.weight = |
| 3182 | * |
| 3183 | * tg->weight * grq->load.weight |
| 3184 | * --------------------------------------------------- (5) |
| 3185 | * tg->load_avg - grq->avg.load_avg + grq->load.weight |
| 3186 | * |
| 3187 | * But because grq->load.weight can drop to 0, resulting in a divide by zero, |
| 3188 | * we need to use grq->avg.load_avg as its lower bound, which then gives: |
| 3189 | * |
| 3190 | * |
| 3191 | * tg->weight * grq->load.weight |
| 3192 | * ge->load.weight = ----------------------------- (6) |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 3193 | * tg_load_avg' |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3194 | * |
| 3195 | * Where: |
| 3196 | * |
| 3197 | * tg_load_avg' = tg->load_avg - grq->avg.load_avg + |
| 3198 | * max(grq->load.weight, grq->avg.load_avg) |
| 3199 | * |
| 3200 | * And that is shares_weight and is icky. In the (near) UP case it approaches |
| 3201 | * (4) while in the normal case it approaches (3). It consistently |
| 3202 | * overestimates the ge->load.weight and therefore: |
| 3203 | * |
| 3204 | * \Sum ge->load.weight >= tg->weight |
| 3205 | * |
| 3206 | * hence icky! |
| 3207 | */ |
| 3208 | static long calc_group_shares(struct cfs_rq *cfs_rq) |
| 3209 | { |
| 3210 | long tg_weight, tg_shares, load, shares; |
| 3211 | struct task_group *tg = cfs_rq->tg; |
| 3212 | |
| 3213 | tg_shares = READ_ONCE(tg->shares); |
| 3214 | |
| 3215 | load = max(scale_load_down(cfs_rq->load.weight), cfs_rq->avg.load_avg); |
| 3216 | |
| 3217 | tg_weight = atomic_long_read(&tg->load_avg); |
| 3218 | |
| 3219 | /* Ensure tg_weight >= load */ |
| 3220 | tg_weight -= cfs_rq->tg_load_avg_contrib; |
| 3221 | tg_weight += load; |
| 3222 | |
| 3223 | shares = (tg_shares * load); |
| 3224 | if (tg_weight) |
| 3225 | shares /= tg_weight; |
| 3226 | |
| 3227 | /* |
| 3228 | * MIN_SHARES has to be unscaled here to support per-CPU partitioning |
| 3229 | * of a group with small tg->shares value. It is a floor value which is |
| 3230 | * assigned as a minimum load.weight to the sched_entity representing |
| 3231 | * the group on a CPU. |
| 3232 | * |
| 3233 | * E.g. on 64-bit for a group with tg->shares of scale_load(15)=15*1024 |
| 3234 | * on an 8-core system with 8 tasks each runnable on one CPU shares has |
| 3235 | * to be 15*1024*1/8=1920 instead of scale_load(MIN_SHARES)=2*1024. In |
| 3236 | * case no task is runnable on a CPU MIN_SHARES=2 should be returned |
| 3237 | * instead of 0. |
| 3238 | */ |
| 3239 | return clamp_t(long, shares, MIN_SHARES, tg_shares); |
| 3240 | } |
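/*
 * Worked example (editor's sketch): with tg->shares = 1024, a local
 * load of 512, tg->load_avg = 1024 and a stale tg_load_avg_contrib
 * of 256, tg_weight = 1024 - 256 + 512 = 1280 and
 * shares = 1024 * 512 / 1280 = 409, then clamped to
 * [MIN_SHARES, 1024].
 */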
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3241 | #endif /* CONFIG_SMP */ |
| 3242 | |
| 3243 | static inline int throttled_hierarchy(struct cfs_rq *cfs_rq); |
| 3244 | |
| 3245 | /* |
| 3246 | * Recomputes the group entity based on the current state of its group |
| 3247 | * runqueue. |
| 3248 | */ |
| 3249 | static void update_cfs_group(struct sched_entity *se) |
| 3250 | { |
| 3251 | struct cfs_rq *gcfs_rq = group_cfs_rq(se); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3252 | long shares; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3253 | |
| 3254 | if (!gcfs_rq) |
| 3255 | return; |
| 3256 | |
| 3257 | if (throttled_hierarchy(gcfs_rq)) |
| 3258 | return; |
| 3259 | |
| 3260 | #ifndef CONFIG_SMP |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3261 | shares = READ_ONCE(gcfs_rq->tg->shares); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3262 | |
| 3263 | if (likely(se->load.weight == shares)) |
| 3264 | return; |
| 3265 | #else |
| 3266 | shares = calc_group_shares(gcfs_rq); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3267 | #endif |
| 3268 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3269 | reweight_entity(cfs_rq_of(se), se, shares); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3270 | } |
| 3271 | |
| 3272 | #else /* CONFIG_FAIR_GROUP_SCHED */ |
| 3273 | static inline void update_cfs_group(struct sched_entity *se) |
| 3274 | { |
| 3275 | } |
| 3276 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
| 3277 | |
| 3278 | static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags) |
| 3279 | { |
| 3280 | struct rq *rq = rq_of(cfs_rq); |
| 3281 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3282 | if (&rq->cfs == cfs_rq) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3283 | /* |
| 3284 | * There are a few boundary cases this might miss but it should |
| 3285 | * get called often enough that that should (hopefully) not be |
| 3286 | * a real problem. |
| 3287 | * |
| 3288 | * It will not get called when we go idle, because the idle |
| 3289 | * thread is a different class (!fair), nor will the utilization |
| 3290 | * number include things like RT tasks. |
| 3291 | * |
| 3292 | * As is, the util number is not freq-invariant (we'd have to |
| 3293 | * implement arch_scale_freq_capacity() for that). |
| 3294 | * |
| 3295 | * See cpu_util(). |
| 3296 | */ |
| 3297 | cpufreq_update_util(rq, flags); |
| 3298 | } |
| 3299 | } |
| 3300 | |
| 3301 | #ifdef CONFIG_SMP |
| 3302 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 3303 | /** |
| 3304 | * update_tg_load_avg - update the tg's load avg |
| 3305 | * @cfs_rq: the cfs_rq whose avg changed |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3306 | * |
| 3307 | * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load. |
| 3308 | * However, because tg->load_avg is a global value there are performance |
| 3309 | * considerations. |
| 3310 | * |
| 3311 | * In order to avoid having to look at the other cfs_rq's, we use a |
| 3312 | * differential update where we store the last value we propagated. This in |
| 3313 | * turn allows skipping updates if the differential is 'small'. |
| 3314 | * |
|  | 3315 |  * Updating tg's load_avg is necessary before update_cfs_group(). |
| 3316 | */ |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3317 | static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3318 | { |
| 3319 | long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib; |
| 3320 | |
| 3321 | /* |
| 3322 | * No need to update load_avg for root_task_group as it is not used. |
| 3323 | */ |
| 3324 | if (cfs_rq->tg == &root_task_group) |
| 3325 | return; |
| 3326 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3327 | if (abs(delta) > cfs_rq->tg_load_avg_contrib / 64) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3328 | atomic_long_add(delta, &cfs_rq->tg->load_avg); |
| 3329 | cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg; |
| 3330 | } |
| 3331 | } |
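/*
 * Numeric illustration (editor's sketch): with a previous
 * tg_load_avg_contrib of 6400, the global tg->load_avg is only
 * written once the local load_avg drifts by more than 6400/64 = 100,
 * which bounds the cross-CPU cacheline traffic on tg->load_avg.
 */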
| 3332 | |
| 3333 | /* |
| 3334 | * Called within set_task_rq() right before setting a task's CPU. The |
| 3335 | * caller only guarantees p->pi_lock is held; no other assumptions, |
| 3336 | * including the state of rq->lock, should be made. |
| 3337 | */ |
| 3338 | void set_task_rq_fair(struct sched_entity *se, |
| 3339 | struct cfs_rq *prev, struct cfs_rq *next) |
| 3340 | { |
| 3341 | u64 p_last_update_time; |
| 3342 | u64 n_last_update_time; |
| 3343 | |
| 3344 | if (!sched_feat(ATTACH_AGE_LOAD)) |
| 3345 | return; |
| 3346 | |
| 3347 | /* |
|  | 3348 | 	 * We are supposed to update the task to "current" time, so that it is up to |
|  | 3349 | 	 * date and ready to go to its new CPU/cfs_rq. But it is difficult to get |
|  | 3350 | 	 * the current time here, so simply throw away the out-of-date |
|  | 3351 | 	 * time. This results in the wakee task being less decayed, but giving |
|  | 3352 | 	 * the wakee more load does not sound bad. |
| 3353 | */ |
| 3354 | if (!(se->avg.last_update_time && prev)) |
| 3355 | return; |
| 3356 | |
| 3357 | #ifndef CONFIG_64BIT |
| 3358 | { |
| 3359 | u64 p_last_update_time_copy; |
| 3360 | u64 n_last_update_time_copy; |
| 3361 | |
| 3362 | do { |
| 3363 | p_last_update_time_copy = prev->load_last_update_time_copy; |
| 3364 | n_last_update_time_copy = next->load_last_update_time_copy; |
| 3365 | |
| 3366 | smp_rmb(); |
| 3367 | |
| 3368 | p_last_update_time = prev->avg.last_update_time; |
| 3369 | n_last_update_time = next->avg.last_update_time; |
| 3370 | |
| 3371 | } while (p_last_update_time != p_last_update_time_copy || |
| 3372 | n_last_update_time != n_last_update_time_copy); |
| 3373 | } |
| 3374 | #else |
| 3375 | p_last_update_time = prev->avg.last_update_time; |
| 3376 | n_last_update_time = next->avg.last_update_time; |
| 3377 | #endif |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3378 | __update_load_avg_blocked_se(p_last_update_time, se); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3379 | se->avg.last_update_time = n_last_update_time; |
| 3380 | } |
| 3381 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3382 | /* |
| 3383 | * When on migration a sched_entity joins/leaves the PELT hierarchy, we need to |
| 3384 | * propagate its contribution. The key to this propagation is the invariant |
| 3385 | * that for each group: |
| 3386 | * |
| 3387 | * ge->avg == grq->avg (1) |
| 3388 | * |
| 3389 | * _IFF_ we look at the pure running and runnable sums. Because they |
| 3390 | * represent the very same entity, just at different points in the hierarchy. |
| 3391 | * |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3392 |  * Per the above, update_tg_cfs_util() and update_tg_cfs_runnable() are trivial |
|  | 3393 |  * and simply copy the running/runnable sum over (but still wrong, because |
| 3394 | * the group entity and group rq do not have their PELT windows aligned). |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3395 | * |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3396 | * However, update_tg_cfs_load() is more complex. So we have: |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3397 | * |
| 3398 | * ge->avg.load_avg = ge->load.weight * ge->avg.runnable_avg (2) |
| 3399 | * |
| 3400 | * And since, like util, the runnable part should be directly transferable, |
|  | 3401 |  * the following would _appear_ to be the straightforward approach: |
| 3402 | * |
| 3403 | * grq->avg.load_avg = grq->load.weight * grq->avg.runnable_avg (3) |
| 3404 | * |
| 3405 | * And per (1) we have: |
| 3406 | * |
| 3407 | * ge->avg.runnable_avg == grq->avg.runnable_avg |
| 3408 | * |
| 3409 | * Which gives: |
| 3410 | * |
| 3411 | * ge->load.weight * grq->avg.load_avg |
| 3412 | * ge->avg.load_avg = ----------------------------------- (4) |
| 3413 | * grq->load.weight |
| 3414 | * |
| 3415 | * Except that is wrong! |
| 3416 | * |
| 3417 | * Because while for entities historical weight is not important and we |
| 3418 | * really only care about our future and therefore can consider a pure |
| 3419 | * runnable sum, runqueues can NOT do this. |
| 3420 | * |
| 3421 | * We specifically want runqueues to have a load_avg that includes |
| 3422 | * historical weights. Those represent the blocked load, the load we expect |
| 3423 | * to (shortly) return to us. This only works by keeping the weights as |
| 3424 | * integral part of the sum. We therefore cannot decompose as per (3). |
| 3425 | * |
| 3426 | * Another reason this doesn't work is that runnable isn't a 0-sum entity. |
| 3427 | * Imagine a rq with 2 tasks that each are runnable 2/3 of the time. Then the |
| 3428 | * rq itself is runnable anywhere between 2/3 and 1 depending on how the |
| 3429 | * runnable section of these tasks overlap (or not). If they were to perfectly |
| 3430 | * align the rq as a whole would be runnable 2/3 of the time. If however we |
| 3431 | * always have at least 1 runnable task, the rq as a whole is always runnable. |
| 3432 | * |
| 3433 | * So we'll have to approximate.. :/ |
| 3434 | * |
| 3435 | * Given the constraint: |
| 3436 | * |
| 3437 | * ge->avg.running_sum <= ge->avg.runnable_sum <= LOAD_AVG_MAX |
| 3438 | * |
| 3439 | * We can construct a rule that adds runnable to a rq by assuming minimal |
| 3440 | * overlap. |
| 3441 | * |
|  | 3442 |  * On removal, we'll assume each task is equally runnable, which yields: |
| 3443 | * |
| 3444 | * grq->avg.runnable_sum = grq->avg.load_sum / grq->load.weight |
| 3445 | * |
| 3446 | * XXX: only do this for the part of runnable > running ? |
| 3447 | * |
| 3448 | */ |
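/*
 * Illustration (editor's sketch): for the two-task example above,
 * each task runnable 2/3 of the time, the removal rule yields an
 * unweighted runnable_sum of about 2/3 of the maximum -- the
 * per-task average -- while the true rq value lies anywhere in
 * [2/3, 1] depending on how the runnable sections overlap.
 */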
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3449 | static inline void |
| 3450 | update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) |
| 3451 | { |
| 3452 | long delta = gcfs_rq->avg.util_avg - se->avg.util_avg; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3453 | u32 divider; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3454 | |
| 3455 | /* Nothing to update */ |
| 3456 | if (!delta) |
| 3457 | return; |
| 3458 | |
| 3459 | /* |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3460 | * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. |
| 3461 | * See ___update_load_avg() for details. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3462 | */ |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3463 | divider = get_pelt_divider(&cfs_rq->avg); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3464 | |
| 3465 | /* Set new sched_entity's utilization */ |
| 3466 | se->avg.util_avg = gcfs_rq->avg.util_avg; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3467 | se->avg.util_sum = se->avg.util_avg * divider; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3468 | |
| 3469 | /* Update parent cfs_rq utilization */ |
| 3470 | add_positive(&cfs_rq->avg.util_avg, delta); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3471 | cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3472 | } |
| 3473 | |
| 3474 | static inline void |
| 3475 | update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) |
| 3476 | { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3477 | long delta = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg; |
| 3478 | u32 divider; |
| 3479 | |
| 3480 | /* Nothing to update */ |
| 3481 | if (!delta) |
| 3482 | return; |
| 3483 | |
| 3484 | /* |
| 3485 | * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. |
| 3486 | * See ___update_load_avg() for details. |
| 3487 | */ |
| 3488 | divider = get_pelt_divider(&cfs_rq->avg); |
| 3489 | |
| 3490 | /* Set new sched_entity's runnable */ |
| 3491 | se->avg.runnable_avg = gcfs_rq->avg.runnable_avg; |
| 3492 | se->avg.runnable_sum = se->avg.runnable_avg * divider; |
| 3493 | |
| 3494 | /* Update parent cfs_rq runnable */ |
| 3495 | add_positive(&cfs_rq->avg.runnable_avg, delta); |
| 3496 | cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider; |
| 3497 | } |
| 3498 | |
| 3499 | static inline void |
| 3500 | update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) |
| 3501 | { |
| 3502 | long delta, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum; |
| 3503 | unsigned long load_avg; |
| 3504 | u64 load_sum = 0; |
| 3505 | u32 divider; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3506 | |
| 3507 | if (!runnable_sum) |
| 3508 | return; |
| 3509 | |
| 3510 | gcfs_rq->prop_runnable_sum = 0; |
| 3511 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3512 | /* |
| 3513 | * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. |
| 3514 | * See ___update_load_avg() for details. |
| 3515 | */ |
| 3516 | divider = get_pelt_divider(&cfs_rq->avg); |
| 3517 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3518 | if (runnable_sum >= 0) { |
| 3519 | /* |
| 3520 | * Add runnable; clip at LOAD_AVG_MAX. Reflects that until |
| 3521 | * the CPU is saturated running == runnable. |
| 3522 | */ |
| 3523 | runnable_sum += se->avg.load_sum; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3524 | runnable_sum = min_t(long, runnable_sum, divider); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3525 | } else { |
| 3526 | /* |
| 3527 | * Estimate the new unweighted runnable_sum of the gcfs_rq by |
| 3528 | * assuming all tasks are equally runnable. |
| 3529 | */ |
| 3530 | if (scale_load_down(gcfs_rq->load.weight)) { |
| 3531 | load_sum = div_s64(gcfs_rq->avg.load_sum, |
| 3532 | scale_load_down(gcfs_rq->load.weight)); |
| 3533 | } |
| 3534 | |
| 3535 | /* But make sure to not inflate se's runnable */ |
| 3536 | runnable_sum = min(se->avg.load_sum, load_sum); |
| 3537 | } |
| 3538 | |
| 3539 | /* |
| 3540 | * runnable_sum can't be lower than running_sum |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3541 | * Rescale running sum to be in the same range as runnable sum |
| 3542 | * running_sum is in [0 : LOAD_AVG_MAX << SCHED_CAPACITY_SHIFT] |
| 3543 | * runnable_sum is in [0 : LOAD_AVG_MAX] |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3544 | */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3545 | running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3546 | runnable_sum = max(runnable_sum, running_sum); |
| 3547 | |
| 3548 | load_sum = (s64)se_weight(se) * runnable_sum; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3549 | load_avg = div_s64(load_sum, divider); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3550 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3551 | delta = load_avg - se->avg.load_avg; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3552 | |
| 3553 | se->avg.load_sum = runnable_sum; |
| 3554 | se->avg.load_avg = load_avg; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3555 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3556 | add_positive(&cfs_rq->avg.load_avg, delta); |
| 3557 | cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3558 | } |
| 3559 | |
| 3560 | static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) |
| 3561 | { |
| 3562 | cfs_rq->propagate = 1; |
| 3563 | cfs_rq->prop_runnable_sum += runnable_sum; |
| 3564 | } |
| 3565 | |
| 3566 | /* Update task and its cfs_rq load average */ |
| 3567 | static inline int propagate_entity_load_avg(struct sched_entity *se) |
| 3568 | { |
| 3569 | struct cfs_rq *cfs_rq, *gcfs_rq; |
| 3570 | |
| 3571 | if (entity_is_task(se)) |
| 3572 | return 0; |
| 3573 | |
| 3574 | gcfs_rq = group_cfs_rq(se); |
| 3575 | if (!gcfs_rq->propagate) |
| 3576 | return 0; |
| 3577 | |
| 3578 | gcfs_rq->propagate = 0; |
| 3579 | |
| 3580 | cfs_rq = cfs_rq_of(se); |
| 3581 | |
| 3582 | add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum); |
| 3583 | |
| 3584 | update_tg_cfs_util(cfs_rq, se, gcfs_rq); |
| 3585 | update_tg_cfs_runnable(cfs_rq, se, gcfs_rq); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3586 | update_tg_cfs_load(cfs_rq, se, gcfs_rq); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3587 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3588 | trace_pelt_cfs_tp(cfs_rq); |
| 3589 | trace_pelt_se_tp(se); |
| 3590 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3591 | return 1; |
| 3592 | } |
| 3593 | |
| 3594 | /* |
| 3595 | * Check if we need to update the load and the utilization of a blocked |
| 3596 | * group_entity: |
| 3597 | */ |
| 3598 | static inline bool skip_blocked_update(struct sched_entity *se) |
| 3599 | { |
| 3600 | struct cfs_rq *gcfs_rq = group_cfs_rq(se); |
| 3601 | |
| 3602 | /* |
|  | 3603 | 	 * If the sched_entity still has a non-zero load or utilization, we have to |
| 3604 | * decay it: |
| 3605 | */ |
| 3606 | if (se->avg.load_avg || se->avg.util_avg) |
| 3607 | return false; |
| 3608 | |
| 3609 | /* |
| 3610 | * If there is a pending propagation, we have to update the load and |
| 3611 | * the utilization of the sched_entity: |
| 3612 | */ |
| 3613 | if (gcfs_rq->propagate) |
| 3614 | return false; |
| 3615 | |
| 3616 | /* |
|  | 3617 | 	 * Otherwise, the load and the utilization of the sched_entity are |
| 3618 | * already zero and there is no pending propagation, so it will be a |
| 3619 | * waste of time to try to decay it: |
| 3620 | */ |
| 3621 | return true; |
| 3622 | } |
| 3623 | |
| 3624 | #else /* CONFIG_FAIR_GROUP_SCHED */ |
| 3625 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3626 | static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) {} |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3627 | |
| 3628 | static inline int propagate_entity_load_avg(struct sched_entity *se) |
| 3629 | { |
| 3630 | return 0; |
| 3631 | } |
| 3632 | |
| 3633 | static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum) {} |
| 3634 | |
| 3635 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
| 3636 | |
| 3637 | /** |
| 3638 | * update_cfs_rq_load_avg - update the cfs_rq's load/util averages |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3639 | * @now: current time, as per cfs_rq_clock_pelt() |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3640 | * @cfs_rq: cfs_rq to update |
| 3641 | * |
| 3642 | * The cfs_rq avg is the direct sum of all its entities (blocked and runnable) |
| 3643 | * avg. The immediate corollary is that all (fair) tasks must be attached, see |
| 3644 | * post_init_entity_util_avg(). |
| 3645 | * |
|  | 3646 |  * cfs_rq->avg is used for task_h_load() and update_cfs_group() for example. |
| 3647 | * |
| 3648 | * Returns true if the load decayed or we removed load. |
| 3649 | * |
| 3650 | * Since both these conditions indicate a changed cfs_rq->avg.load we should |
| 3651 | * call update_tg_load_avg() when this function returns true. |
| 3652 | */ |
| 3653 | static inline int |
| 3654 | update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) |
| 3655 | { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3656 | unsigned long removed_load = 0, removed_util = 0, removed_runnable = 0; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3657 | struct sched_avg *sa = &cfs_rq->avg; |
| 3658 | int decayed = 0; |
| 3659 | |
| 3660 | if (cfs_rq->removed.nr) { |
| 3661 | unsigned long r; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3662 | u32 divider = get_pelt_divider(&cfs_rq->avg); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3663 | |
| 3664 | raw_spin_lock(&cfs_rq->removed.lock); |
| 3665 | swap(cfs_rq->removed.util_avg, removed_util); |
| 3666 | swap(cfs_rq->removed.load_avg, removed_load); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3667 | swap(cfs_rq->removed.runnable_avg, removed_runnable); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3668 | cfs_rq->removed.nr = 0; |
| 3669 | raw_spin_unlock(&cfs_rq->removed.lock); |
| 3670 | |
| 3671 | r = removed_load; |
| 3672 | sub_positive(&sa->load_avg, r); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3673 | sa->load_sum = sa->load_avg * divider; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3674 | |
| 3675 | r = removed_util; |
| 3676 | sub_positive(&sa->util_avg, r); |
| 3677 | sub_positive(&sa->util_sum, r * divider); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3678 | /* |
|  | 3679 | 		 * Because of rounding, se->util_sum might end up being +1 more than |
|  | 3680 | 		 * cfs->util_sum. Although this is not a problem by itself, detaching |
|  | 3681 | 		 * a lot of tasks with this rounding error between 2 updates of |
|  | 3682 | 		 * util_avg (~1ms) can make cfs->util_sum become zero whereas |
|  | 3683 | 		 * cfs->util_avg is not. |
| 3684 | * Check that util_sum is still above its lower bound for the new |
| 3685 | * util_avg. Given that period_contrib might have moved since the last |
| 3686 | * sync, we are only sure that util_sum must be above or equal to |
| 3687 | * util_avg * minimum possible divider |
| 3688 | */ |
| 3689 | sa->util_sum = max_t(u32, sa->util_sum, sa->util_avg * PELT_MIN_DIVIDER); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3690 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3691 | r = removed_runnable; |
| 3692 | sub_positive(&sa->runnable_avg, r); |
| 3693 | sa->runnable_sum = sa->runnable_avg * divider; |
| 3694 | |
| 3695 | /* |
| 3696 | * removed_runnable is the unweighted version of removed_load so we |
| 3697 | * can use it to estimate removed_load_sum. |
| 3698 | */ |
| 3699 | add_tg_cfs_propagate(cfs_rq, |
| 3700 | -(long)(removed_runnable * divider) >> SCHED_CAPACITY_SHIFT); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3701 | |
| 3702 | decayed = 1; |
| 3703 | } |
| 3704 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3705 | decayed |= __update_load_avg_cfs_rq(now, cfs_rq); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3706 | |
| 3707 | #ifndef CONFIG_64BIT |
| 3708 | smp_wmb(); |
| 3709 | cfs_rq->load_last_update_time_copy = sa->last_update_time; |
| 3710 | #endif |
| 3711 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3712 | return decayed; |
| 3713 | } |
| 3714 | |
| 3715 | /** |
| 3716 | * attach_entity_load_avg - attach this entity to its cfs_rq load avg |
| 3717 | * @cfs_rq: cfs_rq to attach to |
| 3718 | * @se: sched_entity to attach |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3719 | * |
| 3720 | * Must call update_cfs_rq_load_avg() before this, since we rely on |
| 3721 | * cfs_rq->avg.last_update_time being current. |
| 3722 | */ |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3723 | static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3724 | { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3725 | /* |
| 3726 | * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. |
| 3727 | * See ___update_load_avg() for details. |
| 3728 | */ |
| 3729 | u32 divider = get_pelt_divider(&cfs_rq->avg); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3730 | |
| 3731 | /* |
| 3732 | * When we attach the @se to the @cfs_rq, we must align the decay |
| 3733 | * window because without that, really weird and wonderful things can |
| 3734 | * happen. |
| 3735 | * |
| 3736 | * XXX illustrate |
| 3737 | */ |
| 3738 | se->avg.last_update_time = cfs_rq->avg.last_update_time; |
| 3739 | se->avg.period_contrib = cfs_rq->avg.period_contrib; |
| 3740 | |
| 3741 | /* |
| 3742 | * Hell(o) Nasty stuff.. we need to recompute _sum based on the new |
| 3743 | * period_contrib. This isn't strictly correct, but since we're |
| 3744 | * entirely outside of the PELT hierarchy, nobody cares if we truncate |
| 3745 | * _sum a little. |
| 3746 | */ |
| 3747 | se->avg.util_sum = se->avg.util_avg * divider; |
| 3748 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3749 | se->avg.runnable_sum = se->avg.runnable_avg * divider; |
| 3750 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3751 | se->avg.load_sum = divider; |
| 3752 | if (se_weight(se)) { |
| 3753 | se->avg.load_sum = |
| 3754 | div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se)); |
| 3755 | } |
| 3756 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3757 | enqueue_load_avg(cfs_rq, se); |
| 3758 | cfs_rq->avg.util_avg += se->avg.util_avg; |
| 3759 | cfs_rq->avg.util_sum += se->avg.util_sum; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3760 | cfs_rq->avg.runnable_avg += se->avg.runnable_avg; |
| 3761 | cfs_rq->avg.runnable_sum += se->avg.runnable_sum; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3762 | |
| 3763 | add_tg_cfs_propagate(cfs_rq, se->avg.load_sum); |
| 3764 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3765 | cfs_rq_util_change(cfs_rq, 0); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3766 | |
| 3767 | trace_pelt_cfs_tp(cfs_rq); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3768 | } |
| 3769 | |
| 3770 | /** |
| 3771 | * detach_entity_load_avg - detach this entity from its cfs_rq load avg |
| 3772 | * @cfs_rq: cfs_rq to detach from |
| 3773 | * @se: sched_entity to detach |
| 3774 | * |
| 3775 | * Must call update_cfs_rq_load_avg() before this, since we rely on |
| 3776 | * cfs_rq->avg.last_update_time being current. |
| 3777 | */ |
| 3778 | static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) |
| 3779 | { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3780 | /* |
| 3781 | * cfs_rq->avg.period_contrib can be used for both cfs_rq and se. |
| 3782 | * See ___update_load_avg() for details. |
| 3783 | */ |
| 3784 | u32 divider = get_pelt_divider(&cfs_rq->avg); |
| 3785 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3786 | dequeue_load_avg(cfs_rq, se); |
| 3787 | sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3788 | cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider; |
| 3789 | sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg); |
| 3790 | cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3791 | |
| 3792 | add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum); |
| 3793 | |
| 3794 | cfs_rq_util_change(cfs_rq, 0); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3795 | |
| 3796 | trace_pelt_cfs_tp(cfs_rq); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3797 | } |
| 3798 | |
| 3799 | /* |
| 3800 | * Optional action to be done while updating the load average |
| 3801 | */ |
| 3802 | #define UPDATE_TG 0x1 |
| 3803 | #define SKIP_AGE_LOAD 0x2 |
| 3804 | #define DO_ATTACH 0x4 |
| 3805 | |
| 3806 | /* Update task and its cfs_rq load average */ |
| 3807 | static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) |
| 3808 | { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3809 | u64 now = cfs_rq_clock_pelt(cfs_rq); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3810 | int decayed; |
| 3811 | |
| 3812 | /* |
| 3813 | * Track task load average for carrying it to new CPU after migrated, and |
| 3814 | * track group sched_entity load average for task_h_load calc in migration |
| 3815 | */ |
| 3816 | if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD)) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3817 | __update_load_avg_se(now, cfs_rq, se); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3818 | |
| 3819 | decayed = update_cfs_rq_load_avg(now, cfs_rq); |
| 3820 | decayed |= propagate_entity_load_avg(se); |
| 3821 | |
| 3822 | if (!se->avg.last_update_time && (flags & DO_ATTACH)) { |
| 3823 | |
| 3824 | /* |
| 3825 | * DO_ATTACH means we're here from enqueue_entity(). |
| 3826 | * !last_update_time means we've passed through |
| 3827 | * migrate_task_rq_fair() indicating we migrated. |
| 3828 | * |
| 3829 | * IOW we're enqueueing a task on a new CPU. |
| 3830 | */ |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3831 | attach_entity_load_avg(cfs_rq, se); |
| 3832 | update_tg_load_avg(cfs_rq); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3833 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 3834 | } else if (decayed) { |
| 3835 | cfs_rq_util_change(cfs_rq, 0); |
| 3836 | |
| 3837 | if (flags & UPDATE_TG) |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3838 | update_tg_load_avg(cfs_rq); |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 3839 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3840 | } |
| 3841 | |
| 3842 | #ifndef CONFIG_64BIT |
| 3843 | static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) |
| 3844 | { |
| 3845 | u64 last_update_time_copy; |
| 3846 | u64 last_update_time; |
| 3847 | |
| 3848 | do { |
| 3849 | last_update_time_copy = cfs_rq->load_last_update_time_copy; |
| 3850 | smp_rmb(); |
| 3851 | last_update_time = cfs_rq->avg.last_update_time; |
| 3852 | } while (last_update_time != last_update_time_copy); |
| 3853 | |
| 3854 | return last_update_time; |
| 3855 | } |
| 3856 | #else |
| 3857 | static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq) |
| 3858 | { |
| 3859 | return cfs_rq->avg.last_update_time; |
| 3860 | } |
| 3861 | #endif |
| 3862 | |
| 3863 | /* |
| 3864 | * Synchronize entity load avg of dequeued entity without locking |
| 3865 | * the previous rq. |
| 3866 | */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3867 | static void sync_entity_load_avg(struct sched_entity *se) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3868 | { |
| 3869 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
| 3870 | u64 last_update_time; |
| 3871 | |
| 3872 | last_update_time = cfs_rq_last_update_time(cfs_rq); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3873 | __update_load_avg_blocked_se(last_update_time, se); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3874 | } |
| 3875 | |
| 3876 | /* |
|  | 3877 |  * Task first catches up with cfs_rq, and then subtracts |
| 3878 | * itself from the cfs_rq (task must be off the queue now). |
| 3879 | */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3880 | static void remove_entity_load_avg(struct sched_entity *se) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3881 | { |
| 3882 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
| 3883 | unsigned long flags; |
| 3884 | |
| 3885 | /* |
| 3886 |  * Tasks cannot exit without having gone through wake_up_new_task() -> |
| 3887 |  * post_init_entity_util_avg(), which will have added load to the |
| 3888 |  * cfs_rq, so we can remove unconditionally. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3889 | */ |
| 3890 | |
| 3891 | sync_entity_load_avg(se); |
| 3892 | |
| 3893 | raw_spin_lock_irqsave(&cfs_rq->removed.lock, flags); |
| 3894 | ++cfs_rq->removed.nr; |
| 3895 | cfs_rq->removed.util_avg += se->avg.util_avg; |
| 3896 | cfs_rq->removed.load_avg += se->avg.load_avg; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3897 | cfs_rq->removed.runnable_avg += se->avg.runnable_avg; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3898 | raw_spin_unlock_irqrestore(&cfs_rq->removed.lock, flags); |
| 3899 | } |
| 3900 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3901 | static inline unsigned long cfs_rq_runnable_avg(struct cfs_rq *cfs_rq) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3902 | { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3903 | return cfs_rq->avg.runnable_avg; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3904 | } |
| 3905 | |
| 3906 | static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq) |
| 3907 | { |
| 3908 | return cfs_rq->avg.load_avg; |
| 3909 | } |
| 3910 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3911 | static int newidle_balance(struct rq *this_rq, struct rq_flags *rf); |
| 3912 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3913 | static inline unsigned long task_util(struct task_struct *p) |
| 3914 | { |
| 3915 | return READ_ONCE(p->se.avg.util_avg); |
| 3916 | } |
| 3917 | |
| 3918 | static inline unsigned long _task_util_est(struct task_struct *p) |
| 3919 | { |
| 3920 | struct util_est ue = READ_ONCE(p->se.avg.util_est); |
| 3921 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3922 | return max(ue.ewma, (ue.enqueued & ~UTIL_AVG_UNCHANGED)); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3923 | } |
| 3924 | |
| 3925 | static inline unsigned long task_util_est(struct task_struct *p) |
| 3926 | { |
| 3927 | return max(task_util(p), _task_util_est(p)); |
| 3928 | } |
| 3929 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3930 | #ifdef CONFIG_UCLAMP_TASK |
| 3931 | static inline unsigned long uclamp_task_util(struct task_struct *p) |
| 3932 | { |
| 3933 | return clamp(task_util_est(p), |
| 3934 | uclamp_eff_value(p, UCLAMP_MIN), |
| 3935 | uclamp_eff_value(p, UCLAMP_MAX)); |
| 3936 | } |
| 3937 | #else |
| 3938 | static inline unsigned long uclamp_task_util(struct task_struct *p) |
| 3939 | { |
| 3940 | return task_util_est(p); |
| 3941 | } |
| 3942 | #endif |
| 3943 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3944 | static inline void util_est_enqueue(struct cfs_rq *cfs_rq, |
| 3945 | struct task_struct *p) |
| 3946 | { |
| 3947 | unsigned int enqueued; |
| 3948 | |
| 3949 | if (!sched_feat(UTIL_EST)) |
| 3950 | return; |
| 3951 | |
| 3952 | /* Update root cfs_rq's estimated utilization */ |
| 3953 | enqueued = cfs_rq->avg.util_est.enqueued; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 3954 | enqueued += _task_util_est(p); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3955 | WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3956 | |
| 3957 | trace_sched_util_est_cfs_tp(cfs_rq); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3958 | } |
| 3959 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3960 | static inline void util_est_dequeue(struct cfs_rq *cfs_rq, |
| 3961 | struct task_struct *p) |
| 3962 | { |
| 3963 | unsigned int enqueued; |
| 3964 | |
| 3965 | if (!sched_feat(UTIL_EST)) |
| 3966 | return; |
| 3967 | |
| 3968 | /* Update root cfs_rq's estimated utilization */ |
| 3969 | enqueued = cfs_rq->avg.util_est.enqueued; |
| 3970 | enqueued -= min_t(unsigned int, enqueued, _task_util_est(p)); |
| 3971 | WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued); |
| 3972 | |
| 3973 | trace_sched_util_est_cfs_tp(cfs_rq); |
| 3974 | } |
| 3975 | |
| 3976 | #define UTIL_EST_MARGIN (SCHED_CAPACITY_SCALE / 100) |
| 3977 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3978 | /* |
| 3979 | * Check if a (signed) value is within a specified (unsigned) margin, |
| 3980 | * based on the observation that: |
| 3981 | * |
| 3982 | * abs(x) < y := (unsigned)(x + y - 1) < (2 * y - 1) |
| 3983 | * |
| 3984 |  * NOTE: this only works when value + margin < INT_MAX. |
| 3985 | */ |
| 3986 | static inline bool within_margin(int value, int margin) |
| 3987 | { |
| 3988 | return ((unsigned int)(value + margin - 1) < (2 * margin - 1)); |
| 3989 | } |
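/*
 * Editor's note -- worked example (illustrative only): with margin = 10
 * the test becomes (unsigned int)(x + 9) < 19, which holds exactly for
 * -9 <= x <= 9. E.g. x = -9 wraps to 0 (< 19) and x = 9 gives 18 (< 19),
 * while x = 10 gives 19 and x = -10 wraps to UINT_MAX, both rejected.
 * One unsigned compare thus implements abs(x) < margin.
 */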
| 3990 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3991 | static inline void util_est_update(struct cfs_rq *cfs_rq, |
| 3992 | struct task_struct *p, |
| 3993 | bool task_sleep) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3994 | { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 3995 | long last_ewma_diff, last_enqueued_diff; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 3996 | struct util_est ue; |
| 3997 | |
| 3998 | if (!sched_feat(UTIL_EST)) |
| 3999 | return; |
| 4000 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4001 | /* |
| 4002 | * Skip update of task's estimated utilization when the task has not |
| 4003 | * yet completed an activation, e.g. being migrated. |
| 4004 | */ |
| 4005 | if (!task_sleep) |
| 4006 | return; |
| 4007 | |
| 4008 | /* |
| 4009 | * If the PELT values haven't changed since enqueue time, |
| 4010 | * skip the util_est update. |
| 4011 | */ |
| 4012 | ue = p->se.avg.util_est; |
| 4013 | if (ue.enqueued & UTIL_AVG_UNCHANGED) |
| 4014 | return; |
| 4015 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 4016 | last_enqueued_diff = ue.enqueued; |
| 4017 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4018 | /* |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 4019 |  * Reset the EWMA on utilization increases; the moving average is used only |
| 4020 |  * to smooth utilization decreases. |
| 4021 | */ |
| 4022 | ue.enqueued = task_util(p); |
| 4023 | if (sched_feat(UTIL_EST_FASTUP)) { |
| 4024 | if (ue.ewma < ue.enqueued) { |
| 4025 | ue.ewma = ue.enqueued; |
| 4026 | goto done; |
| 4027 | } |
| 4028 | } |
| 4029 | |
| 4030 | /* |
| 4031 | * Skip update of task's estimated utilization when its members are |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4032 |  * already within ~1% of its last activation value. |
| 4033 | */ |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4034 | last_ewma_diff = ue.enqueued - ue.ewma; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 4035 | last_enqueued_diff -= ue.enqueued; |
| 4036 | if (within_margin(last_ewma_diff, UTIL_EST_MARGIN)) { |
| 4037 | if (!within_margin(last_enqueued_diff, UTIL_EST_MARGIN)) |
| 4038 | goto done; |
| 4039 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4040 | return; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 4041 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4042 | |
| 4043 | /* |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 4044 | * To avoid overestimation of actual task utilization, skip updates if |
| 4045 |  * we cannot guarantee there is idle time on this CPU. |
| 4046 | */ |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 4047 | if (task_util(p) > capacity_orig_of(cpu_of(rq_of(cfs_rq)))) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 4048 | return; |
| 4049 | |
| 4050 | /* |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4051 | * Update Task's estimated utilization |
| 4052 | * |
| 4053 | * When *p completes an activation we can consolidate another sample |
| 4054 | * of the task size. This is done by storing the current PELT value |
| 4055 | * as ue.enqueued and by using this value to update the Exponential |
| 4056 | * Weighted Moving Average (EWMA): |
| 4057 | * |
| 4058 | * ewma(t) = w * task_util(p) + (1-w) * ewma(t-1) |
| 4059 | * = w * task_util(p) + ewma(t-1) - w * ewma(t-1) |
| 4060 | * = w * (task_util(p) - ewma(t-1)) + ewma(t-1) |
| 4061 | * = w * ( last_ewma_diff ) + ewma(t-1) |
| 4062 | * = w * (last_ewma_diff + ewma(t-1) / w) |
| 4063 | * |
| 4064 | * Where 'w' is the weight of new samples, which is configured to be |
| 4065 |  * 0.25, i.e. w = 1/4 (applied as >>= UTIL_EST_WEIGHT_SHIFT). |
| 4066 | */ |
| 4067 | ue.ewma <<= UTIL_EST_WEIGHT_SHIFT; |
| 4068 | ue.ewma += last_ewma_diff; |
| 4069 | ue.ewma >>= UTIL_EST_WEIGHT_SHIFT; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 4070 | done: |
| 4071 | ue.enqueued |= UTIL_AVG_UNCHANGED; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4072 | WRITE_ONCE(p->se.avg.util_est, ue); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 4073 | |
| 4074 | trace_sched_util_est_se_tp(&p->se); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4075 | } |
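/*
 * Editor's note -- worked example (illustrative only): with
 * UTIL_EST_WEIGHT_SHIFT = 2 (w = 1/4), ewma = 400 and a new sample
 * task_util(p) = 320:
 *
 *	last_ewma_diff = 320 - 400 = -80
 *	ewma <<= 2;		// 1600
 *	ewma += -80;		// 1520
 *	ewma >>= 2;		// 380
 *
 * which matches ewma(t) = 0.25 * 320 + 0.75 * 400 = 380. Utilization
 * increases bypass this path entirely when UTIL_EST_FASTUP is set.
 */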
| 4076 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 4077 | static inline int task_fits_capacity(struct task_struct *p, long capacity) |
| 4078 | { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 4079 | return fits_capacity(uclamp_task_util(p), capacity); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 4080 | } |
| 4081 | |
| 4082 | static inline void update_misfit_status(struct task_struct *p, struct rq *rq) |
| 4083 | { |
| 4084 | if (!static_branch_unlikely(&sched_asym_cpucapacity)) |
| 4085 | return; |
| 4086 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 4087 | if (!p || p->nr_cpus_allowed == 1) { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 4088 | rq->misfit_task_load = 0; |
| 4089 | return; |
| 4090 | } |
| 4091 | |
| 4092 | if (task_fits_capacity(p, capacity_of(cpu_of(rq)))) { |
| 4093 | rq->misfit_task_load = 0; |
| 4094 | return; |
| 4095 | } |
| 4096 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 4097 | /* |
| 4098 |  * Make sure that misfit_task_load will not be zero even if |
| 4099 | * task_h_load() returns 0. |
| 4100 | */ |
| 4101 | rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 4102 | } |
| 4103 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4104 | #else /* CONFIG_SMP */ |
| 4105 | |
| 4106 | #define UPDATE_TG 0x0 |
| 4107 | #define SKIP_AGE_LOAD 0x0 |
| 4108 | #define DO_ATTACH 0x0 |
| 4109 | |
| 4110 | static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int not_used1) |
| 4111 | { |
| 4112 | cfs_rq_util_change(cfs_rq, 0); |
| 4113 | } |
| 4114 | |
| 4115 | static inline void remove_entity_load_avg(struct sched_entity *se) {} |
| 4116 | |
| 4117 | static inline void |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 4118 | attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4119 | static inline void |
| 4120 | detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {} |
| 4121 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 4122 | static inline int newidle_balance(struct rq *rq, struct rq_flags *rf) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4123 | { |
| 4124 | return 0; |
| 4125 | } |
| 4126 | |
| 4127 | static inline void |
| 4128 | util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {} |
| 4129 | |
| 4130 | static inline void |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 4131 | util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p) {} |
| 4132 | |
| 4133 | static inline void |
| 4134 | util_est_update(struct cfs_rq *cfs_rq, struct task_struct *p, |
| 4135 | bool task_sleep) {} |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 4136 | static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {} |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4137 | |
| 4138 | #endif /* CONFIG_SMP */ |
| 4139 | |
| 4140 | static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) |
| 4141 | { |
| 4142 | #ifdef CONFIG_SCHED_DEBUG |
| 4143 | s64 d = se->vruntime - cfs_rq->min_vruntime; |
| 4144 | |
| 4145 | if (d < 0) |
| 4146 | d = -d; |
| 4147 | |
| 4148 | if (d > 3*sysctl_sched_latency) |
| 4149 | schedstat_inc(cfs_rq->nr_spread_over); |
| 4150 | #endif |
| 4151 | } |
| 4152 | |
| 4153 | static void |
| 4154 | place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) |
| 4155 | { |
| 4156 | u64 vruntime = cfs_rq->min_vruntime; |
| 4157 | |
| 4158 | /* |
| 4159 |  * The 'current' period is already promised to the current tasks; |
| 4160 |  * however, the extra weight of the new task will slow them down a |
| 4161 |  * little. Place the new task so that it fits in the slot that |
| 4162 |  * stays open at the end. |
| 4163 | */ |
| 4164 | if (initial && sched_feat(START_DEBIT)) |
| 4165 | vruntime += sched_vslice(cfs_rq, se); |
| 4166 | |
| 4167 | /* sleeps up to a single latency don't count. */ |
| 4168 | if (!initial) { |
| 4169 | unsigned long thresh = sysctl_sched_latency; |
| 4170 | |
| 4171 | /* |
| 4172 | * Halve their sleep time's effect, to allow |
| 4173 | * for a gentler effect of sleepers: |
| 4174 | */ |
| 4175 | if (sched_feat(GENTLE_FAIR_SLEEPERS)) |
| 4176 | thresh >>= 1; |
| 4177 | |
| 4178 | vruntime -= thresh; |
| 4179 | } |
| 4180 | |
| 4181 | /* ensure we never gain time by being placed backwards. */ |
| 4182 | se->vruntime = max_vruntime(se->vruntime, vruntime); |
| 4183 | } |
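/*
 * Editor's note -- worked example (illustrative only), assuming
 * min_vruntime = 100ms and sysctl_sched_latency = 6ms. A newly forked
 * task (initial && START_DEBIT) is placed at 100ms + vslice, i.e.
 * slightly in the future, so it cannot immediately preempt the tasks
 * already promised the current period. A task waking from sleep
 * (GENTLE_FAIR_SLEEPERS) is placed at 100ms - 3ms, a bounded wakeup
 * bonus; the final max_vruntime() ensures a briefly-slept task keeps
 * its own larger vruntime instead of gaining time.
 */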
| 4184 | |
| 4185 | static void check_enqueue_throttle(struct cfs_rq *cfs_rq); |
| 4186 | |
| 4187 | static inline void check_schedstat_required(void) |
| 4188 | { |
| 4189 | #ifdef CONFIG_SCHEDSTATS |
| 4190 | if (schedstat_enabled()) |
| 4191 | return; |
| 4192 | |
| 4193 | /* Force schedstat enabled if a dependent tracepoint is active */ |
| 4194 | if (trace_sched_stat_wait_enabled() || |
| 4195 | trace_sched_stat_sleep_enabled() || |
| 4196 | trace_sched_stat_iowait_enabled() || |
| 4197 | trace_sched_stat_blocked_enabled() || |
| 4198 | trace_sched_stat_runtime_enabled()) { |
| 4199 |  printk_deferred_once("Scheduler tracepoints stat_wait, stat_sleep, " |
| 4200 |  "stat_iowait, stat_blocked and stat_runtime require the " |
| 4201 | "kernel parameter schedstats=enable or " |
| 4202 | "kernel.sched_schedstats=1\n"); |
| 4203 | } |
| 4204 | #endif |
| 4205 | } |
| 4206 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 4207 | static inline bool cfs_bandwidth_used(void); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4208 | |
| 4209 | /* |
| 4210 | * MIGRATION |
| 4211 | * |
| 4212 | * dequeue |
| 4213 | * update_curr() |
| 4214 | * update_min_vruntime() |
| 4215 | * vruntime -= min_vruntime |
| 4216 | * |
| 4217 | * enqueue |
| 4218 | * update_curr() |
| 4219 | * update_min_vruntime() |
| 4220 | * vruntime += min_vruntime |
| 4221 | * |
| 4222 | * this way the vruntime transition between RQs is done when both |
| 4223 |  * min_vruntime values are up-to-date. |
| 4224 | * |
| 4225 | * WAKEUP (remote) |
| 4226 | * |
| 4227 | * ->migrate_task_rq_fair() (p->state == TASK_WAKING) |
| 4228 | * vruntime -= min_vruntime |
| 4229 | * |
| 4230 | * enqueue |
| 4231 | * update_curr() |
| 4232 | * update_min_vruntime() |
| 4233 | * vruntime += min_vruntime |
| 4234 | * |
| 4235 |  * this way we use a possibly stale min_vruntime on the originating |
| 4236 |  * CPU but an up-to-date min_vruntime on the destination CPU. |
| 4237 | */ |
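/*
 * Editor's note -- worked example (illustrative only): a task with
 * vruntime = 105ms leaves a CPU whose min_vruntime is 100ms, so it
 * travels carrying only its 5ms relative lag. If the destination CPU's
 * min_vruntime is 240ms, enqueue re-bases the task to 245ms, preserving
 * its position relative to the new queue rather than its stale absolute
 * value.
 */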
| 4238 | |
| 4239 | static void |
| 4240 | enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) |
| 4241 | { |
| 4242 | bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED); |
| 4243 | bool curr = cfs_rq->curr == se; |
| 4244 | |
| 4245 | /* |
| 4246 | * If we're the current task, we must renormalise before calling |
| 4247 | * update_curr(). |
| 4248 | */ |
| 4249 | if (renorm && curr) |
| 4250 | se->vruntime += cfs_rq->min_vruntime; |
| 4251 | |
| 4252 | update_curr(cfs_rq); |
| 4253 | |
| 4254 | /* |
| 4255 | * Otherwise, renormalise after, such that we're placed at the current |
| 4256 | * moment in time, instead of some random moment in the past. Being |
| 4257 | * placed in the past could significantly boost this task to the |
| 4258 | * fairness detriment of existing tasks. |
| 4259 | */ |
| 4260 | if (renorm && !curr) |
| 4261 | se->vruntime += cfs_rq->min_vruntime; |
| 4262 | |
| 4263 | /* |
| 4264 | * When enqueuing a sched_entity, we must: |
| 4265 | * - Update loads to have both entity and cfs_rq synced with now. |
| 4266 | * - Add its load to cfs_rq->runnable_avg |
| 4267 | * - For group_entity, update its weight to reflect the new share of |
| 4268 | * its group cfs_rq |
| 4269 | * - Add its new weight to cfs_rq->load.weight |
| 4270 | */ |
| 4271 | update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 4272 | se_update_runnable(se); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4273 | update_cfs_group(se); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4274 | account_entity_enqueue(cfs_rq, se); |
| 4275 | |
| 4276 | if (flags & ENQUEUE_WAKEUP) |
| 4277 | place_entity(cfs_rq, se, 0); |
| 4278 | |
| 4279 | check_schedstat_required(); |
| 4280 | update_stats_enqueue(cfs_rq, se, flags); |
| 4281 | check_spread(cfs_rq, se); |
| 4282 | if (!curr) |
| 4283 | __enqueue_entity(cfs_rq, se); |
| 4284 | se->on_rq = 1; |
| 4285 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 4286 | /* |
| 4287 |  * When bandwidth control is enabled, the cfs_rq might have been removed |
| 4288 |  * because a parent was throttled while cfs_rq->nr_running > 1. Try to |
| 4289 |  * add it unconditionally. |
| 4290 | */ |
| 4291 | if (cfs_rq->nr_running == 1 || cfs_bandwidth_used()) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4292 | list_add_leaf_cfs_rq(cfs_rq); |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 4293 | |
| 4294 | if (cfs_rq->nr_running == 1) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4295 | check_enqueue_throttle(cfs_rq); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4296 | } |
| 4297 | |
| 4298 | static void __clear_buddies_last(struct sched_entity *se) |
| 4299 | { |
| 4300 | for_each_sched_entity(se) { |
| 4301 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
| 4302 | if (cfs_rq->last != se) |
| 4303 | break; |
| 4304 | |
| 4305 | cfs_rq->last = NULL; |
| 4306 | } |
| 4307 | } |
| 4308 | |
| 4309 | static void __clear_buddies_next(struct sched_entity *se) |
| 4310 | { |
| 4311 | for_each_sched_entity(se) { |
| 4312 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
| 4313 | if (cfs_rq->next != se) |
| 4314 | break; |
| 4315 | |
| 4316 | cfs_rq->next = NULL; |
| 4317 | } |
| 4318 | } |
| 4319 | |
| 4320 | static void __clear_buddies_skip(struct sched_entity *se) |
| 4321 | { |
| 4322 | for_each_sched_entity(se) { |
| 4323 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
| 4324 | if (cfs_rq->skip != se) |
| 4325 | break; |
| 4326 | |
| 4327 | cfs_rq->skip = NULL; |
| 4328 | } |
| 4329 | } |
| 4330 | |
| 4331 | static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) |
| 4332 | { |
| 4333 | if (cfs_rq->last == se) |
| 4334 | __clear_buddies_last(se); |
| 4335 | |
| 4336 | if (cfs_rq->next == se) |
| 4337 | __clear_buddies_next(se); |
| 4338 | |
| 4339 | if (cfs_rq->skip == se) |
| 4340 | __clear_buddies_skip(se); |
| 4341 | } |
| 4342 | |
| 4343 | static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq); |
| 4344 | |
| 4345 | static void |
| 4346 | dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) |
| 4347 | { |
| 4348 | /* |
| 4349 | * Update run-time statistics of the 'current'. |
| 4350 | */ |
| 4351 | update_curr(cfs_rq); |
| 4352 | |
| 4353 | /* |
| 4354 | * When dequeuing a sched_entity, we must: |
| 4355 | * - Update loads to have both entity and cfs_rq synced with now. |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 4356 | * - Subtract its load from the cfs_rq->runnable_avg. |
| 4357 | * - Subtract its previous weight from cfs_rq->load.weight. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4358 | * - For group entity, update its weight to reflect the new share |
| 4359 | * of its group cfs_rq. |
| 4360 | */ |
| 4361 | update_load_avg(cfs_rq, se, UPDATE_TG); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 4362 | se_update_runnable(se); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4363 | |
| 4364 | update_stats_dequeue(cfs_rq, se, flags); |
| 4365 | |
| 4366 | clear_buddies(cfs_rq, se); |
| 4367 | |
| 4368 | if (se != cfs_rq->curr) |
| 4369 | __dequeue_entity(cfs_rq, se); |
| 4370 | se->on_rq = 0; |
| 4371 | account_entity_dequeue(cfs_rq, se); |
| 4372 | |
| 4373 | /* |
| 4374 |  * Normalize after update_curr(), which will also have moved |
| 4375 | * min_vruntime if @se is the one holding it back. But before doing |
| 4376 | * update_min_vruntime() again, which will discount @se's position and |
| 4377 | * can move min_vruntime forward still more. |
| 4378 | */ |
| 4379 | if (!(flags & DEQUEUE_SLEEP)) |
| 4380 | se->vruntime -= cfs_rq->min_vruntime; |
| 4381 | |
| 4382 | /* return excess runtime on last dequeue */ |
| 4383 | return_cfs_rq_runtime(cfs_rq); |
| 4384 | |
| 4385 | update_cfs_group(se); |
| 4386 | |
| 4387 | /* |
| 4388 | * Now advance min_vruntime if @se was the entity holding it back, |
| 4389 | * except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be |
| 4390 | * put back on, and if we advance min_vruntime, we'll be placed back |
| 4391 | * further than we started -- ie. we'll be penalized. |
| 4392 | */ |
| 4393 | if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE) |
| 4394 | update_min_vruntime(cfs_rq); |
| 4395 | } |
| 4396 | |
| 4397 | /* |
| 4398 | * Preempt the current task with a newly woken task if needed: |
| 4399 | */ |
| 4400 | static void |
| 4401 | check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) |
| 4402 | { |
| 4403 | unsigned long ideal_runtime, delta_exec; |
| 4404 | struct sched_entity *se; |
| 4405 | s64 delta; |
| 4406 | |
| 4407 | ideal_runtime = sched_slice(cfs_rq, curr); |
| 4408 | delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; |
| 4409 | if (delta_exec > ideal_runtime) { |
| 4410 | resched_curr(rq_of(cfs_rq)); |
| 4411 | /* |
| 4412 | * The current task ran long enough, ensure it doesn't get |
| 4413 | * re-elected due to buddy favours. |
| 4414 | */ |
| 4415 | clear_buddies(cfs_rq, curr); |
| 4416 | return; |
| 4417 | } |
| 4418 | |
| 4419 | /* |
| 4420 | * Ensure that a task that missed wakeup preemption by a |
| 4421 | * narrow margin doesn't have to wait for a full slice. |
| 4422 | * This also mitigates buddy induced latencies under load. |
| 4423 | */ |
| 4424 | if (delta_exec < sysctl_sched_min_granularity) |
| 4425 | return; |
| 4426 | |
| 4427 | se = __pick_first_entity(cfs_rq); |
| 4428 | delta = curr->vruntime - se->vruntime; |
| 4429 | |
| 4430 | if (delta < 0) |
| 4431 | return; |
| 4432 | |
| 4433 | if (delta > ideal_runtime) |
| 4434 | resched_curr(rq_of(cfs_rq)); |
| 4435 | } |
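/*
 * Editor's note -- worked example (illustrative only): assume
 * sched_slice() returns 3ms and sysctl_sched_min_granularity is the
 * default 0.75ms. A task that has run 4ms since last picked is
 * rescheduled immediately; one that has run 0.5ms is always left alone;
 * one that has run 1ms is rescheduled only if its vruntime leads the
 * leftmost entity's by more than 3ms.
 */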
| 4436 | |
| 4437 | static void |
| 4438 | set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) |
| 4439 | { |
| 4440 | /* 'current' is not kept within the tree. */ |
| 4441 | if (se->on_rq) { |
| 4442 | /* |
| 4443 |  * Any task has to be enqueued before it gets to execute on |
| 4444 | * a CPU. So account for the time it spent waiting on the |
| 4445 | * runqueue. |
| 4446 | */ |
| 4447 | update_stats_wait_end(cfs_rq, se); |
| 4448 | __dequeue_entity(cfs_rq, se); |
| 4449 | update_load_avg(cfs_rq, se, UPDATE_TG); |
| 4450 | } |
| 4451 | |
| 4452 | update_stats_curr_start(cfs_rq, se); |
| 4453 | cfs_rq->curr = se; |
| 4454 | |
| 4455 | /* |
| 4456 |  * Track our maximum slice length if the CPU's load is at |
| 4457 |  * least twice that of our own weight (i.e. don't track it |
| 4458 | * when there are only lesser-weight tasks around): |
| 4459 | */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 4460 | if (schedstat_enabled() && |
| 4461 | rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4462 | schedstat_set(se->statistics.slice_max, |
| 4463 | max((u64)schedstat_val(se->statistics.slice_max), |
| 4464 | se->sum_exec_runtime - se->prev_sum_exec_runtime)); |
| 4465 | } |
| 4466 | |
| 4467 | se->prev_sum_exec_runtime = se->sum_exec_runtime; |
| 4468 | } |
| 4469 | |
| 4470 | static int |
| 4471 | wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se); |
| 4472 | |
| 4473 | /* |
| 4474 | * Pick the next process, keeping these things in mind, in this order: |
| 4475 | * 1) keep things fair between processes/task groups |
| 4476 | * 2) pick the "next" process, since someone really wants that to run |
| 4477 | * 3) pick the "last" process, for cache locality |
| 4478 | * 4) do not run the "skip" process, if something else is available |
| 4479 | */ |
| 4480 | static struct sched_entity * |
| 4481 | pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr) |
| 4482 | { |
| 4483 | struct sched_entity *left = __pick_first_entity(cfs_rq); |
| 4484 | struct sched_entity *se; |
| 4485 | |
| 4486 | /* |
| 4487 |  * If curr is set we have to see if it's left of the leftmost entity |
| 4488 | * still in the tree, provided there was anything in the tree at all. |
| 4489 | */ |
| 4490 | if (!left || (curr && entity_before(curr, left))) |
| 4491 | left = curr; |
| 4492 | |
| 4493 | se = left; /* ideally we run the leftmost entity */ |
| 4494 | |
| 4495 | /* |
| 4496 | * Avoid running the skip buddy, if running something else can |
| 4497 | * be done without getting too unfair. |
| 4498 | */ |
| 4499 | if (cfs_rq->skip == se) { |
| 4500 | struct sched_entity *second; |
| 4501 | |
| 4502 | if (se == curr) { |
| 4503 | second = __pick_first_entity(cfs_rq); |
| 4504 | } else { |
| 4505 | second = __pick_next_entity(se); |
| 4506 | if (!second || (curr && entity_before(curr, second))) |
| 4507 | second = curr; |
| 4508 | } |
| 4509 | |
| 4510 | if (second && wakeup_preempt_entity(second, left) < 1) |
| 4511 | se = second; |
| 4512 | } |
| 4513 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 4514 | if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1) { |
| 4515 | /* |
| 4516 | * Someone really wants this to run. If it's not unfair, run it. |
| 4517 | */ |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4518 | se = cfs_rq->next; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 4519 | } else if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1) { |
| 4520 | /* |
| 4521 | * Prefer last buddy, try to return the CPU to a preempted task. |
| 4522 | */ |
| 4523 | se = cfs_rq->last; |
| 4524 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4525 | |
| 4526 | clear_buddies(cfs_rq, se); |
| 4527 | |
| 4528 | return se; |
| 4529 | } |
| 4530 | |
| 4531 | static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq); |
| 4532 | |
| 4533 | static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) |
| 4534 | { |
| 4535 | /* |
| 4536 | * If still on the runqueue then deactivate_task() |
| 4537 | * was not called and update_curr() has to be done: |
| 4538 | */ |
| 4539 | if (prev->on_rq) |
| 4540 | update_curr(cfs_rq); |
| 4541 | |
| 4542 | /* throttle cfs_rqs exceeding runtime */ |
| 4543 | check_cfs_rq_runtime(cfs_rq); |
| 4544 | |
| 4545 | check_spread(cfs_rq, prev); |
| 4546 | |
| 4547 | if (prev->on_rq) { |
| 4548 | update_stats_wait_start(cfs_rq, prev); |
| 4549 | /* Put 'current' back into the tree. */ |
| 4550 | __enqueue_entity(cfs_rq, prev); |
| 4551 | /* in !on_rq case, update occurred at dequeue */ |
| 4552 | update_load_avg(cfs_rq, prev, 0); |
| 4553 | } |
| 4554 | cfs_rq->curr = NULL; |
| 4555 | } |
| 4556 | |
| 4557 | static void |
| 4558 | entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) |
| 4559 | { |
| 4560 | /* |
| 4561 | * Update run-time statistics of the 'current'. |
| 4562 | */ |
| 4563 | update_curr(cfs_rq); |
| 4564 | |
| 4565 | /* |
| 4566 | * Ensure that runnable average is periodically updated. |
| 4567 | */ |
| 4568 | update_load_avg(cfs_rq, curr, UPDATE_TG); |
| 4569 | update_cfs_group(curr); |
| 4570 | |
| 4571 | #ifdef CONFIG_SCHED_HRTICK |
| 4572 | /* |
| 4573 | * queued ticks are scheduled to match the slice, so don't bother |
| 4574 | * validating it and just reschedule. |
| 4575 | */ |
| 4576 | if (queued) { |
| 4577 | resched_curr(rq_of(cfs_rq)); |
| 4578 | return; |
| 4579 | } |
| 4580 | /* |
| 4581 | * don't let the period tick interfere with the hrtick preemption |
| 4582 | */ |
| 4583 | if (!sched_feat(DOUBLE_TICK) && |
| 4584 | hrtimer_active(&rq_of(cfs_rq)->hrtick_timer)) |
| 4585 | return; |
| 4586 | #endif |
| 4587 | |
| 4588 | if (cfs_rq->nr_running > 1) |
| 4589 | check_preempt_tick(cfs_rq, curr); |
| 4590 | } |
| 4591 | |
| 4592 | |
| 4593 | /************************************************** |
| 4594 | * CFS bandwidth control machinery |
| 4595 | */ |
| 4596 | |
| 4597 | #ifdef CONFIG_CFS_BANDWIDTH |
| 4598 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 4599 | #ifdef CONFIG_JUMP_LABEL |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4600 | static struct static_key __cfs_bandwidth_used; |
| 4601 | |
| 4602 | static inline bool cfs_bandwidth_used(void) |
| 4603 | { |
| 4604 | return static_key_false(&__cfs_bandwidth_used); |
| 4605 | } |
| 4606 | |
| 4607 | void cfs_bandwidth_usage_inc(void) |
| 4608 | { |
| 4609 | static_key_slow_inc_cpuslocked(&__cfs_bandwidth_used); |
| 4610 | } |
| 4611 | |
| 4612 | void cfs_bandwidth_usage_dec(void) |
| 4613 | { |
| 4614 | static_key_slow_dec_cpuslocked(&__cfs_bandwidth_used); |
| 4615 | } |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 4616 | #else /* CONFIG_JUMP_LABEL */ |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4617 | static bool cfs_bandwidth_used(void) |
| 4618 | { |
| 4619 | return true; |
| 4620 | } |
| 4621 | |
| 4622 | void cfs_bandwidth_usage_inc(void) {} |
| 4623 | void cfs_bandwidth_usage_dec(void) {} |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 4624 | #endif /* CONFIG_JUMP_LABEL */ |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4625 | |
| 4626 | /* |
| 4627 | * default period for cfs group bandwidth. |
| 4628 | * default: 0.1s, units: nanoseconds |
| 4629 | */ |
| 4630 | static inline u64 default_cfs_period(void) |
| 4631 | { |
| 4632 | return 100000000ULL; |
| 4633 | } |
| 4634 | |
| 4635 | static inline u64 sched_cfs_bandwidth_slice(void) |
| 4636 | { |
| 4637 | return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC; |
| 4638 | } |
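/*
 * Editor's note -- illustrative usage, assuming the cgroup-v1 CPU
 * controller mounted at /sys/fs/cgroup/cpu ("grp" and the values are
 * made up). Limiting a group to half a CPU with the default 100ms
 * period:
 *
 *	echo 100000 > /sys/fs/cgroup/cpu/grp/cpu.cfs_period_us
 *	echo  50000 > /sys/fs/cgroup/cpu/grp/cpu.cfs_quota_us
 *
 * Each runqueue then pulls runtime from the group-wide pool in slices
 * of sysctl_sched_cfs_bandwidth_slice (5ms by default) via
 * assign_cfs_rq_runtime() below.
 */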
| 4639 | |
| 4640 | /* |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 4641 | * Replenish runtime according to assigned quota. We use sched_clock_cpu |
| 4642 | * directly instead of rq->clock to avoid adding additional synchronization |
| 4643 | * around rq->lock. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4644 | * |
| 4645 | * requires cfs_b->lock |
| 4646 | */ |
| 4647 | void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) |
| 4648 | { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 4649 | if (cfs_b->quota != RUNTIME_INF) |
| 4650 | cfs_b->runtime = cfs_b->quota; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4651 | } |
| 4652 | |
| 4653 | static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) |
| 4654 | { |
| 4655 | return &tg->cfs_bandwidth; |
| 4656 | } |
| 4657 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4658 | /* returns 0 on failure to allocate runtime */ |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 4659 | static int __assign_cfs_rq_runtime(struct cfs_bandwidth *cfs_b, |
| 4660 | struct cfs_rq *cfs_rq, u64 target_runtime) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4661 | { |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 4662 | u64 min_amount, amount = 0; |
| 4663 | |
| 4664 | lockdep_assert_held(&cfs_b->lock); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4665 | |
| 4666 | /* note: this is a positive sum as runtime_remaining <= 0 */ |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 4667 | min_amount = target_runtime - cfs_rq->runtime_remaining; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4668 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4669 | if (cfs_b->quota == RUNTIME_INF) |
| 4670 | amount = min_amount; |
| 4671 | else { |
| 4672 | start_cfs_bandwidth(cfs_b); |
| 4673 | |
| 4674 | if (cfs_b->runtime > 0) { |
| 4675 | amount = min(cfs_b->runtime, min_amount); |
| 4676 | cfs_b->runtime -= amount; |
| 4677 | cfs_b->idle = 0; |
| 4678 | } |
| 4679 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4680 | |
| 4681 | cfs_rq->runtime_remaining += amount; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4682 | |
| 4683 | return cfs_rq->runtime_remaining > 0; |
| 4684 | } |
| 4685 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 4686 | /* returns 0 on failure to allocate runtime */ |
| 4687 | static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) |
| 4688 | { |
| 4689 | struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); |
| 4690 | int ret; |
| 4691 | |
| 4692 | raw_spin_lock(&cfs_b->lock); |
| 4693 | ret = __assign_cfs_rq_runtime(cfs_b, cfs_rq, sched_cfs_bandwidth_slice()); |
| 4694 | raw_spin_unlock(&cfs_b->lock); |
| 4695 | |
| 4696 | return ret; |
| 4697 | } |
| 4698 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4699 | static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) |
| 4700 | { |
| 4701 | /* dock delta_exec before expiring quota (as it could span periods) */ |
| 4702 | cfs_rq->runtime_remaining -= delta_exec; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4703 | |
| 4704 | if (likely(cfs_rq->runtime_remaining > 0)) |
| 4705 | return; |
| 4706 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 4707 | if (cfs_rq->throttled) |
| 4708 | return; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4709 | /* |
| 4710 | * if we're unable to extend our runtime we resched so that the active |
| 4711 | * hierarchy can be throttled |
| 4712 | */ |
| 4713 | if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) |
| 4714 | resched_curr(rq_of(cfs_rq)); |
| 4715 | } |
| 4716 | |
| 4717 | static __always_inline |
| 4718 | void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) |
| 4719 | { |
| 4720 | if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled) |
| 4721 | return; |
| 4722 | |
| 4723 | __account_cfs_rq_runtime(cfs_rq, delta_exec); |
| 4724 | } |
| 4725 | |
| 4726 | static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) |
| 4727 | { |
| 4728 | return cfs_bandwidth_used() && cfs_rq->throttled; |
| 4729 | } |
| 4730 | |
| 4731 | /* check whether cfs_rq, or any parent, is throttled */ |
| 4732 | static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) |
| 4733 | { |
| 4734 | return cfs_bandwidth_used() && cfs_rq->throttle_count; |
| 4735 | } |
| 4736 | |
| 4737 | /* |
| 4738 |  * Ensure that neither of the group entities corresponding to src_cpu nor |
| 4739 |  * dest_cpu is a member of a throttled hierarchy when performing group |
| 4740 | * load-balance operations. |
| 4741 | */ |
| 4742 | static inline int throttled_lb_pair(struct task_group *tg, |
| 4743 | int src_cpu, int dest_cpu) |
| 4744 | { |
| 4745 | struct cfs_rq *src_cfs_rq, *dest_cfs_rq; |
| 4746 | |
| 4747 | src_cfs_rq = tg->cfs_rq[src_cpu]; |
| 4748 | dest_cfs_rq = tg->cfs_rq[dest_cpu]; |
| 4749 | |
| 4750 | return throttled_hierarchy(src_cfs_rq) || |
| 4751 | throttled_hierarchy(dest_cfs_rq); |
| 4752 | } |
| 4753 | |
| 4754 | static int tg_unthrottle_up(struct task_group *tg, void *data) |
| 4755 | { |
| 4756 | struct rq *rq = data; |
| 4757 | struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; |
| 4758 | |
| 4759 | cfs_rq->throttle_count--; |
| 4760 | if (!cfs_rq->throttle_count) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4761 | cfs_rq->throttled_clock_task_time += rq_clock_task(rq) - |
| 4762 | cfs_rq->throttled_clock_task; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 4763 | |
| 4764 | /* Add cfs_rq with already running entity in the list */ |
| 4765 | if (cfs_rq->nr_running >= 1) |
| 4766 | list_add_leaf_cfs_rq(cfs_rq); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4767 | } |
| 4768 | |
| 4769 | return 0; |
| 4770 | } |
| 4771 | |
| 4772 | static int tg_throttle_down(struct task_group *tg, void *data) |
| 4773 | { |
| 4774 | struct rq *rq = data; |
| 4775 | struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; |
| 4776 | |
| 4777 | /* group is entering throttled state, stop time */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 4778 | if (!cfs_rq->throttle_count) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4779 | cfs_rq->throttled_clock_task = rq_clock_task(rq); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 4780 | list_del_leaf_cfs_rq(cfs_rq); |
| 4781 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4782 | cfs_rq->throttle_count++; |
| 4783 | |
| 4784 | return 0; |
| 4785 | } |
| 4786 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 4787 | static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4788 | { |
| 4789 | struct rq *rq = rq_of(cfs_rq); |
| 4790 | struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); |
| 4791 | struct sched_entity *se; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 4792 | long task_delta, idle_task_delta, dequeue = 1; |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 4793 | |
| 4794 | raw_spin_lock(&cfs_b->lock); |
| 4795 | /* This will start the period timer if necessary */ |
| 4796 | if (__assign_cfs_rq_runtime(cfs_b, cfs_rq, 1)) { |
| 4797 | /* |
| 4798 | * We have raced with bandwidth becoming available, and if we |
| 4799 | * actually throttled the timer might not unthrottle us for an |
| 4800 | * entire period. We additionally needed to make sure that any |
| 4801 | * subsequent check_cfs_rq_runtime calls agree not to throttle |
| 4802 | * us, as we may commit to do cfs put_prev+pick_next, so we ask |
| 4803 | * for 1ns of runtime rather than just check cfs_b. |
| 4804 | */ |
| 4805 | dequeue = 0; |
| 4806 | } else { |
| 4807 | list_add_tail_rcu(&cfs_rq->throttled_list, |
| 4808 | &cfs_b->throttled_cfs_rq); |
| 4809 | } |
| 4810 | raw_spin_unlock(&cfs_b->lock); |
| 4811 | |
| 4812 | if (!dequeue) |
| 4813 | return false; /* Throttle no longer required. */ |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4814 | |
| 4815 | se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; |
| 4816 | |
| 4817 | /* freeze hierarchy runnable averages while throttled */ |
| 4818 | rcu_read_lock(); |
| 4819 | walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); |
| 4820 | rcu_read_unlock(); |
| 4821 | |
| 4822 | task_delta = cfs_rq->h_nr_running; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 4823 | idle_task_delta = cfs_rq->idle_h_nr_running; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4824 | for_each_sched_entity(se) { |
| 4825 | struct cfs_rq *qcfs_rq = cfs_rq_of(se); |
| 4826 | /* throttled entity or throttle-on-deactivate */ |
| 4827 | if (!se->on_rq) |
| 4828 | break; |
| 4829 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 4830 | if (dequeue) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4831 | dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 4832 | } else { |
| 4833 | update_load_avg(qcfs_rq, se, 0); |
| 4834 | se_update_runnable(se); |
| 4835 | } |
| 4836 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4837 | qcfs_rq->h_nr_running -= task_delta; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 4838 | qcfs_rq->idle_h_nr_running -= idle_task_delta; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4839 | |
| 4840 | if (qcfs_rq->load.weight) |
| 4841 | dequeue = 0; |
| 4842 | } |
| 4843 | |
| 4844 | if (!se) |
| 4845 | sub_nr_running(rq, task_delta); |
| 4846 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 4847 | /* |
| 4848 | * Note: distribution will already see us throttled via the |
| 4849 | * throttled-list. rq->lock protects completion. |
| 4850 | */ |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4851 | cfs_rq->throttled = 1; |
| 4852 | cfs_rq->throttled_clock = rq_clock(rq); |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 4853 | return true; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4854 | } |
| 4855 | |
| 4856 | void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) |
| 4857 | { |
| 4858 | struct rq *rq = rq_of(cfs_rq); |
| 4859 | struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); |
| 4860 | struct sched_entity *se; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 4861 | long task_delta, idle_task_delta; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4862 | |
| 4863 | se = cfs_rq->tg->se[cpu_of(rq)]; |
| 4864 | |
| 4865 | cfs_rq->throttled = 0; |
| 4866 | |
| 4867 | update_rq_clock(rq); |
| 4868 | |
| 4869 | raw_spin_lock(&cfs_b->lock); |
| 4870 | cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock; |
| 4871 | list_del_rcu(&cfs_rq->throttled_list); |
| 4872 | raw_spin_unlock(&cfs_b->lock); |
| 4873 | |
| 4874 | /* update hierarchical throttle state */ |
| 4875 | walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); |
| 4876 | |
| 4877 | if (!cfs_rq->load.weight) |
| 4878 | return; |
| 4879 | |
| 4880 | task_delta = cfs_rq->h_nr_running; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 4881 | idle_task_delta = cfs_rq->idle_h_nr_running; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4882 | for_each_sched_entity(se) { |
| 4883 | if (se->on_rq) |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 4884 | break; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4885 | cfs_rq = cfs_rq_of(se); |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 4886 | enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP); |
| 4887 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4888 | cfs_rq->h_nr_running += task_delta; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 4889 | cfs_rq->idle_h_nr_running += idle_task_delta; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4890 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 4891 | /* end evaluation on encountering a throttled cfs_rq */ |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4892 | if (cfs_rq_throttled(cfs_rq)) |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 4893 | goto unthrottle_throttle; |
| 4894 | } |
| 4895 | |
| 4896 | for_each_sched_entity(se) { |
| 4897 | cfs_rq = cfs_rq_of(se); |
| 4898 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 4899 | update_load_avg(cfs_rq, se, UPDATE_TG); |
| 4900 | se_update_runnable(se); |
| 4901 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 4902 | cfs_rq->h_nr_running += task_delta; |
| 4903 | cfs_rq->idle_h_nr_running += idle_task_delta; |
| 4904 |  |
| 4906 | /* end evaluation on encountering a throttled cfs_rq */ |
| 4907 | if (cfs_rq_throttled(cfs_rq)) |
| 4908 | goto unthrottle_throttle; |
| 4909 | |
| 4910 | /* |
| 4911 | * One parent has been throttled and cfs_rq removed from the |
| 4912 | * list. Add it back to not break the leaf list. |
| 4913 | */ |
| 4914 | if (throttled_hierarchy(cfs_rq)) |
| 4915 | list_add_leaf_cfs_rq(cfs_rq); |
| 4916 | } |
| 4917 | |
| 4918 |  /* At this point se is NULL and we are at root level */ |
| 4919 | add_nr_running(rq, task_delta); |
| 4920 | |
| 4921 | unthrottle_throttle: |
| 4922 | /* |
| 4923 | * The cfs_rq_throttled() breaks in the above iteration can result in |
| 4924 |  * incomplete leaf list maintenance, which could trigger the |
| 4925 |  * assertion below. |
| 4926 | */ |
| 4927 | for_each_sched_entity(se) { |
| 4928 | cfs_rq = cfs_rq_of(se); |
| 4929 | |
| 4930 | if (list_add_leaf_cfs_rq(cfs_rq)) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4931 | break; |
| 4932 | } |
| 4933 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 4934 | assert_list_leaf_cfs_rq(rq); |
| 4935 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4936 | /* Determine whether we need to wake up potentially idle CPU: */ |
| 4937 | if (rq->curr == rq->idle && rq->cfs.nr_running) |
| 4938 | resched_curr(rq); |
| 4939 | } |
| 4940 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 4941 | static void distribute_cfs_runtime(struct cfs_bandwidth *cfs_b) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4942 | { |
| 4943 | struct cfs_rq *cfs_rq; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 4944 | u64 runtime, remaining = 1; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4945 | |
| 4946 | rcu_read_lock(); |
| 4947 | list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq, |
| 4948 | throttled_list) { |
| 4949 | struct rq *rq = rq_of(cfs_rq); |
| 4950 | struct rq_flags rf; |
| 4951 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 4952 | rq_lock_irqsave(rq, &rf); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4953 | if (!cfs_rq_throttled(cfs_rq)) |
| 4954 | goto next; |
| 4955 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 4956 | /* By the above check, this should never be true */ |
| 4957 | SCHED_WARN_ON(cfs_rq->runtime_remaining > 0); |
| 4958 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 4959 | raw_spin_lock(&cfs_b->lock); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4960 | runtime = -cfs_rq->runtime_remaining + 1; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 4961 | if (runtime > cfs_b->runtime) |
| 4962 | runtime = cfs_b->runtime; |
| 4963 | cfs_b->runtime -= runtime; |
| 4964 | remaining = cfs_b->runtime; |
| 4965 | raw_spin_unlock(&cfs_b->lock); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4966 | |
| 4967 | cfs_rq->runtime_remaining += runtime; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4968 | |
| 4969 | /* we check whether we're throttled above */ |
| 4970 | if (cfs_rq->runtime_remaining > 0) |
| 4971 | unthrottle_cfs_rq(cfs_rq); |
| 4972 | |
| 4973 | next: |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 4974 | rq_unlock_irqrestore(rq, &rf); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4975 | |
| 4976 | if (!remaining) |
| 4977 | break; |
| 4978 | } |
| 4979 | rcu_read_unlock(); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4980 | } |
| 4981 | |
| 4982 | /* |
| 4983 | * Responsible for refilling a task_group's bandwidth and unthrottling its |
| 4984 | * cfs_rqs as appropriate. If there has been no activity within the last |
| 4985 | * period the timer is deactivated until scheduling resumes; cfs_b->idle is |
| 4986 | * used to track this state. |
| 4987 | */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 4988 | static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4989 | { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 4990 | int throttled; |
| 4991 | |
| 4992 | /* no need to continue the timer with no bandwidth constraint */ |
| 4993 | if (cfs_b->quota == RUNTIME_INF) |
| 4994 | goto out_deactivate; |
| 4995 | |
| 4996 | throttled = !list_empty(&cfs_b->throttled_cfs_rq); |
| 4997 | cfs_b->nr_periods += overrun; |
| 4998 | |
| 4999 | /* |
| 5000 | * idle depends on !throttled (for the case of a large deficit), and if |
| 5001 | * we're going inactive then everything else can be deferred |
| 5002 | */ |
| 5003 | if (cfs_b->idle && !throttled) |
| 5004 | goto out_deactivate; |
| 5005 | |
| 5006 | __refill_cfs_bandwidth_runtime(cfs_b); |
| 5007 | |
| 5008 | if (!throttled) { |
| 5009 | /* mark as potentially idle for the upcoming period */ |
| 5010 | cfs_b->idle = 1; |
| 5011 | return 0; |
| 5012 | } |
| 5013 | |
| 5014 | /* account preceding periods in which throttling occurred */ |
| 5015 | cfs_b->nr_throttled += overrun; |
| 5016 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5017 | /* |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 5018 | * This check is repeated as we release cfs_b->lock while we unthrottle. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5019 | */ |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 5020 | while (throttled && cfs_b->runtime > 0) { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 5021 | raw_spin_unlock_irqrestore(&cfs_b->lock, flags); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5022 | /* we can't nest cfs_b->lock while distributing bandwidth */ |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 5023 | distribute_cfs_runtime(cfs_b); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 5024 | raw_spin_lock_irqsave(&cfs_b->lock, flags); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5025 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5026 | throttled = !list_empty(&cfs_b->throttled_cfs_rq); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5027 | } |
| 5028 | |
| 5029 | /* |
| 5030 | * While we are ensured activity in the period following an |
| 5031 | * unthrottle, this also covers the case in which the new bandwidth is |
| 5032 | * insufficient to cover the existing bandwidth deficit. (Forcing the |
| 5033 | * timer to remain active while there are any throttled entities.) |
| 5034 | */ |
| 5035 | cfs_b->idle = 0; |
| 5036 | |
| 5037 | return 0; |
| 5038 | |
| 5039 | out_deactivate: |
| 5040 | return 1; |
| 5041 | } |
| 5042 | |
| 5043 | /* a cfs_rq won't donate quota below this amount */ |
| 5044 | static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC; |
| 5045 | /* minimum remaining period time to redistribute slack quota */ |
| 5046 | static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC; |
| 5047 | /* how long we wait to gather additional slack before distributing */ |
| 5048 | static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC; |
| 5049 | |
| 5050 | /* |
| 5051 | * Are we near the end of the current quota period? |
| 5052 | * |
| 5053 | * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the |
| 5054 | * hrtimer base being cleared by hrtimer_start. In the case of |
| 5055 | * migrate_hrtimers, base is never cleared, so we are fine. |
| 5056 | */ |
| 5057 | static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire) |
| 5058 | { |
| 5059 | struct hrtimer *refresh_timer = &cfs_b->period_timer; |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 5060 | s64 remaining; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5061 | |
| 5062 | /* if the call-back is running a quota refresh is already occurring */ |
| 5063 | if (hrtimer_callback_running(refresh_timer)) |
| 5064 | return 1; |
| 5065 | |
| 5066 | /* is a quota refresh about to occur? */ |
| 5067 | remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer)); |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 5068 | if (remaining < (s64)min_expire) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5069 | return 1; |
| 5070 | |
| 5071 | return 0; |
| 5072 | } |
| 5073 | |
| 5074 | static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b) |
| 5075 | { |
| 5076 | u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration; |
| 5077 | |
| 5078 | /* if there's a quota refresh soon don't bother with slack */ |
| 5079 | if (runtime_refresh_within(cfs_b, min_left)) |
| 5080 | return; |
| 5081 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 5082 | /* don't push forwards an existing deferred unthrottle */ |
| 5083 | if (cfs_b->slack_started) |
| 5084 | return; |
| 5085 | cfs_b->slack_started = true; |
| 5086 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5087 | hrtimer_start(&cfs_b->slack_timer, |
| 5088 | ns_to_ktime(cfs_bandwidth_slack_period), |
| 5089 | HRTIMER_MODE_REL); |
| 5090 | } |
| 5091 | |
| 5092 | /* we know any runtime found here is valid as update_curr() precedes return */ |
| 5093 | static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq) |
| 5094 | { |
| 5095 | struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); |
| 5096 | s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime; |
| 5097 | |
| 5098 | if (slack_runtime <= 0) |
| 5099 | return; |
| 5100 | |
| 5101 | raw_spin_lock(&cfs_b->lock); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 5102 | if (cfs_b->quota != RUNTIME_INF) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5103 | cfs_b->runtime += slack_runtime; |
| 5104 | |
| 5105 | /* we are under rq->lock, defer unthrottling using a timer */ |
| 5106 | if (cfs_b->runtime > sched_cfs_bandwidth_slice() && |
| 5107 | !list_empty(&cfs_b->throttled_cfs_rq)) |
| 5108 | start_cfs_slack_bandwidth(cfs_b); |
| 5109 | } |
| 5110 | raw_spin_unlock(&cfs_b->lock); |
| 5111 | |
| 5112 | /* even if it's not valid for return we don't want to try again */ |
| 5113 | cfs_rq->runtime_remaining -= slack_runtime; |
| 5114 | } |
| 5115 | |
| 5116 | static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) |
| 5117 | { |
| 5118 | if (!cfs_bandwidth_used()) |
| 5119 | return; |
| 5120 | |
| 5121 | if (!cfs_rq->runtime_enabled || cfs_rq->nr_running) |
| 5122 | return; |
| 5123 | |
| 5124 | __return_cfs_rq_runtime(cfs_rq); |
| 5125 | } |
| 5126 | |
| 5127 | /* |
| 5128 | * This is done with a timer (instead of inline with bandwidth return) since |
| 5129 | * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs. |
| 5130 | */ |
| 5131 | static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) |
| 5132 | { |
| 5133 | u64 runtime = 0, slice = sched_cfs_bandwidth_slice(); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 5134 | unsigned long flags; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5135 | |
| 5136 | /* confirm we're still not at a refresh boundary */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 5137 | raw_spin_lock_irqsave(&cfs_b->lock, flags); |
| 5138 | cfs_b->slack_started = false; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5139 | |
| 5140 | if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 5141 | raw_spin_unlock_irqrestore(&cfs_b->lock, flags); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5142 | return; |
| 5143 | } |
| 5144 | |
| 5145 | if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) |
| 5146 | runtime = cfs_b->runtime; |
| 5147 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 5148 | raw_spin_unlock_irqrestore(&cfs_b->lock, flags); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5149 | |
| 5150 | if (!runtime) |
| 5151 | return; |
| 5152 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 5153 | distribute_cfs_runtime(cfs_b); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5154 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 5155 | raw_spin_lock_irqsave(&cfs_b->lock, flags); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 5156 | raw_spin_unlock_irqrestore(&cfs_b->lock, flags); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5157 | } |
| 5158 | |
| 5159 | /* |
| 5160 | * When a group wakes up we want to make sure that its quota is not already |
| 5161 | * expired/exceeded, otherwise it may be allowed to steal additional ticks of |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 5162 | * runtime as update_curr() throttling cannot trigger until it's on-rq. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5163 | */ |
| 5164 | static void check_enqueue_throttle(struct cfs_rq *cfs_rq) |
| 5165 | { |
| 5166 | if (!cfs_bandwidth_used()) |
| 5167 | return; |
| 5168 | |
| 5169 | /* an active group must be handled by the update_curr()->put() path */ |
| 5170 | if (!cfs_rq->runtime_enabled || cfs_rq->curr) |
| 5171 | return; |
| 5172 | |
| 5173 | /* ensure the group is not already throttled */ |
| 5174 | if (cfs_rq_throttled(cfs_rq)) |
| 5175 | return; |
| 5176 | |
| 5177 | /* update runtime allocation */ |
| 5178 | account_cfs_rq_runtime(cfs_rq, 0); |
| 5179 | if (cfs_rq->runtime_remaining <= 0) |
| 5180 | throttle_cfs_rq(cfs_rq); |
| 5181 | } |
| 5182 | |
| 5183 | static void sync_throttle(struct task_group *tg, int cpu) |
| 5184 | { |
| 5185 | struct cfs_rq *pcfs_rq, *cfs_rq; |
| 5186 | |
| 5187 | if (!cfs_bandwidth_used()) |
| 5188 | return; |
| 5189 | |
| 5190 | if (!tg->parent) |
| 5191 | return; |
| 5192 | |
| 5193 | cfs_rq = tg->cfs_rq[cpu]; |
| 5194 | pcfs_rq = tg->parent->cfs_rq[cpu]; |
| 5195 | |
| 5196 | cfs_rq->throttle_count = pcfs_rq->throttle_count; |
| 5197 | cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu)); |
| 5198 | } |
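| | /* |
| |  * E.g. a child group attached while its parent hierarchy is throttled |
| |  * inherits the parent's throttle_count and stamps the current task |
| |  * clock, so throttled_hierarchy() and the frozen-clock accounting are |
| |  * consistent from the child's very first enqueue. |
| |  */ |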
| 5199 | |
| 5200 | /* conditionally throttle active cfs_rq's from put_prev_entity() */ |
| 5201 | static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) |
| 5202 | { |
| 5203 | if (!cfs_bandwidth_used()) |
| 5204 | return false; |
| 5205 | |
| 5206 | if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0)) |
| 5207 | return false; |
| 5208 | |
| 5209 | /* |
| 5210 | * it's possible for a throttled entity to be forced into a running |
| 5211 | 	 * state (e.g. set_curr_task); in this case we're finished. |
| 5212 | */ |
| 5213 | if (cfs_rq_throttled(cfs_rq)) |
| 5214 | return true; |
| 5215 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 5216 | return throttle_cfs_rq(cfs_rq); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5217 | } |
| 5218 | |
| 5219 | static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer) |
| 5220 | { |
| 5221 | struct cfs_bandwidth *cfs_b = |
| 5222 | container_of(timer, struct cfs_bandwidth, slack_timer); |
| 5223 | |
| 5224 | do_sched_cfs_slack_timer(cfs_b); |
| 5225 | |
| 5226 | return HRTIMER_NORESTART; |
| 5227 | } |
| 5228 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 5229 | extern const u64 max_cfs_quota_period; |
| 5230 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5231 | static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) |
| 5232 | { |
| 5233 | struct cfs_bandwidth *cfs_b = |
| 5234 | container_of(timer, struct cfs_bandwidth, period_timer); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 5235 | unsigned long flags; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5236 | int overrun; |
| 5237 | int idle = 0; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 5238 | int count = 0; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5239 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 5240 | raw_spin_lock_irqsave(&cfs_b->lock, flags); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5241 | for (;;) { |
| 5242 | overrun = hrtimer_forward_now(timer, cfs_b->period); |
| 5243 | if (!overrun) |
| 5244 | break; |
| 5245 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 5246 | idle = do_sched_cfs_period_timer(cfs_b, overrun, flags); |
| 5247 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 5248 | if (++count > 3) { |
| 5249 | u64 new, old = ktime_to_ns(cfs_b->period); |
| 5250 | |
| 5251 | /* |
| 5252 | * Grow period by a factor of 2 to avoid losing precision. |
| 5253 | * Precision loss in the quota/period ratio can cause __cfs_schedulable |
| 5254 | * to fail. |
| 5255 | */ |
| 5256 | new = old * 2; |
| 5257 | if (new < max_cfs_quota_period) { |
| 5258 | cfs_b->period = ns_to_ktime(new); |
| 5259 | cfs_b->quota *= 2; |
| 5260 | |
| 5261 | pr_warn_ratelimited( |
| 5262 | "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n", |
| 5263 | smp_processor_id(), |
| 5264 | div_u64(new, NSEC_PER_USEC), |
| 5265 | div_u64(cfs_b->quota, NSEC_PER_USEC)); |
| 5266 | } else { |
| 5267 | pr_warn_ratelimited( |
| 5268 | "cfs_period_timer[cpu%d]: period too short, but cannot scale up without losing precision (cfs_period_us = %lld, cfs_quota_us = %lld)\n", |
| 5269 | smp_processor_id(), |
| 5270 | div_u64(old, NSEC_PER_USEC), |
| 5271 | div_u64(cfs_b->quota, NSEC_PER_USEC)); |
| 5272 | } |
| 5273 | |
| 5274 | /* reset count so we don't come right back in here */ |
| 5275 | count = 0; |
| 5276 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5277 | } |
| 5278 | if (idle) |
| 5279 | cfs_b->period_active = 0; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 5280 | raw_spin_unlock_irqrestore(&cfs_b->lock, flags); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5281 | |
| 5282 | return idle ? HRTIMER_NORESTART : HRTIMER_RESTART; |
| 5283 | } |
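| | /* |
| |  * Example of the scaling above, with hypothetical values: period = 1ms, |
| |  * quota = 500us. Once a single callback has handled more than three |
| |  * overruns, both are doubled to 2ms/1ms: the quota/period ratio (50%) |
| |  * is preserved, but the timer fires half as often, bounding the time |
| |  * spent in this handler. |
| |  */ |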
| 5284 | |
| 5285 | void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) |
| 5286 | { |
| 5287 | raw_spin_lock_init(&cfs_b->lock); |
| 5288 | cfs_b->runtime = 0; |
| 5289 | cfs_b->quota = RUNTIME_INF; |
| 5290 | cfs_b->period = ns_to_ktime(default_cfs_period()); |
| 5291 | |
| 5292 | INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); |
| 5293 | hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); |
| 5294 | cfs_b->period_timer.function = sched_cfs_period_timer; |
| 5295 | hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
| 5296 | cfs_b->slack_timer.function = sched_cfs_slack_timer; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 5297 | cfs_b->slack_started = false; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5298 | } |
| 5299 | |
| 5300 | static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) |
| 5301 | { |
| 5302 | cfs_rq->runtime_enabled = 0; |
| 5303 | INIT_LIST_HEAD(&cfs_rq->throttled_list); |
| 5304 | } |
| 5305 | |
| 5306 | void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) |
| 5307 | { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5308 | lockdep_assert_held(&cfs_b->lock); |
| 5309 | |
| 5310 | if (cfs_b->period_active) |
| 5311 | return; |
| 5312 | |
| 5313 | cfs_b->period_active = 1; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 5314 | hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5315 | hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED); |
| 5316 | } |
| 5317 | |
| 5318 | static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) |
| 5319 | { |
| 5320 | /* init_cfs_bandwidth() was not called */ |
| 5321 | if (!cfs_b->throttled_cfs_rq.next) |
| 5322 | return; |
| 5323 | |
| 5324 | hrtimer_cancel(&cfs_b->period_timer); |
| 5325 | hrtimer_cancel(&cfs_b->slack_timer); |
| 5326 | } |
| 5327 | |
| 5328 | /* |
| 5329 | * Both these CPU hotplug callbacks race against unregister_fair_sched_group() |
| 5330 | * |
| 5331 | * The race is harmless, since modifying bandwidth settings of unhooked group |
| 5332 | * bits doesn't do much. |
| 5333 | */ |
| 5334 | |
| 5335 | /* cpu online callback */ |
| 5336 | static void __maybe_unused update_runtime_enabled(struct rq *rq) |
| 5337 | { |
| 5338 | struct task_group *tg; |
| 5339 | |
| 5340 | lockdep_assert_held(&rq->lock); |
| 5341 | |
| 5342 | rcu_read_lock(); |
| 5343 | list_for_each_entry_rcu(tg, &task_groups, list) { |
| 5344 | struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; |
| 5345 | struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; |
| 5346 | |
| 5347 | raw_spin_lock(&cfs_b->lock); |
| 5348 | cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF; |
| 5349 | raw_spin_unlock(&cfs_b->lock); |
| 5350 | } |
| 5351 | rcu_read_unlock(); |
| 5352 | } |
| 5353 | |
| 5354 | /* cpu offline callback */ |
| 5355 | static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq) |
| 5356 | { |
| 5357 | struct task_group *tg; |
| 5358 | |
| 5359 | lockdep_assert_held(&rq->lock); |
| 5360 | |
| 5361 | rcu_read_lock(); |
| 5362 | list_for_each_entry_rcu(tg, &task_groups, list) { |
| 5363 | struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; |
| 5364 | |
| 5365 | if (!cfs_rq->runtime_enabled) |
| 5366 | continue; |
| 5367 | |
| 5368 | /* |
| 5369 | * clock_task is not advancing so we just need to make sure |
| 5370 | * there's some valid quota amount |
| 5371 | */ |
| 5372 | cfs_rq->runtime_remaining = 1; |
| 5373 | /* |
| 5374 | * Offline rq is schedulable till CPU is completely disabled |
| 5375 | * in take_cpu_down(), so we prevent new cfs throttling here. |
| 5376 | */ |
| 5377 | cfs_rq->runtime_enabled = 0; |
| 5378 | |
| 5379 | if (cfs_rq_throttled(cfs_rq)) |
| 5380 | unthrottle_cfs_rq(cfs_rq); |
| 5381 | } |
| 5382 | rcu_read_unlock(); |
| 5383 | } |
| 5384 | |
| 5385 | #else /* CONFIG_CFS_BANDWIDTH */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 5386 | |
| 5387 | static inline bool cfs_bandwidth_used(void) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5388 | { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 5389 | return false; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5390 | } |
| 5391 | |
| 5392 | static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {} |
| 5393 | static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; } |
| 5394 | static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} |
| 5395 | static inline void sync_throttle(struct task_group *tg, int cpu) {} |
| 5396 | static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} |
| 5397 | |
| 5398 | static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) |
| 5399 | { |
| 5400 | return 0; |
| 5401 | } |
| 5402 | |
| 5403 | static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) |
| 5404 | { |
| 5405 | return 0; |
| 5406 | } |
| 5407 | |
| 5408 | static inline int throttled_lb_pair(struct task_group *tg, |
| 5409 | int src_cpu, int dest_cpu) |
| 5410 | { |
| 5411 | return 0; |
| 5412 | } |
| 5413 | |
| 5414 | void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} |
| 5415 | |
| 5416 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 5417 | static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} |
| 5418 | #endif |
| 5419 | |
| 5420 | static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) |
| 5421 | { |
| 5422 | return NULL; |
| 5423 | } |
| 5424 | static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} |
| 5425 | static inline void update_runtime_enabled(struct rq *rq) {} |
| 5426 | static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {} |
| 5427 | |
| 5428 | #endif /* CONFIG_CFS_BANDWIDTH */ |
| 5429 | |
| 5430 | /************************************************** |
| 5431 | * CFS operations on tasks: |
| 5432 | */ |
| 5433 | |
| 5434 | #ifdef CONFIG_SCHED_HRTICK |
| 5435 | static void hrtick_start_fair(struct rq *rq, struct task_struct *p) |
| 5436 | { |
| 5437 | struct sched_entity *se = &p->se; |
| 5438 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
| 5439 | |
| 5440 | SCHED_WARN_ON(task_rq(p) != rq); |
| 5441 | |
| 5442 | if (rq->cfs.h_nr_running > 1) { |
| 5443 | u64 slice = sched_slice(cfs_rq, se); |
| 5444 | u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; |
| 5445 | s64 delta = slice - ran; |
| 5446 | |
| 5447 | if (delta < 0) { |
| 5448 | if (rq->curr == p) |
| 5449 | resched_curr(rq); |
| 5450 | return; |
| 5451 | } |
| 5452 | hrtick_start(rq, delta); |
| 5453 | } |
| 5454 | } |
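| | /* |
| |  * Example with hypothetical values: a 4ms sched_slice() of which 1.5ms |
| |  * has been consumed programs the hrtimer 2.5ms out, so preemption |
| |  * lands exactly at slice expiry; once the task has run beyond its |
| |  * slice it is resched'd immediately rather than arming a timer in the |
| |  * past. |
| |  */ |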
| 5455 | |
| 5456 | /* |
| 5457 | * called from enqueue/dequeue and updates the hrtick when the |
| 5458 | * current task is from our class and nr_running is low enough |
| 5459 | * to matter. |
| 5460 | */ |
| 5461 | static void hrtick_update(struct rq *rq) |
| 5462 | { |
| 5463 | struct task_struct *curr = rq->curr; |
| 5464 | |
| 5465 | if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class) |
| 5466 | return; |
| 5467 | |
| 5468 | if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) |
| 5469 | hrtick_start_fair(rq, curr); |
| 5470 | } |
| 5471 | #else /* !CONFIG_SCHED_HRTICK */ |
| 5472 | static inline void |
| 5473 | hrtick_start_fair(struct rq *rq, struct task_struct *p) |
| 5474 | { |
| 5475 | } |
| 5476 | |
| 5477 | static inline void hrtick_update(struct rq *rq) |
| 5478 | { |
| 5479 | } |
| 5480 | #endif |
| 5481 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 5482 | #ifdef CONFIG_SMP |
| 5483 | static inline unsigned long cpu_util(int cpu); |
| 5484 | |
| 5485 | static inline bool cpu_overutilized(int cpu) |
| 5486 | { |
| 5487 | return !fits_capacity(cpu_util(cpu), capacity_of(cpu)); |
| 5488 | } |
| 5489 | |
| 5490 | static inline void update_overutilized_status(struct rq *rq) |
| 5491 | { |
| 5492 | if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) { |
| 5493 | WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED); |
| 5494 | trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED); |
| 5495 | } |
| 5496 | } |
| 5497 | #else |
| 5498 | static inline void update_overutilized_status(struct rq *rq) { } |
| 5499 | #endif |
| 5500 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 5501 | /* Runqueue only has SCHED_IDLE tasks enqueued */ |
| 5502 | static int sched_idle_rq(struct rq *rq) |
| 5503 | { |
| 5504 | return unlikely(rq->nr_running == rq->cfs.idle_h_nr_running && |
| 5505 | rq->nr_running); |
| 5506 | } |
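| | /* |
| |  * E.g. a runqueue holding three tasks, all SCHED_IDLE, has |
| |  * nr_running == idle_h_nr_running == 3 and is reported idle here; an |
| |  * empty runqueue (nr_running == 0) is not. |
| |  */ |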
| 5507 | |
| 5508 | #ifdef CONFIG_SMP |
| 5509 | static int sched_idle_cpu(int cpu) |
| 5510 | { |
| 5511 | return sched_idle_rq(cpu_rq(cpu)); |
| 5512 | } |
| 5513 | #endif |
| 5514 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5515 | /* |
| 5516 | * The enqueue_task method is called before nr_running is |
| 5517 | * increased. Here we update the fair scheduling stats and |
| 5518 | * then put the task into the rbtree: |
| 5519 | */ |
| 5520 | static void |
| 5521 | enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) |
| 5522 | { |
| 5523 | struct cfs_rq *cfs_rq; |
| 5524 | struct sched_entity *se = &p->se; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 5525 | int idle_h_nr_running = task_has_idle_policy(p); |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 5526 | int task_new = !(flags & ENQUEUE_WAKEUP); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5527 | |
| 5528 | /* |
| 5529 | * The code below (indirectly) updates schedutil which looks at |
| 5530 | * the cfs_rq utilization to select a frequency. |
| 5531 | * Let's add the task's estimated utilization to the cfs_rq's |
| 5532 | * estimated utilization, before we update schedutil. |
| 5533 | */ |
| 5534 | util_est_enqueue(&rq->cfs, p); |
| 5535 | |
| 5536 | /* |
| 5537 | * If in_iowait is set, the code below may not trigger any cpufreq |
| 5538 | * utilization updates, so do it here explicitly with the IOWAIT flag |
| 5539 | * passed. |
| 5540 | */ |
| 5541 | if (p->in_iowait) |
| 5542 | cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT); |
| 5543 | |
| 5544 | for_each_sched_entity(se) { |
| 5545 | if (se->on_rq) |
| 5546 | break; |
| 5547 | cfs_rq = cfs_rq_of(se); |
| 5548 | enqueue_entity(cfs_rq, se, flags); |
| 5549 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5550 | cfs_rq->h_nr_running++; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 5551 | cfs_rq->idle_h_nr_running += idle_h_nr_running; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5552 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 5553 | /* end evaluation on encountering a throttled cfs_rq */ |
| 5554 | if (cfs_rq_throttled(cfs_rq)) |
| 5555 | goto enqueue_throttle; |
| 5556 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5557 | flags = ENQUEUE_WAKEUP; |
| 5558 | } |
| 5559 | |
| 5560 | for_each_sched_entity(se) { |
| 5561 | cfs_rq = cfs_rq_of(se); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5562 | |
| 5563 | update_load_avg(cfs_rq, se, UPDATE_TG); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 5564 | se_update_runnable(se); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5565 | update_cfs_group(se); |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 5566 | |
| 5567 | cfs_rq->h_nr_running++; |
| 5568 | cfs_rq->idle_h_nr_running += idle_h_nr_running; |
| 5569 | |
| 5570 | /* end evaluation on encountering a throttled cfs_rq */ |
| 5571 | if (cfs_rq_throttled(cfs_rq)) |
| 5572 | goto enqueue_throttle; |
| 5573 | |
| 5574 | /* |
| 5575 | * One parent has been throttled and cfs_rq removed from the |
| 5576 | * list. Add it back to not break the leaf list. |
| 5577 | */ |
| 5578 | if (throttled_hierarchy(cfs_rq)) |
| 5579 | list_add_leaf_cfs_rq(cfs_rq); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5580 | } |
| 5581 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 5582 | /* At this point se is NULL and we are at root level */ |
| 5583 | add_nr_running(rq, 1); |
| 5584 | |
| 5585 | /* |
| 5586 | * Since new tasks are assigned an initial util_avg equal to |
| 5587 | * half of the spare capacity of their CPU, tiny tasks have the |
| 5588 | * ability to cross the overutilized threshold, which will |
| 5589 | * result in the load balancer ruining all the task placement |
| 5590 | * done by EAS. As a way to mitigate that effect, do not account |
| 5591 | * for the first enqueue operation of new tasks during the |
| 5592 | * overutilized flag detection. |
| 5593 | * |
| 5594 | * A better way of solving this problem would be to wait for |
| 5595 | * the PELT signals of tasks to converge before taking them |
| 5596 | * into account, but that is not straightforward to implement, |
| 5597 | * and the following generally works well enough in practice. |
| 5598 | */ |
| 5599 | if (!task_new) |
| 5600 | update_overutilized_status(rq); |
| 5601 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 5602 | enqueue_throttle: |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 5603 | if (cfs_bandwidth_used()) { |
| 5604 | /* |
| 5605 | 		 * When bandwidth control is enabled, the cfs_rq_throttled() |
| 5606 | 		 * breaks in the above iteration can leave the leaf list |
| 5607 | 		 * incompletely maintained, which would trigger the assertion |
| 5608 | 		 * below. |
| 5609 | */ |
| 5610 | for_each_sched_entity(se) { |
| 5611 | cfs_rq = cfs_rq_of(se); |
| 5612 | |
| 5613 | if (list_add_leaf_cfs_rq(cfs_rq)) |
| 5614 | break; |
| 5615 | } |
| 5616 | } |
| 5617 | |
| 5618 | assert_list_leaf_cfs_rq(rq); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5619 | |
| 5620 | hrtick_update(rq); |
| 5621 | } |
| 5622 | |
| 5623 | static void set_next_buddy(struct sched_entity *se); |
| 5624 | |
| 5625 | /* |
| 5626 | * The dequeue_task method is called before nr_running is |
| 5627 | * decreased. We remove the task from the rbtree and |
| 5628 | * update the fair scheduling stats: |
| 5629 | */ |
| 5630 | static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) |
| 5631 | { |
| 5632 | struct cfs_rq *cfs_rq; |
| 5633 | struct sched_entity *se = &p->se; |
| 5634 | int task_sleep = flags & DEQUEUE_SLEEP; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 5635 | int idle_h_nr_running = task_has_idle_policy(p); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 5636 | bool was_sched_idle = sched_idle_rq(rq); |
| 5637 | |
| 5638 | util_est_dequeue(&rq->cfs, p); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5639 | |
| 5640 | for_each_sched_entity(se) { |
| 5641 | cfs_rq = cfs_rq_of(se); |
| 5642 | dequeue_entity(cfs_rq, se, flags); |
| 5643 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5644 | cfs_rq->h_nr_running--; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 5645 | cfs_rq->idle_h_nr_running -= idle_h_nr_running; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5646 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 5647 | /* end evaluation on encountering a throttled cfs_rq */ |
| 5648 | if (cfs_rq_throttled(cfs_rq)) |
| 5649 | goto dequeue_throttle; |
| 5650 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5651 | /* Don't dequeue parent if it has other entities besides us */ |
| 5652 | if (cfs_rq->load.weight) { |
| 5653 | /* Avoid re-evaluating load for this entity: */ |
| 5654 | se = parent_entity(se); |
| 5655 | /* |
| 5656 | * Bias pick_next to pick a task from this cfs_rq, as |
| 5657 | * p is sleeping when it is within its sched_slice. |
| 5658 | */ |
| 5659 | if (task_sleep && se && !throttled_hierarchy(cfs_rq)) |
| 5660 | set_next_buddy(se); |
| 5661 | break; |
| 5662 | } |
| 5663 | flags |= DEQUEUE_SLEEP; |
| 5664 | } |
| 5665 | |
| 5666 | for_each_sched_entity(se) { |
| 5667 | cfs_rq = cfs_rq_of(se); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5668 | |
| 5669 | update_load_avg(cfs_rq, se, UPDATE_TG); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 5670 | se_update_runnable(se); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5671 | update_cfs_group(se); |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 5672 | |
| 5673 | cfs_rq->h_nr_running--; |
| 5674 | cfs_rq->idle_h_nr_running -= idle_h_nr_running; |
| 5675 | |
| 5676 | /* end evaluation on encountering a throttled cfs_rq */ |
| 5677 | if (cfs_rq_throttled(cfs_rq)) |
| 5678 | goto dequeue_throttle; |
| 5679 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5680 | } |
| 5681 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 5682 | /* At this point se is NULL and we are at root level */ |
| 5683 | sub_nr_running(rq, 1); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5684 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 5685 | /* balance early to pull high priority tasks */ |
| 5686 | if (unlikely(!was_sched_idle && sched_idle_rq(rq))) |
| 5687 | rq->next_balance = jiffies; |
| 5688 | |
| 5689 | dequeue_throttle: |
| 5690 | util_est_update(&rq->cfs, p, task_sleep); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5691 | hrtick_update(rq); |
| 5692 | } |
| 5693 | |
| 5694 | #ifdef CONFIG_SMP |
| 5695 | |
| 5696 | /* Working cpumask for: load_balance, load_balance_newidle. */ |
| 5697 | DEFINE_PER_CPU(cpumask_var_t, load_balance_mask); |
| 5698 | DEFINE_PER_CPU(cpumask_var_t, select_idle_mask); |
| 5699 | |
| 5700 | #ifdef CONFIG_NO_HZ_COMMON |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5701 | |
| 5702 | static struct { |
| 5703 | cpumask_var_t idle_cpus_mask; |
| 5704 | atomic_t nr_cpus; |
| 5705 | 	int has_blocked; /* Idle CPUs have blocked load */ |
| 5706 | unsigned long next_balance; /* in jiffy units */ |
| 5707 | unsigned long next_blocked; /* Next update of blocked load in jiffies */ |
| 5708 | } nohz ____cacheline_aligned; |
| 5709 | |
| 5710 | #endif /* CONFIG_NO_HZ_COMMON */ |
| 5711 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 5712 | static unsigned long cpu_load(struct rq *rq) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5713 | { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 5714 | return cfs_rq_load_avg(&rq->cfs); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5715 | } |
| 5716 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 5717 | /* |
| 5718 | * cpu_load_without - compute CPU load without any contributions from *p |
| 5719 | * @cpu: the CPU which load is requested |
| 5720 | * @p: the task which load should be discounted |
| 5721 | * |
| 5722 | * The load of a CPU is defined by the load of tasks currently enqueued on that |
| 5723 | * CPU as well as tasks which are currently sleeping after an execution on that |
| 5724 | * CPU. |
| 5725 | * |
| 5726 | * This method returns the load of the specified CPU by discounting the load of |
| 5727 | * the specified task, whenever the task is currently contributing to the CPU |
| 5728 | * load. |
| 5729 | */ |
| 5730 | static unsigned long cpu_load_without(struct rq *rq, struct task_struct *p) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5731 | { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 5732 | struct cfs_rq *cfs_rq; |
| 5733 | unsigned int load; |
| 5734 | |
| 5735 | /* Task has no contribution or is new */ |
| 5736 | if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) |
| 5737 | return cpu_load(rq); |
| 5738 | |
| 5739 | cfs_rq = &rq->cfs; |
| 5740 | load = READ_ONCE(cfs_rq->avg.load_avg); |
| 5741 | |
| 5742 | /* Discount task's util from CPU's util */ |
| 5743 | lsub_positive(&load, task_h_load(p)); |
| 5744 | |
| 5745 | return load; |
| 5746 | } |
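| | /* |
| |  * E.g., with hypothetical numbers: a CPU whose load_avg is 800 and |
| |  * where *p contributes task_h_load(p) = 300 reports 500 here, letting |
| |  * the wakeup path compare candidate CPUs as if p had already left its |
| |  * previous CPU. |
| |  */ |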
| 5747 | |
| 5748 | static unsigned long cpu_runnable(struct rq *rq) |
| 5749 | { |
| 5750 | return cfs_rq_runnable_avg(&rq->cfs); |
| 5751 | } |
| 5752 | |
| 5753 | static unsigned long cpu_runnable_without(struct rq *rq, struct task_struct *p) |
| 5754 | { |
| 5755 | struct cfs_rq *cfs_rq; |
| 5756 | unsigned int runnable; |
| 5757 | |
| 5758 | /* Task has no contribution or is new */ |
| 5759 | if (cpu_of(rq) != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) |
| 5760 | return cpu_runnable(rq); |
| 5761 | |
| 5762 | cfs_rq = &rq->cfs; |
| 5763 | runnable = READ_ONCE(cfs_rq->avg.runnable_avg); |
| 5764 | |
| 5765 | /* Discount task's runnable from CPU's runnable */ |
| 5766 | lsub_positive(&runnable, p->se.avg.runnable_avg); |
| 5767 | |
| 5768 | return runnable; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5769 | } |
| 5770 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5771 | static unsigned long capacity_of(int cpu) |
| 5772 | { |
| 5773 | return cpu_rq(cpu)->cpu_capacity; |
| 5774 | } |
| 5775 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5776 | static void record_wakee(struct task_struct *p) |
| 5777 | { |
| 5778 | /* |
| 5779 | 	 * Only decay a single time; tasks that have less than 1 wakeup per |
| 5780 | * jiffy will not have built up many flips. |
| 5781 | */ |
| 5782 | if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) { |
| 5783 | current->wakee_flips >>= 1; |
| 5784 | current->wakee_flip_decay_ts = jiffies; |
| 5785 | } |
| 5786 | |
| 5787 | if (current->last_wakee != p) { |
| 5788 | current->last_wakee = p; |
| 5789 | current->wakee_flips++; |
| 5790 | } |
| 5791 | } |
| 5792 | |
| 5793 | /* |
| 5794 | * Detect M:N waker/wakee relationships via a switching-frequency heuristic. |
| 5795 | * |
| 5796 | * A waker of many should wake a different task than the one last awakened |
| 5797 | * at a frequency roughly N times higher than one of its wakees. |
| 5798 | * |
| 5799 | * In order to determine whether we should let the load spread vs consolidating |
| 5800 | * to shared cache, we look for a minimum 'flip' frequency of llc_size in one |
| 5801 | * partner, and a factor of llc_size higher frequency in the other. |
| 5802 | * |
| 5803 | * With both conditions met, we can be relatively sure that the relationship is |
| 5804 | * non-monogamous, with partner count exceeding socket size. |
| 5805 | * |
| 5806 | * Waker/wakee being client/server, worker/dispatcher, interrupt source or |
| 5807 | * whatever is irrelevant, spread criteria is apparent partner count exceeds |
| 5808 | * socket size. |
| 5809 | */ |
| 5810 | static int wake_wide(struct task_struct *p) |
| 5811 | { |
| 5812 | unsigned int master = current->wakee_flips; |
| 5813 | unsigned int slave = p->wakee_flips; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 5814 | int factor = __this_cpu_read(sd_llc_size); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5815 | |
| 5816 | if (master < slave) |
| 5817 | swap(master, slave); |
| 5818 | if (slave < factor || master < slave * factor) |
| 5819 | return 0; |
| 5820 | return 1; |
| 5821 | } |
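| | /* |
| |  * Worked example with hypothetical counts: on an LLC of 8 CPUs |
| |  * (factor = 8), a waker with wakee_flips = 80 and a wakee with |
| |  * wakee_flips = 9 passes both tests (9 >= 8 and 80 >= 9 * 8), so we |
| |  * return 1 and let the pair spread rather than pulling the wakee onto |
| |  * the waker's cache. |
| |  */ |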
| 5822 | |
| 5823 | /* |
| 5824 | * The purpose of wake_affine() is to quickly determine on which CPU we can run |
| 5825 | * soonest. For the purpose of speed we only consider the waking and previous |
| 5826 | * CPU. |
| 5827 | * |
| 5828 | * wake_affine_idle() - only considers 'now', it checks if the waking CPU is |
| 5829 | * cache-affine and is (or will be) idle. |
| 5830 | * |
| 5831 | * wake_affine_weight() - considers the weight to reflect the average |
| 5832 | * scheduling latency of the CPUs. This seems to work |
| 5833 | * for the overloaded case. |
| 5834 | */ |
| 5835 | static int |
| 5836 | wake_affine_idle(int this_cpu, int prev_cpu, int sync) |
| 5837 | { |
| 5838 | /* |
| 5839 | * If this_cpu is idle, it implies the wakeup is from interrupt |
| 5840 | * context. Only allow the move if cache is shared. Otherwise an |
| 5841 | * interrupt intensive workload could force all tasks onto one |
| 5842 | * node depending on the IO topology or IRQ affinity settings. |
| 5843 | * |
| 5844 | * If the prev_cpu is idle and cache affine then avoid a migration. |
| 5845 | * There is no guarantee that the cache hot data from an interrupt |
| 5846 | * is more important than cache hot data on the prev_cpu and from |
| 5847 | * a cpufreq perspective, it's better to have higher utilisation |
| 5848 | * on one CPU. |
| 5849 | */ |
| 5850 | if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu)) |
| 5851 | return available_idle_cpu(prev_cpu) ? prev_cpu : this_cpu; |
| 5852 | |
| 5853 | if (sync && cpu_rq(this_cpu)->nr_running == 1) |
| 5854 | return this_cpu; |
| 5855 | |
| 5856 | return nr_cpumask_bits; |
| 5857 | } |
| 5858 | |
| 5859 | static int |
| 5860 | wake_affine_weight(struct sched_domain *sd, struct task_struct *p, |
| 5861 | int this_cpu, int prev_cpu, int sync) |
| 5862 | { |
| 5863 | s64 this_eff_load, prev_eff_load; |
| 5864 | unsigned long task_load; |
| 5865 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 5866 | this_eff_load = cpu_load(cpu_rq(this_cpu)); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5867 | |
| 5868 | if (sync) { |
| 5869 | unsigned long current_load = task_h_load(current); |
| 5870 | |
| 5871 | if (current_load > this_eff_load) |
| 5872 | return this_cpu; |
| 5873 | |
| 5874 | this_eff_load -= current_load; |
| 5875 | } |
| 5876 | |
| 5877 | task_load = task_h_load(p); |
| 5878 | |
| 5879 | this_eff_load += task_load; |
| 5880 | if (sched_feat(WA_BIAS)) |
| 5881 | this_eff_load *= 100; |
| 5882 | this_eff_load *= capacity_of(prev_cpu); |
| 5883 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 5884 | prev_eff_load = cpu_load(cpu_rq(prev_cpu)); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5885 | prev_eff_load -= task_load; |
| 5886 | if (sched_feat(WA_BIAS)) |
| 5887 | prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2; |
| 5888 | prev_eff_load *= capacity_of(this_cpu); |
| 5889 | |
| 5890 | /* |
| 5891 | * If sync, adjust the weight of prev_eff_load such that if |
| 5892 | * prev_eff == this_eff that select_idle_sibling() will consider |
| 5893 | * stacking the wakee on top of the waker if no other CPU is |
| 5894 | * idle. |
| 5895 | */ |
| 5896 | if (sync) |
| 5897 | prev_eff_load += 1; |
| 5898 | |
| 5899 | return this_eff_load < prev_eff_load ? this_cpu : nr_cpumask_bits; |
| 5900 | } |
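| | /* |
| |  * Sketch of the bias above, with hypothetical values: under WA_BIAS |
| |  * and imbalance_pct = 117, this_eff_load is scaled by 100 and |
| |  * prev_eff_load by 100 + (117 - 100) / 2 = 108, handing prev_cpu |
| |  * roughly an 8% head start so small load deltas do not bounce tasks |
| |  * between the two CPUs. |
| |  */ |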
| 5901 | |
| 5902 | static int wake_affine(struct sched_domain *sd, struct task_struct *p, |
| 5903 | int this_cpu, int prev_cpu, int sync) |
| 5904 | { |
| 5905 | int target = nr_cpumask_bits; |
| 5906 | |
| 5907 | if (sched_feat(WA_IDLE)) |
| 5908 | target = wake_affine_idle(this_cpu, prev_cpu, sync); |
| 5909 | |
| 5910 | if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits) |
| 5911 | target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync); |
| 5912 | |
| 5913 | schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts); |
| 5914 | if (target == nr_cpumask_bits) |
| 5915 | return prev_cpu; |
| 5916 | |
| 5917 | schedstat_inc(sd->ttwu_move_affine); |
| 5918 | schedstat_inc(p->se.statistics.nr_wakeups_affine); |
| 5919 | return target; |
| 5920 | } |
| 5921 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5922 | static struct sched_group * |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 5923 | find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5924 | |
| 5925 | /* |
| 5926 | * find_idlest_group_cpu - find the idlest CPU among the CPUs in the group. |
| 5927 | */ |
| 5928 | static int |
| 5929 | find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) |
| 5930 | { |
| 5931 | unsigned long load, min_load = ULONG_MAX; |
| 5932 | unsigned int min_exit_latency = UINT_MAX; |
| 5933 | u64 latest_idle_timestamp = 0; |
| 5934 | int least_loaded_cpu = this_cpu; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 5935 | int shallowest_idle_cpu = -1; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5936 | int i; |
| 5937 | |
| 5938 | /* Check if we have any choice: */ |
| 5939 | if (group->group_weight == 1) |
| 5940 | return cpumask_first(sched_group_span(group)); |
| 5941 | |
| 5942 | /* Traverse only the allowed CPUs */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 5943 | for_each_cpu_and(i, sched_group_span(group), p->cpus_ptr) { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 5944 | if (sched_idle_cpu(i)) |
| 5945 | return i; |
| 5946 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5947 | if (available_idle_cpu(i)) { |
| 5948 | struct rq *rq = cpu_rq(i); |
| 5949 | struct cpuidle_state *idle = idle_get_state(rq); |
| 5950 | if (idle && idle->exit_latency < min_exit_latency) { |
| 5951 | /* |
| 5952 | * We give priority to a CPU whose idle state |
| 5953 | * has the smallest exit latency irrespective |
| 5954 | * of any idle timestamp. |
| 5955 | */ |
| 5956 | min_exit_latency = idle->exit_latency; |
| 5957 | latest_idle_timestamp = rq->idle_stamp; |
| 5958 | shallowest_idle_cpu = i; |
| 5959 | } else if ((!idle || idle->exit_latency == min_exit_latency) && |
| 5960 | rq->idle_stamp > latest_idle_timestamp) { |
| 5961 | /* |
| 5962 | * If equal or no active idle state, then |
| 5963 | * the most recently idled CPU might have |
| 5964 | * a warmer cache. |
| 5965 | */ |
| 5966 | latest_idle_timestamp = rq->idle_stamp; |
| 5967 | shallowest_idle_cpu = i; |
| 5968 | } |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 5969 | } else if (shallowest_idle_cpu == -1) { |
| 5970 | load = cpu_load(cpu_rq(i)); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5971 | if (load < min_load) { |
| 5972 | min_load = load; |
| 5973 | least_loaded_cpu = i; |
| 5974 | } |
| 5975 | } |
| 5976 | } |
| 5977 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 5978 | return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5979 | } |
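| | /* |
| |  * Preference order above: a SCHED_IDLE-only CPU wins outright, then |
| |  * the idle CPU in the shallowest C-state (cheapest wakeup), with ties |
| |  * broken by most recent idling (warmer cache); only if nothing is |
| |  * idle do we fall back to the least loaded CPU. |
| |  */ |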
| 5980 | |
| 5981 | static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p, |
| 5982 | int cpu, int prev_cpu, int sd_flag) |
| 5983 | { |
| 5984 | int new_cpu = cpu; |
| 5985 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 5986 | if (!cpumask_intersects(sched_domain_span(sd), p->cpus_ptr)) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5987 | return prev_cpu; |
| 5988 | |
| 5989 | /* |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 5990 | * We need the task's util for cpu_util_without(); sync it up to |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 5991 | * prev_cpu's last_update_time. |
| 5992 | */ |
| 5993 | if (!(sd_flag & SD_BALANCE_FORK)) |
| 5994 | sync_entity_load_avg(&p->se); |
| 5995 | |
| 5996 | while (sd) { |
| 5997 | struct sched_group *group; |
| 5998 | struct sched_domain *tmp; |
| 5999 | int weight; |
| 6000 | |
| 6001 | if (!(sd->flags & sd_flag)) { |
| 6002 | sd = sd->child; |
| 6003 | continue; |
| 6004 | } |
| 6005 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 6006 | group = find_idlest_group(sd, p, cpu); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6007 | if (!group) { |
| 6008 | sd = sd->child; |
| 6009 | continue; |
| 6010 | } |
| 6011 | |
| 6012 | new_cpu = find_idlest_group_cpu(group, p, cpu); |
| 6013 | if (new_cpu == cpu) { |
| 6014 | /* Now try balancing at a lower domain level of 'cpu': */ |
| 6015 | sd = sd->child; |
| 6016 | continue; |
| 6017 | } |
| 6018 | |
| 6019 | /* Now try balancing at a lower domain level of 'new_cpu': */ |
| 6020 | cpu = new_cpu; |
| 6021 | weight = sd->span_weight; |
| 6022 | sd = NULL; |
| 6023 | for_each_domain(cpu, tmp) { |
| 6024 | if (weight <= tmp->span_weight) |
| 6025 | break; |
| 6026 | if (tmp->flags & sd_flag) |
| 6027 | sd = tmp; |
| 6028 | } |
| 6029 | } |
| 6030 | |
| 6031 | return new_cpu; |
| 6032 | } |
| 6033 | |
| 6034 | #ifdef CONFIG_SCHED_SMT |
| 6035 | DEFINE_STATIC_KEY_FALSE(sched_smt_present); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 6036 | EXPORT_SYMBOL_GPL(sched_smt_present); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6037 | |
| 6038 | static inline void set_idle_cores(int cpu, int val) |
| 6039 | { |
| 6040 | struct sched_domain_shared *sds; |
| 6041 | |
| 6042 | sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); |
| 6043 | if (sds) |
| 6044 | WRITE_ONCE(sds->has_idle_cores, val); |
| 6045 | } |
| 6046 | |
| 6047 | static inline bool test_idle_cores(int cpu, bool def) |
| 6048 | { |
| 6049 | struct sched_domain_shared *sds; |
| 6050 | |
| 6051 | sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); |
| 6052 | if (sds) |
| 6053 | return READ_ONCE(sds->has_idle_cores); |
| 6054 | |
| 6055 | return def; |
| 6056 | } |
| 6057 | |
| 6058 | /* |
| 6059 | * Scans the local SMT mask to see if the entire core is idle, and records this |
| 6060 | * information in sd_llc_shared->has_idle_cores. |
| 6061 | * |
| 6062 | * Since SMT siblings share all cache levels, inspecting this limited remote |
| 6063 | * state should be fairly cheap. |
| 6064 | */ |
| 6065 | void __update_idle_core(struct rq *rq) |
| 6066 | { |
| 6067 | int core = cpu_of(rq); |
| 6068 | int cpu; |
| 6069 | |
| 6070 | rcu_read_lock(); |
| 6071 | if (test_idle_cores(core, true)) |
| 6072 | goto unlock; |
| 6073 | |
| 6074 | for_each_cpu(cpu, cpu_smt_mask(core)) { |
| 6075 | if (cpu == core) |
| 6076 | continue; |
| 6077 | |
| 6078 | if (!available_idle_cpu(cpu)) |
| 6079 | goto unlock; |
| 6080 | } |
| 6081 | |
| 6082 | set_idle_cores(core, 1); |
| 6083 | unlock: |
| 6084 | rcu_read_unlock(); |
| 6085 | } |
| 6086 | |
| 6087 | /* |
| 6088 | * Scan the entire LLC domain for idle cores; this dynamically switches off if |
| 6089 | * there are no idle cores left in the system; tracked through |
| 6090 | * sd_llc->shared->has_idle_cores and enabled through update_idle_core() above. |
| 6091 | */ |
| 6092 | static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target) |
| 6093 | { |
| 6094 | struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask); |
| 6095 | int core, cpu; |
| 6096 | |
| 6097 | if (!static_branch_likely(&sched_smt_present)) |
| 6098 | return -1; |
| 6099 | |
| 6100 | if (!test_idle_cores(target, false)) |
| 6101 | return -1; |
| 6102 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 6103 | cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6104 | |
| 6105 | for_each_cpu_wrap(core, cpus, target) { |
| 6106 | bool idle = true; |
| 6107 | |
| 6108 | for_each_cpu(cpu, cpu_smt_mask(core)) { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 6109 | if (!available_idle_cpu(cpu)) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6110 | idle = false; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 6111 | break; |
| 6112 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6113 | } |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 6114 | cpumask_andnot(cpus, cpus, cpu_smt_mask(core)); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6115 | |
| 6116 | if (idle) |
| 6117 | return core; |
| 6118 | } |
| 6119 | |
| 6120 | /* |
| 6121 | * Failed to find an idle core; stop looking for one. |
| 6122 | */ |
| 6123 | set_idle_cores(target, 0); |
| 6124 | |
| 6125 | return -1; |
| 6126 | } |
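| | /* |
| |  * Note the cpumask_andnot() above: every sibling of an inspected core |
| |  * is dropped from the scan mask whether or not the core was idle, so |
| |  * each core's threads are visited at most once per invocation. |
| |  */ |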
| 6127 | |
| 6128 | /* |
| 6129 | * Scan the local SMT mask for idle CPUs. |
| 6130 | */ |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 6131 | static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6132 | { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 6133 | int cpu; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6134 | |
| 6135 | if (!static_branch_likely(&sched_smt_present)) |
| 6136 | return -1; |
| 6137 | |
| 6138 | for_each_cpu(cpu, cpu_smt_mask(target)) { |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 6139 | if (!cpumask_test_cpu(cpu, p->cpus_ptr) || |
| 6140 | !cpumask_test_cpu(cpu, sched_domain_span(sd))) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6141 | continue; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 6142 | if (available_idle_cpu(cpu) || sched_idle_cpu(cpu)) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6143 | return cpu; |
| 6144 | } |
| 6145 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 6146 | return -1; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6147 | } |
| 6148 | |
| 6149 | #else /* CONFIG_SCHED_SMT */ |
| 6150 | |
| 6151 | static inline int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target) |
| 6152 | { |
| 6153 | return -1; |
| 6154 | } |
| 6155 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 6156 | static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6157 | { |
| 6158 | return -1; |
| 6159 | } |
| 6160 | |
| 6161 | #endif /* CONFIG_SCHED_SMT */ |
| 6162 | |
| 6163 | /* |
| 6164 | * Scan the LLC domain for idle CPUs; this is dynamically regulated by |
| 6165 | * comparing the average scan cost (tracked in sd->avg_scan_cost) against the |
| 6166 | * average idle time for this rq (as found in rq->avg_idle). |
| 6167 | */ |
| 6168 | static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target) |
| 6169 | { |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 6170 | struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6171 | struct sched_domain *this_sd; |
| 6172 | u64 avg_cost, avg_idle; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 6173 | u64 time; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 6174 | int this = smp_processor_id(); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 6175 | int cpu, nr = INT_MAX; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6176 | |
| 6177 | this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc)); |
| 6178 | if (!this_sd) |
| 6179 | return -1; |
| 6180 | |
| 6181 | /* |
| 6182 | * Due to large variance we need a large fuzz factor; hackbench in |
| 6183 | 	 * particular is sensitive here. |
| 6184 | */ |
| 6185 | avg_idle = this_rq()->avg_idle / 512; |
| 6186 | avg_cost = this_sd->avg_scan_cost + 1; |
| 6187 | |
| 6188 | if (sched_feat(SIS_AVG_CPU) && avg_idle < avg_cost) |
| 6189 | return -1; |
| 6190 | |
| 6191 | if (sched_feat(SIS_PROP)) { |
| 6192 | u64 span_avg = sd->span_weight * avg_idle; |
| 6193 | if (span_avg > 4*avg_cost) |
| 6194 | nr = div_u64(span_avg, avg_cost); |
| 6195 | else |
| 6196 | nr = 4; |
| 6197 | } |
| 6198 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 6199 | time = cpu_clock(this); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6200 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 6201 | cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); |
| 6202 | |
| 6203 | for_each_cpu_wrap(cpu, cpus, target) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6204 | if (!--nr) |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 6205 | return -1; |
| 6206 | if (available_idle_cpu(cpu) || sched_idle_cpu(cpu)) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6207 | break; |
| 6208 | } |
| 6209 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 6210 | time = cpu_clock(this) - time; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 6211 | update_avg(&this_sd->avg_scan_cost, time); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6212 | |
| 6213 | return cpu; |
| 6214 | } |
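| | /* |
| |  * Example of the SIS_PROP clamp, with hypothetical numbers: for |
| |  * rq->avg_idle = 256us the fuzzed avg_idle is 500ns; with an |
| |  * avg_scan_cost near 100ns per CPU and span_weight = 16, span_avg = |
| |  * 16 * 500 = 8000 > 4 * 100, so nr = 80 and the whole LLC may be |
| |  * scanned. A busier rq shrinks nr toward the floor of 4 CPUs per |
| |  * wakeup. |
| |  */ |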
| 6215 | |
| 6216 | /* |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 6217 | * Scan the asym_capacity domain for idle CPUs; pick the first idle one on which |
| 6218 | * the task fits. If no CPU is big enough, but there are idle ones, try to |
| 6219 | * maximize capacity. |
| 6220 | */ |
| 6221 | static int |
| 6222 | select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target) |
| 6223 | { |
| 6224 | unsigned long task_util, best_cap = 0; |
| 6225 | int cpu, best_cpu = -1; |
| 6226 | struct cpumask *cpus; |
| 6227 | |
| 6228 | cpus = this_cpu_cpumask_var_ptr(select_idle_mask); |
| 6229 | cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr); |
| 6230 | |
| 6231 | task_util = uclamp_task_util(p); |
| 6232 | |
| 6233 | for_each_cpu_wrap(cpu, cpus, target) { |
| 6234 | unsigned long cpu_cap = capacity_of(cpu); |
| 6235 | |
| 6236 | if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu)) |
| 6237 | continue; |
| 6238 | if (fits_capacity(task_util, cpu_cap)) |
| 6239 | return cpu; |
| 6240 | |
| 6241 | if (cpu_cap > best_cap) { |
| 6242 | best_cap = cpu_cap; |
| 6243 | best_cpu = cpu; |
| 6244 | } |
| 6245 | } |
| 6246 | |
| 6247 | return best_cpu; |
| 6248 | } |
| 6249 | |
| 6250 | static inline bool asym_fits_capacity(int task_util, int cpu) |
| 6251 | { |
| 6252 | if (static_branch_unlikely(&sched_asym_cpucapacity)) |
| 6253 | return fits_capacity(task_util, capacity_of(cpu)); |
| 6254 | |
| 6255 | return true; |
| 6256 | } |
| 6257 | |
| 6258 | /* |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6259 | * Try and locate an idle core/thread in the LLC cache domain. |
| 6260 | */ |
| 6261 | static int select_idle_sibling(struct task_struct *p, int prev, int target) |
| 6262 | { |
| 6263 | struct sched_domain *sd; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 6264 | unsigned long task_util; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6265 | int i, recent_used_cpu; |
| 6266 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 6267 | /* |
| 6268 | 	 * On asymmetric systems, update the task utilization because we will |
| 6269 | 	 * check that the task fits the CPU's capacity. |
| 6270 | */ |
| 6271 | if (static_branch_unlikely(&sched_asym_cpucapacity)) { |
| 6272 | sync_entity_load_avg(&p->se); |
| 6273 | task_util = uclamp_task_util(p); |
| 6274 | } |
| 6275 | |
| 6276 | if ((available_idle_cpu(target) || sched_idle_cpu(target)) && |
| 6277 | asym_fits_capacity(task_util, target)) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6278 | return target; |
| 6279 | |
| 6280 | /* |
| 6281 | * If the previous CPU is cache affine and idle, don't be stupid: |
| 6282 | */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 6283 | if (prev != target && cpus_share_cache(prev, target) && |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 6284 | (available_idle_cpu(prev) || sched_idle_cpu(prev)) && |
| 6285 | asym_fits_capacity(task_util, prev)) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6286 | return prev; |
| 6287 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 6288 | /* |
| 6289 | * Allow a per-cpu kthread to stack with the wakee if the |
| 6290 | 	 * kworker thread and the task's previous CPU are the same. |
| 6291 | * The assumption is that the wakee queued work for the |
| 6292 | * per-cpu kthread that is now complete and the wakeup is |
| 6293 | * essentially a sync wakeup. An obvious example of this |
| 6294 | * pattern is IO completions. |
| 6295 | */ |
| 6296 | if (is_per_cpu_kthread(current) && |
| 6297 | in_task() && |
| 6298 | prev == smp_processor_id() && |
| 6299 | this_rq()->nr_running <= 1 && |
| 6300 | asym_fits_capacity(task_util, prev)) { |
| 6301 | return prev; |
| 6302 | } |
| 6303 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6304 | /* Check a recently used CPU as a potential idle candidate: */ |
| 6305 | recent_used_cpu = p->recent_used_cpu; |
| 6306 | if (recent_used_cpu != prev && |
| 6307 | recent_used_cpu != target && |
| 6308 | cpus_share_cache(recent_used_cpu, target) && |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 6309 | (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) && |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 6310 | cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr) && |
| 6311 | asym_fits_capacity(task_util, recent_used_cpu)) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6312 | /* |
| 6313 | * Replace recent_used_cpu with prev as it is a potential |
| 6314 | * candidate for the next wake: |
| 6315 | */ |
| 6316 | p->recent_used_cpu = prev; |
| 6317 | return recent_used_cpu; |
| 6318 | } |
| 6319 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 6320 | /* |
| 6321 | * For asymmetric CPU capacity systems, our domain of interest is |
| 6322 | * sd_asym_cpucapacity rather than sd_llc. |
| 6323 | */ |
| 6324 | if (static_branch_unlikely(&sched_asym_cpucapacity)) { |
| 6325 | sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, target)); |
| 6326 | /* |
| 6327 | * On an asymmetric CPU capacity system where an exclusive |
| 6328 | * cpuset defines a symmetric island (i.e. one unique |
| 6329 | * capacity_orig value through the cpuset), the key will be set |
| 6330 | * but the CPUs within that cpuset will not have a domain with |
| 6331 | * SD_ASYM_CPUCAPACITY. These should follow the usual symmetric |
| 6332 | * capacity path. |
| 6333 | */ |
| 6334 | if (sd) { |
| 6335 | i = select_idle_capacity(p, sd, target); |
| 6336 | return ((unsigned)i < nr_cpumask_bits) ? i : target; |
| 6337 | } |
| 6338 | } |
| 6339 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6340 | sd = rcu_dereference(per_cpu(sd_llc, target)); |
| 6341 | if (!sd) |
| 6342 | return target; |
| 6343 | |
| 6344 | i = select_idle_core(p, sd, target); |
| 6345 | if ((unsigned)i < nr_cpumask_bits) |
| 6346 | return i; |
| 6347 | |
| 6348 | i = select_idle_cpu(p, sd, target); |
| 6349 | if ((unsigned)i < nr_cpumask_bits) |
| 6350 | return i; |
| 6351 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 6352 | i = select_idle_smt(p, sd, target); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6353 | if ((unsigned)i < nr_cpumask_bits) |
| 6354 | return i; |
| 6355 | |
| 6356 | return target; |
| 6357 | } |
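| | /* |
| |  * Summary of the fast path above: target, then prev, then the per-CPU |
| |  * kthread stacking case and the recently used CPU are tried without a |
| |  * scan; only then do we search, via the asymmetric-capacity domain |
| |  * when present, otherwise an idle core, an idle CPU, and finally an |
| |  * idle SMT sibling, before falling back to target. |
| |  */ |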
| 6358 | |
| 6359 | /** |
| 6360 | * cpu_util - Amount of capacity of a CPU that is (estimated to be) used by CFS tasks |
| 6361 | * @cpu: the CPU to get the utilization of |
| 6362 | * |
| 6363 | * The unit of the return value must be that of capacity so we can compare |
| 6364 | * the utilization with the capacity of the CPU that is available for CFS |
| 6365 | * tasks (i.e. cpu_capacity). |
| 6366 | * |
| 6367 | * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the |
| 6368 | * recent utilization of currently non-runnable tasks on a CPU. It represents |
| 6369 | * the amount of utilization of a CPU in the range [0..capacity_orig] where |
| 6370 | * capacity_orig is the cpu_capacity available at the highest frequency |
| 6371 | * (arch_scale_freq_capacity()). |
| 6372 | * The utilization of a CPU converges towards a sum equal to or less than the |
| 6373 | * current capacity (capacity_curr <= capacity_orig) of the CPU because it is |
| 6374 | * the running time on this CPU scaled by capacity_curr. |
| 6375 | * |
| 6376 | * The estimated utilization of a CPU is defined to be the maximum between its |
| 6377 | * cfs_rq.avg.util_avg and the sum of the estimated utilization of the tasks |
| 6378 | * currently RUNNABLE on that CPU. |
| 6379 | * This allows to properly represent the expected utilization of a CPU which |
| 6380 | * has just got a big task running since a long sleep period. At the same time |
| 6381 | * however it preserves the benefits of the "blocked utilization" in |
| 6382 | * describing the potential for other tasks waking up on the same CPU. |
| 6383 | * |
| 6384 | * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even |
| 6385 | * higher than capacity_orig because of unfortunate rounding in |
| 6386 | * cfs.avg.util_avg or just after migrating tasks and new task wakeups until |
| 6387 | * the average stabilizes with the new running time. We need to check that the |
| 6388 | * utilization stays within the range of [0..capacity_orig] and cap it if |
| 6389 | * necessary. Without utilization capping, a group could be seen as overloaded |
| 6390 | * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of |
| 6391 | * available capacity. We allow utilization to overshoot capacity_curr (but not |
| 6392 | * capacity_orig) as it is useful for predicting the capacity required after task |
| 6393 | * migrations (scheduler-driven DVFS). |
| 6394 | * |
| 6395 | * Return: the (estimated) utilization for the specified CPU |
| 6396 | */ |
| 6397 | static inline unsigned long cpu_util(int cpu) |
| 6398 | { |
| 6399 | struct cfs_rq *cfs_rq; |
| 6400 | unsigned int util; |
| 6401 | |
| 6402 | cfs_rq = &cpu_rq(cpu)->cfs; |
| 6403 | util = READ_ONCE(cfs_rq->avg.util_avg); |
| 6404 | |
| 6405 | if (sched_feat(UTIL_EST)) |
| 6406 | util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued)); |
| 6407 | |
| 6408 | return min_t(unsigned long, util, capacity_orig_of(cpu)); |
| 6409 | } |
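
/*
 * Editor's worked example (hypothetical numbers, not kernel code): with
 * UTIL_EST enabled, cpu_util() effectively returns
 * min(max(util_avg, util_est.enqueued), capacity_orig):
 */
static unsigned long cpu_util_sketch(unsigned long util_avg,
				     unsigned long util_est,
				     unsigned long cap_orig)
{
	unsigned long util = util_avg > util_est ? util_avg : util_est;

	return util < cap_orig ? util : cap_orig;
}
/*
 * cpu_util_sketch(1100, 300, 1024) == 1024  (util_avg overshoots after a
 *                                            migration and is clamped)
 * cpu_util_sketch( 120, 700, 1024) ==  700  (a big task just woke up, so
 *                                            util_est wins over the decayed
 *                                            util_avg)
 */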
| 6410 | |
| 6411 | /* |
| 6412 | * cpu_util_without: compute cpu utilization without any contributions from *p |
| 6413 | * @cpu: the CPU whose utilization is requested |
| 6414 | * @p: the task whose utilization should be discounted |
| 6415 | * |
| 6416 | * The utilization of a CPU is defined by the utilization of tasks currently |
| 6417 | * enqueued on that CPU as well as tasks which are currently sleeping after an |
| 6418 | * execution on that CPU. |
| 6419 | * |
| 6420 | * This method returns the utilization of the specified CPU by discounting the |
| 6421 | * utilization of the specified task, whenever the task is currently |
| 6422 | * contributing to the CPU utilization. |
| 6423 | */ |
| 6424 | static unsigned long cpu_util_without(int cpu, struct task_struct *p) |
| 6425 | { |
| 6426 | struct cfs_rq *cfs_rq; |
| 6427 | unsigned int util; |
| 6428 | |
| 6429 | /* Task has no contribution or is new */ |
| 6430 | if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) |
| 6431 | return cpu_util(cpu); |
| 6432 | |
| 6433 | cfs_rq = &cpu_rq(cpu)->cfs; |
| 6434 | util = READ_ONCE(cfs_rq->avg.util_avg); |
| 6435 | |
| 6436 | /* Discount task's util from CPU's util */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 6437 | lsub_positive(&util, task_util(p)); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6438 | |
| 6439 | /* |
| 6440 | * Covered cases: |
| 6441 | * |
| 6442 | * a) if *p is the only task sleeping on this CPU, then: |
| 6443 | * cpu_util (== task_util) > util_est (== 0) |
| 6444 | * and thus we return: |
| 6445 | * cpu_util_without = (cpu_util - task_util) = 0 |
| 6446 | * |
| 6447 | * b) if other tasks are SLEEPING on this CPU, which is now exiting |
| 6448 | * IDLE, then: |
| 6449 | * cpu_util >= task_util |
| 6450 | * cpu_util > util_est (== 0) |
| 6451 | * and thus we discount *p's blocked utilization to return: |
| 6452 | * cpu_util_without = (cpu_util - task_util) >= 0 |
| 6453 | * |
| 6454 | * c) if other tasks are RUNNABLE on that CPU and |
| 6455 | * util_est > cpu_util |
| 6456 | * then we use util_est since it returns a more restrictive |
| 6457 | * estimation of the spare capacity on that CPU, by just |
| 6458 | * considering the expected utilization of tasks already |
| 6459 | * runnable on that CPU. |
| 6460 | * |
| 6461 | * Cases a) and b) are covered by the above code, while case c) is |
| 6462 | * covered by the following code when estimated utilization is |
| 6463 | * enabled. |
| 6464 | */ |
| 6465 | if (sched_feat(UTIL_EST)) { |
| 6466 | unsigned int estimated = |
| 6467 | READ_ONCE(cfs_rq->avg.util_est.enqueued); |
| 6468 | |
| 6469 | /* |
| 6470 | * Despite the following checks we still have a small window |
| 6471 | * for a possible race, when an execl's select_task_rq_fair() |
| 6472 | * races with LB's detach_task(): |
| 6473 | * |
| 6474 | * detach_task() |
| 6475 | * p->on_rq = TASK_ON_RQ_MIGRATING; |
| 6476 | * ---------------------------------- A |
| 6477 | * deactivate_task() \ |
| 6478 | * dequeue_task() + RaceTime |
| 6479 | * util_est_dequeue() / |
| 6480 | * ---------------------------------- B |
| 6481 | * |
| 6482 | * The additional check on "current == p" is required to |
| 6483 | * properly fix the execl regression, and it helps further |
| 6484 | * reduce the chances of the above race. |
| 6485 | */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 6486 | if (unlikely(task_on_rq_queued(p) || current == p)) |
| 6487 | lsub_positive(&estimated, _task_util_est(p)); |
| 6488 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6489 | util = max(util, estimated); |
| 6490 | } |
| 6491 | |
| 6492 | /* |
| 6493 | * Utilization (estimated) can exceed the CPU capacity, thus let's |
| 6494 | * clamp to the maximum CPU capacity to ensure consistency with |
| 6495 | * the cpu_util call. |
| 6496 | */ |
| 6497 | return min_t(unsigned long, util, capacity_orig_of(cpu)); |
| 6498 | } |
| 6499 | |
| 6500 | /* |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 6501 | * Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued) |
| 6502 | * to @dst_cpu. |
| 6503 | */ |
| 6504 | static unsigned long cpu_util_next(int cpu, struct task_struct *p, int dst_cpu) |
| 6505 | { |
| 6506 | struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs; |
| 6507 | unsigned long util_est, util = READ_ONCE(cfs_rq->avg.util_avg); |
| 6508 | |
| 6509 | /* |
| 6510 | * If @p migrates from @cpu to another, remove its contribution. Or, |
| 6511 | * if @p migrates from another CPU to @cpu, add its contribution. In |
| 6512 | * the other cases, @cpu is not impacted by the migration, so the |
| 6513 | * util_avg should already be correct. |
| 6514 | */ |
| 6515 | if (task_cpu(p) == cpu && dst_cpu != cpu) |
| 6516 | sub_positive(&util, task_util(p)); |
| 6517 | else if (task_cpu(p) != cpu && dst_cpu == cpu) |
| 6518 | util += task_util(p); |
| 6519 | |
| 6520 | if (sched_feat(UTIL_EST)) { |
| 6521 | util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued); |
| 6522 | |
| 6523 | /* |
| 6524 | * During wake-up, the task isn't enqueued yet and doesn't |
| 6525 | * appear in the cfs_rq->avg.util_est.enqueued of any rq, |
| 6526 | * so just add it (if needed) to "simulate" what will be |
| 6527 | * cpu_util() after the task has been enqueued. |
| 6528 | */ |
| 6529 | if (dst_cpu == cpu) |
| 6530 | util_est += _task_util_est(p); |
| 6531 | |
| 6532 | util = max(util, util_est); |
| 6533 | } |
| 6534 | |
| 6535 | return min(util, capacity_orig_of(cpu)); |
| 6536 | } |
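
/*
 * Editor's sketch of the util_avg adjustment in cpu_util_next() above,
 * with hypothetical parameters instead of the real kernel types: the
 * task's contribution is removed when it leaves @cpu, added when it
 * arrives on @cpu, and left untouched when @cpu is uninvolved.
 */
static unsigned long util_after_migration_sketch(unsigned long cpu_util_avg,
						 unsigned long task_util_val,
						 int task_cpu, int cpu,
						 int dst_cpu)
{
	unsigned long util = cpu_util_avg;

	if (task_cpu == cpu && dst_cpu != cpu)		/* task leaves @cpu */
		util = util > task_util_val ? util - task_util_val : 0;
	else if (task_cpu != cpu && dst_cpu == cpu)	/* task lands on @cpu */
		util += task_util_val;

	return util;
}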
| 6537 | |
| 6538 | /* |
| 6539 | * compute_energy(): Estimates the energy that @pd would consume if @p was |
| 6540 | * migrated to @dst_cpu. compute_energy() predicts what will be the utilization |
| 6541 | * landscape of @pd's CPUs after the task migration, and uses the Energy Model |
| 6542 | * to compute what would be the energy if we decided to actually migrate that |
| 6543 | * task. |
| 6544 | */ |
| 6545 | static long |
| 6546 | compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd) |
| 6547 | { |
| 6548 | struct cpumask *pd_mask = perf_domain_span(pd); |
| 6549 | unsigned long cpu_cap = arch_scale_cpu_capacity(cpumask_first(pd_mask)); |
| 6550 | unsigned long max_util = 0, sum_util = 0; |
| 6551 | int cpu; |
| 6552 | |
| 6553 | /* |
| 6554 | * The capacity state of CPUs of the current rd can be driven by CPUs |
| 6555 | * of another rd if they belong to the same pd. So, account for the |
| 6556 | * utilization of these CPUs too by masking pd with cpu_online_mask |
| 6557 | * instead of the rd span. |
| 6558 | * |
| 6559 | * If an entire pd is outside of the current rd, it will not appear in |
| 6560 | * its pd list and will not be accounted by compute_energy(). |
| 6561 | */ |
| 6562 | for_each_cpu_and(cpu, pd_mask, cpu_online_mask) { |
| 6563 | unsigned long cpu_util, util_cfs = cpu_util_next(cpu, p, dst_cpu); |
| 6564 | struct task_struct *tsk = cpu == dst_cpu ? p : NULL; |
| 6565 | |
| 6566 | /* |
| 6567 | * Busy time computation: utilization clamping is not |
| 6568 | * required since the ratio (sum_util / cpu_capacity) |
| 6569 | * is already enough to scale the EM reported power |
| 6570 | * consumption at the (possibly clamped) cpu_capacity. |
| 6571 | */ |
| 6572 | sum_util += schedutil_cpu_util(cpu, util_cfs, cpu_cap, |
| 6573 | ENERGY_UTIL, NULL); |
| 6574 | |
| 6575 | /* |
| 6576 | * Performance domain frequency: utilization clamping |
| 6577 | * must be considered since it affects the selection |
| 6578 | * of the performance domain frequency. |
| 6579 | * NOTE: when RT tasks are running, by default the |
| 6580 | * FREQUENCY_UTIL utilization can reach the maximum OPP. |
| 6581 | */ |
| 6582 | cpu_util = schedutil_cpu_util(cpu, util_cfs, cpu_cap, |
| 6583 | FREQUENCY_UTIL, tsk); |
| 6584 | max_util = max(max_util, cpu_util); |
| 6585 | } |
| 6586 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 6587 | return em_cpu_energy(pd->em_pd, max_util, sum_util); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 6588 | } |
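
/*
 * Editor's simplified sketch of the Energy Model step above (the real
 * arithmetic lives in em_cpu_energy()): pick the lowest performance state
 * whose capacity covers max_util, then charge its cost in proportion to
 * the domain's busy time. The table layout, capacities and costs are
 * made-up values, not a real platform's.
 */
struct opp_sketch { unsigned long cap; unsigned long cost; };

static unsigned long pd_energy_sketch(const struct opp_sketch *tbl, int n,
				      unsigned long max_util,
				      unsigned long sum_util)
{
	int i;

	/* lowest OPP able to serve the highest per-CPU utilization */
	for (i = 0; i < n - 1; i++)
		if (tbl[i].cap >= max_util)
			break;

	/* energy ~ cost at that OPP, scaled by busy time over capacity */
	return tbl[i].cost * sum_util / tbl[i].cap;
}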
| 6589 | |
| 6590 | /* |
| 6591 | * find_energy_efficient_cpu(): Find most energy-efficient target CPU for the |
| 6592 | * waking task. find_energy_efficient_cpu() looks for the CPU with maximum |
| 6593 | * spare capacity in each performance domain and uses it as a potential |
| 6594 | * candidate to execute the task. Then, it uses the Energy Model to figure |
| 6595 | * out which of the CPU candidates is the most energy-efficient. |
| 6596 | * |
| 6597 | * The rationale for this heuristic is as follows. In a performance domain, |
| 6598 | * all the most energy efficient CPU candidates (according to the Energy |
| 6599 | * Model) are those for which we'll request a low frequency. When there are |
| 6600 | * several CPUs for which the frequency request will be the same, we don't |
| 6601 | * have enough data to break the tie between them, because the Energy Model |
| 6602 | * only includes active power costs. With this model, if we assume that |
| 6603 | * frequency requests follow utilization (e.g. using schedutil), the CPU with |
| 6604 | * the maximum spare capacity in a performance domain is guaranteed to be among |
| 6605 | * the best candidates of the performance domain. |
| 6606 | * |
| 6607 | * In practice, it could be preferable from an energy standpoint to pack |
| 6608 | * small tasks on a CPU in order to let other CPUs go in deeper idle states, |
| 6609 | * but that could also hurt our chances to go cluster idle, and we have no |
| 6610 | * way to tell with the current Energy Model if this is actually a good |
| 6611 | * idea or not. So, find_energy_efficient_cpu() basically favors |
| 6612 | * cluster-packing, and spreading inside a cluster. That should at least be |
| 6613 | * a good thing for latency, and this is consistent with the idea that most |
| 6614 | * of the energy savings of EAS come from the asymmetry of the system, and |
| 6615 | * not so much from breaking the tie between identical CPUs. That's also the |
| 6616 | * reason why EAS is enabled in the topology code only for systems where |
| 6617 | * SD_ASYM_CPUCAPACITY is set. |
| 6618 | * |
| 6619 | * NOTE: Forkees are not accepted in the energy-aware wake-up path because |
| 6620 | * they don't have any useful utilization data yet and it's not possible to |
| 6621 | * forecast their impact on energy consumption. Consequently, they will be |
| 6622 | * placed by find_idlest_cpu() on the least loaded CPU, which might turn out |
| 6623 | * to be energy-inefficient in some use-cases. The alternative would be to |
| 6624 | * bias new tasks towards specific types of CPUs first, or to try to infer |
| 6625 | * their util_avg from the parent task, but those heuristics could hurt |
| 6626 | * other use-cases too. So, until someone finds a better way to solve this, |
| 6627 | * let's keep things simple by re-using the existing slow path. |
| 6628 | */ |
| 6629 | static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) |
| 6630 | { |
| 6631 | unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX; |
| 6632 | struct root_domain *rd = cpu_rq(smp_processor_id())->rd; |
| 6633 | unsigned long cpu_cap, util, base_energy = 0; |
| 6634 | int cpu, best_energy_cpu = prev_cpu; |
| 6635 | struct sched_domain *sd; |
| 6636 | struct perf_domain *pd; |
| 6637 | |
| 6638 | rcu_read_lock(); |
| 6639 | pd = rcu_dereference(rd->pd); |
| 6640 | if (!pd || READ_ONCE(rd->overutilized)) |
| 6641 | goto fail; |
| 6642 | |
| 6643 | /* |
| 6644 | * Energy-aware wake-up happens on the lowest sched_domain starting |
| 6645 | * from sd_asym_cpucapacity spanning over this_cpu and prev_cpu. |
| 6646 | */ |
| 6647 | sd = rcu_dereference(*this_cpu_ptr(&sd_asym_cpucapacity)); |
| 6648 | while (sd && !cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) |
| 6649 | sd = sd->parent; |
| 6650 | if (!sd) |
| 6651 | goto fail; |
| 6652 | |
| 6653 | sync_entity_load_avg(&p->se); |
| 6654 | if (!task_util_est(p)) |
| 6655 | goto unlock; |
| 6656 | |
| 6657 | for (; pd; pd = pd->next) { |
| 6658 | unsigned long cur_delta, spare_cap, max_spare_cap = 0; |
| 6659 | unsigned long base_energy_pd; |
| 6660 | int max_spare_cap_cpu = -1; |
| 6661 | |
| 6662 | /* Compute the 'base' energy of the pd, without @p */ |
| 6663 | base_energy_pd = compute_energy(p, -1, pd); |
| 6664 | base_energy += base_energy_pd; |
| 6665 | |
| 6666 | for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd)) { |
| 6667 | if (!cpumask_test_cpu(cpu, p->cpus_ptr)) |
| 6668 | continue; |
| 6669 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 6670 | util = cpu_util_next(cpu, p, cpu); |
| 6671 | cpu_cap = capacity_of(cpu); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 6672 | spare_cap = cpu_cap; |
| 6673 | lsub_positive(&spare_cap, util); |
| 6674 | |
| 6675 | /* |
| 6676 | * Skip CPUs that cannot satisfy the capacity request. |
| 6677 | * IOW, placing the task there would make the CPU |
| 6678 | * overutilized. Take uclamp into account to see how |
| 6679 | * much capacity we can get out of the CPU; this is |
| 6680 | * aligned with schedutil_cpu_util(). |
| 6681 | */ |
| 6682 | util = uclamp_rq_util_with(cpu_rq(cpu), util, p); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 6683 | if (!fits_capacity(util, cpu_cap)) |
| 6684 | continue; |
| 6685 | |
| 6686 | /* Always use prev_cpu as a candidate. */ |
| 6687 | if (cpu == prev_cpu) { |
| 6688 | prev_delta = compute_energy(p, prev_cpu, pd); |
| 6689 | prev_delta -= base_energy_pd; |
| 6690 | best_delta = min(best_delta, prev_delta); |
| 6691 | } |
| 6692 | |
| 6693 | /* |
| 6694 | * Find the CPU with the maximum spare capacity in |
| 6695 | * the performance domain |
| 6696 | */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 6697 | if (spare_cap > max_spare_cap) { |
| 6698 | max_spare_cap = spare_cap; |
| 6699 | max_spare_cap_cpu = cpu; |
| 6700 | } |
| 6701 | } |
| 6702 | |
| 6703 | /* Evaluate the energy impact of using this CPU. */ |
| 6704 | if (max_spare_cap_cpu >= 0 && max_spare_cap_cpu != prev_cpu) { |
| 6705 | cur_delta = compute_energy(p, max_spare_cap_cpu, pd); |
| 6706 | cur_delta -= base_energy_pd; |
| 6707 | if (cur_delta < best_delta) { |
| 6708 | best_delta = cur_delta; |
| 6709 | best_energy_cpu = max_spare_cap_cpu; |
| 6710 | } |
| 6711 | } |
| 6712 | } |
| 6713 | unlock: |
| 6714 | rcu_read_unlock(); |
| 6715 | |
| 6716 | /* |
| 6717 | * Pick the best CPU if prev_cpu cannot be used, or if it saves at |
| 6718 | * least 6% of the energy used by prev_cpu. |
| 6719 | */ |
| 6720 | if (prev_delta == ULONG_MAX) |
| 6721 | return best_energy_cpu; |
| 6722 | |
| 6723 | if ((prev_delta - best_delta) > ((prev_delta + base_energy) >> 4)) |
| 6724 | return best_energy_cpu; |
| 6725 | |
| 6726 | return prev_cpu; |
| 6727 | |
| 6728 | fail: |
| 6729 | rcu_read_unlock(); |
| 6730 | |
| 6731 | return -1; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6732 | } |
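
/*
 * Editor's worked example of the final comparison above, with made-up
 * numbers. '>> 4' divides by 16, i.e. the best candidate must beat
 * prev_cpu by roughly 6.25% of the total estimated energy
 * (prev_delta + base_energy) before we migrate:
 *
 *   base_energy = 10000, prev_delta = 2000, best_delta = 1100
 *   savings   = prev_delta - best_delta         =  900
 *   threshold = (prev_delta + base_energy) / 16 =  750
 *
 *   900 > 750, so best_energy_cpu is returned instead of prev_cpu.
 */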
| 6733 | |
| 6734 | /* |
| 6735 | * select_task_rq_fair: Select target runqueue for the waking task in domains |
| 6736 | * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE, |
| 6737 | * SD_BALANCE_FORK, or SD_BALANCE_EXEC. |
| 6738 | * |
| 6739 | * Balances load by selecting the idlest CPU in the idlest group, or under |
| 6740 | * certain conditions an idle sibling CPU if the domain has SD_WAKE_AFFINE set. |
| 6741 | * |
| 6742 | * Returns the target CPU number. |
| 6743 | * |
| 6744 | * preempt must be disabled. |
| 6745 | */ |
| 6746 | static int |
| 6747 | select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags) |
| 6748 | { |
| 6749 | struct sched_domain *tmp, *sd = NULL; |
| 6750 | int cpu = smp_processor_id(); |
| 6751 | int new_cpu = prev_cpu; |
| 6752 | int want_affine = 0; |
| 6753 | int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING); |
| 6754 | |
| 6755 | if (sd_flag & SD_BALANCE_WAKE) { |
| 6756 | record_wakee(p); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 6757 | |
| 6758 | if (sched_energy_enabled()) { |
| 6759 | new_cpu = find_energy_efficient_cpu(p, prev_cpu); |
| 6760 | if (new_cpu >= 0) |
| 6761 | return new_cpu; |
| 6762 | new_cpu = prev_cpu; |
| 6763 | } |
| 6764 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 6765 | want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, p->cpus_ptr); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6766 | } |
| 6767 | |
| 6768 | rcu_read_lock(); |
| 6769 | for_each_domain(cpu, tmp) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6770 | /* |
| 6771 | * If both 'cpu' and 'prev_cpu' are part of this domain, |
| 6772 | * cpu is a valid SD_WAKE_AFFINE target. |
| 6773 | */ |
| 6774 | if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && |
| 6775 | cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) { |
| 6776 | if (cpu != prev_cpu) |
| 6777 | new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync); |
| 6778 | |
| 6779 | sd = NULL; /* Prefer wake_affine over balance flags */ |
| 6780 | break; |
| 6781 | } |
| 6782 | |
| 6783 | if (tmp->flags & sd_flag) |
| 6784 | sd = tmp; |
| 6785 | else if (!want_affine) |
| 6786 | break; |
| 6787 | } |
| 6788 | |
| 6789 | if (unlikely(sd)) { |
| 6790 | /* Slow path */ |
| 6791 | new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag); |
| 6792 | } else if (sd_flag & SD_BALANCE_WAKE) { /* XXX always ? */ |
| 6793 | /* Fast path */ |
| 6794 | |
| 6795 | new_cpu = select_idle_sibling(p, prev_cpu, new_cpu); |
| 6796 | |
| 6797 | if (want_affine) |
| 6798 | current->recent_used_cpu = cpu; |
| 6799 | } |
| 6800 | rcu_read_unlock(); |
| 6801 | |
| 6802 | return new_cpu; |
| 6803 | } |
| 6804 | |
| 6805 | static void detach_entity_cfs_rq(struct sched_entity *se); |
| 6806 | |
| 6807 | /* |
| 6808 | * Called immediately before a task is migrated to a new CPU; task_cpu(p) and |
| 6809 | * cfs_rq_of(p) references at time of call are still valid and identify the |
| 6810 | * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held. |
| 6811 | */ |
| 6812 | static void migrate_task_rq_fair(struct task_struct *p, int new_cpu) |
| 6813 | { |
| 6814 | /* |
| 6815 | * As blocked tasks retain absolute vruntime the migration needs to |
| 6816 | * deal with this by subtracting the old and adding the new |
| 6817 | * min_vruntime -- the latter is done by enqueue_entity() when placing |
| 6818 | * the task on the new runqueue. |
| 6819 | */ |
| 6820 | if (p->state == TASK_WAKING) { |
| 6821 | struct sched_entity *se = &p->se; |
| 6822 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
| 6823 | u64 min_vruntime; |
| 6824 | |
| 6825 | #ifndef CONFIG_64BIT |
| 6826 | u64 min_vruntime_copy; |
| 6827 | |
| 6828 | do { |
| 6829 | min_vruntime_copy = cfs_rq->min_vruntime_copy; |
| 6830 | smp_rmb(); |
| 6831 | min_vruntime = cfs_rq->min_vruntime; |
| 6832 | } while (min_vruntime != min_vruntime_copy); |
| 6833 | #else |
| 6834 | min_vruntime = cfs_rq->min_vruntime; |
| 6835 | #endif |
| 6836 | |
| 6837 | se->vruntime -= min_vruntime; |
| 6838 | } |
| 6839 | |
| 6840 | if (p->on_rq == TASK_ON_RQ_MIGRATING) { |
| 6841 | /* |
| 6842 | * In case of TASK_ON_RQ_MIGRATING we in fact hold the 'old' |
| 6843 | * rq->lock and can modify state directly. |
| 6844 | */ |
| 6845 | lockdep_assert_held(&task_rq(p)->lock); |
| 6846 | detach_entity_cfs_rq(&p->se); |
| 6847 | |
| 6848 | } else { |
| 6849 | /* |
| 6850 | * We are supposed to update the task to "current" time, so |
| 6851 | * that it is up to date and ready to go to the new CPU/cfs_rq. |
| 6852 | * But we have difficulty getting what the current time is, so |
| 6853 | * simply throw away the out-of-date time. This will result in |
| 6854 | * the wakee task being less decayed, but giving the wakee more |
| 6855 | * load sounds not bad. |
| 6856 | */ |
| 6857 | remove_entity_load_avg(&p->se); |
| 6858 | } |
| 6859 | |
| 6860 | /* Tell new CPU we are migrated */ |
| 6861 | p->se.avg.last_update_time = 0; |
| 6862 | |
| 6863 | /* We have migrated, no longer consider this task hot */ |
| 6864 | p->se.exec_start = 0; |
| 6865 | |
| 6866 | update_scan_period(p, new_cpu); |
| 6867 | } |
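
/*
 * Editor's sketch of the !CONFIG_64BIT copy/retry idiom above: a 64-bit
 * load can tear on 32-bit machines, so the writer publishes a second copy
 * (with a write barrier in between) and the reader retries until both
 * values agree. Names are hypothetical; the kernel pairs this loop with
 * smp_wmb() on the update side.
 */
struct min_vruntime_sketch {
	volatile unsigned long long val;
	volatile unsigned long long val_copy;
};

static unsigned long long read_u64_sketch(const struct min_vruntime_sketch *s)
{
	unsigned long long copy, val;

	do {
		copy = s->val_copy;
		/* smp_rmb() goes here in the real code */
		val = s->val;
	} while (val != copy);

	return val;
}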
| 6868 | |
| 6869 | static void task_dead_fair(struct task_struct *p) |
| 6870 | { |
| 6871 | remove_entity_load_avg(&p->se); |
| 6872 | } |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 6873 | |
| 6874 | static int |
| 6875 | balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) |
| 6876 | { |
| 6877 | if (rq->nr_running) |
| 6878 | return 1; |
| 6879 | |
| 6880 | return newidle_balance(rq, rf) != 0; |
| 6881 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6882 | #endif /* CONFIG_SMP */ |
| 6883 | |
| 6884 | static unsigned long wakeup_gran(struct sched_entity *se) |
| 6885 | { |
| 6886 | unsigned long gran = sysctl_sched_wakeup_granularity; |
| 6887 | |
| 6888 | /* |
| 6889 | * Since it's curr that is running now, convert the gran from |
| 6890 | * real-time to virtual-time in its units. |
| 6891 | * |
| 6892 | * By using 'se' instead of 'curr' we penalize light tasks, so |
| 6893 | * they get preempted easier. That is, if 'se' < 'curr' then |
| 6894 | * the resulting gran will be larger, therefore penalizing the |
| 6895 | * lighter task; if otoh 'se' > 'curr' then the resulting gran will |
| 6896 | * be smaller, again penalizing the lighter task. |
| 6897 | * |
| 6898 | * This is especially important for buddies when the leftmost |
| 6899 | * task is higher priority than the buddy. |
| 6900 | */ |
| 6901 | return calc_delta_fair(gran, se); |
| 6902 | } |
| 6903 | |
| 6904 | /* |
| 6905 | * Should 'se' preempt 'curr'. |
| 6906 | * |
| 6907 | * |s1 |
| 6908 | * |s2 |
| 6909 | * |s3 |
| 6910 | * g |
| 6911 | * |<--->|c |
| 6912 | * |
| 6913 | * w(c, s1) = -1 |
| 6914 | * w(c, s2) = 0 |
| 6915 | * w(c, s3) = 1 |
| 6916 | * |
| 6917 | */ |
| 6918 | static int |
| 6919 | wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) |
| 6920 | { |
| 6921 | s64 gran, vdiff = curr->vruntime - se->vruntime; |
| 6922 | |
| 6923 | if (vdiff <= 0) |
| 6924 | return -1; |
| 6925 | |
| 6926 | gran = wakeup_gran(se); |
| 6927 | if (vdiff > gran) |
| 6928 | return 1; |
| 6929 | |
| 6930 | return 0; |
| 6931 | } |
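
/*
 * Editor's toy driver for the three outcomes of wakeup_preempt_entity()
 * above (virtual-time values picked arbitrarily):
 *
 *   vdiff = curr->vruntime - se->vruntime, gran = wakeup_gran(se)
 *
 *   vdiff = -50, gran = 100  ->  -1  (se is not ahead; no preemption)
 *   vdiff =  60, gran = 100  ->   0  (ahead, but within the granularity)
 *   vdiff = 140, gran = 100  ->   1  (far enough ahead; preempt curr)
 */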
| 6932 | |
| 6933 | static void set_last_buddy(struct sched_entity *se) |
| 6934 | { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 6935 | if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se)))) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6936 | return; |
| 6937 | |
| 6938 | for_each_sched_entity(se) { |
| 6939 | if (SCHED_WARN_ON(!se->on_rq)) |
| 6940 | return; |
| 6941 | cfs_rq_of(se)->last = se; |
| 6942 | } |
| 6943 | } |
| 6944 | |
| 6945 | static void set_next_buddy(struct sched_entity *se) |
| 6946 | { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 6947 | if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se)))) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 6948 | return; |
| 6949 | |
| 6950 | for_each_sched_entity(se) { |
| 6951 | if (SCHED_WARN_ON(!se->on_rq)) |
| 6952 | return; |
| 6953 | cfs_rq_of(se)->next = se; |
| 6954 | } |
| 6955 | } |
| 6956 | |
| 6957 | static void set_skip_buddy(struct sched_entity *se) |
| 6958 | { |
| 6959 | for_each_sched_entity(se) |
| 6960 | cfs_rq_of(se)->skip = se; |
| 6961 | } |
| 6962 | |
| 6963 | /* |
| 6964 | * Preempt the current task with a newly woken task if needed: |
| 6965 | */ |
| 6966 | static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) |
| 6967 | { |
| 6968 | struct task_struct *curr = rq->curr; |
| 6969 | struct sched_entity *se = &curr->se, *pse = &p->se; |
| 6970 | struct cfs_rq *cfs_rq = task_cfs_rq(curr); |
| 6971 | int scale = cfs_rq->nr_running >= sched_nr_latency; |
| 6972 | int next_buddy_marked = 0; |
| 6973 | |
| 6974 | if (unlikely(se == pse)) |
| 6975 | return; |
| 6976 | |
| 6977 | /* |
| 6978 | * This is possible from callers such as attach_tasks(), in which we |
| 6979 | * unconditionally check_preempt_curr() after an enqueue (which may have |
| 6980 | * led to a throttle). This both saves work and prevents false |
| 6981 | * next-buddy nomination below. |
| 6982 | */ |
| 6983 | if (unlikely(throttled_hierarchy(cfs_rq_of(pse)))) |
| 6984 | return; |
| 6985 | |
| 6986 | if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) { |
| 6987 | set_next_buddy(pse); |
| 6988 | next_buddy_marked = 1; |
| 6989 | } |
| 6990 | |
| 6991 | /* |
| 6992 | * We can come here with TIF_NEED_RESCHED already set from new task |
| 6993 | * wake up path. |
| 6994 | * |
| 6995 | * Note: this also catches the edge-case of curr being in a throttled |
| 6996 | * group (e.g. via set_curr_task), since update_curr() (in the |
| 6997 | * enqueue of curr) will have resulted in resched being set. This |
| 6998 | * prevents us from potentially nominating it as a false LAST_BUDDY |
| 6999 | * below. |
| 7000 | */ |
| 7001 | if (test_tsk_need_resched(curr)) |
| 7002 | return; |
| 7003 | |
| 7004 | /* Idle tasks are by definition preempted by non-idle tasks. */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 7005 | if (unlikely(task_has_idle_policy(curr)) && |
| 7006 | likely(!task_has_idle_policy(p))) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7007 | goto preempt; |
| 7008 | |
| 7009 | /* |
| 7010 | * Batch and idle tasks do not preempt non-idle tasks (their preemption |
| 7011 | * is driven by the tick): |
| 7012 | */ |
| 7013 | if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION)) |
| 7014 | return; |
| 7015 | |
| 7016 | find_matching_se(&se, &pse); |
| 7017 | update_curr(cfs_rq_of(se)); |
| 7018 | BUG_ON(!pse); |
| 7019 | if (wakeup_preempt_entity(se, pse) == 1) { |
| 7020 | /* |
| 7021 | * Bias pick_next to pick the sched entity that is |
| 7022 | * triggering this preemption. |
| 7023 | */ |
| 7024 | if (!next_buddy_marked) |
| 7025 | set_next_buddy(pse); |
| 7026 | goto preempt; |
| 7027 | } |
| 7028 | |
| 7029 | return; |
| 7030 | |
| 7031 | preempt: |
| 7032 | resched_curr(rq); |
| 7033 | /* |
| 7034 | * Only set the backward buddy when the current task is still |
| 7035 | * on the rq. This can happen when a wakeup gets interleaved |
| 7036 | * with schedule on the ->pre_schedule() or idle_balance() |
| 7037 | * point, either of which can drop the rq lock. |
| 7038 | * |
| 7039 | * Also, during early boot the idle thread is in the fair class, |
| 7040 | * for obvious reasons its a bad idea to schedule back to it. |
| 7041 | * for obvious reasons it's a bad idea to schedule back to it. |
| 7042 | if (unlikely(!se->on_rq || curr == rq->idle)) |
| 7043 | return; |
| 7044 | |
| 7045 | if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se)) |
| 7046 | set_last_buddy(se); |
| 7047 | } |
| 7048 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 7049 | struct task_struct * |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7050 | pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) |
| 7051 | { |
| 7052 | struct cfs_rq *cfs_rq = &rq->cfs; |
| 7053 | struct sched_entity *se; |
| 7054 | struct task_struct *p; |
| 7055 | int new_tasks; |
| 7056 | |
| 7057 | again: |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 7058 | if (!sched_fair_runnable(rq)) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7059 | goto idle; |
| 7060 | |
| 7061 | #ifdef CONFIG_FAIR_GROUP_SCHED |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 7062 | if (!prev || prev->sched_class != &fair_sched_class) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7063 | goto simple; |
| 7064 | |
| 7065 | /* |
| 7066 | * Because of the set_next_buddy() in dequeue_task_fair() it is rather |
| 7067 | * likely that a next task is from the same cgroup as the current. |
| 7068 | * |
| 7069 | * Therefore attempt to avoid putting and setting the entire cgroup |
| 7070 | * hierarchy, only change the part that actually changes. |
| 7071 | */ |
| 7072 | |
| 7073 | do { |
| 7074 | struct sched_entity *curr = cfs_rq->curr; |
| 7075 | |
| 7076 | /* |
| 7077 | * Since we got here without doing put_prev_entity() we also |
| 7078 | * have to consider cfs_rq->curr. If it is still a runnable |
| 7079 | * entity, update_curr() will update its vruntime, otherwise |
| 7080 | * forget we've ever seen it. |
| 7081 | */ |
| 7082 | if (curr) { |
| 7083 | if (curr->on_rq) |
| 7084 | update_curr(cfs_rq); |
| 7085 | else |
| 7086 | curr = NULL; |
| 7087 | |
| 7088 | /* |
| 7089 | * This call to check_cfs_rq_runtime() will do the |
| 7090 | * throttle and dequeue its entity in the parent(s). |
| 7091 | * Therefore the nr_running test will indeed |
| 7092 | * be correct. |
| 7093 | */ |
| 7094 | if (unlikely(check_cfs_rq_runtime(cfs_rq))) { |
| 7095 | cfs_rq = &rq->cfs; |
| 7096 | |
| 7097 | if (!cfs_rq->nr_running) |
| 7098 | goto idle; |
| 7099 | |
| 7100 | goto simple; |
| 7101 | } |
| 7102 | } |
| 7103 | |
| 7104 | se = pick_next_entity(cfs_rq, curr); |
| 7105 | cfs_rq = group_cfs_rq(se); |
| 7106 | } while (cfs_rq); |
| 7107 | |
| 7108 | p = task_of(se); |
| 7109 | |
| 7110 | /* |
| 7111 | * Since we haven't yet done put_prev_entity() and the selected task |
| 7112 | * is a different task from the one we started out with, try to touch |
| 7113 | * the least number of cfs_rqs. |
| 7114 | */ |
| 7115 | if (prev != p) { |
| 7116 | struct sched_entity *pse = &prev->se; |
| 7117 | |
| 7118 | while (!(cfs_rq = is_same_group(se, pse))) { |
| 7119 | int se_depth = se->depth; |
| 7120 | int pse_depth = pse->depth; |
| 7121 | |
| 7122 | if (se_depth <= pse_depth) { |
| 7123 | put_prev_entity(cfs_rq_of(pse), pse); |
| 7124 | pse = parent_entity(pse); |
| 7125 | } |
| 7126 | if (se_depth >= pse_depth) { |
| 7127 | set_next_entity(cfs_rq_of(se), se); |
| 7128 | se = parent_entity(se); |
| 7129 | } |
| 7130 | } |
| 7131 | |
| 7132 | put_prev_entity(cfs_rq, pse); |
| 7133 | set_next_entity(cfs_rq, se); |
| 7134 | } |
| 7135 | |
| 7136 | goto done; |
| 7137 | simple: |
| 7138 | #endif |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 7139 | if (prev) |
| 7140 | put_prev_task(rq, prev); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7141 | |
| 7142 | do { |
| 7143 | se = pick_next_entity(cfs_rq, NULL); |
| 7144 | set_next_entity(cfs_rq, se); |
| 7145 | cfs_rq = group_cfs_rq(se); |
| 7146 | } while (cfs_rq); |
| 7147 | |
| 7148 | p = task_of(se); |
| 7149 | |
| 7150 | done: __maybe_unused; |
| 7151 | #ifdef CONFIG_SMP |
| 7152 | /* |
| 7153 | * Move the next running task to the front of |
| 7154 | * the list, so that our cfs_tasks list becomes |
| 7155 | * an MRU one. |
| 7156 | */ |
| 7157 | list_move(&p->se.group_node, &rq->cfs_tasks); |
| 7158 | #endif |
| 7159 | |
| 7160 | if (hrtick_enabled(rq)) |
| 7161 | hrtick_start_fair(rq, p); |
| 7162 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 7163 | update_misfit_status(p, rq); |
| 7164 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7165 | return p; |
| 7166 | |
| 7167 | idle: |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 7168 | if (!rf) |
| 7169 | return NULL; |
| 7170 | |
| 7171 | new_tasks = newidle_balance(rq, rf); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7172 | |
| 7173 | /* |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 7174 | * Because newidle_balance() releases (and re-acquires) rq->lock, it is |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7175 | * possible for any higher priority task to appear. In that case we |
| 7176 | * must re-start the pick_next_entity() loop. |
| 7177 | */ |
| 7178 | if (new_tasks < 0) |
| 7179 | return RETRY_TASK; |
| 7180 | |
| 7181 | if (new_tasks > 0) |
| 7182 | goto again; |
| 7183 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 7184 | /* |
| 7185 | * rq is about to be idle, check if we need to update the |
| 7186 | * lost_idle_time of clock_pelt |
| 7187 | */ |
| 7188 | update_idle_rq_clock_pelt(rq); |
| 7189 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7190 | return NULL; |
| 7191 | } |
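
/*
 * Editor's generic sketch of the depth-matching walk in the cgroup path of
 * pick_next_task_fair() above: to meet at the common ancestor of two nodes,
 * step the deeper one up first, then walk both up in lock-step (both move
 * when the depths are equal). Hypothetical node type, not the kernel's
 * sched_entity.
 */
struct node_sketch { struct node_sketch *parent; int depth; };

static struct node_sketch *
common_ancestor_sketch(struct node_sketch *a, struct node_sketch *b)
{
	while (a != b) {
		int da = a->depth, db = b->depth;

		if (da >= db)
			a = a->parent;	/* put_prev_entity() side in fair.c */
		if (db >= da)
			b = b->parent;	/* set_next_entity() side in fair.c */
	}
	return a;
}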
| 7192 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 7193 | static struct task_struct *__pick_next_task_fair(struct rq *rq) |
| 7194 | { |
| 7195 | return pick_next_task_fair(rq, NULL, NULL); |
| 7196 | } |
| 7197 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7198 | /* |
| 7199 | * Account for a descheduled task: |
| 7200 | */ |
| 7201 | static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) |
| 7202 | { |
| 7203 | struct sched_entity *se = &prev->se; |
| 7204 | struct cfs_rq *cfs_rq; |
| 7205 | |
| 7206 | for_each_sched_entity(se) { |
| 7207 | cfs_rq = cfs_rq_of(se); |
| 7208 | put_prev_entity(cfs_rq, se); |
| 7209 | } |
| 7210 | } |
| 7211 | |
| 7212 | /* |
| 7213 | * sched_yield() is very simple |
| 7214 | * |
| 7215 | * The magic of dealing with the ->skip buddy is in pick_next_entity. |
| 7216 | */ |
| 7217 | static void yield_task_fair(struct rq *rq) |
| 7218 | { |
| 7219 | struct task_struct *curr = rq->curr; |
| 7220 | struct cfs_rq *cfs_rq = task_cfs_rq(curr); |
| 7221 | struct sched_entity *se = &curr->se; |
| 7222 | |
| 7223 | /* |
| 7224 | * Are we the only task in the tree? |
| 7225 | */ |
| 7226 | if (unlikely(rq->nr_running == 1)) |
| 7227 | return; |
| 7228 | |
| 7229 | clear_buddies(cfs_rq, se); |
| 7230 | |
| 7231 | if (curr->policy != SCHED_BATCH) { |
| 7232 | update_rq_clock(rq); |
| 7233 | /* |
| 7234 | * Update run-time statistics of the 'current'. |
| 7235 | */ |
| 7236 | update_curr(cfs_rq); |
| 7237 | /* |
| 7238 | * Tell update_rq_clock() that we've just updated, |
| 7239 | * so we don't do microscopic update in schedule() |
| 7240 | * and double the fastpath cost. |
| 7241 | */ |
| 7242 | rq_clock_skip_update(rq); |
| 7243 | } |
| 7244 | |
| 7245 | set_skip_buddy(se); |
| 7246 | } |
| 7247 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 7248 | static bool yield_to_task_fair(struct rq *rq, struct task_struct *p) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7249 | { |
| 7250 | struct sched_entity *se = &p->se; |
| 7251 | |
| 7252 | /* throttled hierarchies are not runnable */ |
| 7253 | if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se))) |
| 7254 | return false; |
| 7255 | |
| 7256 | /* Tell the scheduler that we'd really like pse to run next. */ |
| 7257 | set_next_buddy(se); |
| 7258 | |
| 7259 | yield_task_fair(rq); |
| 7260 | |
| 7261 | return true; |
| 7262 | } |
| 7263 | |
| 7264 | #ifdef CONFIG_SMP |
| 7265 | /************************************************** |
| 7266 | * Fair scheduling class load-balancing methods. |
| 7267 | * |
| 7268 | * BASICS |
| 7269 | * |
| 7270 | * The purpose of load-balancing is to achieve the same basic fairness the |
| 7271 | * per-CPU scheduler provides, namely provide a proportional amount of compute |
| 7272 | * time to each task. This is expressed in the following equation: |
| 7273 | * |
| 7274 | * W_i,n/P_i == W_j,n/P_j for all i,j (1) |
| 7275 | * |
| 7276 | * Where W_i,n is the n-th weight average for CPU i. The instantaneous weight |
| 7277 | * W_i,0 is defined as: |
| 7278 | * |
| 7279 | * W_i,0 = \Sum_j w_i,j (2) |
| 7280 | * |
| 7281 | * Where w_i,j is the weight of the j-th runnable task on CPU i. This weight |
| 7282 | * is derived from the nice value as per sched_prio_to_weight[]. |
| 7283 | * |
| 7284 | * The weight average is an exponential decay average of the instantaneous |
| 7285 | * weight: |
| 7286 | * |
| 7287 | * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3) |
| 7288 | * |
| 7289 | * C_i is the compute capacity of CPU i; typically it is the |
| 7290 | * fraction of 'recent' time available for SCHED_OTHER task execution. But it |
| 7291 | * can also include other factors [XXX]. |
| 7292 | * |
| 7293 | * To achieve this balance we define a measure of imbalance which follows |
| 7294 | * directly from (1): |
| 7295 | * |
| 7296 | * imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j } (4) |
| 7297 | * |
| 7298 | * We then move tasks around to minimize the imbalance. In the continuous |
| 7299 | * function space it is obvious this converges; in the discrete case we get |
| 7300 | * a few fun cases generally called infeasible weight scenarios. |
| 7301 | * |
| 7302 | * [XXX expand on: |
| 7303 | * - infeasible weights; |
| 7304 | * - local vs global optima in the discrete case. ] |
| 7305 | * |
| 7306 | * |
| 7307 | * SCHED DOMAINS |
| 7308 | * |
| 7309 | * In order to solve the imbalance equation (4), and avoid the obvious O(n^2) |
| 7310 | * for all i,j solution, we create a tree of CPUs that follows the hardware |
| 7311 | * topology where each level pairs two lower groups (or better). This results |
| 7312 | * in O(log n) layers. Furthermore we reduce the number of CPUs going up the |
| 7313 | * tree to only the first of the previous level and we decrease the frequency |
| 7314 | * of load-balance at each level inv. proportional to the number of CPUs in |
| 7315 | * the groups. |
| 7316 | * |
| 7317 | * This yields: |
| 7318 | * |
| 7319 | * log_2 n 1 n |
| 7320 | * \Sum { --- * --- * 2^i } = O(n) (5) |
| 7321 | * i = 0 2^i 2^i |
| 7322 | * `- size of each group |
| 7323 | * | | `- number of CPUs doing load-balance |
| 7324 | * | `- freq |
| 7325 | * `- sum over all levels |
| 7326 | * |
| 7327 | * Coupled with a limit on how many tasks we can migrate every balance pass, |
| 7328 | * this makes (5) the runtime complexity of the balancer. |
| 7329 | * |
| 7330 | * An important property here is that each CPU is still (indirectly) connected |
| 7331 | * to every other CPU in at most O(log n) steps: |
| 7332 | * |
| 7333 | * The adjacency matrix of the resulting graph is given by: |
| 7334 | * |
| 7335 | * log_2 n |
| 7336 | * A_i,j = \Union (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6) |
| 7337 | * k = 0 |
| 7338 | * |
| 7339 | * And you'll find that: |
| 7340 | * |
| 7341 | * A^(log_2 n)_i,j != 0 for all i,j (7) |
| 7342 | * |
| 7343 | * Showing there's indeed a path between every CPU in at most O(log n) steps. |
| 7344 | * The task movement gives a factor of O(m), giving a convergence complexity |
| 7345 | * of: |
| 7346 | * |
| 7347 | * O(nm log n), n := nr_cpus, m := nr_tasks (8) |
| 7348 | * |
| 7349 | * |
| 7350 | * WORK CONSERVING |
| 7351 | * |
| 7352 | * In order to avoid CPUs going idle while there's still work to do, new idle |
| 7353 | * balancing is more aggressive and has the newly idle CPU iterate up the domain |
| 7354 | * tree itself instead of relying on other CPUs to bring it work. |
| 7355 | * |
| 7356 | * This adds some complexity to both (5) and (8) but it reduces the total idle |
| 7357 | * time. |
| 7358 | * |
| 7359 | * [XXX more?] |
| 7360 | * |
| 7361 | * |
| 7362 | * CGROUPS |
| 7363 | * |
| 7364 | * Cgroups make a horror show out of (2), instead of a simple sum we get: |
| 7365 | * |
| 7366 | * s_k,i |
| 7367 | * W_i,0 = \Sum_j \Prod_k w_k * ----- (9) |
| 7368 | * S_k |
| 7369 | * |
| 7370 | * Where |
| 7371 | * |
| 7372 | * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10) |
| 7373 | * |
| 7374 | * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on CPU i. |
| 7375 | * |
| 7376 | * The big problem is S_k, which is a global sum needed to compute a local (W_i) |
| 7377 | * property. |
| 7378 | * |
| 7379 | * [XXX write more on how we solve this.. _after_ merging pjt's patches that |
| 7380 | * rewrite all of this once again.] |
| 7381 | */ |
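
/*
 * Editor's worked instance of (5) for n = 8 CPUs (four levels, i = 0..3),
 * multiplying freq * number-of-balancing-CPUs * group-size per level, as
 * annotated above:
 *
 *   i = 0:  1   * 8 * 1 = 8
 *   i = 1: 1/2  * 4 * 2 = 4
 *   i = 2: 1/4  * 2 * 4 = 2
 *   i = 3: 1/8  * 1 * 8 = 1
 *                        --
 *                        15 <= 2n, hence the O(n) bound.
 */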
| 7382 | |
| 7383 | static unsigned long __read_mostly max_load_balance_interval = HZ/10; |
| 7384 | |
| 7385 | enum fbq_type { regular, remote, all }; |
| 7386 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 7387 | /* |
| 7388 | * 'group_type' describes the group of CPUs at the moment of load balancing. |
| 7389 | * |
| 7390 | * The enum is ordered by pulling priority, with the group with lowest priority |
| 7391 | * first so the group_type can simply be compared when selecting the busiest |
| 7392 | * group. See update_sd_pick_busiest(). |
| 7393 | */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 7394 | enum group_type { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 7395 | /* The group has spare capacity that can be used to run more tasks. */ |
| 7396 | group_has_spare = 0, |
| 7397 | /* |
| 7398 | * The group is fully used and the tasks don't compete for more CPU |
| 7399 | * cycles. Nevertheless, some tasks might wait before running. |
| 7400 | */ |
| 7401 | group_fully_busy, |
| 7402 | /* |
| 7403 | * SD_ASYM_CPUCAPACITY only: One task doesn't fit with CPU's capacity |
| 7404 | * and must be migrated to a more powerful CPU. |
| 7405 | */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 7406 | group_misfit_task, |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 7407 | /* |
| 7408 | * SD_ASYM_PACKING only: One local CPU with higher capacity is available, |
| 7409 | * and the task should be migrated to it instead of running on the |
| 7410 | * current CPU. |
| 7411 | */ |
| 7412 | group_asym_packing, |
| 7413 | /* |
| 7414 | * The tasks' affinity constraints previously prevented the scheduler |
| 7415 | * from balancing the load across the system. |
| 7416 | */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 7417 | group_imbalanced, |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 7418 | /* |
| 7419 | * The CPU is overloaded and can't provide expected CPU cycles to all |
| 7420 | * tasks. |
| 7421 | */ |
| 7422 | group_overloaded |
| 7423 | }; |
| 7424 | |
| 7425 | enum migration_type { |
| 7426 | migrate_load = 0, |
| 7427 | migrate_util, |
| 7428 | migrate_task, |
| 7429 | migrate_misfit |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 7430 | }; |
| 7431 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7432 | #define LBF_ALL_PINNED 0x01 |
| 7433 | #define LBF_NEED_BREAK 0x02 |
| 7434 | #define LBF_DST_PINNED 0x04 |
| 7435 | #define LBF_SOME_PINNED 0x08 |
| 7436 | #define LBF_NOHZ_STATS 0x10 |
| 7437 | #define LBF_NOHZ_AGAIN 0x20 |
| 7438 | |
| 7439 | struct lb_env { |
| 7440 | struct sched_domain *sd; |
| 7441 | |
| 7442 | struct rq *src_rq; |
| 7443 | int src_cpu; |
| 7444 | |
| 7445 | int dst_cpu; |
| 7446 | struct rq *dst_rq; |
| 7447 | |
| 7448 | struct cpumask *dst_grpmask; |
| 7449 | int new_dst_cpu; |
| 7450 | enum cpu_idle_type idle; |
| 7451 | long imbalance; |
| 7452 | /* The set of CPUs under consideration for load-balancing */ |
| 7453 | struct cpumask *cpus; |
| 7454 | |
| 7455 | unsigned int flags; |
| 7456 | |
| 7457 | unsigned int loop; |
| 7458 | unsigned int loop_break; |
| 7459 | unsigned int loop_max; |
| 7460 | |
| 7461 | enum fbq_type fbq_type; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 7462 | enum migration_type migration_type; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7463 | struct list_head tasks; |
| 7464 | }; |
| 7465 | |
| 7466 | /* |
| 7467 | * Is this task likely cache-hot: |
| 7468 | */ |
| 7469 | static int task_hot(struct task_struct *p, struct lb_env *env) |
| 7470 | { |
| 7471 | s64 delta; |
| 7472 | |
| 7473 | lockdep_assert_held(&env->src_rq->lock); |
| 7474 | |
| 7475 | if (p->sched_class != &fair_sched_class) |
| 7476 | return 0; |
| 7477 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 7478 | if (unlikely(task_has_idle_policy(p))) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7479 | return 0; |
| 7480 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 7481 | /* SMT siblings share cache */ |
| 7482 | if (env->sd->flags & SD_SHARE_CPUCAPACITY) |
| 7483 | return 0; |
| 7484 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7485 | /* |
| 7486 | * Buddy candidates are cache hot: |
| 7487 | */ |
| 7488 | if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running && |
| 7489 | (&p->se == cfs_rq_of(&p->se)->next || |
| 7490 | &p->se == cfs_rq_of(&p->se)->last)) |
| 7491 | return 1; |
| 7492 | |
| 7493 | if (sysctl_sched_migration_cost == -1) |
| 7494 | return 1; |
| 7495 | if (sysctl_sched_migration_cost == 0) |
| 7496 | return 0; |
| 7497 | |
| 7498 | delta = rq_clock_task(env->src_rq) - p->se.exec_start; |
| 7499 | |
| 7500 | return delta < (s64)sysctl_sched_migration_cost; |
| 7501 | } |
| 7502 | |
| 7503 | #ifdef CONFIG_NUMA_BALANCING |
| 7504 | /* |
| 7505 | * Returns 1, if task migration degrades locality |
| 7506 | * Returns 0, if task migration improves locality, i.e. migration is preferred. |
| 7507 | * Returns -1, if task migration is not affected by locality. |
| 7508 | */ |
| 7509 | static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env) |
| 7510 | { |
| 7511 | struct numa_group *numa_group = rcu_dereference(p->numa_group); |
| 7512 | unsigned long src_weight, dst_weight; |
| 7513 | int src_nid, dst_nid, dist; |
| 7514 | |
| 7515 | if (!static_branch_likely(&sched_numa_balancing)) |
| 7516 | return -1; |
| 7517 | |
| 7518 | if (!p->numa_faults || !(env->sd->flags & SD_NUMA)) |
| 7519 | return -1; |
| 7520 | |
| 7521 | src_nid = cpu_to_node(env->src_cpu); |
| 7522 | dst_nid = cpu_to_node(env->dst_cpu); |
| 7523 | |
| 7524 | if (src_nid == dst_nid) |
| 7525 | return -1; |
| 7526 | |
| 7527 | /* Migrating away from the preferred node is always bad. */ |
| 7528 | if (src_nid == p->numa_preferred_nid) { |
| 7529 | if (env->src_rq->nr_running > env->src_rq->nr_preferred_running) |
| 7530 | return 1; |
| 7531 | else |
| 7532 | return -1; |
| 7533 | } |
| 7534 | |
| 7535 | /* Encourage migration to the preferred node. */ |
| 7536 | if (dst_nid == p->numa_preferred_nid) |
| 7537 | return 0; |
| 7538 | |
| 7539 | /* Leaving a core idle is often worse than degrading locality. */ |
| 7540 | if (env->idle == CPU_IDLE) |
| 7541 | return -1; |
| 7542 | |
| 7543 | dist = node_distance(src_nid, dst_nid); |
| 7544 | if (numa_group) { |
| 7545 | src_weight = group_weight(p, src_nid, dist); |
| 7546 | dst_weight = group_weight(p, dst_nid, dist); |
| 7547 | } else { |
| 7548 | src_weight = task_weight(p, src_nid, dist); |
| 7549 | dst_weight = task_weight(p, dst_nid, dist); |
| 7550 | } |
| 7551 | |
| 7552 | return dst_weight < src_weight; |
| 7553 | } |
| 7554 | |
| 7555 | #else |
| 7556 | static inline int migrate_degrades_locality(struct task_struct *p, |
| 7557 | struct lb_env *env) |
| 7558 | { |
| 7559 | return -1; |
| 7560 | } |
| 7561 | #endif |
| 7562 | |
| 7563 | /* |
| 7564 | * can_migrate_task - may task p from runqueue rq be migrated to this_cpu? |
| 7565 | */ |
| 7566 | static |
| 7567 | int can_migrate_task(struct task_struct *p, struct lb_env *env) |
| 7568 | { |
| 7569 | int tsk_cache_hot; |
| 7570 | |
| 7571 | lockdep_assert_held(&env->src_rq->lock); |
| 7572 | |
| 7573 | /* |
| 7574 | * We do not migrate tasks that are: |
| 7575 | * 1) throttled_lb_pair, or |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 7576 | * 2) cannot be migrated to this CPU due to cpus_ptr, or |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7577 | * 3) running (obviously), or |
| 7578 | * 4) are cache-hot on their current CPU. |
| 7579 | */ |
| 7580 | if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu)) |
| 7581 | return 0; |
| 7582 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 7583 | /* Disregard pcpu kthreads; they are where they need to be. */ |
| 7584 | if (kthread_is_per_cpu(p)) |
| 7585 | return 0; |
| 7586 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 7587 | if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7588 | int cpu; |
| 7589 | |
| 7590 | schedstat_inc(p->se.statistics.nr_failed_migrations_affine); |
| 7591 | |
| 7592 | env->flags |= LBF_SOME_PINNED; |
| 7593 | |
| 7594 | /* |
| 7595 | * Remember if this task can be migrated to any other CPU in |
| 7596 | * our sched_group. We may want to revisit it if we couldn't |
| 7597 | * meet load balance goals by pulling other tasks on src_cpu. |
| 7598 | * |
| 7599 | * Avoid computing new_dst_cpu for NEWLY_IDLE or if we have |
| 7600 | * already computed one in current iteration. |
| 7601 | */ |
| 7602 | if (env->idle == CPU_NEWLY_IDLE || (env->flags & LBF_DST_PINNED)) |
| 7603 | return 0; |
| 7604 | |
| 7605 | /* Prevent re-selecting dst_cpu via env's CPUs: */ |
| 7606 | for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 7607 | if (cpumask_test_cpu(cpu, p->cpus_ptr)) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7608 | env->flags |= LBF_DST_PINNED; |
| 7609 | env->new_dst_cpu = cpu; |
| 7610 | break; |
| 7611 | } |
| 7612 | } |
| 7613 | |
| 7614 | return 0; |
| 7615 | } |
| 7616 | |
| 7617 | /* Record that we found at least one task that could run on dst_cpu */ |
| 7618 | env->flags &= ~LBF_ALL_PINNED; |
| 7619 | |
| 7620 | if (task_running(env->src_rq, p)) { |
| 7621 | schedstat_inc(p->se.statistics.nr_failed_migrations_running); |
| 7622 | return 0; |
| 7623 | } |
| 7624 | |
| 7625 | /* |
| 7626 | * Aggressive migration if: |
| 7627 | * 1) destination numa is preferred |
| 7628 | * 2) task is cache cold, or |
| 7629 | * 3) too many balance attempts have failed. |
| 7630 | */ |
| 7631 | tsk_cache_hot = migrate_degrades_locality(p, env); |
| 7632 | if (tsk_cache_hot == -1) |
| 7633 | tsk_cache_hot = task_hot(p, env); |
| 7634 | |
| 7635 | if (tsk_cache_hot <= 0 || |
| 7636 | env->sd->nr_balance_failed > env->sd->cache_nice_tries) { |
| 7637 | if (tsk_cache_hot == 1) { |
| 7638 | schedstat_inc(env->sd->lb_hot_gained[env->idle]); |
| 7639 | schedstat_inc(p->se.statistics.nr_forced_migrations); |
| 7640 | } |
| 7641 | return 1; |
| 7642 | } |
| 7643 | |
| 7644 | schedstat_inc(p->se.statistics.nr_failed_migrations_hot); |
| 7645 | return 0; |
| 7646 | } |
| 7647 | |
| 7648 | /* |
| 7649 | * detach_task() -- detach the task for the migration specified in env |
| 7650 | */ |
| 7651 | static void detach_task(struct task_struct *p, struct lb_env *env) |
| 7652 | { |
| 7653 | lockdep_assert_held(&env->src_rq->lock); |
| 7654 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7655 | deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK); |
| 7656 | set_task_cpu(p, env->dst_cpu); |
| 7657 | } |
| 7658 | |
| 7659 | /* |
| 7660 | * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as |
| 7661 | * part of active balancing operations within "domain". |
| 7662 | * |
| 7663 | * Returns a task if successful and NULL otherwise. |
| 7664 | */ |
| 7665 | static struct task_struct *detach_one_task(struct lb_env *env) |
| 7666 | { |
| 7667 | struct task_struct *p; |
| 7668 | |
| 7669 | lockdep_assert_held(&env->src_rq->lock); |
| 7670 | |
| 7671 | list_for_each_entry_reverse(p, |
| 7672 | &env->src_rq->cfs_tasks, se.group_node) { |
| 7673 | if (!can_migrate_task(p, env)) |
| 7674 | continue; |
| 7675 | |
| 7676 | detach_task(p, env); |
| 7677 | |
| 7678 | /* |
| 7679 | * Right now, this is only the second place where |
| 7680 | * lb_gained[env->idle] is updated (other is detach_tasks) |
| 7681 | * so we can safely collect stats here rather than |
| 7682 | * inside detach_tasks(). |
| 7683 | */ |
| 7684 | schedstat_inc(env->sd->lb_gained[env->idle]); |
| 7685 | return p; |
| 7686 | } |
| 7687 | return NULL; |
| 7688 | } |
| 7689 | |
| 7690 | static const unsigned int sched_nr_migrate_break = 32; |
| 7691 | |
| 7692 | /* |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 7693 | * detach_tasks() -- tries to detach up to imbalance load/util/tasks from |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7694 | * busiest_rq, as part of a balancing operation within domain "sd". |
| 7695 | * |
| 7696 | * Returns number of detached tasks if successful and 0 otherwise. |
| 7697 | */ |
| 7698 | static int detach_tasks(struct lb_env *env) |
| 7699 | { |
| 7700 | struct list_head *tasks = &env->src_rq->cfs_tasks; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 7701 | unsigned long util, load; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7702 | struct task_struct *p; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7703 | int detached = 0; |
| 7704 | |
| 7705 | lockdep_assert_held(&env->src_rq->lock); |
| 7706 | |
| 7707 | if (env->imbalance <= 0) |
| 7708 | return 0; |
| 7709 | |
| 7710 | while (!list_empty(tasks)) { |
| 7711 | /* |
| 7712 | * We don't want to steal all, otherwise we may be treated likewise, |
| 7713 | * which could at worst lead to a livelock crash. |
| 7714 | */ |
| 7715 | if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1) |
| 7716 | break; |
| 7717 | |
| 7718 | p = list_last_entry(tasks, struct task_struct, se.group_node); |
| 7719 | |
| 7720 | env->loop++; |
| 7721 | /* We've more or less seen every task there is, call it quits */ |
| 7722 | if (env->loop > env->loop_max) |
| 7723 | break; |
| 7724 | |
| 7725 | /* take a breather every nr_migrate tasks */ |
| 7726 | if (env->loop > env->loop_break) { |
| 7727 | env->loop_break += sched_nr_migrate_break; |
| 7728 | env->flags |= LBF_NEED_BREAK; |
| 7729 | break; |
| 7730 | } |
| 7731 | |
| 7732 | if (!can_migrate_task(p, env)) |
| 7733 | goto next; |
| 7734 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 7735 | switch (env->migration_type) { |
| 7736 | case migrate_load: |
| 7737 | /* |
| 7738 | * Depending on the number of CPUs and tasks and the |
| 7739 | * cgroup hierarchy, task_h_load() can return a null |
| 7740 | * value. Make sure that env->imbalance decreases |
| 7741 | * otherwise detach_tasks() will stop only after |
| 7742 | * detaching up to loop_max tasks. |
| 7743 | */ |
| 7744 | load = max_t(unsigned long, task_h_load(p), 1); |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 7745 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 7746 | if (sched_feat(LB_MIN) && |
| 7747 | load < 16 && !env->sd->nr_balance_failed) |
| 7748 | goto next; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7749 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 7750 | /* |
| 7751 | * Make sure that we don't migrate too much load. |
| 7752 | * Nevertheless, let's relax the constraint if the |
| 7753 | * scheduler fails to find a good waiting task to |
| 7754 | * migrate. |
| 7755 | */ |
| 7756 | if (shr_bound(load, env->sd->nr_balance_failed) > env->imbalance) |
| 7757 | goto next; |
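| | |
| | /* |
| | * Worked example (illustrative, assuming shr_bound(x, n) is |
| | * a bounded right shift x >> n): with env->imbalance == 1024, |
| | * a task with task_h_load() == 2048 is skipped while |
| | * nr_balance_failed == 0, but passes after one failed round, |
| | * since 2048 >> 1 == 1024 no longer exceeds the imbalance. |
| | */ |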
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7758 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 7759 | env->imbalance -= load; |
| 7760 | break; |
| 7761 | |
| 7762 | case migrate_util: |
| 7763 | util = task_util_est(p); |
| 7764 | |
| 7765 | if (util > env->imbalance) |
| 7766 | goto next; |
| 7767 | |
| 7768 | env->imbalance -= util; |
| 7769 | break; |
| 7770 | |
| 7771 | case migrate_task: |
| 7772 | env->imbalance--; |
| 7773 | break; |
| 7774 | |
| 7775 | case migrate_misfit: |
| 7776 | /* This is not a misfit task */ |
| 7777 | if (task_fits_capacity(p, capacity_of(env->src_cpu))) |
| 7778 | goto next; |
| 7779 | |
| 7780 | env->imbalance = 0; |
| 7781 | break; |
| 7782 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7783 | |
| 7784 | detach_task(p, env); |
| 7785 | list_add(&p->se.group_node, &env->tasks); |
| 7786 | |
| 7787 | detached++; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7788 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 7789 | #ifdef CONFIG_PREEMPTION |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7790 | /* |
| 7791 | * NEWIDLE balancing is a source of latency, so preemptible |
| 7792 | * kernels will stop after the first task is detached to minimize |
| 7793 | * the critical section. |
| 7794 | */ |
| 7795 | if (env->idle == CPU_NEWLY_IDLE) |
| 7796 | break; |
| 7797 | #endif |
| 7798 | |
| 7799 | /* |
| 7800 | * We only want to steal up to the prescribed amount of |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 7801 | * load/util/tasks. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7802 | */ |
| 7803 | if (env->imbalance <= 0) |
| 7804 | break; |
| 7805 | |
| 7806 | continue; |
| 7807 | next: |
| 7808 | list_move(&p->se.group_node, tasks); |
| 7809 | } |
| 7810 | |
| 7811 | /* |
| 7812 | * Right now, this is one of only two places we collect this stat |
| 7813 | * so we can safely collect detach_one_task() stats here rather |
| 7814 | * than inside detach_one_task(). |
| 7815 | */ |
| 7816 | schedstat_add(env->sd->lb_gained[env->idle], detached); |
| 7817 | |
| 7818 | return detached; |
| 7819 | } |
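| | |
| | /* |
| | * Usage sketch (simplified; assumes the load_balance() call site found |
| | * later in this file, details may differ): |
| | * |
| | *	rq_lock_irqsave(busiest, &rf); |
| | *	update_rq_clock(busiest); |
| | *	cur_ld_moved = detach_tasks(&env);	/* src_rq lock held */ |
| | *	rq_unlock(busiest, &rf); |
| | *	if (cur_ld_moved) |
| | *		attach_tasks(&env);		/* takes dst_rq lock itself */ |
| | * |
| | * Detach and attach run under different rq locks, which is why the |
| | * detached tasks are parked on env->tasks in between. |
| | */ |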
| 7820 | |
| 7821 | /* |
| 7822 | * attach_task() -- attach the task detached by detach_task() to its new rq. |
| 7823 | */ |
| 7824 | static void attach_task(struct rq *rq, struct task_struct *p) |
| 7825 | { |
| 7826 | lockdep_assert_held(&rq->lock); |
| 7827 | |
| 7828 | BUG_ON(task_rq(p) != rq); |
| 7829 | activate_task(rq, p, ENQUEUE_NOCLOCK); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7830 | check_preempt_curr(rq, p, 0); |
| 7831 | } |
| 7832 | |
| 7833 | /* |
| 7834 | * attach_one_task() -- attaches the task returned from detach_one_task() to |
| 7835 | * its new rq. |
| 7836 | */ |
| 7837 | static void attach_one_task(struct rq *rq, struct task_struct *p) |
| 7838 | { |
| 7839 | struct rq_flags rf; |
| 7840 | |
| 7841 | rq_lock(rq, &rf); |
| 7842 | update_rq_clock(rq); |
| 7843 | attach_task(rq, p); |
| 7844 | rq_unlock(rq, &rf); |
| 7845 | } |
| 7846 | |
| 7847 | /* |
| 7848 | * attach_tasks() -- attaches all tasks detached by detach_tasks() to their |
| 7849 | * new rq. |
| 7850 | */ |
| 7851 | static void attach_tasks(struct lb_env *env) |
| 7852 | { |
| 7853 | struct list_head *tasks = &env->tasks; |
| 7854 | struct task_struct *p; |
| 7855 | struct rq_flags rf; |
| 7856 | |
| 7857 | rq_lock(env->dst_rq, &rf); |
| 7858 | update_rq_clock(env->dst_rq); |
| 7859 | |
| 7860 | while (!list_empty(tasks)) { |
| 7861 | p = list_first_entry(tasks, struct task_struct, se.group_node); |
| 7862 | list_del_init(&p->se.group_node); |
| 7863 | |
| 7864 | attach_task(env->dst_rq, p); |
| 7865 | } |
| 7866 | |
| 7867 | rq_unlock(env->dst_rq, &rf); |
| 7868 | } |
| 7869 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 7870 | #ifdef CONFIG_NO_HZ_COMMON |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7871 | static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) |
| 7872 | { |
| 7873 | if (cfs_rq->avg.load_avg) |
| 7874 | return true; |
| 7875 | |
| 7876 | if (cfs_rq->avg.util_avg) |
| 7877 | return true; |
| 7878 | |
| 7879 | return false; |
| 7880 | } |
| 7881 | |
| 7882 | static inline bool others_have_blocked(struct rq *rq) |
| 7883 | { |
| 7884 | if (READ_ONCE(rq->avg_rt.util_avg)) |
| 7885 | return true; |
| 7886 | |
| 7887 | if (READ_ONCE(rq->avg_dl.util_avg)) |
| 7888 | return true; |
| 7889 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 7890 | if (thermal_load_avg(rq)) |
| 7891 | return true; |
| 7892 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7893 | #ifdef CONFIG_HAVE_SCHED_AVG_IRQ |
| 7894 | if (READ_ONCE(rq->avg_irq.util_avg)) |
| 7895 | return true; |
| 7896 | #endif |
| 7897 | |
| 7898 | return false; |
| 7899 | } |
| 7900 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 7901 | static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) |
| 7902 | { |
| 7903 | rq->last_blocked_load_update_tick = jiffies; |
| 7904 | |
| 7905 | if (!has_blocked) |
| 7906 | rq->has_blocked_load = 0; |
| 7907 | } |
| 7908 | #else |
| 7909 | static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { return false; } |
| 7910 | static inline bool others_have_blocked(struct rq *rq) { return false; } |
| 7911 | static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {} |
| 7912 | #endif |
| 7913 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 7914 | static bool __update_blocked_others(struct rq *rq, bool *done) |
| 7915 | { |
| 7916 | const struct sched_class *curr_class; |
| 7917 | u64 now = rq_clock_pelt(rq); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 7918 | unsigned long thermal_pressure; |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 7919 | bool decayed; |
| 7920 | |
| 7921 | /* |
| 7922 | * update_load_avg() can call cpufreq_update_util(). Make sure that RT, |
| 7923 | * DL and IRQ signals have been updated before updating CFS. |
| 7924 | */ |
| 7925 | curr_class = rq->curr->sched_class; |
| 7926 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 7927 | thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq)); |
| 7928 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 7929 | decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) | |
| 7930 | update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 7931 | update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure) | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 7932 | update_irq_load_avg(rq, 0); |
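| | |
| | /* |
| | * Design note (added for clarity): the single '|' above is |
| | * deliberate. A short-circuiting '||' would skip the remaining |
| | * update calls once one returned true, but every signal must be |
| | * updated on each invocation; only the combined "did anything |
| | * decay" result is consumed below. |
| | */ |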
| 7933 | |
| 7934 | if (others_have_blocked(rq)) |
| 7935 | *done = false; |
| 7936 | |
| 7937 | return decayed; |
| 7938 | } |
| 7939 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7940 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 7941 | |
| 7942 | static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq) |
| 7943 | { |
| 7944 | if (cfs_rq->load.weight) |
| 7945 | return false; |
| 7946 | |
| 7947 | if (cfs_rq->avg.load_sum) |
| 7948 | return false; |
| 7949 | |
| 7950 | if (cfs_rq->avg.util_sum) |
| 7951 | return false; |
| 7952 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 7953 | if (cfs_rq->avg.runnable_sum) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7954 | return false; |
| 7955 | |
| 7956 | return true; |
| 7957 | } |
| 7958 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 7959 | static bool __update_blocked_fair(struct rq *rq, bool *done) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7960 | { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7961 | struct cfs_rq *cfs_rq, *pos; |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 7962 | bool decayed = false; |
| 7963 | int cpu = cpu_of(rq); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 7964 | |
| 7965 | /* |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7966 | * Iterates the task_group tree in a bottom up fashion, see |
| 7967 | * list_add_leaf_cfs_rq() for details. |
| 7968 | */ |
| 7969 | for_each_leaf_cfs_rq_safe(rq, cfs_rq, pos) { |
| 7970 | struct sched_entity *se; |
| 7971 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 7972 | if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 7973 | update_tg_load_avg(cfs_rq); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7974 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 7975 | if (cfs_rq == &rq->cfs) |
| 7976 | decayed = true; |
| 7977 | } |
| 7978 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7979 | /* Propagate pending load changes to the parent, if any: */ |
| 7980 | se = cfs_rq->tg->se[cpu]; |
| 7981 | if (se && !skip_blocked_update(se)) |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 7982 | update_load_avg(cfs_rq_of(se), se, UPDATE_TG); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7983 | |
| 7984 | /* |
| 7985 | * There can be a lot of idle CPU cgroups. Don't let fully |
| 7986 | * decayed cfs_rqs linger on the list. |
| 7987 | */ |
| 7988 | if (cfs_rq_is_decayed(cfs_rq)) |
| 7989 | list_del_leaf_cfs_rq(cfs_rq); |
| 7990 | |
| 7991 | /* Don't need periodic decay once load/util_avg are null */ |
| 7992 | if (cfs_rq_has_blocked(cfs_rq)) |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 7993 | *done = false; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7994 | } |
| 7995 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 7996 | return decayed; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7997 | } |
| 7998 | |
| 7999 | /* |
| 8000 | * Compute the hierarchical load factor for cfs_rq and all its ancestors. |
| 8001 | * This needs to be done in a top-down fashion because the load of a child |
| 8002 | * group is a fraction of its parent's load. |
| 8003 | */ |
| 8004 | static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq) |
| 8005 | { |
| 8006 | struct rq *rq = rq_of(cfs_rq); |
| 8007 | struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)]; |
| 8008 | unsigned long now = jiffies; |
| 8009 | unsigned long load; |
| 8010 | |
| 8011 | if (cfs_rq->last_h_load_update == now) |
| 8012 | return; |
| 8013 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 8014 | WRITE_ONCE(cfs_rq->h_load_next, NULL); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8015 | for_each_sched_entity(se) { |
| 8016 | cfs_rq = cfs_rq_of(se); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 8017 | WRITE_ONCE(cfs_rq->h_load_next, se); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8018 | if (cfs_rq->last_h_load_update == now) |
| 8019 | break; |
| 8020 | } |
| 8021 | |
| 8022 | if (!se) { |
| 8023 | cfs_rq->h_load = cfs_rq_load_avg(cfs_rq); |
| 8024 | cfs_rq->last_h_load_update = now; |
| 8025 | } |
| 8026 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 8027 | while ((se = READ_ONCE(cfs_rq->h_load_next)) != NULL) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8028 | load = cfs_rq->h_load; |
| 8029 | load = div64_ul(load * se->avg.load_avg, |
| 8030 | cfs_rq_load_avg(cfs_rq) + 1); |
| 8031 | cfs_rq = group_cfs_rq(se); |
| 8032 | cfs_rq->h_load = load; |
| 8033 | cfs_rq->last_h_load_update = now; |
| 8034 | } |
| 8035 | } |
| 8036 | |
| 8037 | static unsigned long task_h_load(struct task_struct *p) |
| 8038 | { |
| 8039 | struct cfs_rq *cfs_rq = task_cfs_rq(p); |
| 8040 | |
| 8041 | update_cfs_rq_h_load(cfs_rq); |
| 8042 | return div64_ul(p->se.avg.load_avg * cfs_rq->h_load, |
| 8043 | cfs_rq_load_avg(cfs_rq) + 1); |
| 8044 | } |
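| | |
| | /* |
| | * Worked example (illustrative numbers): a task with se.avg.load_avg of |
| | * 512 on a cfs_rq whose h_load is 1024 and whose cfs_rq_load_avg() is |
| | * 2048 contributes about 512 * 1024 / (2048 + 1) ~= 255 at the root |
| | * level, i.e. its share shrinks in proportion to how loaded its group |
| | * hierarchy is. |
| | */ |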
| 8045 | #else |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 8046 | static bool __update_blocked_fair(struct rq *rq, bool *done) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8047 | { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8048 | struct cfs_rq *cfs_rq = &rq->cfs; |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 8049 | bool decayed; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8050 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 8051 | decayed = update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq); |
| 8052 | if (cfs_rq_has_blocked(cfs_rq)) |
| 8053 | *done = false; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8054 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 8055 | return decayed; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8056 | } |
| 8057 | |
| 8058 | static unsigned long task_h_load(struct task_struct *p) |
| 8059 | { |
| 8060 | return p->se.avg.load_avg; |
| 8061 | } |
| 8062 | #endif |
| 8063 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 8064 | static void update_blocked_averages(int cpu) |
| 8065 | { |
| 8066 | bool decayed = false, done = true; |
| 8067 | struct rq *rq = cpu_rq(cpu); |
| 8068 | struct rq_flags rf; |
| 8069 | |
| 8070 | rq_lock_irqsave(rq, &rf); |
| 8071 | update_rq_clock(rq); |
| 8072 | |
| 8073 | decayed |= __update_blocked_others(rq, &done); |
| 8074 | decayed |= __update_blocked_fair(rq, &done); |
| 8075 | |
| 8076 | update_blocked_load_status(rq, !done); |
| 8077 | if (decayed) |
| 8078 | cpufreq_update_util(rq, 0); |
| 8079 | rq_unlock_irqrestore(rq, &rf); |
| 8080 | } |
| 8081 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8082 | /********** Helpers for find_busiest_group ************************/ |
| 8083 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8084 | /* |
| 8085 | * sg_lb_stats - stats of a sched_group required for load_balancing |
| 8086 | */ |
| 8087 | struct sg_lb_stats { |
| 8088 | unsigned long avg_load; /* Avg load across the CPUs of the group */ |
| 8089 | unsigned long group_load; /* Total load over the CPUs of the group */ |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8090 | unsigned long group_capacity; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8091 | unsigned long group_util; /* Total utilization over the CPUs of the group */ |
| 8092 | unsigned long group_runnable; /* Total runnable time over the CPUs of the group */ |
| 8093 | unsigned int sum_nr_running; /* Nr of tasks running in the group */ |
| 8094 | unsigned int sum_h_nr_running; /* Nr of CFS tasks running in the group */ |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8095 | unsigned int idle_cpus; |
| 8096 | unsigned int group_weight; |
| 8097 | enum group_type group_type; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8098 | unsigned int group_asym_packing; /* Tasks should be moved to preferred CPU */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 8099 | unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */ |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8100 | #ifdef CONFIG_NUMA_BALANCING |
| 8101 | unsigned int nr_numa_running; |
| 8102 | unsigned int nr_preferred_running; |
| 8103 | #endif |
| 8104 | }; |
| 8105 | |
| 8106 | /* |
| 8107 | * sd_lb_stats - Structure to store the statistics of a sched_domain |
| 8108 | * during load balancing. |
| 8109 | */ |
| 8110 | struct sd_lb_stats { |
| 8111 | struct sched_group *busiest; /* Busiest group in this sd */ |
| 8112 | struct sched_group *local; /* Local group in this sd */ |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8113 | unsigned long total_load; /* Total load of all groups in sd */ |
| 8114 | unsigned long total_capacity; /* Total capacity of all groups in sd */ |
| 8115 | unsigned long avg_load; /* Average load across all groups in sd */ |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8116 | unsigned int prefer_sibling; /* tasks should go to sibling first */ |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8117 | |
| 8118 | struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */ |
| 8119 | struct sg_lb_stats local_stat; /* Statistics of the local group */ |
| 8120 | }; |
| 8121 | |
| 8122 | static inline void init_sd_lb_stats(struct sd_lb_stats *sds) |
| 8123 | { |
| 8124 | /* |
| 8125 | * Skimp on the clearing to avoid duplicate work. We can avoid clearing |
| 8126 | * local_stat because update_sg_lb_stats() does a full clear/assignment. |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8127 | * We must however set busiest_stat::group_type and |
| 8128 | * busiest_stat::idle_cpus to the worst busiest group because |
| 8129 | * update_sd_pick_busiest() reads these before assignment. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8130 | */ |
| 8131 | *sds = (struct sd_lb_stats){ |
| 8132 | .busiest = NULL, |
| 8133 | .local = NULL, |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8134 | .total_load = 0UL, |
| 8135 | .total_capacity = 0UL, |
| 8136 | .busiest_stat = { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8137 | .idle_cpus = UINT_MAX, |
| 8138 | .group_type = group_has_spare, |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8139 | }, |
| 8140 | }; |
| 8141 | } |
| 8142 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8143 | static unsigned long scale_rt_capacity(int cpu) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8144 | { |
| 8145 | struct rq *rq = cpu_rq(cpu); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 8146 | unsigned long max = arch_scale_cpu_capacity(cpu); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8147 | unsigned long used, free; |
| 8148 | unsigned long irq; |
| 8149 | |
| 8150 | irq = cpu_util_irq(rq); |
| 8151 | |
| 8152 | if (unlikely(irq >= max)) |
| 8153 | return 1; |
| 8154 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8155 | /* |
| 8156 | * avg_rt.util_avg and avg_dl.util_avg track binary signals |
| 8157 | * (running and not running) with weights 0 and 1024 respectively. |
| 8158 | * avg_thermal.load_avg tracks thermal pressure, and its weighted |
| 8159 | * average is weighted by the actual capacity delta (hence "load"). |
| 8160 | */ |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8161 | used = READ_ONCE(rq->avg_rt.util_avg); |
| 8162 | used += READ_ONCE(rq->avg_dl.util_avg); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8163 | used += thermal_load_avg(rq); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8164 | |
| 8165 | if (unlikely(used >= max)) |
| 8166 | return 1; |
| 8167 | |
| 8168 | free = max - used; |
| 8169 | |
| 8170 | return scale_irq_capacity(free, irq, max); |
| 8171 | } |
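| | |
| | /* |
| | * Worked example (illustrative, assuming scale_irq_capacity() computes |
| | * roughly free * (max - irq) / max): with max == 1024, irq == 102 and |
| | * rt + dl + thermal pressure == 204, free is 820 and the remaining CFS |
| | * capacity comes out near 820 * (1024 - 102) / 1024 ~= 738. |
| | */ |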
| 8172 | |
| 8173 | static void update_cpu_capacity(struct sched_domain *sd, int cpu) |
| 8174 | { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8175 | unsigned long capacity = scale_rt_capacity(cpu); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8176 | struct sched_group *sdg = sd->groups; |
| 8177 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 8178 | cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8179 | |
| 8180 | if (!capacity) |
| 8181 | capacity = 1; |
| 8182 | |
| 8183 | cpu_rq(cpu)->cpu_capacity = capacity; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8184 | trace_sched_cpu_capacity_tp(cpu_rq(cpu)); |
| 8185 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8186 | sdg->sgc->capacity = capacity; |
| 8187 | sdg->sgc->min_capacity = capacity; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 8188 | sdg->sgc->max_capacity = capacity; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8189 | } |
| 8190 | |
| 8191 | void update_group_capacity(struct sched_domain *sd, int cpu) |
| 8192 | { |
| 8193 | struct sched_domain *child = sd->child; |
| 8194 | struct sched_group *group, *sdg = sd->groups; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 8195 | unsigned long capacity, min_capacity, max_capacity; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8196 | unsigned long interval; |
| 8197 | |
| 8198 | interval = msecs_to_jiffies(sd->balance_interval); |
| 8199 | interval = clamp(interval, 1UL, max_load_balance_interval); |
| 8200 | sdg->sgc->next_update = jiffies + interval; |
| 8201 | |
| 8202 | if (!child) { |
| 8203 | update_cpu_capacity(sd, cpu); |
| 8204 | return; |
| 8205 | } |
| 8206 | |
| 8207 | capacity = 0; |
| 8208 | min_capacity = ULONG_MAX; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 8209 | max_capacity = 0; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8210 | |
| 8211 | if (child->flags & SD_OVERLAP) { |
| 8212 | /* |
| 8213 | * SD_OVERLAP domains cannot assume that child groups |
| 8214 | * span the current group. |
| 8215 | */ |
| 8216 | |
| 8217 | for_each_cpu(cpu, sched_group_span(sdg)) { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8218 | unsigned long cpu_cap = capacity_of(cpu); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8219 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8220 | capacity += cpu_cap; |
| 8221 | min_capacity = min(cpu_cap, min_capacity); |
| 8222 | max_capacity = max(cpu_cap, max_capacity); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8223 | } |
| 8224 | } else { |
| 8225 | /* |
| 8226 | * !SD_OVERLAP domains can assume that child groups |
| 8227 | * span the current group. |
| 8228 | */ |
| 8229 | |
| 8230 | group = child->groups; |
| 8231 | do { |
| 8232 | struct sched_group_capacity *sgc = group->sgc; |
| 8233 | |
| 8234 | capacity += sgc->capacity; |
| 8235 | min_capacity = min(sgc->min_capacity, min_capacity); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 8236 | max_capacity = max(sgc->max_capacity, max_capacity); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8237 | group = group->next; |
| 8238 | } while (group != child->groups); |
| 8239 | } |
| 8240 | |
| 8241 | sdg->sgc->capacity = capacity; |
| 8242 | sdg->sgc->min_capacity = min_capacity; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 8243 | sdg->sgc->max_capacity = max_capacity; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8244 | } |
| 8245 | |
| 8246 | /* |
| 8247 | * Check whether the capacity of the rq has been noticeably reduced by side |
| 8248 | * activity. The imbalance_pct is used for the threshold. |
| 8249 | * Return true if the capacity is reduced. |
| 8250 | */ |
| 8251 | static inline int |
| 8252 | check_cpu_capacity(struct rq *rq, struct sched_domain *sd) |
| 8253 | { |
| 8254 | return ((rq->cpu_capacity * sd->imbalance_pct) < |
| 8255 | (rq->cpu_capacity_orig * 100)); |
| 8256 | } |
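| | |
| | /* |
| | * Example (illustrative, assuming a typical imbalance_pct of 117): with |
| | * cpu_capacity_orig == 1024, the capacity counts as reduced once |
| | * cpu_capacity drops below 1024 * 100 / 117 ~= 875, i.e. once more than |
| | * roughly 15% is eaten by RT/DL/IRQ/thermal activity. |
| | */ |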
| 8257 | |
| 8258 | /* |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 8259 | * Check whether a rq has a misfit task and if it looks like we can actually |
| 8260 | * help that task: we can migrate the task to a CPU of higher capacity, or |
| 8261 | * the task's current CPU is heavily pressured. |
| 8262 | */ |
| 8263 | static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd) |
| 8264 | { |
| 8265 | return rq->misfit_task_load && |
| 8266 | (rq->cpu_capacity_orig < rq->rd->max_cpu_capacity || |
| 8267 | check_cpu_capacity(rq, sd)); |
| 8268 | } |
| 8269 | |
| 8270 | /* |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8271 | * Group imbalance indicates (and tries to solve) the problem where balancing |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 8272 | * groups is inadequate due to ->cpus_ptr constraints. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8273 | * |
| 8274 | * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a |
| 8275 | * cpumask covering 1 CPU of the first group and 3 CPUs of the second group. |
| 8276 | * Something like: |
| 8277 | * |
| 8278 | * { 0 1 2 3 } { 4 5 6 7 } |
| 8279 | * * * * * |
| 8280 | * |
| 8281 | * If we were to balance group-wise we'd place two tasks in the first group and |
| 8282 | * two tasks in the second group. Clearly this is undesired as it will overload |
| 8283 | * cpu 3 and leave one of the CPUs in the second group unused. |
| 8284 | * |
| 8285 | * The current solution to this issue is detecting the skew in the first group |
| 8286 | * by noticing the lower domain failed to reach balance and had difficulty |
| 8287 | * moving tasks due to affinity constraints. |
| 8288 | * |
| 8289 | * When this is so detected; this group becomes a candidate for busiest; see |
| 8290 | * update_sd_pick_busiest(). And calculate_imbalance() and |
| 8291 | * find_busiest_group() avoid some of the usual balance conditions to allow it |
| 8292 | * to create an effective group imbalance. |
| 8293 | * |
| 8294 | * This is a somewhat tricky proposition since the next run might not find the |
| 8295 | * group imbalance and decide the groups need to be balanced again. A most |
| 8296 | * subtle and fragile situation. |
| 8297 | */ |
| 8298 | |
| 8299 | static inline int sg_imbalanced(struct sched_group *group) |
| 8300 | { |
| 8301 | return group->sgc->imbalance; |
| 8302 | } |
| 8303 | |
| 8304 | /* |
| 8305 | * group_has_capacity returns true if the group has spare capacity that could |
| 8306 | * be used by some tasks. |
| 8307 | * We consider that a group has spare capacity if the number of tasks is |
| 8308 | * smaller than the number of CPUs or if the utilization is lower than the |
| 8309 | * available capacity for CFS tasks. |
| 8310 | * For the latter, we use a threshold to stabilize the state, to take into |
| 8311 | * account the variance of the tasks' load and to return true if the available |
| 8312 | * capacity is meaningful for the load balancer. |
| 8313 | * As an example, an available capacity of 1% can appear but it doesn't bring |
| 8314 | * any benefit to the load balancer. |
| 8315 | */ |
| 8316 | static inline bool |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8317 | group_has_capacity(unsigned int imbalance_pct, struct sg_lb_stats *sgs) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8318 | { |
| 8319 | if (sgs->sum_nr_running < sgs->group_weight) |
| 8320 | return true; |
| 8321 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8322 | if ((sgs->group_capacity * imbalance_pct) < |
| 8323 | (sgs->group_runnable * 100)) |
| 8324 | return false; |
| 8325 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8326 | if ((sgs->group_capacity * 100) > |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8327 | (sgs->group_util * imbalance_pct)) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8328 | return true; |
| 8329 | |
| 8330 | return false; |
| 8331 | } |
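| | |
| | /* |
| | * Example (illustrative, again assuming imbalance_pct == 117): a group |
| | * with group_capacity == 1024 and no free CPUs still reports spare |
| | * capacity while group_util stays below 1024 * 100 / 117 ~= 875 and |
| | * group_runnable stays below 1024 * 117 / 100 ~= 1198. |
| | */ |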
| 8332 | |
| 8333 | /* |
| 8334 | * group_is_overloaded returns true if the group has more tasks than it can |
| 8335 | * handle. |
| 8336 | * group_is_overloaded is not equal to !group_has_capacity: a group with |
| 8337 | * exactly the right number of tasks has no spare capacity left but is not |
| 8338 | * overloaded, so both group_has_capacity and group_is_overloaded return |
| 8339 | * false. |
| 8340 | */ |
| 8341 | static inline bool |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8342 | group_is_overloaded(unsigned int imbalance_pct, struct sg_lb_stats *sgs) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8343 | { |
| 8344 | if (sgs->sum_nr_running <= sgs->group_weight) |
| 8345 | return false; |
| 8346 | |
| 8347 | if ((sgs->group_capacity * 100) < |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8348 | (sgs->group_util * imbalance_pct)) |
| 8349 | return true; |
| 8350 | |
| 8351 | if ((sgs->group_capacity * imbalance_pct) < |
| 8352 | (sgs->group_runnable * 100)) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8353 | return true; |
| 8354 | |
| 8355 | return false; |
| 8356 | } |
| 8357 | |
| 8358 | /* |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 8359 | * group_smaller_min_cpu_capacity: Returns true if sched_group sg has smaller |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8360 | * per-CPU capacity than sched_group ref. |
| 8361 | */ |
| 8362 | static inline bool |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 8363 | group_smaller_min_cpu_capacity(struct sched_group *sg, struct sched_group *ref) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8364 | { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 8365 | return fits_capacity(sg->sgc->min_capacity, ref->sgc->min_capacity); |
| 8366 | } |
| 8367 | |
| 8368 | /* |
| 8369 | * group_smaller_max_cpu_capacity: Returns true if sched_group sg has smaller |
| 8370 | * per-CPU capacity_orig than sched_group ref. |
| 8371 | */ |
| 8372 | static inline bool |
| 8373 | group_smaller_max_cpu_capacity(struct sched_group *sg, struct sched_group *ref) |
| 8374 | { |
| 8375 | return fits_capacity(sg->sgc->max_capacity, ref->sgc->max_capacity); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8376 | } |
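| | |
| | /* |
| | * Note (illustrative; assumes fits_capacity() applies a ~20% margin, |
| | * i.e. cap * 1280 < max * 1024): a group only counts as "smaller" in |
| | * the two helpers above if its capacity is at least about 20% below |
| | * the reference group's, filtering out noise from minor differences. |
| | */ |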
| 8377 | |
| 8378 | static inline enum |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8379 | group_type group_classify(unsigned int imbalance_pct, |
| 8380 | struct sched_group *group, |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8381 | struct sg_lb_stats *sgs) |
| 8382 | { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8383 | if (group_is_overloaded(imbalance_pct, sgs)) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8384 | return group_overloaded; |
| 8385 | |
| 8386 | if (sg_imbalanced(group)) |
| 8387 | return group_imbalanced; |
| 8388 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8389 | if (sgs->group_asym_packing) |
| 8390 | return group_asym_packing; |
| 8391 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 8392 | if (sgs->group_misfit_task_load) |
| 8393 | return group_misfit_task; |
| 8394 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8395 | if (!group_has_capacity(imbalance_pct, sgs)) |
| 8396 | return group_fully_busy; |
| 8397 | |
| 8398 | return group_has_spare; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8399 | } |
| 8400 | |
| 8401 | static bool update_nohz_stats(struct rq *rq, bool force) |
| 8402 | { |
| 8403 | #ifdef CONFIG_NO_HZ_COMMON |
| 8404 | unsigned int cpu = rq->cpu; |
| 8405 | |
| 8406 | if (!rq->has_blocked_load) |
| 8407 | return false; |
| 8408 | |
| 8409 | if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask)) |
| 8410 | return false; |
| 8411 | |
| 8412 | if (!force && !time_after(jiffies, rq->last_blocked_load_update_tick)) |
| 8413 | return true; |
| 8414 | |
| 8415 | update_blocked_averages(cpu); |
| 8416 | |
| 8417 | return rq->has_blocked_load; |
| 8418 | #else |
| 8419 | return false; |
| 8420 | #endif |
| 8421 | } |
| 8422 | |
| 8423 | /** |
| 8424 | * update_sg_lb_stats - Update sched_group's statistics for load balancing. |
| 8425 | * @env: The load balancing environment. |
| 8426 | * @group: sched_group whose statistics are to be updated. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8427 | * @sgs: variable to hold the statistics for this group. |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 8428 | * @sg_status: Holds flag indicating the status of the sched_group |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8429 | */ |
| 8430 | static inline void update_sg_lb_stats(struct lb_env *env, |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 8431 | struct sched_group *group, |
| 8432 | struct sg_lb_stats *sgs, |
| 8433 | int *sg_status) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8434 | { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8435 | int i, nr_running, local_group; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8436 | |
| 8437 | memset(sgs, 0, sizeof(*sgs)); |
| 8438 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8439 | local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(group)); |
| 8440 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8441 | for_each_cpu_and(i, sched_group_span(group), env->cpus) { |
| 8442 | struct rq *rq = cpu_rq(i); |
| 8443 | |
| 8444 | if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false)) |
| 8445 | env->flags |= LBF_NOHZ_AGAIN; |
| 8446 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8447 | sgs->group_load += cpu_load(rq); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8448 | sgs->group_util += cpu_util(i); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8449 | sgs->group_runnable += cpu_runnable(rq); |
| 8450 | sgs->sum_h_nr_running += rq->cfs.h_nr_running; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8451 | |
| 8452 | nr_running = rq->nr_running; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8453 | sgs->sum_nr_running += nr_running; |
| 8454 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8455 | if (nr_running > 1) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 8456 | *sg_status |= SG_OVERLOAD; |
| 8457 | |
| 8458 | if (cpu_overutilized(i)) |
| 8459 | *sg_status |= SG_OVERUTILIZED; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8460 | |
| 8461 | #ifdef CONFIG_NUMA_BALANCING |
| 8462 | sgs->nr_numa_running += rq->nr_numa_running; |
| 8463 | sgs->nr_preferred_running += rq->nr_preferred_running; |
| 8464 | #endif |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8465 | /* |
| 8466 | * No need to call idle_cpu() if nr_running is not 0 |
| 8467 | */ |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8468 | if (!nr_running && idle_cpu(i)) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8469 | sgs->idle_cpus++; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8470 | /* Idle cpu can't have misfit task */ |
| 8471 | continue; |
| 8472 | } |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 8473 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8474 | if (local_group) |
| 8475 | continue; |
| 8476 | |
| 8477 | /* Check for a misfit task on the cpu */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 8478 | if (env->sd->flags & SD_ASYM_CPUCAPACITY && |
| 8479 | sgs->group_misfit_task_load < rq->misfit_task_load) { |
| 8480 | sgs->group_misfit_task_load = rq->misfit_task_load; |
| 8481 | *sg_status |= SG_OVERLOAD; |
| 8482 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8483 | } |
| 8484 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8485 | /* Check if dst CPU is idle and preferred to this group */ |
| 8486 | if (env->sd->flags & SD_ASYM_PACKING && |
| 8487 | env->idle != CPU_NOT_IDLE && |
| 8488 | sgs->sum_h_nr_running && |
| 8489 | sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu)) { |
| 8490 | sgs->group_asym_packing = 1; |
| 8491 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8492 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8493 | sgs->group_capacity = group->sgc->capacity; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8494 | |
| 8495 | sgs->group_weight = group->group_weight; |
| 8496 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8497 | sgs->group_type = group_classify(env->sd->imbalance_pct, group, sgs); |
| 8498 | |
| 8499 | /* Computing avg_load makes sense only when group is overloaded */ |
| 8500 | if (sgs->group_type == group_overloaded) |
| 8501 | sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) / |
| 8502 | sgs->group_capacity; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8503 | } |
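| | |
| | /* |
| | * Illustration (made-up numbers): avg_load normalizes group_load to |
| | * capacity units, so group_load == 3072 on group_capacity == 1536 |
| | * yields 3072 * 1024 / 1536 == 2048. Two overloaded groups therefore |
| | * compare fairly even when their raw capacities differ. |
| | */ |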
| 8504 | |
| 8505 | /** |
| 8506 | * update_sd_pick_busiest - return 1 on busiest group |
| 8507 | * @env: The load balancing environment. |
| 8508 | * @sds: sched_domain statistics |
| 8509 | * @sg: sched_group candidate to be checked for being the busiest |
| 8510 | * @sgs: sched_group statistics |
| 8511 | * |
| 8512 | * Determine if @sg is a busier group than the previously selected |
| 8513 | * busiest group. |
| 8514 | * |
| 8515 | * Return: %true if @sg is a busier group than the previously selected |
| 8516 | * busiest group. %false otherwise. |
| 8517 | */ |
| 8518 | static bool update_sd_pick_busiest(struct lb_env *env, |
| 8519 | struct sd_lb_stats *sds, |
| 8520 | struct sched_group *sg, |
| 8521 | struct sg_lb_stats *sgs) |
| 8522 | { |
| 8523 | struct sg_lb_stats *busiest = &sds->busiest_stat; |
| 8524 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8525 | /* Make sure that there is at least one task to pull */ |
| 8526 | if (!sgs->sum_h_nr_running) |
| 8527 | return false; |
| 8528 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 8529 | /* |
| 8530 | * Don't try to pull misfit tasks we can't help. |
| 8531 | * We can use max_capacity here as reduction in capacity on some |
| 8532 | * CPUs in the group should either be possible to resolve |
| 8533 | * internally or be covered by avg_load imbalance (eventually). |
| 8534 | */ |
| 8535 | if (sgs->group_type == group_misfit_task && |
| 8536 | (!group_smaller_max_cpu_capacity(sg, sds->local) || |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8537 | sds->local_stat.group_type != group_has_spare)) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 8538 | return false; |
| 8539 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8540 | if (sgs->group_type > busiest->group_type) |
| 8541 | return true; |
| 8542 | |
| 8543 | if (sgs->group_type < busiest->group_type) |
| 8544 | return false; |
| 8545 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8546 | /* |
| 8547 | * The candidate and the current busiest group are the same type of |
| 8548 | * group. Let's check which one is the busiest according to the type. |
| 8549 | */ |
| 8550 | |
| 8551 | switch (sgs->group_type) { |
| 8552 | case group_overloaded: |
| 8553 | /* Select the overloaded group with highest avg_load. */ |
| 8554 | if (sgs->avg_load <= busiest->avg_load) |
| 8555 | return false; |
| 8556 | break; |
| 8557 | |
| 8558 | case group_imbalanced: |
| 8559 | /* |
| 8560 | * Select the 1st imbalanced group as we don't have any way to |
| 8561 | * choose one over another. |
| 8562 | */ |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8563 | return false; |
| 8564 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8565 | case group_asym_packing: |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8566 | /* Prefer to move work away from the lowest-priority CPU */ |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8567 | if (sched_asym_prefer(sg->asym_prefer_cpu, sds->busiest->asym_prefer_cpu)) |
| 8568 | return false; |
| 8569 | break; |
| 8570 | |
| 8571 | case group_misfit_task: |
| 8572 | /* |
| 8573 | * If we have more than one misfit sg go with the biggest |
| 8574 | * misfit. |
| 8575 | */ |
| 8576 | if (sgs->group_misfit_task_load < busiest->group_misfit_task_load) |
| 8577 | return false; |
| 8578 | break; |
| 8579 | |
| 8580 | case group_fully_busy: |
| 8581 | /* |
| 8582 | * Select the fully busy group with highest avg_load. In |
| 8583 | * theory, there is no need to pull task from such kind of |
| 8584 | * group because tasks have all compute capacity that they need |
| 8585 | * but we can still improve the overall throughput by reducing |
| 8586 | * contention when accessing shared HW resources. |
| 8587 | * |
| 8588 | * XXX for now avg_load is not computed and always 0 so we |
| 8589 | * select the 1st one. |
| 8590 | */ |
| 8591 | if (sgs->avg_load <= busiest->avg_load) |
| 8592 | return false; |
| 8593 | break; |
| 8594 | |
| 8595 | case group_has_spare: |
| 8596 | /* |
| 8597 | * Select the non-overloaded group with the lowest number of idle |
| 8598 | * CPUs and the highest number of running tasks. We could also |
| 8599 | * compare the spare capacity, which is more stable, but a group |
| 8600 | * can end up with less spare capacity yet more idle CPUs, which |
| 8601 | * means fewer opportunities to pull tasks. |
| 8602 | */ |
| 8603 | if (sgs->idle_cpus > busiest->idle_cpus) |
| 8604 | return false; |
| 8605 | else if ((sgs->idle_cpus == busiest->idle_cpus) && |
| 8606 | (sgs->sum_nr_running <= busiest->sum_nr_running)) |
| 8607 | return false; |
| 8608 | |
| 8609 | break; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8610 | } |
| 8611 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8612 | /* |
| 8613 | * Candidate sg has no more than one task per CPU and has higher |
| 8614 | * per-CPU capacity. Migrating tasks to less capable CPUs may harm |
| 8615 | * throughput. We maximize throughput; power/energy consequences are |
| 8616 | * not considered. |
| 8617 | */ |
| 8618 | if ((env->sd->flags & SD_ASYM_CPUCAPACITY) && |
| 8619 | (sgs->group_type <= group_fully_busy) && |
| 8620 | (group_smaller_min_cpu_capacity(sds->local, sg))) |
| 8621 | return false; |
| 8622 | |
| 8623 | return true; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8624 | } |
| 8625 | |
| 8626 | #ifdef CONFIG_NUMA_BALANCING |
| 8627 | static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs) |
| 8628 | { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8629 | if (sgs->sum_h_nr_running > sgs->nr_numa_running) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8630 | return regular; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8631 | if (sgs->sum_h_nr_running > sgs->nr_preferred_running) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8632 | return remote; |
| 8633 | return all; |
| 8634 | } |
| 8635 | |
| 8636 | static inline enum fbq_type fbq_classify_rq(struct rq *rq) |
| 8637 | { |
| 8638 | if (rq->nr_running > rq->nr_numa_running) |
| 8639 | return regular; |
| 8640 | if (rq->nr_running > rq->nr_preferred_running) |
| 8641 | return remote; |
| 8642 | return all; |
| 8643 | } |
| 8644 | #else |
| 8645 | static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs) |
| 8646 | { |
| 8647 | return all; |
| 8648 | } |
| 8649 | |
| 8650 | static inline enum fbq_type fbq_classify_rq(struct rq *rq) |
| 8651 | { |
| 8652 | return regular; |
| 8653 | } |
| 8654 | #endif /* CONFIG_NUMA_BALANCING */ |
| 8655 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8656 | |
| 8657 | struct sg_lb_stats; |
| 8658 | |
| 8659 | /* |
| 8660 | * task_running_on_cpu - return 1 if @p is queued (and hence may run) on @cpu. |
| 8661 | */ |
| 8662 | |
| 8663 | static unsigned int task_running_on_cpu(int cpu, struct task_struct *p) |
| 8664 | { |
| 8665 | /* Task has no contribution or is new */ |
| 8666 | if (cpu != task_cpu(p) || !READ_ONCE(p->se.avg.last_update_time)) |
| 8667 | return 0; |
| 8668 | |
| 8669 | if (task_on_rq_queued(p)) |
| 8670 | return 1; |
| 8671 | |
| 8672 | return 0; |
| 8673 | } |
| 8674 | |
| 8675 | /** |
| 8676 | * idle_cpu_without - would a given CPU be idle without p ? |
| 8677 | * @cpu: the processor on which idleness is tested. |
| 8678 | * @p: task which should be ignored. |
| 8679 | * |
| 8680 | * Return: 1 if the CPU would be idle. 0 otherwise. |
| 8681 | */ |
| 8682 | static int idle_cpu_without(int cpu, struct task_struct *p) |
| 8683 | { |
| 8684 | struct rq *rq = cpu_rq(cpu); |
| 8685 | |
| 8686 | if (rq->curr != rq->idle && rq->curr != p) |
| 8687 | return 0; |
| 8688 | |
| 8689 | /* |
| 8690 | * rq->nr_running can't be used here; an updated version without the |
| 8691 | * impact of p on this CPU must be used instead. The updated nr_running |
| 8692 | * must be computed and tested before calling idle_cpu_without(). |
| 8693 | */ |
| 8694 | |
| 8695 | #ifdef CONFIG_SMP |
| 8696 | if (rq->ttwu_pending) |
| 8697 | return 0; |
| 8698 | #endif |
| 8699 | |
| 8700 | return 1; |
| 8701 | } |
| 8702 | |
| 8703 | /* |
| 8704 | * update_sg_wakeup_stats - Update sched_group's statistics for wakeup. |
| 8705 | * @sd: The sched_domain level to look for idlest group. |
| 8706 | * @group: sched_group whose statistics are to be updated. |
| 8707 | * @sgs: variable to hold the statistics for this group. |
| 8708 | * @p: The task for which we look for the idlest group/CPU. |
| 8709 | */ |
| 8710 | static inline void update_sg_wakeup_stats(struct sched_domain *sd, |
| 8711 | struct sched_group *group, |
| 8712 | struct sg_lb_stats *sgs, |
| 8713 | struct task_struct *p) |
| 8714 | { |
| 8715 | int i, nr_running; |
| 8716 | |
| 8717 | memset(sgs, 0, sizeof(*sgs)); |
| 8718 | |
| 8719 | for_each_cpu(i, sched_group_span(group)) { |
| 8720 | struct rq *rq = cpu_rq(i); |
| 8721 | unsigned int local; |
| 8722 | |
| 8723 | sgs->group_load += cpu_load_without(rq, p); |
| 8724 | sgs->group_util += cpu_util_without(i, p); |
| 8725 | sgs->group_runnable += cpu_runnable_without(rq, p); |
| 8726 | local = task_running_on_cpu(i, p); |
| 8727 | sgs->sum_h_nr_running += rq->cfs.h_nr_running - local; |
| 8728 | |
| 8729 | nr_running = rq->nr_running - local; |
| 8730 | sgs->sum_nr_running += nr_running; |
| 8731 | |
| 8732 | /* |
| 8733 | * No need to call idle_cpu_without() if nr_running is not 0 |
| 8734 | */ |
| 8735 | if (!nr_running && idle_cpu_without(i, p)) |
| 8736 | sgs->idle_cpus++; |
| 8737 | |
| 8738 | } |
| 8739 | |
| 8740 | /* Check if task fits in the group */ |
| 8741 | if (sd->flags & SD_ASYM_CPUCAPACITY && |
| 8742 | !task_fits_capacity(p, group->sgc->max_capacity)) { |
| 8743 | sgs->group_misfit_task_load = 1; |
| 8744 | } |
| 8745 | |
| 8746 | sgs->group_capacity = group->sgc->capacity; |
| 8747 | |
| 8748 | sgs->group_weight = group->group_weight; |
| 8749 | |
| 8750 | sgs->group_type = group_classify(sd->imbalance_pct, group, sgs); |
| 8751 | |
| 8752 | /* |
| 8753 | * Computing avg_load makes sense only when group is fully busy or |
| 8754 | * overloaded |
| 8755 | */ |
| 8756 | if (sgs->group_type == group_fully_busy || |
| 8757 | sgs->group_type == group_overloaded) |
| 8758 | sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) / |
| 8759 | sgs->group_capacity; |
| 8760 | } |
| 8761 | |
| 8762 | static bool update_pick_idlest(struct sched_group *idlest, |
| 8763 | struct sg_lb_stats *idlest_sgs, |
| 8764 | struct sched_group *group, |
| 8765 | struct sg_lb_stats *sgs) |
| 8766 | { |
| 8767 | if (sgs->group_type < idlest_sgs->group_type) |
| 8768 | return true; |
| 8769 | |
| 8770 | if (sgs->group_type > idlest_sgs->group_type) |
| 8771 | return false; |
| 8772 | |
| 8773 | /* |
| 8774 | * The candidate and the current idlest group are the same type of |
| 8775 | * group. Let's check which one is the idlest according to the type. |
| 8776 | */ |
| 8777 | |
| 8778 | switch (sgs->group_type) { |
| 8779 | case group_overloaded: |
| 8780 | case group_fully_busy: |
| 8781 | /* Select the group with lowest avg_load. */ |
| 8782 | if (idlest_sgs->avg_load <= sgs->avg_load) |
| 8783 | return false; |
| 8784 | break; |
| 8785 | |
| 8786 | case group_imbalanced: |
| 8787 | case group_asym_packing: |
| 8788 | /* Those types are not used in the slow wakeup path */ |
| 8789 | return false; |
| 8790 | |
| 8791 | case group_misfit_task: |
| 8792 | /* Select group with the highest max capacity */ |
| 8793 | if (idlest->sgc->max_capacity >= group->sgc->max_capacity) |
| 8794 | return false; |
| 8795 | break; |
| 8796 | |
| 8797 | case group_has_spare: |
| 8798 | /* Select group with most idle CPUs */ |
| 8799 | if (idlest_sgs->idle_cpus > sgs->idle_cpus) |
| 8800 | return false; |
| 8801 | |
| 8802 | /* Select group with lowest group_util */ |
| 8803 | if (idlest_sgs->idle_cpus == sgs->idle_cpus && |
| 8804 | idlest_sgs->group_util <= sgs->group_util) |
| 8805 | return false; |
| 8806 | |
| 8807 | break; |
| 8808 | } |
| 8809 | |
| 8810 | return true; |
| 8811 | } |
| 8812 | |
| 8813 | /* |
| 8814 | * find_idlest_group() finds and returns the least busy CPU group within the |
| 8815 | * domain. |
| 8816 | * |
| 8817 | * Assumes p is allowed on at least one CPU in sd. |
| 8818 | */ |
| 8819 | static struct sched_group * |
| 8820 | find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) |
| 8821 | { |
| 8822 | struct sched_group *idlest = NULL, *local = NULL, *group = sd->groups; |
| 8823 | struct sg_lb_stats local_sgs, tmp_sgs; |
| 8824 | struct sg_lb_stats *sgs; |
| 8825 | unsigned long imbalance; |
| 8826 | struct sg_lb_stats idlest_sgs = { |
| 8827 | .avg_load = UINT_MAX, |
| 8828 | .group_type = group_overloaded, |
| 8829 | }; |
| 8830 | |
| 8831 | imbalance = scale_load_down(NICE_0_LOAD) * |
| 8832 | (sd->imbalance_pct-100) / 100; |
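| | |
| | /* |
| | * Example (illustrative): with scale_load_down(NICE_0_LOAD) == 1024 |
| | * and an imbalance_pct of 117, imbalance is 1024 * 17 / 100 ~= 174, |
| | * i.e. the idlest group's avg_load must be at least ~174 below the |
| | * local group's before the task is pushed away from this node. |
| | */ |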
| 8833 | |
| 8834 | do { |
| 8835 | int local_group; |
| 8836 | |
| 8837 | /* Skip over this group if it has no CPUs allowed */ |
| 8838 | if (!cpumask_intersects(sched_group_span(group), |
| 8839 | p->cpus_ptr)) |
| 8840 | continue; |
| 8841 | |
| 8842 | local_group = cpumask_test_cpu(this_cpu, |
| 8843 | sched_group_span(group)); |
| 8844 | |
| 8845 | if (local_group) { |
| 8846 | sgs = &local_sgs; |
| 8847 | local = group; |
| 8848 | } else { |
| 8849 | sgs = &tmp_sgs; |
| 8850 | } |
| 8851 | |
| 8852 | update_sg_wakeup_stats(sd, group, sgs, p); |
| 8853 | |
| 8854 | if (!local_group && update_pick_idlest(idlest, &idlest_sgs, group, sgs)) { |
| 8855 | idlest = group; |
| 8856 | idlest_sgs = *sgs; |
| 8857 | } |
| 8858 | |
| 8859 | } while (group = group->next, group != sd->groups); |
| 8860 | |
| 8861 | |
| 8862 | /* There is no idlest group to push tasks to */ |
| 8863 | if (!idlest) |
| 8864 | return NULL; |
| 8865 | |
| 8866 | /* The local group has been skipped because of CPU affinity */ |
| 8867 | if (!local) |
| 8868 | return idlest; |
| 8869 | |
| 8870 | /* |
| 8871 | * If the local group is idler than the selected idlest group |
| 8872 | * don't try and push the task. |
| 8873 | */ |
| 8874 | if (local_sgs.group_type < idlest_sgs.group_type) |
| 8875 | return NULL; |
| 8876 | |
| 8877 | /* |
| 8878 | * If the local group is busier than the selected idlest group |
| 8879 | * try and push the task. |
| 8880 | */ |
| 8881 | if (local_sgs.group_type > idlest_sgs.group_type) |
| 8882 | return idlest; |
| 8883 | |
| 8884 | switch (local_sgs.group_type) { |
| 8885 | case group_overloaded: |
| 8886 | case group_fully_busy: |
| 8887 | /* |
| 8888 | * When comparing groups across NUMA domains, it's possible for |
| 8889 | * the local domain to be very lightly loaded relative to the |
| 8890 | * remote domains but "imbalance" skews the comparison making |
| 8891 | * remote CPUs look much more favourable. When considering |
| 8892 | * cross-domain, add imbalance to the load on the remote node |
| 8893 | * and consider staying local. |
| 8894 | */ |
| 8895 | |
| 8896 | if ((sd->flags & SD_NUMA) && |
| 8897 | ((idlest_sgs.avg_load + imbalance) >= local_sgs.avg_load)) |
| 8898 | return NULL; |
| 8899 | |
| 8900 | /* |
| 8901 | * If the local group is less loaded than the selected |
| 8902 | * idlest group don't try and push any tasks. |
| 8903 | */ |
| 8904 | if (idlest_sgs.avg_load >= (local_sgs.avg_load + imbalance)) |
| 8905 | return NULL; |
| 8906 | |
| 8907 | if (100 * local_sgs.avg_load <= sd->imbalance_pct * idlest_sgs.avg_load) |
| 8908 | return NULL; |
| 8909 | break; |
| 8910 | |
| 8911 | case group_imbalanced: |
| 8912 | case group_asym_packing: |
| 8913 | /* Those types are not used in the slow wakeup path */ |
| 8914 | return NULL; |
| 8915 | |
| 8916 | case group_misfit_task: |
| 8917 | /* Select group with the highest max capacity */ |
| 8918 | if (local->sgc->max_capacity >= idlest->sgc->max_capacity) |
| 8919 | return NULL; |
| 8920 | break; |
| 8921 | |
| 8922 | case group_has_spare: |
| 8923 | if (sd->flags & SD_NUMA) { |
| 8924 | #ifdef CONFIG_NUMA_BALANCING |
| 8925 | int idlest_cpu; |
| 8926 | /* |
| 8927 | * If there is spare capacity at NUMA, try to select |
| 8928 | * the preferred node |
| 8929 | */ |
| 8930 | if (cpu_to_node(this_cpu) == p->numa_preferred_nid) |
| 8931 | return NULL; |
| 8932 | |
| 8933 | idlest_cpu = cpumask_first(sched_group_span(idlest)); |
| 8934 | if (cpu_to_node(idlest_cpu) == p->numa_preferred_nid) |
| 8935 | return idlest; |
| 8936 | #endif |
| 8937 | /* |
| 8938 | * Otherwise, keep the task on this node to stay close |
| 8939 | * to its wakeup source and improve locality. If there is |
| 8940 | * a real need for migration, the periodic load balancer |
| 8941 | * will take care of it. |
| 8942 | */ |
| 8943 | if (local_sgs.idle_cpus) |
| 8944 | return NULL; |
| 8945 | } |
| 8946 | |
| 8947 | /* |
| 8948 | * Select the group with the highest number of idle CPUs. We could |
| 8949 | * also compare the utilization, which is more stable, but a group |
| 8950 | * can end up with less spare capacity yet more idle CPUs, which |
| 8951 | * means more opportunities to run tasks. |
| 8952 | */ |
| 8953 | if (local_sgs.idle_cpus >= idlest_sgs.idle_cpus) |
| 8954 | return NULL; |
| 8955 | break; |
| 8956 | } |
| 8957 | |
| 8958 | return idlest; |
| 8959 | } |
| 8960 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8961 | /** |
| 8962 | * update_sd_lb_stats - Update sched_domain's statistics for load balancing. |
| 8963 | * @env: The load balancing environment. |
| 8964 | * @sds: variable to hold the statistics for this sched_domain. |
| 8965 | */ |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 8966 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8967 | static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds) |
| 8968 | { |
| 8969 | struct sched_domain *child = env->sd->child; |
| 8970 | struct sched_group *sg = env->sd->groups; |
| 8971 | struct sg_lb_stats *local = &sds->local_stat; |
| 8972 | struct sg_lb_stats tmp_sgs; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 8973 | int sg_status = 0; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8974 | |
| 8975 | #ifdef CONFIG_NO_HZ_COMMON |
| 8976 | if (env->idle == CPU_NEWLY_IDLE && READ_ONCE(nohz.has_blocked)) |
| 8977 | env->flags |= LBF_NOHZ_STATS; |
| 8978 | #endif |
| 8979 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8980 | do { |
| 8981 | struct sg_lb_stats *sgs = &tmp_sgs; |
| 8982 | int local_group; |
| 8983 | |
| 8984 | local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(sg)); |
| 8985 | if (local_group) { |
| 8986 | sds->local = sg; |
| 8987 | sgs = local; |
| 8988 | |
| 8989 | if (env->idle != CPU_NEWLY_IDLE || |
| 8990 | time_after_eq(jiffies, sg->sgc->next_update)) |
| 8991 | update_group_capacity(env->sd, env->dst_cpu); |
| 8992 | } |
| 8993 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 8994 | update_sg_lb_stats(env, sg, sgs, &sg_status); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 8995 | |
| 8996 | if (local_group) |
| 8997 | goto next_group; |
| 8998 | |
| 9000 | if (update_sd_pick_busiest(env, sds, sg, sgs)) { |
| 9001 | sds->busiest = sg; |
| 9002 | sds->busiest_stat = *sgs; |
| 9003 | } |
| 9004 | |
| 9005 | next_group: |
| 9006 | /* Now, start updating sd_lb_stats */ |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9007 | sds->total_load += sgs->group_load; |
| 9008 | sds->total_capacity += sgs->group_capacity; |
| 9009 | |
| 9010 | sg = sg->next; |
| 9011 | } while (sg != env->sd->groups); |
| 9012 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9013 | /* Tag the domain if the child domain prefers tasks to go to siblings first */ |
| 9014 | sds->prefer_sibling = child && child->flags & SD_PREFER_SIBLING; |
| 9015 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9016 | #ifdef CONFIG_NO_HZ_COMMON |
| 9017 | if ((env->flags & LBF_NOHZ_AGAIN) && |
| 9018 | cpumask_subset(nohz.idle_cpus_mask, sched_domain_span(env->sd))) { |
| 9019 | |
| 9020 | WRITE_ONCE(nohz.next_blocked, |
| 9021 | jiffies + msecs_to_jiffies(LOAD_AVG_PERIOD)); |
| 9022 | } |
| 9023 | #endif |
| 9024 | |
| 9025 | if (env->sd->flags & SD_NUMA) |
| 9026 | env->fbq_type = fbq_classify_group(&sds->busiest_stat); |
| 9027 | |
| 9028 | if (!env->sd->parent) { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 9029 | struct root_domain *rd = env->dst_rq->rd; |
| 9030 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9031 | /* update overload indicator if we are at root domain */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 9032 | WRITE_ONCE(rd->overload, sg_status & SG_OVERLOAD); |
| 9033 | |
| 9034 | /* Update over-utilization (tipping point, U >= 0) indicator */ |
| 9035 | WRITE_ONCE(rd->overutilized, sg_status & SG_OVERUTILIZED); |
| 9036 | trace_sched_overutilized_tp(rd, sg_status & SG_OVERUTILIZED); |
| 9037 | } else if (sg_status & SG_OVERUTILIZED) { |
| 9038 | struct root_domain *rd = env->dst_rq->rd; |
| 9039 | |
| 9040 | WRITE_ONCE(rd->overutilized, SG_OVERUTILIZED); |
| 9041 | trace_sched_overutilized_tp(rd, SG_OVERUTILIZED); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9042 | } |
| 9043 | } |
| 9044 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9045 | static inline long adjust_numa_imbalance(int imbalance, int nr_running) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9046 | { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9047 | unsigned int imbalance_min; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9048 | |
| 9049 | /* |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9050 | * Allow a small imbalance so that a simple pair of communicating |
| 9051 | * tasks can remain local when the source domain is almost idle. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9052 | */ |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9053 | imbalance_min = 2; |
| 9054 | if (nr_running <= imbalance_min) |
| 9055 | return 0; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9056 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9057 | return imbalance; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9058 | } |
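| | /* |
| | * A minimal self-test sketch of the cut-off above (hypothetical values, |
| | * not kernel code): |
| | * |
| | * adjust_numa_imbalance(4, 2) == 0 -- a communicating pair stays local |
| | * adjust_numa_imbalance(4, 3) == 4 -- imbalance is reported unchanged |
| | */ |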
| 9059 | |
| 9060 | /** |
| 9061 | * calculate_imbalance - Calculate the amount of imbalance present within the |
| 9062 | * groups of a given sched_domain during load balance. |
| 9063 | * @env: load balance environment |
| 9064 | * @sds: statistics of the sched_domain whose imbalance is to be calculated. |
| 9065 | */ |
| 9066 | static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds) |
| 9067 | { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9068 | struct sg_lb_stats *local, *busiest; |
| 9069 | |
| 9070 | local = &sds->local_stat; |
| 9071 | busiest = &sds->busiest_stat; |
| 9072 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9073 | if (busiest->group_type == group_misfit_task) { |
| 9074 | /* Set imbalance to allow misfit tasks to be balanced. */ |
| 9075 | env->migration_type = migrate_misfit; |
| 9076 | env->imbalance = 1; |
| 9077 | return; |
| 9078 | } |
| 9079 | |
| 9080 | if (busiest->group_type == group_asym_packing) { |
| 9081 | /* |
| 9082 | * In case of asym capacity, we will try to migrate all load to |
| 9083 | * the preferred CPU. |
| 9084 | */ |
| 9085 | env->migration_type = migrate_task; |
| 9086 | env->imbalance = busiest->sum_h_nr_running; |
| 9087 | return; |
| 9088 | } |
| 9089 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9090 | if (busiest->group_type == group_imbalanced) { |
| 9091 | /* |
| 9092 | * In the group_imb case we cannot rely on group-wide averages |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9093 | * to ensure CPU-load equilibrium, try to move any task to fix |
| 9094 | * the imbalance. The next load balance will take care of |
| 9095 | * balancing back the system. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9096 | */ |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9097 | env->migration_type = migrate_task; |
| 9098 | env->imbalance = 1; |
| 9099 | return; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9100 | } |
| 9101 | |
| 9102 | /* |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9103 | * Try to use spare capacity of local group without overloading it or |
| 9104 | * emptying busiest. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9105 | */ |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9106 | if (local->group_type == group_has_spare) { |
| 9107 | if ((busiest->group_type > group_fully_busy) && |
| 9108 | !(env->sd->flags & SD_SHARE_PKG_RESOURCES)) { |
| 9109 | /* |
| 9110 | * If busiest is overloaded, try to fill spare |
| 9111 | * capacity. This might end up creating spare capacity |
| 9112 | * in busiest or busiest still being overloaded but |
| 9113 | * there is no simple way to directly compute the |
| 9114 | * amount of load to migrate in order to balance the |
| 9115 | * system. |
| 9116 | */ |
| 9117 | env->migration_type = migrate_util; |
| 9118 | env->imbalance = max(local->group_capacity, local->group_util) - |
| 9119 | local->group_util; |
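| | /* |
| | * E.g. (hypothetical): local group_capacity = 2048 and group_util = |
| | * 1536 give env->imbalance = 512 units of utilization to pull; if util |
| | * already exceeds capacity, the imbalance computes to 0 and the |
| | * newly-idle special case below takes over. |
| | */ |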
| 9120 | |
| 9121 | /* |
| 9122 | * In some cases, the group's utilization is at its max or even |
| 9123 | * higher than its capacity because of migrations, but the |
| 9124 | * local CPU is (newly) idle. There is at least one |
| 9125 | * waiting task in this overloaded busiest group. Let's |
| 9126 | * try to pull it. |
| 9127 | */ |
| 9128 | if (env->idle != CPU_NOT_IDLE && env->imbalance == 0) { |
| 9129 | env->migration_type = migrate_task; |
| 9130 | env->imbalance = 1; |
| 9131 | } |
| 9132 | |
| 9133 | return; |
| 9134 | } |
| 9135 | |
| 9136 | if (busiest->group_weight == 1 || sds->prefer_sibling) { |
| 9137 | unsigned int nr_diff = busiest->sum_nr_running; |
| 9138 | /* |
| 9139 | * When prefer-sibling is set, spread running tasks evenly |
| 9140 | * across groups. |
| 9141 | */ |
| 9142 | env->migration_type = migrate_task; |
| 9143 | lsub_positive(&nr_diff, local->sum_nr_running); |
| 9144 | env->imbalance = nr_diff >> 1; |
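| | /* |
| | * E.g. (hypothetical): busiest->sum_nr_running = 5 and |
| | * local->sum_nr_running = 1 give nr_diff = 4, so env->imbalance = |
| | * 4 >> 1 = 2 tasks, evening both groups out at 3 tasks each. |
| | */ |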
| 9145 | } else { |
| 9147 | /* |
| 9148 | * If there is no overload, we just want to even the number of |
| 9149 | * idle CPUs. |
| 9150 | */ |
| 9151 | env->migration_type = migrate_task; |
| 9152 | env->imbalance = max_t(long, 0, (local->idle_cpus - |
| 9153 | busiest->idle_cpus) >> 1); |
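| | /* |
| | * E.g. (hypothetical): local->idle_cpus = 4 and busiest->idle_cpus = 0 |
| | * give env->imbalance = (4 - 0) >> 1 = 2 tasks, leaving both groups |
| | * with 2 idle CPUs once moved. |
| | */ |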
| 9154 | } |
| 9155 | |
| 9156 | /* Consider allowing a small imbalance between NUMA groups */ |
| 9157 | if (env->sd->flags & SD_NUMA) |
| 9158 | env->imbalance = adjust_numa_imbalance(env->imbalance, |
| 9159 | busiest->sum_nr_running); |
| 9160 | |
| 9161 | return; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9162 | } |
| 9163 | |
| 9164 | /* |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9165 | * Local is fully busy but has to take more load to relieve the |
| 9166 | * busiest group |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9167 | */ |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9168 | if (local->group_type < group_overloaded) { |
| 9169 | /* |
| 9170 | * Local will become overloaded so the avg_load metrics are |
| 9171 | * finally needed. |
| 9172 | */ |
| 9173 | |
| 9174 | local->avg_load = (local->group_load * SCHED_CAPACITY_SCALE) / |
| 9175 | local->group_capacity; |
| 9176 | |
| 9177 | sds->avg_load = (sds->total_load * SCHED_CAPACITY_SCALE) / |
| 9178 | sds->total_capacity; |
| 9179 | /* |
| 9180 | * If the local group is more loaded than the selected |
| 9181 | * busiest group don't try to pull any tasks. |
| 9182 | */ |
| 9183 | if (local->avg_load >= busiest->avg_load) { |
| 9184 | env->imbalance = 0; |
| 9185 | return; |
| 9186 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9187 | } |
| 9188 | |
| 9189 | /* |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9190 | * Both groups are or will become overloaded and we're trying to get all |
| 9191 | * the CPUs to the average load, so we don't want to push ourselves |
| 9192 | * above the average load, nor do we wish to reduce the max loaded CPU |
| 9193 | * below the average load. At the same time, we also don't want to |
| 9194 | * reduce the group load below the group capacity. Thus we look for |
| 9195 | * the minimum possible imbalance. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9196 | */ |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9197 | env->migration_type = migrate_load; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9198 | env->imbalance = min( |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9199 | (busiest->avg_load - sds->avg_load) * busiest->group_capacity, |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9200 | (sds->avg_load - local->avg_load) * local->group_capacity |
| 9201 | ) / SCHED_CAPACITY_SCALE; |
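| | /* |
| | * Worked example (hypothetical, SCHED_CAPACITY_SCALE = 1024): with |
| | * busiest->avg_load = 1280, sds->avg_load = 1024, local->avg_load = 768 |
| | * and both group capacities at 1024, env->imbalance = |
| | * min(256 * 1024, 256 * 1024) / 1024 = 256: just enough load to pull |
| | * both sides toward the domain average. |
| | */ |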
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9202 | } |
| 9203 | |
| 9204 | /******* find_busiest_group() helpers end here *********************/ |
| 9205 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9206 | /* |
| 9207 | * Decision matrix according to the local and busiest group type: |
| 9208 | * |
| 9209 | * busiest \ local has_spare fully_busy misfit asym imbalanced overloaded |
| 9210 | * has_spare nr_idle balanced N/A N/A balanced balanced |
| 9211 | * fully_busy nr_idle nr_idle N/A N/A balanced balanced |
| 9212 | * misfit_task force N/A N/A N/A force force |
| 9213 | * asym_packing force force N/A N/A force force |
| 9214 | * imbalanced force force N/A N/A force force |
| 9215 | * overloaded force force N/A N/A force avg_load |
| 9216 | * |
| 9217 | * N/A : Not Applicable because already filtered while updating |
| 9218 | * statistics. |
| 9219 | * balanced : The system is balanced for these 2 groups. |
| 9220 | * force : Calculate the imbalance as load migration is probably needed. |
| 9221 | * avg_load : Only if imbalance is significant enough. |
| 9222 | * nr_idle : dst_cpu is not busy and the number of idle CPUs is quite |
| 9223 | * different between the groups. |
| 9224 | */ |
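| | /* |
| | * An illustrative reading of the matrix: busiest = overloaded with |
| | * local = has_spare yields "force", so calculate_imbalance() runs; |
| | * busiest = fully_busy with local = fully_busy yields "nr_idle", so |
| | * the idle-CPU comparison decides. |
| | */ |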
| 9225 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9226 | /** |
| 9227 | * find_busiest_group - Returns the busiest group within the sched_domain |
| 9228 | * if there is an imbalance. |
| 9229 | * |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 9230 | * Also calculates the amount of runnable load which should be moved |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9231 | * to restore balance. |
| 9232 | * |
| 9233 | * @env: The load balancing environment. |
| 9234 | * |
| 9235 | * Return: The busiest group if an imbalance exists; NULL otherwise. |
| 9236 | */ |
| 9237 | static struct sched_group *find_busiest_group(struct lb_env *env) |
| 9238 | { |
| 9239 | struct sg_lb_stats *local, *busiest; |
| 9240 | struct sd_lb_stats sds; |
| 9241 | |
| 9242 | init_sd_lb_stats(&sds); |
| 9243 | |
| 9244 | /* |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9245 | * Compute the various statistics relevant for load balancing at |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9246 | * this level. |
| 9247 | */ |
| 9248 | update_sd_lb_stats(env, &sds); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 9249 | |
| 9250 | if (sched_energy_enabled()) { |
| 9251 | struct root_domain *rd = env->dst_rq->rd; |
| 9252 | |
| 9253 | if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized)) |
| 9254 | goto out_balanced; |
| 9255 | } |
| 9256 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9257 | local = &sds.local_stat; |
| 9258 | busiest = &sds.busiest_stat; |
| 9259 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9260 | /* There is no busy sibling group to pull tasks from */ |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9261 | if (!sds.busiest) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9262 | goto out_balanced; |
| 9263 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9264 | /* Misfit tasks should be dealt with regardless of the avg load */ |
| 9265 | if (busiest->group_type == group_misfit_task) |
| 9266 | goto force_balance; |
| 9267 | |
| 9268 | /* ASYM feature bypasses nice load balance check */ |
| 9269 | if (busiest->group_type == group_asym_packing) |
| 9270 | goto force_balance; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9271 | |
| 9272 | /* |
| 9273 | * If the busiest group is imbalanced the below checks don't |
| 9274 | * work because they assume all things are equal, which typically |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 9275 | * isn't true due to cpus_ptr constraints and the like. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9276 | */ |
| 9277 | if (busiest->group_type == group_imbalanced) |
| 9278 | goto force_balance; |
| 9279 | |
| 9280 | /* |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9281 | * If the local group is busier than the selected busiest group |
| 9282 | * don't try and pull any tasks. |
| 9283 | */ |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9284 | if (local->group_type > busiest->group_type) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9285 | goto out_balanced; |
| 9286 | |
| 9287 | /* |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9288 | * When groups are overloaded, use the avg_load to ensure fairness |
| 9289 | * between tasks. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9290 | */ |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9291 | if (local->group_type == group_overloaded) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9292 | /* |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9293 | * If the local group is more loaded than the selected |
| 9294 | * busiest group don't try to pull any tasks. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9295 | */ |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9296 | if (local->avg_load >= busiest->avg_load) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9297 | goto out_balanced; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9298 | |
| 9299 | /* XXX broken for overlapping NUMA groups */ |
| 9300 | sds.avg_load = (sds.total_load * SCHED_CAPACITY_SCALE) / |
| 9301 | sds.total_capacity; |
| 9302 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9303 | /* |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9304 | * Don't pull any tasks if this group is already above the |
| 9305 | * domain average load. |
| 9306 | */ |
| 9307 | if (local->avg_load >= sds.avg_load) |
| 9308 | goto out_balanced; |
| 9309 | |
| 9310 | /* |
| 9311 | * If the busiest group is more loaded, use imbalance_pct to be |
| 9312 | * conservative. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9313 | */ |
| 9314 | if (100 * busiest->avg_load <= |
| 9315 | env->sd->imbalance_pct * local->avg_load) |
| 9316 | goto out_balanced; |
| 9317 | } |
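| | /* |
| | * E.g. (hypothetical): busiest->avg_load = 1100, local->avg_load = |
| | * 1000, imbalance_pct = 117: 100 * 1100 = 110000 <= 117 * 1000 = |
| | * 117000, so the difference is within the conservative margin and |
| | * the domain is treated as balanced. |
| | */ |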
| 9318 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9319 | /* Try to move all excess tasks to the child's sibling domain */ |
| 9320 | if (sds.prefer_sibling && local->group_type == group_has_spare && |
| 9321 | busiest->sum_nr_running > local->sum_nr_running + 1) |
| 9322 | goto force_balance; |
| 9323 | |
| 9324 | if (busiest->group_type != group_overloaded) { |
| 9325 | if (env->idle == CPU_NOT_IDLE) |
| 9326 | /* |
| 9327 | * If the busiest group is not overloaded (and as a |
| 9328 | * result the local one too) but this CPU is already |
| 9329 | * busy, let another idle CPU try to pull a task. |
| 9330 | */ |
| 9331 | goto out_balanced; |
| 9332 | |
| 9333 | if (busiest->group_weight > 1 && |
| 9334 | local->idle_cpus <= (busiest->idle_cpus + 1)) |
| 9335 | /* |
| 9336 | * If the busiest group is not overloaded |
| 9337 | * and there is no imbalance between this and busiest |
| 9338 | * group wrt idle CPUs, it is balanced. The imbalance |
| 9339 | * becomes significant if the diff is greater than 1 |
| 9340 | * otherwise we might end up to just move the imbalance |
| 9341 | * on another group. Of course this applies only if |
| 9342 | * there is more than 1 CPU per group. |
| 9343 | */ |
| 9344 | goto out_balanced; |
| 9345 | |
| 9346 | if (busiest->sum_h_nr_running == 1) |
| 9347 | /* |
| 9348 | * busiest doesn't have any tasks waiting to run |
| 9349 | */ |
| 9350 | goto out_balanced; |
| 9351 | } |
| 9352 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9353 | force_balance: |
| 9354 | /* Looks like there is an imbalance. Compute it */ |
| 9355 | calculate_imbalance(env, &sds); |
| 9356 | return env->imbalance ? sds.busiest : NULL; |
| 9357 | |
| 9358 | out_balanced: |
| 9359 | env->imbalance = 0; |
| 9360 | return NULL; |
| 9361 | } |
| 9362 | |
| 9363 | /* |
| 9364 | * find_busiest_queue - find the busiest runqueue among the CPUs in the group. |
| 9365 | */ |
| 9366 | static struct rq *find_busiest_queue(struct lb_env *env, |
| 9367 | struct sched_group *group) |
| 9368 | { |
| 9369 | struct rq *busiest = NULL, *rq; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9370 | unsigned long busiest_util = 0, busiest_load = 0, busiest_capacity = 1; |
| 9371 | unsigned int busiest_nr = 0; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9372 | int i; |
| 9373 | |
| 9374 | for_each_cpu_and(i, sched_group_span(group), env->cpus) { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9375 | unsigned long capacity, load, util; |
| 9376 | unsigned int nr_running; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9377 | enum fbq_type rt; |
| 9378 | |
| 9379 | rq = cpu_rq(i); |
| 9380 | rt = fbq_classify_rq(rq); |
| 9381 | |
| 9382 | /* |
| 9383 | * We classify groups/runqueues into three groups: |
| 9384 | * - regular: there are !numa tasks |
| 9385 | * - remote: there are numa tasks that run on the 'wrong' node |
| 9386 | * - all: there is no distinction |
| 9387 | * |
| 9388 | * In order to avoid migrating ideally placed numa tasks, |
| 9389 | * ignore those when there are better options. |
| 9390 | * |
| 9391 | * If we ignore the actual busiest queue to migrate another |
| 9392 | * task, the next balance pass can still reduce the busiest |
| 9393 | * queue by moving tasks around inside the node. |
| 9394 | * |
| 9395 | * If we cannot move enough load due to this classification |
| 9396 | * the next pass will adjust the group classification and |
| 9397 | * allow migration of more tasks. |
| 9398 | * |
| 9399 | * Both cases only affect the total convergence complexity. |
| 9400 | */ |
| 9401 | if (rt > env->fbq_type) |
| 9402 | continue; |
| 9403 | |
| 9404 | capacity = capacity_of(i); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9405 | nr_running = rq->cfs.h_nr_running; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9406 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 9407 | /* |
| 9408 | * For ASYM_CPUCAPACITY domains, don't pick a CPU that could |
| 9409 | * eventually lead to active_balancing high->low capacity. |
| 9410 | * Higher per-CPU capacity is considered better than balancing |
| 9411 | * average load. |
| 9412 | */ |
| 9413 | if (env->sd->flags & SD_ASYM_CPUCAPACITY && |
| 9414 | capacity_of(env->dst_cpu) < capacity && |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9415 | nr_running == 1) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 9416 | continue; |
| 9417 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9418 | switch (env->migration_type) { |
| 9419 | case migrate_load: |
| 9420 | /* |
| 9421 | * When comparing with load imbalance, use cpu_load() |
| 9422 | * which is not scaled with the CPU capacity. |
| 9423 | */ |
| 9424 | load = cpu_load(rq); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9425 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9426 | if (nr_running == 1 && load > env->imbalance && |
| 9427 | !check_cpu_capacity(rq, env->sd)) |
| 9428 | break; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9429 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9430 | /* |
| 9431 | * For the load comparisons with the other CPUs, |
| 9432 | * consider the cpu_load() scaled with the CPU |
| 9433 | * capacity, so that the load can be moved away |
| 9434 | * from the CPU that is potentially running at a |
| 9435 | * lower capacity. |
| 9436 | * |
| 9437 | * Thus we're looking for max(load_i / capacity_i), |
| 9438 | * crosswise multiplication to rid ourselves of the |
| 9439 | * division works out to: |
| 9440 | * load_i * capacity_j > load_j * capacity_i; |
| 9441 | * where j is our previous maximum. |
| 9442 | */ |
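| | /* |
| | * E.g. (hypothetical): load_i = 400 on capacity_i = 512 versus the |
| | * current maximum load_j = 600 on capacity_j = 1024: |
| | * 400 * 1024 = 409600 > 600 * 512 = 307200, so the half-capacity CPU |
| | * is relatively busier and becomes the new candidate. |
| | */ |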
| 9443 | if (load * busiest_capacity > busiest_load * capacity) { |
| 9444 | busiest_load = load; |
| 9445 | busiest_capacity = capacity; |
| 9446 | busiest = rq; |
| 9447 | } |
| 9448 | break; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9449 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9450 | case migrate_util: |
| 9451 | util = cpu_util(cpu_of(rq)); |
| 9452 | |
| 9453 | /* |
| 9454 | * Don't try to pull utilization from a CPU with one |
| 9455 | * running task. Whatever its utilization, we would fail |
| 9456 | * to detach the task. |
| 9457 | */ |
| 9458 | if (nr_running <= 1) |
| 9459 | continue; |
| 9460 | |
| 9461 | if (busiest_util < util) { |
| 9462 | busiest_util = util; |
| 9463 | busiest = rq; |
| 9464 | } |
| 9465 | break; |
| 9466 | |
| 9467 | case migrate_task: |
| 9468 | if (busiest_nr < nr_running) { |
| 9469 | busiest_nr = nr_running; |
| 9470 | busiest = rq; |
| 9471 | } |
| 9472 | break; |
| 9473 | |
| 9474 | case migrate_misfit: |
| 9475 | /* |
| 9476 | * For ASYM_CPUCAPACITY domains with misfit tasks we |
| 9477 | * simply seek the "biggest" misfit task. |
| 9478 | */ |
| 9479 | if (rq->misfit_task_load > busiest_load) { |
| 9480 | busiest_load = rq->misfit_task_load; |
| 9481 | busiest = rq; |
| 9482 | } |
| 9483 | |
| 9484 | break; |
| 9485 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9486 | } |
| 9487 | } |
| 9488 | |
| 9489 | return busiest; |
| 9490 | } |
| 9491 | |
| 9492 | /* |
| 9493 | * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but |
| 9494 | * so long as it is large enough. |
| 9495 | */ |
| 9496 | #define MAX_PINNED_INTERVAL 512 |
| 9497 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 9498 | static inline bool |
| 9499 | asym_active_balance(struct lb_env *env) |
| 9500 | { |
| 9501 | /* |
| 9502 | * ASYM_PACKING needs to force migrate tasks from busy but |
| 9503 | * lower priority CPUs in order to pack all tasks in the |
| 9504 | * highest priority CPUs. |
| 9505 | */ |
| 9506 | return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) && |
| 9507 | sched_asym_prefer(env->dst_cpu, env->src_cpu); |
| 9508 | } |
| 9509 | |
| 9510 | static inline bool |
| 9511 | voluntary_active_balance(struct lb_env *env) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9512 | { |
| 9513 | struct sched_domain *sd = env->sd; |
| 9514 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 9515 | if (asym_active_balance(env)) |
| 9516 | return 1; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9517 | |
| 9518 | /* |
| 9519 | * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task. |
| 9520 | * It's worth migrating the task if the src_cpu's capacity is reduced |
| 9521 | * because of other sched_class or IRQs if more capacity stays |
| 9522 | * available on dst_cpu. |
| 9523 | */ |
| 9524 | if ((env->idle != CPU_NOT_IDLE) && |
| 9525 | (env->src_rq->cfs.h_nr_running == 1)) { |
| 9526 | if ((check_cpu_capacity(env->src_rq, sd)) && |
| 9527 | (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100)) |
| 9528 | return 1; |
| 9529 | } |
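| | /* |
| | * E.g. (hypothetical): capacity_of(src_cpu) = 400 after RT/IRQ |
| | * pressure, sd->imbalance_pct = 117 and capacity_of(dst_cpu) = 1024: |
| | * 400 * 117 = 46800 < 1024 * 100 = 102400, so migrating the lone task |
| | * to the higher-capacity dst_cpu is considered worthwhile. |
| | */ |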
| 9530 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9531 | if (env->migration_type == migrate_misfit) |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 9532 | return 1; |
| 9533 | |
| 9534 | return 0; |
| 9535 | } |
| 9536 | |
| 9537 | static int need_active_balance(struct lb_env *env) |
| 9538 | { |
| 9539 | struct sched_domain *sd = env->sd; |
| 9540 | |
| 9541 | if (voluntary_active_balance(env)) |
| 9542 | return 1; |
| 9543 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9544 | return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2); |
| 9545 | } |
| 9546 | |
| 9547 | static int active_load_balance_cpu_stop(void *data); |
| 9548 | |
| 9549 | static int should_we_balance(struct lb_env *env) |
| 9550 | { |
| 9551 | struct sched_group *sg = env->sd->groups; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9552 | int cpu; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9553 | |
| 9554 | /* |
| 9555 | * Ensure the balancing environment is consistent; inconsistency |
| 9556 | * can happen when the softirq triggers 'during' hotplug. |
| 9557 | */ |
| 9558 | if (!cpumask_test_cpu(env->dst_cpu, env->cpus)) |
| 9559 | return 0; |
| 9560 | |
| 9561 | /* |
| 9562 | * In the newly idle case, we will allow all the CPUs |
| 9563 | * to do the newly idle load balance. |
| 9564 | */ |
| 9565 | if (env->idle == CPU_NEWLY_IDLE) |
| 9566 | return 1; |
| 9567 | |
| 9568 | /* Try to find first idle CPU */ |
| 9569 | for_each_cpu_and(cpu, group_balance_mask(sg), env->cpus) { |
| 9570 | if (!idle_cpu(cpu)) |
| 9571 | continue; |
| 9572 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9573 | /* Are we the first idle CPU? */ |
| 9574 | return cpu == env->dst_cpu; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9575 | } |
| 9576 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9577 | /* Are we the first CPU of this group ? */ |
| 9578 | return group_balance_cpu(sg) == env->dst_cpu; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9579 | } |
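| | /* |
| | * E.g.: in a group spanning CPUs 0-3 where only CPU 2 is idle, the |
| | * loop above lets only dst_cpu == 2 proceed; every other CPU bails |
| | * out. With no idle CPU at all, the group's designated balance CPU |
| | * does the work. |
| | */ |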
| 9580 | |
| 9581 | /* |
| 9582 | * Check this_cpu to ensure it is balanced within domain. Attempt to move |
| 9583 | * tasks if there is an imbalance. |
| 9584 | */ |
| 9585 | static int load_balance(int this_cpu, struct rq *this_rq, |
| 9586 | struct sched_domain *sd, enum cpu_idle_type idle, |
| 9587 | int *continue_balancing) |
| 9588 | { |
| 9589 | int ld_moved, cur_ld_moved, active_balance = 0; |
| 9590 | struct sched_domain *sd_parent = sd->parent; |
| 9591 | struct sched_group *group; |
| 9592 | struct rq *busiest; |
| 9593 | struct rq_flags rf; |
| 9594 | struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask); |
| 9595 | |
| 9596 | struct lb_env env = { |
| 9597 | .sd = sd, |
| 9598 | .dst_cpu = this_cpu, |
| 9599 | .dst_rq = this_rq, |
| 9600 | .dst_grpmask = sched_group_span(sd->groups), |
| 9601 | .idle = idle, |
| 9602 | .loop_break = sched_nr_migrate_break, |
| 9603 | .cpus = cpus, |
| 9604 | .fbq_type = all, |
| 9605 | .tasks = LIST_HEAD_INIT(env.tasks), |
| 9606 | }; |
| 9607 | |
| 9608 | cpumask_and(cpus, sched_domain_span(sd), cpu_active_mask); |
| 9609 | |
| 9610 | schedstat_inc(sd->lb_count[idle]); |
| 9611 | |
| 9612 | redo: |
| 9613 | if (!should_we_balance(&env)) { |
| 9614 | *continue_balancing = 0; |
| 9615 | goto out_balanced; |
| 9616 | } |
| 9617 | |
| 9618 | group = find_busiest_group(&env); |
| 9619 | if (!group) { |
| 9620 | schedstat_inc(sd->lb_nobusyg[idle]); |
| 9621 | goto out_balanced; |
| 9622 | } |
| 9623 | |
| 9624 | busiest = find_busiest_queue(&env, group); |
| 9625 | if (!busiest) { |
| 9626 | schedstat_inc(sd->lb_nobusyq[idle]); |
| 9627 | goto out_balanced; |
| 9628 | } |
| 9629 | |
| 9630 | BUG_ON(busiest == env.dst_rq); |
| 9631 | |
| 9632 | schedstat_add(sd->lb_imbalance[idle], env.imbalance); |
| 9633 | |
| 9634 | env.src_cpu = busiest->cpu; |
| 9635 | env.src_rq = busiest; |
| 9636 | |
| 9637 | ld_moved = 0; |
| 9638 | if (busiest->nr_running > 1) { |
| 9639 | /* |
| 9640 | * Attempt to move tasks. If find_busiest_group has found |
| 9641 | * an imbalance but busiest->nr_running <= 1, the group is |
| 9642 | * still unbalanced. ld_moved simply stays zero, so it is |
| 9643 | * correctly treated as an imbalance. |
| 9644 | */ |
| 9645 | env.flags |= LBF_ALL_PINNED; |
| 9646 | env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running); |
| 9647 | |
| 9648 | more_balance: |
| 9649 | rq_lock_irqsave(busiest, &rf); |
| 9650 | update_rq_clock(busiest); |
| 9651 | |
| 9652 | /* |
| 9653 | * cur_ld_moved - load moved in current iteration |
| 9654 | * ld_moved - cumulative load moved across iterations |
| 9655 | */ |
| 9656 | cur_ld_moved = detach_tasks(&env); |
| 9657 | |
| 9658 | /* |
| 9659 | * We've detached some tasks from busiest_rq. Every |
| 9660 | * task is masked "TASK_ON_RQ_MIGRATING", so we can safely |
| 9661 | * unlock busiest->lock, and we can be sure |
| 9662 | * that nobody can manipulate the tasks in parallel. |
| 9663 | * See task_rq_lock() family for the details. |
| 9664 | */ |
| 9665 | |
| 9666 | rq_unlock(busiest, &rf); |
| 9667 | |
| 9668 | if (cur_ld_moved) { |
| 9669 | attach_tasks(&env); |
| 9670 | ld_moved += cur_ld_moved; |
| 9671 | } |
| 9672 | |
| 9673 | local_irq_restore(rf.flags); |
| 9674 | |
| 9675 | if (env.flags & LBF_NEED_BREAK) { |
| 9676 | env.flags &= ~LBF_NEED_BREAK; |
| 9677 | goto more_balance; |
| 9678 | } |
| 9679 | |
| 9680 | /* |
| 9681 | * Revisit (affine) tasks on src_cpu that couldn't be moved to |
| 9682 | * us and move them to an alternate dst_cpu in our sched_group |
| 9683 | * where they can run. The upper limit on how many times we |
| 9684 | * iterate on same src_cpu is dependent on number of CPUs in our |
| 9685 | * sched_group. |
| 9686 | * |
| 9687 | * This changes load balance semantics a bit on who can move |
| 9688 | * load to a given_cpu. In addition to the given_cpu itself |
| 9689 | * (or a ilb_cpu acting on its behalf where given_cpu is |
| 9690 | * nohz-idle), we now have balance_cpu in a position to move |
| 9691 | * load to given_cpu. In rare situations, this may cause |
| 9692 | * conflicts (balance_cpu and given_cpu/ilb_cpu deciding |
| 9693 | * _independently_ and at the _same_ time to move some load to |
| 9694 | * given_cpu) causing excess load to be moved to given_cpu. |
| 9695 | * This, however, should not happen often in practice, and |
| 9696 | * moreover subsequent load balance cycles should correct the |
| 9697 | * excess load moved. |
| 9698 | */ |
| 9699 | if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) { |
| 9700 | |
| 9701 | /* Prevent dst_cpu from being re-selected via env's CPUs */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 9702 | __cpumask_clear_cpu(env.dst_cpu, env.cpus); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9703 | |
| 9704 | env.dst_rq = cpu_rq(env.new_dst_cpu); |
| 9705 | env.dst_cpu = env.new_dst_cpu; |
| 9706 | env.flags &= ~LBF_DST_PINNED; |
| 9707 | env.loop = 0; |
| 9708 | env.loop_break = sched_nr_migrate_break; |
| 9709 | |
| 9710 | /* |
| 9711 | * Go back to "more_balance" rather than "redo" since we |
| 9712 | * need to continue with same src_cpu. |
| 9713 | */ |
| 9714 | goto more_balance; |
| 9715 | } |
| 9716 | |
| 9717 | /* |
| 9718 | * We failed to reach balance because of affinity. |
| 9719 | */ |
| 9720 | if (sd_parent) { |
| 9721 | int *group_imbalance = &sd_parent->groups->sgc->imbalance; |
| 9722 | |
| 9723 | if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) |
| 9724 | *group_imbalance = 1; |
| 9725 | } |
| 9726 | |
| 9727 | /* All tasks on this runqueue were pinned by CPU affinity */ |
| 9728 | if (unlikely(env.flags & LBF_ALL_PINNED)) { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 9729 | __cpumask_clear_cpu(cpu_of(busiest), cpus); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9730 | /* |
| 9731 | * Attempting to continue load balancing at the current |
| 9732 | * sched_domain level only makes sense if there are |
| 9733 | * active CPUs remaining as possible busiest CPUs to |
| 9734 | * pull load from which are not contained within the |
| 9735 | * destination group that is receiving any migrated |
| 9736 | * load. |
| 9737 | */ |
| 9738 | if (!cpumask_subset(cpus, env.dst_grpmask)) { |
| 9739 | env.loop = 0; |
| 9740 | env.loop_break = sched_nr_migrate_break; |
| 9741 | goto redo; |
| 9742 | } |
| 9743 | goto out_all_pinned; |
| 9744 | } |
| 9745 | } |
| 9746 | |
| 9747 | if (!ld_moved) { |
| 9748 | schedstat_inc(sd->lb_failed[idle]); |
| 9749 | /* |
| 9750 | * Increment the failure counter only on periodic balance. |
| 9751 | * We do not want newidle balance, which can be very |
| 9752 | * frequent, pollute the failure counter causing |
| 9753 | * excessive cache_hot migrations and active balances. |
| 9754 | */ |
| 9755 | if (idle != CPU_NEWLY_IDLE) |
| 9756 | sd->nr_balance_failed++; |
| 9757 | |
| 9758 | if (need_active_balance(&env)) { |
| 9759 | unsigned long flags; |
| 9760 | |
| 9761 | raw_spin_lock_irqsave(&busiest->lock, flags); |
| 9762 | |
| 9763 | /* |
| 9764 | * Don't kick the active_load_balance_cpu_stop, |
| 9765 | * if the curr task on busiest CPU can't be |
| 9766 | * moved to this_cpu: |
| 9767 | */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 9768 | if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9769 | raw_spin_unlock_irqrestore(&busiest->lock, |
| 9770 | flags); |
| 9771 | env.flags |= LBF_ALL_PINNED; |
| 9772 | goto out_one_pinned; |
| 9773 | } |
| 9774 | |
| 9775 | /* |
| 9776 | * ->active_balance synchronizes accesses to |
| 9777 | * ->active_balance_work. Once set, it's cleared |
| 9778 | * only after active load balance is finished. |
| 9779 | */ |
| 9780 | if (!busiest->active_balance) { |
| 9781 | busiest->active_balance = 1; |
| 9782 | busiest->push_cpu = this_cpu; |
| 9783 | active_balance = 1; |
| 9784 | } |
| 9785 | raw_spin_unlock_irqrestore(&busiest->lock, flags); |
| 9786 | |
| 9787 | if (active_balance) { |
| 9788 | stop_one_cpu_nowait(cpu_of(busiest), |
| 9789 | active_load_balance_cpu_stop, busiest, |
| 9790 | &busiest->active_balance_work); |
| 9791 | } |
| 9792 | |
| 9793 | /* We've kicked active balancing, force task migration. */ |
| 9794 | sd->nr_balance_failed = sd->cache_nice_tries+1; |
| 9795 | } |
| 9796 | } else |
| 9797 | sd->nr_balance_failed = 0; |
| 9798 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 9799 | if (likely(!active_balance) || voluntary_active_balance(&env)) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9800 | /* We were unbalanced, so reset the balancing interval */ |
| 9801 | sd->balance_interval = sd->min_interval; |
| 9802 | } else { |
| 9803 | /* |
| 9804 | * If we've begun active balancing, start to back off. This |
| 9805 | * case may not be covered by the all_pinned logic if there |
| 9806 | * is only 1 task on the busy runqueue (because we don't call |
| 9807 | * detach_tasks). |
| 9808 | */ |
| 9809 | if (sd->balance_interval < sd->max_interval) |
| 9810 | sd->balance_interval *= 2; |
| 9811 | } |
| 9812 | |
| 9813 | goto out; |
| 9814 | |
| 9815 | out_balanced: |
| 9816 | /* |
| 9817 | * We reach balance although we may have faced some affinity |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 9818 | * constraints. Clear the imbalance flag only if other tasks got |
| 9819 | * a chance to move and fix the imbalance. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9820 | */ |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 9821 | if (sd_parent && !(env.flags & LBF_ALL_PINNED)) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9822 | int *group_imbalance = &sd_parent->groups->sgc->imbalance; |
| 9823 | |
| 9824 | if (*group_imbalance) |
| 9825 | *group_imbalance = 0; |
| 9826 | } |
| 9827 | |
| 9828 | out_all_pinned: |
| 9829 | /* |
| 9830 | * We reach balance because all tasks are pinned at this level so |
| 9831 | * we can't migrate them. Let the imbalance flag set so parent level |
| 9832 | * can try to migrate them. |
| 9833 | */ |
| 9834 | schedstat_inc(sd->lb_balanced[idle]); |
| 9835 | |
| 9836 | sd->nr_balance_failed = 0; |
| 9837 | |
| 9838 | out_one_pinned: |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9839 | ld_moved = 0; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 9840 | |
| 9841 | /* |
| 9842 | * newidle_balance() disregards balance intervals, so we could |
| 9843 | * repeatedly reach this code, which would lead to balance_interval |
| 9844 | * skyrocketing in a short amount of time. Skip the balance_interval |
| 9845 | * increase logic to avoid that. |
| 9846 | */ |
| 9847 | if (env.idle == CPU_NEWLY_IDLE) |
| 9848 | goto out; |
| 9849 | |
| 9850 | /* tune up the balancing interval */ |
| 9851 | if ((env.flags & LBF_ALL_PINNED && |
| 9852 | sd->balance_interval < MAX_PINNED_INTERVAL) || |
| 9853 | sd->balance_interval < sd->max_interval) |
| 9854 | sd->balance_interval *= 2; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9855 | out: |
| 9856 | return ld_moved; |
| 9857 | } |
| 9858 | |
| 9859 | static inline unsigned long |
| 9860 | get_sd_balance_interval(struct sched_domain *sd, int cpu_busy) |
| 9861 | { |
| 9862 | unsigned long interval = sd->balance_interval; |
| 9863 | |
| 9864 | if (cpu_busy) |
| 9865 | interval *= sd->busy_factor; |
| 9866 | |
| 9867 | /* scale ms to jiffies */ |
| 9868 | interval = msecs_to_jiffies(interval); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9869 | |
| 9870 | /* |
| 9871 | * Reduce likelihood of busy balancing at higher domains racing with |
| 9872 | * balancing at lower domains by preventing their balancing periods |
| 9873 | * from being multiples of each other. |
| 9874 | */ |
| 9875 | if (cpu_busy) |
| 9876 | interval -= 1; |
| 9877 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9878 | interval = clamp(interval, 1UL, max_load_balance_interval); |
| 9879 | |
| 9880 | return interval; |
| 9881 | } |
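| | /* |
| | * E.g. (hypothetical): balance_interval = 8 (ms) and busy_factor = 32 |
| | * give a busy interval of 256 ms, i.e. 64 jiffies at HZ=250; the |
| | * "interval -= 1" above turns that into 63 so it cannot stay an exact |
| | * multiple of a child domain's period. |
| | */ |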
| 9882 | |
| 9883 | static inline void |
| 9884 | update_next_balance(struct sched_domain *sd, unsigned long *next_balance) |
| 9885 | { |
| 9886 | unsigned long interval, next; |
| 9887 | |
| 9888 | /* used by idle balance, so cpu_busy = 0 */ |
| 9889 | interval = get_sd_balance_interval(sd, 0); |
| 9890 | next = sd->last_balance + interval; |
| 9891 | |
| 9892 | if (time_after(*next_balance, next)) |
| 9893 | *next_balance = next; |
| 9894 | } |
| 9895 | |
| 9896 | /* |
| 9897 | * active_load_balance_cpu_stop is run by the CPU stopper. It pushes |
| 9898 | * running tasks off the busiest CPU onto idle CPUs. It requires at |
| 9899 | * least 1 task to be running on each physical CPU where possible, and |
| 9900 | * avoids physical / logical imbalances. |
| 9901 | */ |
| 9902 | static int active_load_balance_cpu_stop(void *data) |
| 9903 | { |
| 9904 | struct rq *busiest_rq = data; |
| 9905 | int busiest_cpu = cpu_of(busiest_rq); |
| 9906 | int target_cpu = busiest_rq->push_cpu; |
| 9907 | struct rq *target_rq = cpu_rq(target_cpu); |
| 9908 | struct sched_domain *sd; |
| 9909 | struct task_struct *p = NULL; |
| 9910 | struct rq_flags rf; |
| 9911 | |
| 9912 | rq_lock_irq(busiest_rq, &rf); |
| 9913 | /* |
| 9914 | * Between queueing the stop-work and running it is a hole in which |
| 9915 | * CPUs can become inactive. We should not move tasks from or to |
| 9916 | * inactive CPUs. |
| 9917 | */ |
| 9918 | if (!cpu_active(busiest_cpu) || !cpu_active(target_cpu)) |
| 9919 | goto out_unlock; |
| 9920 | |
| 9921 | /* Make sure the requested CPU hasn't gone down in the meantime: */ |
| 9922 | if (unlikely(busiest_cpu != smp_processor_id() || |
| 9923 | !busiest_rq->active_balance)) |
| 9924 | goto out_unlock; |
| 9925 | |
| 9926 | /* Is there any task to move? */ |
| 9927 | if (busiest_rq->nr_running <= 1) |
| 9928 | goto out_unlock; |
| 9929 | |
| 9930 | /* |
| 9931 | * This condition is "impossible"; if it occurs |
| 9932 | * we need to fix it. Originally reported by |
| 9933 | * Bjorn Helgaas on a 128-CPU setup. |
| 9934 | */ |
| 9935 | BUG_ON(busiest_rq == target_rq); |
| 9936 | |
| 9937 | /* Search for an sd spanning us and the target CPU. */ |
| 9938 | rcu_read_lock(); |
| 9939 | for_each_domain(target_cpu, sd) { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 9940 | if (cpumask_test_cpu(busiest_cpu, sched_domain_span(sd))) |
| 9941 | break; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 9942 | } |
| 9943 | |
| 9944 | if (likely(sd)) { |
| 9945 | struct lb_env env = { |
| 9946 | .sd = sd, |
| 9947 | .dst_cpu = target_cpu, |
| 9948 | .dst_rq = target_rq, |
| 9949 | .src_cpu = busiest_rq->cpu, |
| 9950 | .src_rq = busiest_rq, |
| 9951 | .idle = CPU_IDLE, |
| 9952 | /* |
| 9953 | * can_migrate_task() doesn't need to compute new_dst_cpu |
| 9954 | * for active balancing. Since we have CPU_IDLE, but no |
| 9955 | * @dst_grpmask we need to make that test go away with lying |
| 9956 | * about DST_PINNED. |
| 9957 | */ |
| 9958 | .flags = LBF_DST_PINNED, |
| 9959 | }; |
| 9960 | |
| 9961 | schedstat_inc(sd->alb_count); |
| 9962 | update_rq_clock(busiest_rq); |
| 9963 | |
| 9964 | p = detach_one_task(&env); |
| 9965 | if (p) { |
| 9966 | schedstat_inc(sd->alb_pushed); |
| 9967 | /* Active balancing done, reset the failure counter. */ |
| 9968 | sd->nr_balance_failed = 0; |
| 9969 | } else { |
| 9970 | schedstat_inc(sd->alb_failed); |
| 9971 | } |
| 9972 | } |
| 9973 | rcu_read_unlock(); |
| 9974 | out_unlock: |
| 9975 | busiest_rq->active_balance = 0; |
| 9976 | rq_unlock(busiest_rq, &rf); |
| 9977 | |
| 9978 | if (p) |
| 9979 | attach_one_task(target_rq, p); |
| 9980 | |
| 9981 | local_irq_enable(); |
| 9982 | |
| 9983 | return 0; |
| 9984 | } |
| 9985 | |
| 9986 | static DEFINE_SPINLOCK(balancing); |
| 9987 | |
| 9988 | /* |
| 9989 | * Scale the max load_balance interval with the number of CPUs in the system. |
| 9990 | * This trades load-balance latency on larger machines for less cross talk. |
| 9991 | */ |
| 9992 | void update_max_interval(void) |
| 9993 | { |
| 9994 | max_load_balance_interval = HZ*num_online_cpus()/10; |
| 9995 | } |
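| | /* |
| | * E.g. (hypothetical): HZ = 250 and 16 online CPUs give a cap of |
| | * 250 * 16 / 10 = 400 jiffies, roughly 1.6 seconds between balance |
| | * runs at most. |
| | */ |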
| 9996 | |
| 9997 | /* |
| 9998 | * It checks each scheduling domain to see if it is due to be balanced, |
| 9999 | * and initiates a balancing operation if so. |
| 10000 | * |
| 10001 | * Balancing parameters are set up in init_sched_domains. |
| 10002 | */ |
| 10003 | static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle) |
| 10004 | { |
| 10005 | int continue_balancing = 1; |
| 10006 | int cpu = rq->cpu; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 10007 | int busy = idle != CPU_IDLE && !sched_idle_cpu(cpu); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10008 | unsigned long interval; |
| 10009 | struct sched_domain *sd; |
| 10010 | /* Earliest time when we have to do rebalance again */ |
| 10011 | unsigned long next_balance = jiffies + 60*HZ; |
| 10012 | int update_next_balance = 0; |
| 10013 | int need_serialize, need_decay = 0; |
| 10014 | u64 max_cost = 0; |
| 10015 | |
| 10016 | rcu_read_lock(); |
| 10017 | for_each_domain(cpu, sd) { |
| 10018 | /* |
| 10019 | * Decay the newidle max times here because this is a regular |
| 10020 | * visit to all the domains. Decay ~1% per second. |
| 10021 | */ |
| 10022 | if (time_after(jiffies, sd->next_decay_max_lb_cost)) { |
| 10023 | sd->max_newidle_lb_cost = |
| 10024 | (sd->max_newidle_lb_cost * 253) / 256; |
| 10025 | sd->next_decay_max_lb_cost = jiffies + HZ; |
| 10026 | need_decay = 1; |
| 10027 | } |
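| | /* |
| | * E.g.: a max_newidle_lb_cost of 10000 decays to 10000 * 253 / 256 |
| | * ~= 9882 after one second, matching the ~1% per second noted above. |
| | */ |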
| 10028 | max_cost += sd->max_newidle_lb_cost; |
| 10029 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10030 | /* |
| 10031 | * Stop the load balance at this level. There is another |
| 10032 | * CPU in our sched group which is doing load balancing more |
| 10033 | * actively. |
| 10034 | */ |
| 10035 | if (!continue_balancing) { |
| 10036 | if (need_decay) |
| 10037 | continue; |
| 10038 | break; |
| 10039 | } |
| 10040 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 10041 | interval = get_sd_balance_interval(sd, busy); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10042 | |
| 10043 | need_serialize = sd->flags & SD_SERIALIZE; |
| 10044 | if (need_serialize) { |
| 10045 | if (!spin_trylock(&balancing)) |
| 10046 | goto out; |
| 10047 | } |
| 10048 | |
| 10049 | if (time_after_eq(jiffies, sd->last_balance + interval)) { |
| 10050 | if (load_balance(cpu, rq, sd, idle, &continue_balancing)) { |
| 10051 | /* |
| 10052 | * The LBF_DST_PINNED logic could have changed |
| 10053 | * env->dst_cpu, so we can't know our idle |
| 10054 | * state even if we migrated tasks. Update it. |
| 10055 | */ |
| 10056 | idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 10057 | busy = idle != CPU_IDLE && !sched_idle_cpu(cpu); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10058 | } |
| 10059 | sd->last_balance = jiffies; |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 10060 | interval = get_sd_balance_interval(sd, busy); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10061 | } |
| 10062 | if (need_serialize) |
| 10063 | spin_unlock(&balancing); |
| 10064 | out: |
| 10065 | if (time_after(next_balance, sd->last_balance + interval)) { |
| 10066 | next_balance = sd->last_balance + interval; |
| 10067 | update_next_balance = 1; |
| 10068 | } |
| 10069 | } |
| 10070 | if (need_decay) { |
| 10071 | /* |
| 10072 | * Ensure the rq-wide value also decays but keep it at a |
| 10073 | * reasonable floor to avoid funnies with rq->avg_idle. |
| 10074 | */ |
| 10075 | rq->max_idle_balance_cost = |
| 10076 | max((u64)sysctl_sched_migration_cost, max_cost); |
| 10077 | } |
| 10078 | rcu_read_unlock(); |
| 10079 | |
| 10080 | /* |
| 10081 | * next_balance will be updated only when there is a need. |
| 10082 | * When the CPU is attached to the null domain, for example, it will |
| 10083 | * not be updated. |
| 10084 | */ |
| 10085 | if (likely(update_next_balance)) { |
| 10086 | rq->next_balance = next_balance; |
| 10087 | |
| 10088 | #ifdef CONFIG_NO_HZ_COMMON |
| 10089 | /* |
| 10090 | * If this CPU has been elected to perform the nohz idle |
| 10091 | * balance. Other idle CPUs have already rebalanced with |
| 10092 | * nohz_idle_balance() and nohz.next_balance has been |
| 10093 | * updated accordingly. This CPU is now running the idle load |
| 10094 | * balance for itself and we need to update the |
| 10095 | * nohz.next_balance accordingly. |
| 10096 | */ |
| 10097 | if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance)) |
| 10098 | nohz.next_balance = rq->next_balance; |
| 10099 | #endif |
| 10100 | } |
| 10101 | } |
| 10102 | |
| 10103 | static inline int on_null_domain(struct rq *rq) |
| 10104 | { |
| 10105 | return unlikely(!rcu_dereference_sched(rq->sd)); |
| 10106 | } |
| 10107 | |
| 10108 | #ifdef CONFIG_NO_HZ_COMMON |
| 10109 | /* |
| 10110 | * idle load balancing details |
| 10111 | * - When one of the busy CPUs notices that idle rebalancing may be |
| 10112 | * needed, it will kick the idle load balancer, which then does idle |
| 10113 | * load balancing for all the idle CPUs. |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 10114 | * - HK_FLAG_MISC CPUs are used for this task, because HK_FLAG_SCHED is not |
| 10115 | * set anywhere yet. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10116 | */ |
| 10117 | |
| 10118 | static inline int find_new_ilb(void) |
| 10119 | { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 10120 | int ilb; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10121 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 10122 | for_each_cpu_and(ilb, nohz.idle_cpus_mask, |
| 10123 | housekeeping_cpumask(HK_FLAG_MISC)) { |
| 10124 | if (idle_cpu(ilb)) |
| 10125 | return ilb; |
| 10126 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10127 | |
| 10128 | return nr_cpu_ids; |
| 10129 | } |
| 10130 | |
| 10131 | /* |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 10132 | * Kick a CPU to do the nohz balancing, if it is time for it. We pick any |
| 10133 | * idle CPU in the HK_FLAG_MISC housekeeping set (if there is one). |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10134 | */ |
| 10135 | static void kick_ilb(unsigned int flags) |
| 10136 | { |
| 10137 | int ilb_cpu; |
| 10138 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 10139 | /* |
| 10140 | * Increase nohz.next_balance only if a full ilb is triggered, but |
| 10141 | * not if we only update stats. |
| 10142 | */ |
| 10143 | if (flags & NOHZ_BALANCE_KICK) |
| 10144 | nohz.next_balance = jiffies+1; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10145 | |
| 10146 | ilb_cpu = find_new_ilb(); |
| 10147 | |
| 10148 | if (ilb_cpu >= nr_cpu_ids) |
| 10149 | return; |
| 10150 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 10151 | /* |
| 10152 | * Access to rq::nohz_csd is serialized by NOHZ_KICK_MASK; he who sets |
| 10153 | * the first flag owns it; cleared by nohz_csd_func(). |
| 10154 | */ |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10155 | flags = atomic_fetch_or(flags, nohz_flags(ilb_cpu)); |
| 10156 | if (flags & NOHZ_KICK_MASK) |
| 10157 | return; |
| 10158 | |
| 10159 | /* |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 10160 | * This way we generate an IPI on the target CPU which |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10161 | * is idle. And the softirq performing nohz idle load balance |
| 10162 | * will be run before returning from the IPI. |
| 10163 | */ |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 10164 | smp_call_function_single_async(ilb_cpu, &cpu_rq(ilb_cpu)->nohz_csd); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10165 | } |
| 10166 | |
| 10167 | /* |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 10168 | * Current decision point for kicking the idle load balancer in the presence |
| 10169 | * of idle CPUs in the system. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10170 | */ |
| 10171 | static void nohz_balancer_kick(struct rq *rq) |
| 10172 | { |
| 10173 | unsigned long now = jiffies; |
| 10174 | struct sched_domain_shared *sds; |
| 10175 | struct sched_domain *sd; |
| 10176 | int nr_busy, i, cpu = rq->cpu; |
| 10177 | unsigned int flags = 0; |
| 10178 | |
| 10179 | if (unlikely(rq->idle_balance)) |
| 10180 | return; |
| 10181 | |
| 10182 | /* |
| 10183 | * We may have recently been in ticked or tickless idle mode. At the first |
| 10184 | * busy tick after returning from idle, we will update the busy stats. |
| 10185 | */ |
| 10186 | nohz_balance_exit_idle(rq); |
| 10187 | |
| 10188 | /* |
| 10189 | * None are in tickless mode and hence no need for NOHZ idle load |
| 10190 | * balancing. |
| 10191 | */ |
| 10192 | if (likely(!atomic_read(&nohz.nr_cpus))) |
| 10193 | return; |
| 10194 | |
| 10195 | if (READ_ONCE(nohz.has_blocked) && |
| 10196 | time_after(now, READ_ONCE(nohz.next_blocked))) |
| 10197 | flags = NOHZ_STATS_KICK; |
| 10198 | |
| 10199 | if (time_before(now, nohz.next_balance)) |
| 10200 | goto out; |
| 10201 | |
| 10202 | if (rq->nr_running >= 2) { |
| 10203 | flags = NOHZ_KICK_MASK; |
| 10204 | goto out; |
| 10205 | } |
| 10206 | |
| 10207 | rcu_read_lock(); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 10208 | |
| 10209 | sd = rcu_dereference(rq->sd); |
| 10210 | if (sd) { |
| 10211 | /* |
| 10212 | * If there's a CFS task and the current CPU has reduced |
| 10213 | * capacity; kick the ILB to see if there's a better CPU to run |
| 10214 | * on. |
| 10215 | */ |
| 10216 | if (rq->cfs.h_nr_running >= 1 && check_cpu_capacity(rq, sd)) { |
| 10217 | flags = NOHZ_KICK_MASK; |
| 10218 | goto unlock; |
| 10219 | } |
| 10220 | } |
| 10221 | |
| 10222 | sd = rcu_dereference(per_cpu(sd_asym_packing, cpu)); |
| 10223 | if (sd) { |
| 10224 | /* |
| 10225 | * When ASYM_PACKING; see if there's a more preferred CPU |
| 10226 | * currently idle; in which case, kick the ILB to move tasks |
| 10227 | * around. |
| 10228 | */ |
| 10229 | for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) { |
| 10230 | if (sched_asym_prefer(i, cpu)) { |
| 10231 | flags = NOHZ_KICK_MASK; |
| 10232 | goto unlock; |
| 10233 | } |
| 10234 | } |
| 10235 | } |
| 10236 | |
| 10237 | sd = rcu_dereference(per_cpu(sd_asym_cpucapacity, cpu)); |
| 10238 | if (sd) { |
| 10239 | /* |
| 10240 | * When ASYM_CPUCAPACITY; see if there's a higher capacity CPU |
| 10241 | * to run the misfit task on. |
| 10242 | */ |
| 10243 | if (check_misfit_status(rq, sd)) { |
| 10244 | flags = NOHZ_KICK_MASK; |
| 10245 | goto unlock; |
| 10246 | } |
| 10247 | |
| 10248 | /* |
| 10249 | * For asymmetric systems, we do not want to nicely balance |
| 10250 | * cache use, instead we want to embrace asymmetry and only |
| 10251 | * ensure tasks have enough CPU capacity. |
| 10252 | * |
| 10253 | * Skip the LLC logic because it's not relevant in that case. |
| 10254 | */ |
| 10255 | goto unlock; |
| 10256 | } |
| 10257 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10258 | sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); |
| 10259 | if (sds) { |
| 10260 | /* |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 10261 | * If there is an imbalance between LLC domains (IOW we could |
| 10262 | * increase the overall cache use), we need some less-loaded LLC |
| 10263 | * domain to pull some load. Likewise, we may need to spread |
| 10264 | * load within the current LLC domain (e.g. packed SMT cores but |
| 10265 | * other CPUs are idle). We can't really know from here how busy |
| 10266 | * the others are - so just get a nohz balance going if it looks |
| 10267 | * like this LLC domain has tasks we could move. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10268 | */ |
| 10269 | nr_busy = atomic_read(&sds->nr_busy_cpus); |
| 10270 | if (nr_busy > 1) { |
| 10271 | flags = NOHZ_KICK_MASK; |
| 10272 | goto unlock; |
| 10273 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10274 | } |
| 10275 | unlock: |
| 10276 | rcu_read_unlock(); |
| 10277 | out: |
| 10278 | if (flags) |
| 10279 | kick_ilb(flags); |
| 10280 | } |
| 10281 | |
| 10282 | static void set_cpu_sd_state_busy(int cpu) |
| 10283 | { |
| 10284 | struct sched_domain *sd; |
| 10285 | |
| 10286 | rcu_read_lock(); |
| 10287 | sd = rcu_dereference(per_cpu(sd_llc, cpu)); |
| 10288 | |
| 10289 | if (!sd || !sd->nohz_idle) |
| 10290 | goto unlock; |
| 10291 | sd->nohz_idle = 0; |
| 10292 | |
| 10293 | atomic_inc(&sd->shared->nr_busy_cpus); |
| 10294 | unlock: |
| 10295 | rcu_read_unlock(); |
| 10296 | } |
| 10297 | |
| 10298 | void nohz_balance_exit_idle(struct rq *rq) |
| 10299 | { |
| 10300 | SCHED_WARN_ON(rq != this_rq()); |
| 10301 | |
| 10302 | if (likely(!rq->nohz_tick_stopped)) |
| 10303 | return; |
| 10304 | |
| 10305 | rq->nohz_tick_stopped = 0; |
| 10306 | cpumask_clear_cpu(rq->cpu, nohz.idle_cpus_mask); |
| 10307 | atomic_dec(&nohz.nr_cpus); |
| 10308 | |
| 10309 | set_cpu_sd_state_busy(rq->cpu); |
| 10310 | } |
| 10311 | |
| 10312 | static void set_cpu_sd_state_idle(int cpu) |
| 10313 | { |
| 10314 | struct sched_domain *sd; |
| 10315 | |
| 10316 | rcu_read_lock(); |
| 10317 | sd = rcu_dereference(per_cpu(sd_llc, cpu)); |
| 10318 | |
| 10319 | if (!sd || sd->nohz_idle) |
| 10320 | goto unlock; |
| 10321 | sd->nohz_idle = 1; |
| 10322 | |
| 10323 | atomic_dec(&sd->shared->nr_busy_cpus); |
| 10324 | unlock: |
| 10325 | rcu_read_unlock(); |
| 10326 | } |
| 10327 | |
| 10328 | /* |
| 10329 | * This routine will record that the CPU is going idle with tick stopped. |
| 10330 | * This info will be used in performing idle load balancing in the future. |
| 10331 | */ |
| 10332 | void nohz_balance_enter_idle(int cpu) |
| 10333 | { |
| 10334 | struct rq *rq = cpu_rq(cpu); |
| 10335 | |
| 10336 | SCHED_WARN_ON(cpu != smp_processor_id()); |
| 10337 | |
| 10338 | /* If this CPU is going down, then nothing needs to be done: */ |
| 10339 | if (!cpu_active(cpu)) |
| 10340 | return; |
| 10341 | |
| 10342 | /* Spare idle load balancing on CPUs that don't want to be disturbed: */ |
| 10343 | if (!housekeeping_cpu(cpu, HK_FLAG_SCHED)) |
| 10344 | return; |
| 10345 | |
| 10346 | /* |
| 10347 |  * Can be set safely without rq->lock held. |
| 10348 |  * If a clear happens, it will have evaluated the last additions, because |
| 10349 |  * rq->lock is held during both the check and the clear. |
| 10350 | */ |
| 10351 | rq->has_blocked_load = 1; |
| 10352 | |
| 10353 | /* |
| 10354 | * The tick is still stopped but load could have been added in the |
| 10355 |  * meantime. We set the nohz.has_blocked flag to trigger a check of the |
| 10356 |  * *_avg. The CPU is already part of nohz.idle_cpus_mask so the clear |
| 10357 |  * of nohz.has_blocked can only happen after checking the new load. |
| 10358 | */ |
| 10359 | if (rq->nohz_tick_stopped) |
| 10360 | goto out; |
| 10361 | |
| 10362 | /* If we're a completely isolated CPU, we don't play: */ |
| 10363 | if (on_null_domain(rq)) |
| 10364 | return; |
| 10365 | |
| 10366 | rq->nohz_tick_stopped = 1; |
| 10367 | |
| 10368 | cpumask_set_cpu(cpu, nohz.idle_cpus_mask); |
| 10369 | atomic_inc(&nohz.nr_cpus); |
| 10370 | |
| 10371 | /* |
| 10372 | * Ensures that if nohz_idle_balance() fails to observe our |
| 10373 | * @idle_cpus_mask store, it must observe the @has_blocked |
| 10374 | * store. |
| 10375 | */ |
| 10376 | smp_mb__after_atomic(); |
| 10377 | |
| 10378 | set_cpu_sd_state_idle(cpu); |
| 10379 | |
| 10380 | out: |
| 10381 | /* |
| 10382 |  * Each time a CPU enters idle, we assume that it has blocked load and |
| 10383 |  * enable the periodic update of the load of idle CPUs. |
| 10384 | */ |
| 10385 | WRITE_ONCE(nohz.has_blocked, 1); |
| 10386 | } |
| 10387 | |
| 10388 | /* |
| 10389 |  * Internal function that runs load balance for all idle CPUs. Depending on |
| 10390 |  * the flags, the load balance can be a simple update of blocked load or a |
| 10391 |  * complete load balance with task movement. |
| 10392 | * The function returns false if the loop has stopped before running |
| 10393 | * through all idle CPUs. |
| 10394 | */ |
| 10395 | static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags, |
| 10396 | enum cpu_idle_type idle) |
| 10397 | { |
| 10398 | /* Earliest time when we have to do rebalance again */ |
| 10399 | unsigned long now = jiffies; |
| 10400 | unsigned long next_balance = now + 60*HZ; |
| 10401 | bool has_blocked_load = false; |
| 10402 | int update_next_balance = 0; |
| 10403 | int this_cpu = this_rq->cpu; |
| 10404 | int balance_cpu; |
| 10405 | int ret = false; |
| 10406 | struct rq *rq; |
| 10407 | |
| 10408 | SCHED_WARN_ON((flags & NOHZ_KICK_MASK) == NOHZ_BALANCE_KICK); |
| 10409 | |
| 10410 | /* |
| 10411 | * We assume there will be no idle load after this update and clear |
| 10412 |  * the has_blocked flag. If a CPU enters idle in the meantime, it will |
| 10413 |  * set the has_blocked flag and trigger another update of idle load. |
| 10414 |  * Because a CPU that becomes idle is added to idle_cpus_mask before |
| 10415 |  * setting the flag, we are sure not to clear the state and then skip |
| 10416 |  * checking the load of an idle CPU. |
| 10417 | */ |
| 10418 | WRITE_ONCE(nohz.has_blocked, 0); |
| 10419 | |
| 10420 | /* |
| 10421 | * Ensures that if we miss the CPU, we must see the has_blocked |
| 10422 | * store from nohz_balance_enter_idle(). |
| 10423 | */ |
| 10424 | smp_mb(); |
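| |  /* |
| |  * Reader's sketch of the pairing (informal, derived from the comments |
| |  * here and in nohz_balance_enter_idle()): |
| |  * |
| |  *   idle CPU                           ILB CPU |
| |  *   --------                           ------- |
| |  *   rq->has_blocked_load = 1           WRITE_ONCE(nohz.has_blocked, 0) |
| |  *   set self in nohz.idle_cpus_mask    smp_mb() |
| |  *   smp_mb__after_atomic()             scan nohz.idle_cpus_mask |
| |  *   WRITE_ONCE(nohz.has_blocked, 1) |
| |  * |
| |  * Either the scan sees the newly idle CPU in the mask, or the ILB CPU |
| |  * later sees has_blocked == 1 and re-arms; blocked load may be reported |
| |  * late, but it is never lost. |
| |  */ |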
| 10425 | |
| 10426 | for_each_cpu(balance_cpu, nohz.idle_cpus_mask) { |
| 10427 | if (balance_cpu == this_cpu || !idle_cpu(balance_cpu)) |
| 10428 | continue; |
| 10429 | |
| 10430 | /* |
| 10431 | * If this CPU gets work to do, stop the load balancing |
| 10432 | 		 * work being done for other CPUs. The next load |
| 10433 | 		 * balancing owner will pick it up. |
| 10434 | */ |
| 10435 | if (need_resched()) { |
| 10436 | has_blocked_load = true; |
| 10437 | goto abort; |
| 10438 | } |
| 10439 | |
| 10440 | rq = cpu_rq(balance_cpu); |
| 10441 | |
| 10442 | has_blocked_load |= update_nohz_stats(rq, true); |
| 10443 | |
| 10444 | /* |
| 10445 | 		 * If the time for the next balance is due, |
| 10446 | * do the balance. |
| 10447 | */ |
| 10448 | if (time_after_eq(jiffies, rq->next_balance)) { |
| 10449 | struct rq_flags rf; |
| 10450 | |
| 10451 | rq_lock_irqsave(rq, &rf); |
| 10452 | update_rq_clock(rq); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10453 | rq_unlock_irqrestore(rq, &rf); |
| 10454 | |
| 10455 | if (flags & NOHZ_BALANCE_KICK) |
| 10456 | rebalance_domains(rq, CPU_IDLE); |
| 10457 | } |
| 10458 | |
| 10459 | if (time_after(next_balance, rq->next_balance)) { |
| 10460 | next_balance = rq->next_balance; |
| 10461 | update_next_balance = 1; |
| 10462 | } |
| 10463 | } |
| 10464 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 10465 | /* |
| 10466 | * next_balance will be updated only when there is a need. |
| 10467 | 	 * When the CPU is attached to a null domain, for example, it will not be |
| 10468 | * updated. |
| 10469 | */ |
| 10470 | if (likely(update_next_balance)) |
| 10471 | nohz.next_balance = next_balance; |
| 10472 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10473 | /* Newly idle CPU doesn't need an update */ |
| 10474 | if (idle != CPU_NEWLY_IDLE) { |
| 10475 | update_blocked_averages(this_cpu); |
| 10476 | has_blocked_load |= this_rq->has_blocked_load; |
| 10477 | } |
| 10478 | |
| 10479 | if (flags & NOHZ_BALANCE_KICK) |
| 10480 | rebalance_domains(this_rq, CPU_IDLE); |
| 10481 | |
| 10482 | WRITE_ONCE(nohz.next_blocked, |
| 10483 | now + msecs_to_jiffies(LOAD_AVG_PERIOD)); |
| 10484 | |
| 10485 | /* The full idle balance loop has been done */ |
| 10486 | ret = true; |
| 10487 | |
| 10488 | abort: |
| 10489 | 	/* There is still blocked load; enable the periodic update */ |
| 10490 | if (has_blocked_load) |
| 10491 | WRITE_ONCE(nohz.has_blocked, 1); |
| 10492 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10493 | return ret; |
| 10494 | } |
| 10495 | |
| 10496 | /* |
| 10497 | * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the |
| 10498 |  * rebalancing for all the CPUs whose scheduler ticks are stopped. |
| 10499 | */ |
| 10500 | static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) |
| 10501 | { |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 10502 | unsigned int flags = this_rq->nohz_idle_balance; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10503 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 10504 | if (!flags) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10505 | return false; |
| 10506 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 10507 | this_rq->nohz_idle_balance = 0; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10508 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 10509 | if (idle != CPU_IDLE) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10510 | return false; |
| 10511 | |
| 10512 | _nohz_idle_balance(this_rq, flags, idle); |
| 10513 | |
| 10514 | return true; |
| 10515 | } |
| 10516 | |
| 10517 | static void nohz_newidle_balance(struct rq *this_rq) |
| 10518 | { |
| 10519 | int this_cpu = this_rq->cpu; |
| 10520 | |
| 10521 | /* |
| 10522 | * This CPU doesn't want to be disturbed by scheduler |
| 10523 | * housekeeping |
| 10524 | */ |
| 10525 | if (!housekeeping_cpu(this_cpu, HK_FLAG_SCHED)) |
| 10526 | return; |
| 10527 | |
| 10528 | 	/* Will wake up very soon. No time for doing anything else. */ |
| 10529 | if (this_rq->avg_idle < sysctl_sched_migration_cost) |
| 10530 | return; |
| 10531 | |
| 10532 | 	/* Don't need to update blocked load of idle CPUs. */ |
| 10533 | if (!READ_ONCE(nohz.has_blocked) || |
| 10534 | time_before(jiffies, READ_ONCE(nohz.next_blocked))) |
| 10535 | return; |
| 10536 | |
| 10537 | raw_spin_unlock(&this_rq->lock); |
| 10538 | /* |
| 10539 | * This CPU is going to be idle and blocked load of idle CPUs |
| 10540 | 	 * needs to be updated. Run the ilb locally, as this CPU is a good |
| 10541 | 	 * candidate for the ilb instead of waking up another idle CPU. |
| 10542 | 	 * Kick a normal ilb if we failed to do the update. |
| 10543 | */ |
| 10544 | if (!_nohz_idle_balance(this_rq, NOHZ_STATS_KICK, CPU_NEWLY_IDLE)) |
| 10545 | kick_ilb(NOHZ_STATS_KICK); |
| 10546 | raw_spin_lock(&this_rq->lock); |
| 10547 | } |
| 10548 | |
| 10549 | #else /* !CONFIG_NO_HZ_COMMON */ |
| 10550 | static inline void nohz_balancer_kick(struct rq *rq) { } |
| 10551 | |
| 10552 | static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) |
| 10553 | { |
| 10554 | return false; |
| 10555 | } |
| 10556 | |
| 10557 | static inline void nohz_newidle_balance(struct rq *this_rq) { } |
| 10558 | #endif /* CONFIG_NO_HZ_COMMON */ |
| 10559 | |
| 10560 | /* |
| 10561 |  * newidle_balance is called by schedule() if this_cpu is about to become |
| 10562 | * idle. Attempts to pull tasks from other CPUs. |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 10563 | * |
| 10564 | * Returns: |
| 10565 | * < 0 - we released the lock and there are !fair tasks present |
| 10566 | * 0 - failed, no new tasks |
| 10567 | * > 0 - success, new (fair) tasks present |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10568 | */ |
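| | /* |
| |  * Reader's note: the < 0 case is consumed by pick_next_task_fair(), which |
| |  * treats it as "retry the class pick", since a task of a higher scheduling |
| |  * class may have appeared while the rq lock was dropped below. |
| |  */ |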
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 10569 | static int newidle_balance(struct rq *this_rq, struct rq_flags *rf) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10570 | { |
| 10571 | unsigned long next_balance = jiffies + HZ; |
| 10572 | int this_cpu = this_rq->cpu; |
| 10573 | struct sched_domain *sd; |
| 10574 | int pulled_task = 0; |
| 10575 | u64 curr_cost = 0; |
| 10576 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 10577 | update_misfit_status(NULL, this_rq); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10578 | /* |
| 10579 | 	 * We must set idle_stamp _before_ calling newidle_balance(), such that we |
| 10580 | 	 * measure the duration of newidle_balance() as idle time. |
| 10581 | */ |
| 10582 | this_rq->idle_stamp = rq_clock(this_rq); |
| 10583 | |
| 10584 | /* |
| 10585 | * Do not pull tasks towards !active CPUs... |
| 10586 | */ |
| 10587 | if (!cpu_active(this_cpu)) |
| 10588 | return 0; |
| 10589 | |
| 10590 | /* |
| 10591 | * This is OK, because current is on_cpu, which avoids it being picked |
| 10592 | * for load-balance and preemption/IRQs are still disabled avoiding |
| 10593 | * further scheduler activity on it and we're being very careful to |
| 10594 | * re-start the picking loop. |
| 10595 | */ |
| 10596 | rq_unpin_lock(this_rq, rf); |
| 10597 | |
| 10598 | if (this_rq->avg_idle < sysctl_sched_migration_cost || |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 10599 | !READ_ONCE(this_rq->rd->overload)) { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10600 | |
| 10601 | rcu_read_lock(); |
| 10602 | sd = rcu_dereference_check_sched_domain(this_rq->sd); |
| 10603 | if (sd) |
| 10604 | update_next_balance(sd, &next_balance); |
| 10605 | rcu_read_unlock(); |
| 10606 | |
| 10607 | nohz_newidle_balance(this_rq); |
| 10608 | |
| 10609 | goto out; |
| 10610 | } |
| 10611 | |
| 10612 | raw_spin_unlock(&this_rq->lock); |
| 10613 | |
| 10614 | update_blocked_averages(this_cpu); |
| 10615 | rcu_read_lock(); |
| 10616 | for_each_domain(this_cpu, sd) { |
| 10617 | int continue_balancing = 1; |
| 10618 | u64 t0, domain_cost; |
| 10619 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10620 | if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) { |
| 10621 | update_next_balance(sd, &next_balance); |
| 10622 | break; |
| 10623 | } |
| 10624 | |
| 10625 | if (sd->flags & SD_BALANCE_NEWIDLE) { |
| 10626 | t0 = sched_clock_cpu(this_cpu); |
| 10627 | |
| 10628 | pulled_task = load_balance(this_cpu, this_rq, |
| 10629 | sd, CPU_NEWLY_IDLE, |
| 10630 | &continue_balancing); |
| 10631 | |
| 10632 | domain_cost = sched_clock_cpu(this_cpu) - t0; |
| 10633 | if (domain_cost > sd->max_newidle_lb_cost) |
| 10634 | sd->max_newidle_lb_cost = domain_cost; |
| 10635 | |
| 10636 | curr_cost += domain_cost; |
| 10637 | } |
| 10638 | |
| 10639 | update_next_balance(sd, &next_balance); |
| 10640 | |
| 10641 | /* |
| 10642 | * Stop searching for tasks to pull if there are |
| 10643 | * now runnable tasks on this rq. |
| 10644 | */ |
| 10645 | if (pulled_task || this_rq->nr_running > 0) |
| 10646 | break; |
| 10647 | } |
| 10648 | rcu_read_unlock(); |
| 10649 | |
| 10650 | raw_spin_lock(&this_rq->lock); |
| 10651 | |
| 10652 | if (curr_cost > this_rq->max_idle_balance_cost) |
| 10653 | this_rq->max_idle_balance_cost = curr_cost; |
| 10654 | |
| 10655 | out: |
| 10656 | /* |
| 10657 | 	 * While browsing the domains, we released the rq lock; a task could |
| 10658 | * have been enqueued in the meantime. Since we're not going idle, |
| 10659 | * pretend we pulled a task. |
| 10660 | */ |
| 10661 | if (this_rq->cfs.h_nr_running && !pulled_task) |
| 10662 | pulled_task = 1; |
| 10663 | |
| 10664 | /* Move the next balance forward */ |
| 10665 | if (time_after(this_rq->next_balance, next_balance)) |
| 10666 | this_rq->next_balance = next_balance; |
| 10667 | |
| 10668 | /* Is there a task of a high priority class? */ |
| 10669 | if (this_rq->nr_running != this_rq->cfs.h_nr_running) |
| 10670 | pulled_task = -1; |
| 10671 | |
| 10672 | if (pulled_task) |
| 10673 | this_rq->idle_stamp = 0; |
| 10674 | |
| 10675 | rq_repin_lock(this_rq, rf); |
| 10676 | |
| 10677 | return pulled_task; |
| 10678 | } |
| 10679 | |
| 10680 | /* |
| 10681 | * run_rebalance_domains is triggered when needed from the scheduler tick. |
| 10682 | * Also triggered for nohz idle balancing (with nohz_balancing_kick set). |
| 10683 | */ |
| 10684 | static __latent_entropy void run_rebalance_domains(struct softirq_action *h) |
| 10685 | { |
| 10686 | struct rq *this_rq = this_rq(); |
| 10687 | enum cpu_idle_type idle = this_rq->idle_balance ? |
| 10688 | CPU_IDLE : CPU_NOT_IDLE; |
| 10689 | |
| 10690 | /* |
| 10691 | * If this CPU has a pending nohz_balance_kick, then do the |
| 10692 | * balancing on behalf of the other idle CPUs whose ticks are |
| 10693 | * stopped. Do nohz_idle_balance *before* rebalance_domains to |
| 10694 | * give the idle CPUs a chance to load balance. Else we may |
| 10695 | * load balance only within the local sched_domain hierarchy |
| 10696 | * and abort nohz_idle_balance altogether if we pull some load. |
| 10697 | */ |
| 10698 | if (nohz_idle_balance(this_rq, idle)) |
| 10699 | return; |
| 10700 | |
| 10701 | /* normal load balance */ |
| 10702 | update_blocked_averages(this_rq->cpu); |
| 10703 | rebalance_domains(this_rq, idle); |
| 10704 | } |
| 10705 | |
| 10706 | /* |
| 10707 | * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing. |
| 10708 | */ |
| 10709 | void trigger_load_balance(struct rq *rq) |
| 10710 | { |
| 10711 | /* Don't need to rebalance while attached to NULL domain */ |
| 10712 | if (unlikely(on_null_domain(rq))) |
| 10713 | return; |
| 10714 | |
| 10715 | if (time_after_eq(jiffies, rq->next_balance)) |
| 10716 | raise_softirq(SCHED_SOFTIRQ); |
| 10717 | |
| 10718 | nohz_balancer_kick(rq); |
| 10719 | } |
| 10720 | |
| 10721 | static void rq_online_fair(struct rq *rq) |
| 10722 | { |
| 10723 | update_sysctl(); |
| 10724 | |
| 10725 | update_runtime_enabled(rq); |
| 10726 | } |
| 10727 | |
| 10728 | static void rq_offline_fair(struct rq *rq) |
| 10729 | { |
| 10730 | update_sysctl(); |
| 10731 | |
| 10732 | /* Ensure any throttled groups are reachable by pick_next_task */ |
| 10733 | unthrottle_offline_cfs_rqs(rq); |
| 10734 | } |
| 10735 | |
| 10736 | #endif /* CONFIG_SMP */ |
| 10737 | |
| 10738 | /* |
| 10739 | * scheduler tick hitting a task of our scheduling class. |
| 10740 | * |
| 10741 | * NOTE: This function can be called remotely by the tick offload that |
| 10742 |  * goes along with full dynticks. Therefore no local assumption can be made |
| 10743 | * and everything must be accessed through the @rq and @curr passed in |
| 10744 | * parameters. |
| 10745 | */ |
| 10746 | static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) |
| 10747 | { |
| 10748 | struct cfs_rq *cfs_rq; |
| 10749 | struct sched_entity *se = &curr->se; |
| 10750 | |
| 10751 | for_each_sched_entity(se) { |
| 10752 | cfs_rq = cfs_rq_of(se); |
| 10753 | entity_tick(cfs_rq, se, queued); |
| 10754 | } |
| 10755 | |
| 10756 | if (static_branch_unlikely(&sched_numa_balancing)) |
| 10757 | task_tick_numa(rq, curr); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 10758 | |
| 10759 | update_misfit_status(curr, rq); |
| 10760 | update_overutilized_status(task_rq(curr)); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10761 | } |
| 10762 | |
| 10763 | /* |
| 10764 | * called on fork with the child task as argument from the parent's context |
| 10765 | * - child not yet on the tasklist |
| 10766 | * - preemption disabled |
| 10767 | */ |
| 10768 | static void task_fork_fair(struct task_struct *p) |
| 10769 | { |
| 10770 | struct cfs_rq *cfs_rq; |
| 10771 | struct sched_entity *se = &p->se, *curr; |
| 10772 | struct rq *rq = this_rq(); |
| 10773 | struct rq_flags rf; |
| 10774 | |
| 10775 | rq_lock(rq, &rf); |
| 10776 | update_rq_clock(rq); |
| 10777 | |
| 10778 | cfs_rq = task_cfs_rq(current); |
| 10779 | curr = cfs_rq->curr; |
| 10780 | if (curr) { |
| 10781 | update_curr(cfs_rq); |
| 10782 | se->vruntime = curr->vruntime; |
| 10783 | } |
| 10784 | place_entity(cfs_rq, se, 1); |
| 10785 | |
| 10786 | if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) { |
| 10787 | /* |
| 10788 | * Upon rescheduling, sched_class::put_prev_task() will place |
| 10789 | * 'current' within the tree based on its new key value. |
| 10790 | */ |
| 10791 | swap(curr->vruntime, se->vruntime); |
| 10792 | resched_curr(rq); |
| 10793 | } |
| 10794 | |
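| | 	/* |
| | 	 * Reader's note: store a min_vruntime-relative value here, since the |
| | 	 * child may be enqueued on a different CPU by wake_up_new_task(); the |
| | 	 * destination queue's own min_vruntime is added back on enqueue. |
| | 	 */ |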
| 10795 | se->vruntime -= cfs_rq->min_vruntime; |
| 10796 | rq_unlock(rq, &rf); |
| 10797 | } |
| 10798 | |
| 10799 | /* |
| 10800 | * Priority of the task has changed. Check to see if we preempt |
| 10801 | * the current task. |
| 10802 | */ |
| 10803 | static void |
| 10804 | prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) |
| 10805 | { |
| 10806 | if (!task_on_rq_queued(p)) |
| 10807 | return; |
| 10808 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 10809 | if (rq->cfs.nr_running == 1) |
| 10810 | return; |
| 10811 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10812 | /* |
| 10813 | * Reschedule if we are currently running on this runqueue and |
| 10814 | * our priority decreased, or if we are not currently running on |
| 10815 | * this runqueue and our priority is higher than the current's |
| 10816 | */ |
| 10817 | if (rq->curr == p) { |
| 10818 | if (p->prio > oldprio) |
| 10819 | resched_curr(rq); |
| 10820 | } else |
| 10821 | check_preempt_curr(rq, p, 0); |
| 10822 | } |
| 10823 | |
| 10824 | static inline bool vruntime_normalized(struct task_struct *p) |
| 10825 | { |
| 10826 | struct sched_entity *se = &p->se; |
| 10827 | |
| 10828 | /* |
| 10829 | * In both the TASK_ON_RQ_QUEUED and TASK_ON_RQ_MIGRATING cases, |
| 10830 | * the dequeue_entity(.flags=0) will already have normalized the |
| 10831 | * vruntime. |
| 10832 | */ |
| 10833 | if (p->on_rq) |
| 10834 | return true; |
| 10835 | |
| 10836 | /* |
| 10837 | * When !on_rq, vruntime of the task has usually NOT been normalized. |
| 10838 | * But there are some cases where it has already been normalized: |
| 10839 | * |
| 10840 | 	 * - A forked child which is waiting to be woken up by |
| 10841 | 	 *   wake_up_new_task(). |
| 10842 | 	 * - A task which has been woken up by try_to_wake_up() and is |
| 10843 | 	 *   waiting to actually be woken up by sched_ttwu_pending(). |
| 10844 | */ |
| 10845 | if (!se->sum_exec_runtime || |
| 10846 | (p->state == TASK_WAKING && p->sched_remote_wakeup)) |
| 10847 | return true; |
| 10848 | |
| 10849 | return false; |
| 10850 | } |
| 10851 | |
| 10852 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 10853 | /* |
| 10854 |  * Propagate the changes of the sched_entity across the tg tree to make them |
| 10855 |  * visible to the root. |
| 10856 | */ |
| 10857 | static void propagate_entity_cfs_rq(struct sched_entity *se) |
| 10858 | { |
| 10859 | struct cfs_rq *cfs_rq; |
| 10860 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 10861 | list_add_leaf_cfs_rq(cfs_rq_of(se)); |
| 10862 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10863 | /* Start to propagate at parent */ |
| 10864 | se = se->parent; |
| 10865 | |
| 10866 | for_each_sched_entity(se) { |
| 10867 | cfs_rq = cfs_rq_of(se); |
| 10868 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 10869 | 		if (!cfs_rq_throttled(cfs_rq)) { |
| 10870 | update_load_avg(cfs_rq, se, UPDATE_TG); |
| 10871 | list_add_leaf_cfs_rq(cfs_rq); |
| 10872 | continue; |
| 10873 | } |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10874 | |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 10875 | if (list_add_leaf_cfs_rq(cfs_rq)) |
| 10876 | break; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10877 | } |
| 10878 | } |
| 10879 | #else |
| 10880 | static void propagate_entity_cfs_rq(struct sched_entity *se) { } |
| 10881 | #endif |
| 10882 | |
| 10883 | static void detach_entity_cfs_rq(struct sched_entity *se) |
| 10884 | { |
| 10885 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
| 10886 | |
| 10887 | /* Catch up with the cfs_rq and remove our load when we leave */ |
| 10888 | update_load_avg(cfs_rq, se, 0); |
| 10889 | detach_entity_load_avg(cfs_rq, se); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 10890 | update_tg_load_avg(cfs_rq); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10891 | propagate_entity_cfs_rq(se); |
| 10892 | } |
| 10893 | |
| 10894 | static void attach_entity_cfs_rq(struct sched_entity *se) |
| 10895 | { |
| 10896 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
| 10897 | |
| 10898 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 10899 | /* |
| 10900 | 	 * Since the real depth could have been changed (only the FAIR |
| 10901 | 	 * class maintains a depth value), reset the depth properly. |
| 10902 | */ |
| 10903 | se->depth = se->parent ? se->parent->depth + 1 : 0; |
| 10904 | #endif |
| 10905 | |
| 10906 | /* Synchronize entity with its cfs_rq */ |
| 10907 | update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 10908 | attach_entity_load_avg(cfs_rq, se); |
| 10909 | update_tg_load_avg(cfs_rq); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10910 | propagate_entity_cfs_rq(se); |
| 10911 | } |
| 10912 | |
| 10913 | static void detach_task_cfs_rq(struct task_struct *p) |
| 10914 | { |
| 10915 | struct sched_entity *se = &p->se; |
| 10916 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
| 10917 | |
| 10918 | if (!vruntime_normalized(p)) { |
| 10919 | /* |
| 10920 | * Fix up our vruntime so that the current sleep doesn't |
| 10921 | 		 * cause an 'unlimited' sleep bonus. |
| 10922 | */ |
| 10923 | place_entity(cfs_rq, se, 0); |
| 10924 | se->vruntime -= cfs_rq->min_vruntime; |
| 10925 | } |
| 10926 | |
| 10927 | detach_entity_cfs_rq(se); |
| 10928 | } |
| 10929 | |
| 10930 | static void attach_task_cfs_rq(struct task_struct *p) |
| 10931 | { |
| 10932 | struct sched_entity *se = &p->se; |
| 10933 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
| 10934 | |
| 10935 | attach_entity_cfs_rq(se); |
| 10936 | |
| 10937 | if (!vruntime_normalized(p)) |
| 10938 | se->vruntime += cfs_rq->min_vruntime; |
| 10939 | } |
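| | /* |
| |  * Worked example with made-up numbers: a task leaves a queue whose |
| |  * min_vruntime is 1000ns while its se->vruntime is 1200ns, so detach |
| |  * stores the 200ns delta. If it later attaches to a queue whose |
| |  * min_vruntime is 5000ns, attach yields 5200ns: the task keeps its |
| |  * relative position instead of carrying a stale absolute vruntime that |
| |  * would grant an unbounded bonus (or penalty) on the new queue. |
| |  */ |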
| 10940 | |
| 10941 | static void switched_from_fair(struct rq *rq, struct task_struct *p) |
| 10942 | { |
| 10943 | detach_task_cfs_rq(p); |
| 10944 | } |
| 10945 | |
| 10946 | static void switched_to_fair(struct rq *rq, struct task_struct *p) |
| 10947 | { |
| 10948 | attach_task_cfs_rq(p); |
| 10949 | |
| 10950 | if (task_on_rq_queued(p)) { |
| 10951 | /* |
| 10952 | * We were most likely switched from sched_rt, so |
| 10953 | * kick off the schedule if running, otherwise just see |
| 10954 | * if we can still preempt the current task. |
| 10955 | */ |
| 10956 | if (rq->curr == p) |
| 10957 | resched_curr(rq); |
| 10958 | else |
| 10959 | check_preempt_curr(rq, p, 0); |
| 10960 | } |
| 10961 | } |
| 10962 | |
| 10963 | /* |
| |  * Account for a task changing its policy or group. |
| 10964 | * |
| 10965 | * This routine is mostly called to set cfs_rq->curr field when a task |
| 10966 | * migrates between groups/classes. |
| 10967 | */ |
Olivier Deprez | 0e64123 | 2021-09-23 10:07:05 +0200 | [diff] [blame] | 10968 | static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first) |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10969 | { |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 10970 | struct sched_entity *se = &p->se; |
| 10971 | |
| 10972 | #ifdef CONFIG_SMP |
| 10973 | if (task_on_rq_queued(p)) { |
| 10974 | /* |
| 10975 | * Move the next running task to the front of the list, so our |
| 10976 | * cfs_tasks list becomes MRU one. |
| 10977 | */ |
| 10978 | list_move(&se->group_node, &rq->cfs_tasks); |
| 10979 | } |
| 10980 | #endif |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 10981 | |
| 10982 | for_each_sched_entity(se) { |
| 10983 | struct cfs_rq *cfs_rq = cfs_rq_of(se); |
| 10984 | |
| 10985 | set_next_entity(cfs_rq, se); |
| 10986 | /* ensure bandwidth has been allocated on our new cfs_rq */ |
| 10987 | account_cfs_rq_runtime(cfs_rq, 0); |
| 10988 | } |
| 10989 | } |
| 10990 | |
| 10991 | void init_cfs_rq(struct cfs_rq *cfs_rq) |
| 10992 | { |
| 10993 | cfs_rq->tasks_timeline = RB_ROOT_CACHED; |
| 10994 | cfs_rq->min_vruntime = (u64)(-(1LL << 20)); |
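| | 	/* |
| | 	 * Reader's note: the unusual initial value above is presumably chosen, |
| | 	 * much like the initial jiffies value, so that u64 wrap-around in the |
| | 	 * min_vruntime comparisons is exercised soon after boot rather than |
| | 	 * only after a very long uptime. |
| | 	 */ |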
| 10995 | #ifndef CONFIG_64BIT |
| 10996 | cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime; |
| 10997 | #endif |
| 10998 | #ifdef CONFIG_SMP |
| 10999 | raw_spin_lock_init(&cfs_rq->removed.lock); |
| 11000 | #endif |
| 11001 | } |
| 11002 | |
| 11003 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 11004 | static void task_set_group_fair(struct task_struct *p) |
| 11005 | { |
| 11006 | struct sched_entity *se = &p->se; |
| 11007 | |
| 11008 | set_task_rq(p, task_cpu(p)); |
| 11009 | se->depth = se->parent ? se->parent->depth + 1 : 0; |
| 11010 | } |
| 11011 | |
| 11012 | static void task_move_group_fair(struct task_struct *p) |
| 11013 | { |
| 11014 | detach_task_cfs_rq(p); |
| 11015 | set_task_rq(p, task_cpu(p)); |
| 11016 | |
| 11017 | #ifdef CONFIG_SMP |
| 11018 | 	/* Tell that se's cfs_rq has changed -- the entity has migrated */ |
| 11019 | p->se.avg.last_update_time = 0; |
| 11020 | #endif |
| 11021 | attach_task_cfs_rq(p); |
| 11022 | } |
| 11023 | |
| 11024 | static void task_change_group_fair(struct task_struct *p, int type) |
| 11025 | { |
| 11026 | switch (type) { |
| 11027 | case TASK_SET_GROUP: |
| 11028 | task_set_group_fair(p); |
| 11029 | break; |
| 11030 | |
| 11031 | case TASK_MOVE_GROUP: |
| 11032 | task_move_group_fair(p); |
| 11033 | break; |
| 11034 | } |
| 11035 | } |
| 11036 | |
| 11037 | void free_fair_sched_group(struct task_group *tg) |
| 11038 | { |
| 11039 | int i; |
| 11040 | |
| 11041 | destroy_cfs_bandwidth(tg_cfs_bandwidth(tg)); |
| 11042 | |
| 11043 | for_each_possible_cpu(i) { |
| 11044 | if (tg->cfs_rq) |
| 11045 | kfree(tg->cfs_rq[i]); |
| 11046 | if (tg->se) |
| 11047 | kfree(tg->se[i]); |
| 11048 | } |
| 11049 | |
| 11050 | kfree(tg->cfs_rq); |
| 11051 | kfree(tg->se); |
| 11052 | } |
| 11053 | |
| 11054 | int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) |
| 11055 | { |
| 11056 | struct sched_entity *se; |
| 11057 | struct cfs_rq *cfs_rq; |
| 11058 | int i; |
| 11059 | |
| 11060 | tg->cfs_rq = kcalloc(nr_cpu_ids, sizeof(cfs_rq), GFP_KERNEL); |
| 11061 | if (!tg->cfs_rq) |
| 11062 | goto err; |
| 11063 | tg->se = kcalloc(nr_cpu_ids, sizeof(se), GFP_KERNEL); |
| 11064 | if (!tg->se) |
| 11065 | goto err; |
| 11066 | |
| 11067 | tg->shares = NICE_0_LOAD; |
| 11068 | |
| 11069 | init_cfs_bandwidth(tg_cfs_bandwidth(tg)); |
| 11070 | |
| 11071 | for_each_possible_cpu(i) { |
| 11072 | cfs_rq = kzalloc_node(sizeof(struct cfs_rq), |
| 11073 | GFP_KERNEL, cpu_to_node(i)); |
| 11074 | if (!cfs_rq) |
| 11075 | goto err; |
| 11076 | |
| 11077 | se = kzalloc_node(sizeof(struct sched_entity), |
| 11078 | GFP_KERNEL, cpu_to_node(i)); |
| 11079 | if (!se) |
| 11080 | goto err_free_rq; |
| 11081 | |
| 11082 | init_cfs_rq(cfs_rq); |
| 11083 | init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); |
| 11084 | init_entity_runnable_average(se); |
| 11085 | } |
| 11086 | |
| 11087 | return 1; |
| 11088 | |
| 11089 | err_free_rq: |
| 11090 | kfree(cfs_rq); |
| 11091 | err: |
| 11092 | return 0; |
| 11093 | } |
| 11094 | |
| 11095 | void online_fair_sched_group(struct task_group *tg) |
| 11096 | { |
| 11097 | struct sched_entity *se; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 11098 | struct rq_flags rf; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 11099 | struct rq *rq; |
| 11100 | int i; |
| 11101 | |
| 11102 | for_each_possible_cpu(i) { |
| 11103 | rq = cpu_rq(i); |
| 11104 | se = tg->se[i]; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 11105 | rq_lock_irq(rq, &rf); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 11106 | update_rq_clock(rq); |
| 11107 | attach_entity_cfs_rq(se); |
| 11108 | sync_throttle(tg, i); |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 11109 | rq_unlock_irq(rq, &rf); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 11110 | } |
| 11111 | } |
| 11112 | |
| 11113 | void unregister_fair_sched_group(struct task_group *tg) |
| 11114 | { |
| 11115 | unsigned long flags; |
| 11116 | struct rq *rq; |
| 11117 | int cpu; |
| 11118 | |
| 11119 | for_each_possible_cpu(cpu) { |
| 11120 | if (tg->se[cpu]) |
| 11121 | remove_entity_load_avg(tg->se[cpu]); |
| 11122 | |
| 11123 | /* |
| 11124 | 		 * Only empty task groups can be destroyed, so we can speculatively |
| 11125 | * check on_list without danger of it being re-added. |
| 11126 | */ |
| 11127 | if (!tg->cfs_rq[cpu]->on_list) |
| 11128 | continue; |
| 11129 | |
| 11130 | rq = cpu_rq(cpu); |
| 11131 | |
| 11132 | raw_spin_lock_irqsave(&rq->lock, flags); |
| 11133 | list_del_leaf_cfs_rq(tg->cfs_rq[cpu]); |
| 11134 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
| 11135 | } |
| 11136 | } |
| 11137 | |
| 11138 | void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, |
| 11139 | struct sched_entity *se, int cpu, |
| 11140 | struct sched_entity *parent) |
| 11141 | { |
| 11142 | struct rq *rq = cpu_rq(cpu); |
| 11143 | |
| 11144 | cfs_rq->tg = tg; |
| 11145 | cfs_rq->rq = rq; |
| 11146 | init_cfs_rq_runtime(cfs_rq); |
| 11147 | |
| 11148 | tg->cfs_rq[cpu] = cfs_rq; |
| 11149 | tg->se[cpu] = se; |
| 11150 | |
| 11151 | /* se could be NULL for root_task_group */ |
| 11152 | if (!se) |
| 11153 | return; |
| 11154 | |
| 11155 | if (!parent) { |
| 11156 | se->cfs_rq = &rq->cfs; |
| 11157 | se->depth = 0; |
| 11158 | } else { |
| 11159 | se->cfs_rq = parent->my_q; |
| 11160 | se->depth = parent->depth + 1; |
| 11161 | } |
| 11162 | |
| 11163 | se->my_q = cfs_rq; |
| 11164 | /* guarantee group entities always have weight */ |
| 11165 | update_load_set(&se->load, NICE_0_LOAD); |
| 11166 | se->parent = parent; |
| 11167 | } |
| 11168 | |
| 11169 | static DEFINE_MUTEX(shares_mutex); |
| 11170 | |
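| | /* |
| |  * Usage sketch (cgroup v1 CPU controller; the group path is illustrative): |
| |  * writing to the cpu.shares file, e.g. |
| |  * |
| |  *   echo 2048 > /sys/fs/cgroup/cpu/mygroup/cpu.shares |
| |  * |
| |  * ends up here with the value scaled by scale_load(), doubling the |
| |  * group's weight relative to the default of 1024. |
| |  */ |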
| 11171 | int sched_group_set_shares(struct task_group *tg, unsigned long shares) |
| 11172 | { |
| 11173 | int i; |
| 11174 | |
| 11175 | /* |
| 11176 | * We can't change the weight of the root cgroup. |
| 11177 | */ |
| 11178 | if (!tg->se[0]) |
| 11179 | return -EINVAL; |
| 11180 | |
| 11181 | shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES)); |
| 11182 | |
| 11183 | mutex_lock(&shares_mutex); |
| 11184 | if (tg->shares == shares) |
| 11185 | goto done; |
| 11186 | |
| 11187 | tg->shares = shares; |
| 11188 | for_each_possible_cpu(i) { |
| 11189 | struct rq *rq = cpu_rq(i); |
| 11190 | struct sched_entity *se = tg->se[i]; |
| 11191 | struct rq_flags rf; |
| 11192 | |
| 11193 | /* Propagate contribution to hierarchy */ |
| 11194 | rq_lock_irqsave(rq, &rf); |
| 11195 | update_rq_clock(rq); |
| 11196 | for_each_sched_entity(se) { |
| 11197 | update_load_avg(cfs_rq_of(se), se, UPDATE_TG); |
| 11198 | update_cfs_group(se); |
| 11199 | } |
| 11200 | rq_unlock_irqrestore(rq, &rf); |
| 11201 | } |
| 11202 | |
| 11203 | done: |
| 11204 | mutex_unlock(&shares_mutex); |
| 11205 | return 0; |
| 11206 | } |
| 11207 | #else /* CONFIG_FAIR_GROUP_SCHED */ |
| 11208 | |
| 11209 | void free_fair_sched_group(struct task_group *tg) { } |
| 11210 | |
| 11211 | int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) |
| 11212 | { |
| 11213 | return 1; |
| 11214 | } |
| 11215 | |
| 11216 | void online_fair_sched_group(struct task_group *tg) { } |
| 11217 | |
| 11218 | void unregister_fair_sched_group(struct task_group *tg) { } |
| 11219 | |
| 11220 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
| 11221 | |
| 11222 | |
| 11223 | static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task) |
| 11224 | { |
| 11225 | struct sched_entity *se = &task->se; |
| 11226 | unsigned int rr_interval = 0; |
| 11227 | |
| 11228 | /* |
| 11229 | * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise |
| 11230 | * idle runqueue: |
| 11231 | */ |
| 11232 | if (rq->cfs.load.weight) |
| 11233 | rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se)); |
| 11234 | |
| 11235 | return rr_interval; |
| 11236 | } |
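| | /* |
| |  * Reader's note: this backs the sched_rr_get_interval(2) syscall for |
| |  * SCHED_OTHER tasks, reporting the current sched_slice() as an |
| |  * approximate timeslice even though CFS has no fixed slices. |
| |  */ |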
| 11237 | |
| 11238 | /* |
| 11239 | * All the scheduling class methods: |
| 11240 | */ |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 11241 | const struct sched_class fair_sched_class |
| 11242 | __section("__fair_sched_class") = { |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 11243 | .enqueue_task = enqueue_task_fair, |
| 11244 | .dequeue_task = dequeue_task_fair, |
| 11245 | .yield_task = yield_task_fair, |
| 11246 | .yield_to_task = yield_to_task_fair, |
| 11247 | |
| 11248 | .check_preempt_curr = check_preempt_wakeup, |
| 11249 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 11250 | .pick_next_task = __pick_next_task_fair, |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 11251 | .put_prev_task = put_prev_task_fair, |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 11252 | .set_next_task = set_next_task_fair, |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 11253 | |
| 11254 | #ifdef CONFIG_SMP |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 11255 | .balance = balance_fair, |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 11256 | .select_task_rq = select_task_rq_fair, |
| 11257 | .migrate_task_rq = migrate_task_rq_fair, |
| 11258 | |
| 11259 | .rq_online = rq_online_fair, |
| 11260 | .rq_offline = rq_offline_fair, |
| 11261 | |
| 11262 | .task_dead = task_dead_fair, |
| 11263 | .set_cpus_allowed = set_cpus_allowed_common, |
| 11264 | #endif |
| 11265 | |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 11266 | .task_tick = task_tick_fair, |
| 11267 | .task_fork = task_fork_fair, |
| 11268 | |
| 11269 | .prio_changed = prio_changed_fair, |
| 11270 | .switched_from = switched_from_fair, |
| 11271 | .switched_to = switched_to_fair, |
| 11272 | |
| 11273 | .get_rr_interval = get_rr_interval_fair, |
| 11274 | |
| 11275 | .update_curr = update_curr_fair, |
| 11276 | |
| 11277 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 11278 | .task_change_group = task_change_group_fair, |
| 11279 | #endif |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 11280 | |
| 11281 | #ifdef CONFIG_UCLAMP_TASK |
| 11282 | .uclamp_enabled = 1, |
| 11283 | #endif |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 11284 | }; |
| 11285 | |
| 11286 | #ifdef CONFIG_SCHED_DEBUG |
| 11287 | void print_cfs_stats(struct seq_file *m, int cpu) |
| 11288 | { |
| 11289 | struct cfs_rq *cfs_rq, *pos; |
| 11290 | |
| 11291 | rcu_read_lock(); |
| 11292 | for_each_leaf_cfs_rq_safe(cpu_rq(cpu), cfs_rq, pos) |
| 11293 | print_cfs_rq(m, cpu, cfs_rq); |
| 11294 | rcu_read_unlock(); |
| 11295 | } |
| 11296 | |
| 11297 | #ifdef CONFIG_NUMA_BALANCING |
| 11298 | void show_numa_stats(struct task_struct *p, struct seq_file *m) |
| 11299 | { |
| 11300 | int node; |
| 11301 | unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0; |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 11302 | struct numa_group *ng; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 11303 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 11304 | rcu_read_lock(); |
| 11305 | ng = rcu_dereference(p->numa_group); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 11306 | for_each_online_node(node) { |
| 11307 | if (p->numa_faults) { |
| 11308 | tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)]; |
| 11309 | tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)]; |
| 11310 | } |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 11311 | if (ng) { |
| 11312 | 			gsf = ng->faults[task_faults_idx(NUMA_MEM, node, 0)]; |
| 11313 | gpf = ng->faults[task_faults_idx(NUMA_MEM, node, 1)]; |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 11314 | } |
| 11315 | print_numa_stats(m, node, tsf, tpf, gsf, gpf); |
| 11316 | } |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 11317 | rcu_read_unlock(); |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 11318 | } |
| 11319 | #endif /* CONFIG_NUMA_BALANCING */ |
| 11320 | #endif /* CONFIG_SCHED_DEBUG */ |
| 11321 | |
| 11322 | __init void init_sched_fair_class(void) |
| 11323 | { |
| 11324 | #ifdef CONFIG_SMP |
| 11325 | open_softirq(SCHED_SOFTIRQ, run_rebalance_domains); |
| 11326 | |
| 11327 | #ifdef CONFIG_NO_HZ_COMMON |
| 11328 | nohz.next_balance = jiffies; |
| 11329 | nohz.next_blocked = jiffies; |
| 11330 | zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); |
| 11331 | #endif |
| 11332 | #endif /* SMP */ |
| 11333 | |
| 11334 | } |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 11335 | |
| 11336 | /* |
| 11337 | * Helper functions to facilitate extracting info from tracepoints. |
| 11338 | */ |
| 11339 | |
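| | /* |
| |  * A minimal consumer sketch (hypothetical out-of-tree module; the probe |
| |  * name is made up, but pelt_cfs_tp is a real bare tracepoint declared in |
| |  * <trace/events/sched.h>): |
| |  * |
| |  *   static void probe_pelt_cfs(void *data, struct cfs_rq *cfs_rq) |
| |  *   { |
| |  *           const struct sched_avg *avg = sched_trace_cfs_rq_avg(cfs_rq); |
| |  * |
| |  *           if (avg) |
| |  *                   trace_printk("cpu=%d util=%lu\n", |
| |  *                                sched_trace_cfs_rq_cpu(cfs_rq), |
| |  *                                avg->util_avg); |
| |  *   } |
| |  * |
| |  *   register_trace_pelt_cfs_tp(probe_pelt_cfs, NULL); |
| |  */ |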
| 11340 | const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq) |
| 11341 | { |
| 11342 | #ifdef CONFIG_SMP |
| 11343 | return cfs_rq ? &cfs_rq->avg : NULL; |
| 11344 | #else |
| 11345 | return NULL; |
| 11346 | #endif |
| 11347 | } |
| 11348 | EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_avg); |
| 11349 | |
| 11350 | char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len) |
| 11351 | { |
| 11352 | if (!cfs_rq) { |
| 11353 | if (str) |
| 11354 | strlcpy(str, "(null)", len); |
| 11355 | else |
| 11356 | return NULL; |
| 11357 | } |
| 11358 | |
| 11359 | cfs_rq_tg_path(cfs_rq, str, len); |
| 11360 | return str; |
| 11361 | } |
| 11362 | EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_path); |
| 11363 | |
| 11364 | int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq) |
| 11365 | { |
| 11366 | return cfs_rq ? cpu_of(rq_of(cfs_rq)) : -1; |
| 11367 | } |
| 11368 | EXPORT_SYMBOL_GPL(sched_trace_cfs_rq_cpu); |
| 11369 | |
| 11370 | const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq) |
| 11371 | { |
| 11372 | #ifdef CONFIG_SMP |
| 11373 | return rq ? &rq->avg_rt : NULL; |
| 11374 | #else |
| 11375 | return NULL; |
| 11376 | #endif |
| 11377 | } |
| 11378 | EXPORT_SYMBOL_GPL(sched_trace_rq_avg_rt); |
| 11379 | |
| 11380 | const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq) |
| 11381 | { |
| 11382 | #ifdef CONFIG_SMP |
| 11383 | return rq ? &rq->avg_dl : NULL; |
| 11384 | #else |
| 11385 | return NULL; |
| 11386 | #endif |
| 11387 | } |
| 11388 | EXPORT_SYMBOL_GPL(sched_trace_rq_avg_dl); |
| 11389 | |
| 11390 | const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq) |
| 11391 | { |
| 11392 | #if defined(CONFIG_SMP) && defined(CONFIG_HAVE_SCHED_AVG_IRQ) |
| 11393 | return rq ? &rq->avg_irq : NULL; |
| 11394 | #else |
| 11395 | return NULL; |
| 11396 | #endif |
| 11397 | } |
| 11398 | EXPORT_SYMBOL_GPL(sched_trace_rq_avg_irq); |
| 11399 | |
| 11400 | int sched_trace_rq_cpu(struct rq *rq) |
| 11401 | { |
| 11402 | return rq ? cpu_of(rq) : -1; |
| 11403 | } |
| 11404 | EXPORT_SYMBOL_GPL(sched_trace_rq_cpu); |
| 11405 | |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 11406 | int sched_trace_rq_cpu_capacity(struct rq *rq) |
| 11407 | { |
| 11408 | return rq ? |
| 11409 | #ifdef CONFIG_SMP |
| 11410 | rq->cpu_capacity |
| 11411 | #else |
| 11412 | SCHED_CAPACITY_SCALE |
| 11413 | #endif |
| 11414 | : -1; |
| 11415 | } |
| 11416 | EXPORT_SYMBOL_GPL(sched_trace_rq_cpu_capacity); |
| 11417 | |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame] | 11418 | const struct cpumask *sched_trace_rd_span(struct root_domain *rd) |
| 11419 | { |
| 11420 | #ifdef CONFIG_SMP |
| 11421 | return rd ? rd->span : NULL; |
| 11422 | #else |
| 11423 | return NULL; |
| 11424 | #endif |
| 11425 | } |
| 11426 | EXPORT_SYMBOL_GPL(sched_trace_rd_span); |
Olivier Deprez | 157378f | 2022-04-04 15:47:50 +0200 | [diff] [blame^] | 11427 | |
| 11428 | int sched_trace_rq_nr_running(struct rq *rq) |
| 11429 | { |
| 11430 | return rq ? rq->nr_running : -1; |
| 11431 | } |
| 11432 | EXPORT_SYMBOL_GPL(sched_trace_rq_nr_running); |