// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */
#include "sched.h"

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		pr_cont(x);			\
 } while (0)
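/*
 * Illustrative only: with a seq_file, e.g. SEQ_printf(m, "cpu#%d\n", cpu)
 * lands in /proc/sched_debug; the sysrq path passes m == NULL and the same
 * call is routed to the kernel log via pr_cont() instead.
 */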

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
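/*
 * A worked example (illustrative): for x = 1234567890 ns, nsec_high()
 * yields 1234 and nsec_low() the 567890 remainder, so a "%Ld.%06ld"
 * format prints "1234.567890" -- milliseconds with six fractional digits.
 */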

#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT
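/*
 * features.h is an X-macro list; each entry there has the form
 * SCHED_FEAT(<NAME>, <true|false>) (for instance
 * SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)), so the include above expands
 * into an array of feature-name strings indexed by the __SCHED_FEAT_*
 * enum values.
 */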

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* CONFIG_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
	if (i < 0)
		return i;

	if (neg) {
		sysctl_sched_features &= ~(1UL << i);
		sched_feat_disable(i);
	} else {
		sysctl_sched_features |= (1UL << i);
		sched_feat_enable(i);
	}

	return 0;
}
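/*
 * Typical usage from userspace (feature name illustrative):
 *
 *   # cat /sys/kernel/debug/sched_features
 *   GENTLE_FAIR_SLEEPERS START_DEBIT ...
 *   # echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched_features
 *
 * The "NO_" prefix written above clears the corresponding feature bit.
 */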

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int ret;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	cpus_read_lock();
	inode_lock(inode);
	ret = sched_feat_set(cmp);
	inode_unlock(inode);
	cpus_read_unlock();
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

__read_mostly bool sched_debug_enabled;

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	debugfs_create_bool("sched_debug", 0644, NULL,
			&sched_debug_enabled);

	return 0;
}
late_initcall(sched_init_debug);

#ifdef CONFIG_SMP

#ifdef CONFIG_SYSCTL

static struct ctl_table sd_ctl_dir[] = {
	{
		.procname	= "sched_domain",
		.mode		= 0555,
	},
	{}
};

static struct ctl_table sd_ctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= sd_ctl_dir,
	},
	{}
};

static struct ctl_table *sd_alloc_ctl_entry(int n)
{
	struct ctl_table *entry =
		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

	return entry;
}

static void sd_free_ctl_entry(struct ctl_table **tablep)
{
	struct ctl_table *entry;

	/*
	 * In the intermediate directories, both the child directory and
	 * procname are dynamically allocated and could fail but the mode
	 * will always be set. In the lowest directory the names are
	 * static strings and all have proc handlers.
	 */
	for (entry = *tablep; entry->mode; entry++) {
		if (entry->child)
			sd_free_ctl_entry(&entry->child);
		if (entry->proc_handler == NULL)
			kfree(entry->procname);
	}

	kfree(*tablep);
	*tablep = NULL;
}

static void
set_table_entry(struct ctl_table *entry,
		const char *procname, void *data, int maxlen,
		umode_t mode, proc_handler *proc_handler)
{
	entry->procname = procname;
	entry->data = data;
	entry->maxlen = maxlen;
	entry->mode = mode;
	entry->proc_handler = proc_handler;
}

static int sd_ctl_doflags(struct ctl_table *table, int write,
			  void *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned long flags = *(unsigned long *)table->data;
	size_t data_size = 0;
	size_t len = 0;
	char *tmp, *buf;
	int idx;

	if (write)
		return 0;

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		char *name = sd_flag_debug[idx].name;

		/* Name plus whitespace */
		data_size += strlen(name) + 1;
	}

	if (*ppos > data_size) {
		*lenp = 0;
		return 0;
	}

	buf = kcalloc(data_size + 1, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		char *name = sd_flag_debug[idx].name;

		len += snprintf(buf + len, strlen(name) + 2, "%s ", name);
	}

	tmp = buf + *ppos;
	len -= *ppos;

	if (len > *lenp)
		len = *lenp;
	if (len)
		memcpy(buffer, tmp, len);
	if (len < *lenp) {
		((char *)buffer)[len] = '\n';
		len++;
	}

	*lenp = len;
	*ppos += len;

	kfree(buf);

	return 0;
}
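/*
 * Reading the resulting "flags" file decodes the bitmask into flag names,
 * so a read returns something like (illustrative):
 *
 *   SD_BALANCE_NEWIDLE SD_BALANCE_EXEC SD_BALANCE_FORK SD_WAKE_AFFINE
 *
 * rather than a raw integer.
 */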

static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
	struct ctl_table *table = sd_alloc_ctl_entry(9);

	if (table == NULL)
		return NULL;

	set_table_entry(&table[0], "min_interval", &sd->min_interval, sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[1], "max_interval", &sd->max_interval, sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[2], "busy_factor", &sd->busy_factor, sizeof(int), 0644, proc_dointvec_minmax);
	set_table_entry(&table[3], "imbalance_pct", &sd->imbalance_pct, sizeof(int), 0644, proc_dointvec_minmax);
	set_table_entry(&table[4], "cache_nice_tries", &sd->cache_nice_tries, sizeof(int), 0644, proc_dointvec_minmax);
	set_table_entry(&table[5], "flags", &sd->flags, sizeof(int), 0444, sd_ctl_doflags);
	set_table_entry(&table[6], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[7], "name", sd->name, CORENAME_MAX_SIZE, 0444, proc_dostring);
	/* &table[8] is terminator */

	return table;
}

static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
	struct ctl_table *entry, *table;
	struct sched_domain *sd;
	int domain_num = 0, i;
	char buf[32];

	for_each_domain(cpu, sd)
		domain_num++;
	entry = table = sd_alloc_ctl_entry(domain_num + 1);
	if (table == NULL)
		return NULL;

	i = 0;
	for_each_domain(cpu, sd) {
		snprintf(buf, 32, "domain%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_domain_table(sd);
		entry++;
		i++;
	}
	return table;
}
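/*
 * Together these build the per-CPU sysctl hierarchy, e.g. (illustrative):
 *
 *   /proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 *   /proc/sys/kernel/sched_domain/cpu0/domain1/imbalance_pct
 *
 * with one domainN directory per sched_domain level above the CPU.
 */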

static cpumask_var_t sd_sysctl_cpus;
static struct ctl_table_header *sd_sysctl_header;

void register_sched_domain_sysctl(void)
{
	static struct ctl_table *cpu_entries;
	static struct ctl_table **cpu_idx;
	static bool init_done = false;
	char buf[32];
	int i;

	if (!cpu_entries) {
		cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
		if (!cpu_entries)
			return;

		WARN_ON(sd_ctl_dir[0].child);
		sd_ctl_dir[0].child = cpu_entries;
	}

	if (!cpu_idx) {
		struct ctl_table *e = cpu_entries;

		cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
		if (!cpu_idx)
			return;

		/* deal with sparse possible map */
		for_each_possible_cpu(i) {
			cpu_idx[i] = e;
			e++;
		}
	}

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;
	}

	if (!init_done) {
		init_done = true;
		/* init to possible to not have holes in @cpu_entries */
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	for_each_cpu(i, sd_sysctl_cpus) {
		struct ctl_table *e = cpu_idx[i];

		if (e->child)
			sd_free_ctl_entry(&e->child);

		if (!e->procname) {
			snprintf(buf, 32, "cpu%d", i);
			e->procname = kstrdup(buf, GFP_KERNEL);
		}
		e->mode = 0555;
		e->child = sd_alloc_ctl_cpu_table(i);

		__cpumask_clear_cpu(i, sd_sysctl_cpus);
	}

	WARN_ON(sd_sysctl_header);
	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}

void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}
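/*
 * A sketch of how callers are assumed to use this API when scheduling
 * domains are rebuilt (e.g. across CPU hotplug):
 *
 *   unregister_sched_domain_sysctl();
 *   dirty_sched_domain_sysctl(cpu);      once per affected CPU
 *   register_sched_domain_sysctl();      re-allocates only the dirty CPUs
 */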

/* may be called multiple times per register */
void unregister_sched_domain_sysctl(void)
{
	unregister_sysctl_table(sd_sysctl_header);
	sd_sysctl_header = NULL;
}
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)F)
#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)schedstat_val(F))
#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))
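/*
 * Illustrative expansion: P(se->load.weight) becomes
 * SEQ_printf(m, "  .%-30s: %lld\n", "se->load.weight",
 *            (long long)se->load.weight);
 * i.e. the macro argument serves both as its own label and as the value.
 */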

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);

	if (schedstat_enabled()) {
		PN_SCHEDSTAT(se->statistics.wait_start);
		PN_SCHEDSTAT(se->statistics.sleep_start);
		PN_SCHEDSTAT(se->statistics.block_start);
		PN_SCHEDSTAT(se->statistics.sleep_max);
		PN_SCHEDSTAT(se->statistics.block_max);
		PN_SCHEDSTAT(se->statistics.exec_max);
		PN_SCHEDSTAT(se->statistics.slice_max);
		PN_SCHEDSTAT(se->statistics.wait_max);
		PN_SCHEDSTAT(se->statistics.wait_sum);
		P_SCHEDSTAT(se->statistics.wait_count);
	}

	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static DEFINE_SPINLOCK(sched_debug_lock);
static char group_path[PATH_MAX];

static void task_group_path(struct task_group *tg, char *path, int plen)
{
	if (autogroup_path(tg, path, plen))
		return;

	cgroup_path(tg->css.cgroup, path, plen);
}

/*
 * Only 1 SEQ_printf_task_group_path() caller can use the full length
 * group_path[] for cgroup path. Other simultaneous callers will have
 * to use a shorter stack buffer. A "..." suffix is appended at the end
 * of the stack buffer so that it will show up in case the output length
 * matches the given buffer size to indicate possible path name truncation.
 */
#define SEQ_printf_task_group_path(m, tg, fmt...)			\
{									\
	if (spin_trylock(&sched_debug_lock)) {				\
		task_group_path(tg, group_path, sizeof(group_path));	\
		SEQ_printf(m, fmt, group_path);				\
		spin_unlock(&sched_debug_lock);				\
	} else {							\
		char buf[128];						\
		char *bufend = buf + sizeof(buf) - 3;			\
		task_group_path(tg, buf, bufend - buf);			\
		strcpy(bufend - 1, "...");				\
		SEQ_printf(m, fmt, buf);				\
	}								\
}
#endif

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, " %15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf_task_group_path(m, task_group(p), " %s")
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "runnable tasks:\n");
	SEQ_printf(m, " S            task   PID         tree-key  switches  prio"
		   "     wait-time             sum-exec        sum-sleep\n");
	SEQ_printf(m, "-------------------------------------------------------"
		   "------------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (rb_first_cached(&cfs_rq->tasks_timeline))
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
			cfs_rq->avg.runnable_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
			cfs_rq->avg.util_est.enqueued);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
			cfs_rq->removed.load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
			cfs_rq->removed.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
			cfs_rq->removed.runnable_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}
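/*
 * Sample of what the above emits in /proc/sched_debug (abridged, values
 * illustrative):
 *
 *   cfs_rq[0]:/autogroup-12
 *     .exec_clock                    : 0.000000
 *     .MIN_vruntime                  : 0.000001
 *     .min_vruntime                  : 4380.587887
 *     ...
 */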

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);
#ifdef CONFIG_SMP
	PU(rt_nr_migratory);
#endif
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	PU(dl_nr_migratory);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	P(nr_switches);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu) {
		/*
		 * Need to reset softlockup watchdogs on all CPUs, because
		 * another CPU might be blocked waiting for us to process
		 * an IPI or stop_machine.
		 */
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
		print_cpu(NULL, cpu);
	}
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}
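/*
 * Worked example (illustrative): with CPUs 0 and 2 online, *offset == 0
 * yields the header token (void *)1; *offset == 1 yields CPU 0 encoded as
 * (void *)2; *offset == 2 skips the offline CPU 1 and yields CPU 2 as
 * (void *)4. sched_debug_show() undoes the encoding via (v - 2).
 */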

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};

static int __init init_sched_debug_procfs(void)
{
	if (!proc_create_seq("sched_debug", 0444, NULL, &sched_debug_sops))
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);

#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define   P(F) __PS(#F, p->F)
#define   PM(F, M) __PS(#F, p->F & (M))
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define   PN(F) __PSN(#F, p->F)
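/*
 * Illustrative expansion: P(se.nr_migrations) becomes
 * SEQ_printf(m, "%-45s:%21Ld\n", "se.nr_migrations",
 *            (long long)(p->se.nr_migrations));
 * so each field prints as a left-justified label and a right-justified value.
 */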


#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif


static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	if (p->mm)
		P(mm->numa_scan_seq);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
						  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");

#define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
		PN_SCHEDSTAT(se.statistics.wait_start);
		PN_SCHEDSTAT(se.statistics.sleep_start);
		PN_SCHEDSTAT(se.statistics.block_start);
		PN_SCHEDSTAT(se.statistics.sleep_max);
		PN_SCHEDSTAT(se.statistics.block_max);
		PN_SCHEDSTAT(se.statistics.exec_max);
		PN_SCHEDSTAT(se.statistics.slice_max);
		PN_SCHEDSTAT(se.statistics.wait_max);
		PN_SCHEDSTAT(se.statistics.wait_sum);
		P_SCHEDSTAT(se.statistics.wait_count);
		PN_SCHEDSTAT(se.statistics.iowait_sum);
		P_SCHEDSTAT(se.statistics.iowait_count);
		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
		P_SCHEDSTAT(se.statistics.nr_wakeups);
		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}

	__P(nr_switches);
	__PS("nr_voluntary_switches", p->nvcsw);
	__PS("nr_involuntary_switches", p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.runnable_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.runnable_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
	P(se.avg.util_est.ewma);
	PM(se.avg.util_est.enqueued, ~UTIL_AVG_UNCHANGED);
#endif
#ifdef CONFIG_UCLAMP_TASK
	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
#endif
	P(policy);
	P(prio);
	if (task_has_dl_policy(p)) {
		P(dl.runtime);
		P(dl.deadline);
	}
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		__PS("clock-delta", t1-t0);
	}

	sched_show_numa(p, m);
}
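/*
 * proc_sched_show_task() backs the per-task /proc/<pid>/sched file; a
 * write to that file is expected to land in proc_sched_set_task() below,
 * which simply clears the accumulated schedstats.
 */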

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}