// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */
#include "sched.h"

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);	\
	else					\
		pr_cont(x);			\
 } while (0)
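
/*
 * Passing a NULL seq_file routes the output to the kernel log via
 * pr_cont(); sysrq_sched_debug_show() below relies on this to dump
 * the same report to the console.
 */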

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
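
/*
 * Worked example (illustrative): with nsec == 1234567890,
 * nsec_high() returns 1234 and nsec_low() returns 567890, so
 * SEQ_printf(m, "%Ld.%06ld", SPLIT_NS(1234567890ULL)) prints
 * "1234.567890" -- milliseconds with six fractional digits.
 * Negative values keep their sign in the high part only.
 */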

#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT
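
/*
 * X-macro sketch: each SCHED_FEAT(name, enabled) line in features.h,
 * e.g. SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true), expands here to the
 * string "GENTLE_FAIR_SLEEPERS", keeping this array in sync with the
 * __SCHED_FEAT_* enum generated from the same file.
 */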

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}
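
/*
 * Illustrative output: one line, enabled features bare and disabled
 * ones prefixed with "NO_", e.g.:
 *
 *   GENTLE_FAIR_SLEEPERS START_DEBIT NO_HRTICK ...
 *
 * (The exact names depend on the features.h being compiled.)
 */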

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { }
static void sched_feat_enable(int i) { }
#endif /* CONFIG_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
	if (i < 0)
		return i;

	if (neg) {
		sysctl_sched_features &= ~(1UL << i);
		sched_feat_disable(i);
	} else {
		sysctl_sched_features |= (1UL << i);
		sched_feat_enable(i);
	}

	return 0;
}
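
/*
 * Example (illustrative): sched_feat_set("NO_TTWU_QUEUE") strips the
 * "NO_" prefix, looks up "TTWU_QUEUE" in sched_feat_names[], clears
 * its bit in sysctl_sched_features and flips the matching static key;
 * an unknown name makes match_string() return -EINVAL.
 */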

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int ret;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	cpus_read_lock();
	inode_lock(inode);
	ret = sched_feat_set(cmp);
	inode_unlock(inode);
	cpus_read_unlock();
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

__read_mostly bool sched_debug_enabled;

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	debugfs_create_bool("sched_debug", 0644, NULL,
			&sched_debug_enabled);

	return 0;
}
late_initcall(sched_init_debug);
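
/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   cat /sys/kernel/debug/sched_features           # show feature state
 *   echo HRTICK    > /sys/kernel/debug/sched_features   # enable
 *   echo NO_HRTICK > /sys/kernel/debug/sched_features   # disable
 */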

#ifdef CONFIG_SMP

#ifdef CONFIG_SYSCTL

static struct ctl_table sd_ctl_dir[] = {
	{
		.procname	= "sched_domain",
		.mode		= 0555,
	},
	{}
};

static struct ctl_table sd_ctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= sd_ctl_dir,
	},
	{}
};

static struct ctl_table *sd_alloc_ctl_entry(int n)
{
	struct ctl_table *entry =
		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

	return entry;
}

static void sd_free_ctl_entry(struct ctl_table **tablep)
{
	struct ctl_table *entry;

	/*
	 * In the intermediate directories, both the child directory and
	 * procname are dynamically allocated and could fail but the mode
	 * will always be set. In the lowest directory the names are
	 * static strings and all have proc handlers.
	 */
	for (entry = *tablep; entry->mode; entry++) {
		if (entry->child)
			sd_free_ctl_entry(&entry->child);
		if (entry->proc_handler == NULL)
			kfree(entry->procname);
	}

	kfree(*tablep);
	*tablep = NULL;
}

static void
set_table_entry(struct ctl_table *entry,
		const char *procname, void *data, int maxlen,
		umode_t mode, proc_handler *proc_handler)
{
	entry->procname = procname;
	entry->data = data;
	entry->maxlen = maxlen;
	entry->mode = mode;
	entry->proc_handler = proc_handler;
}

static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
	struct ctl_table *table = sd_alloc_ctl_entry(9);

	if (table == NULL)
		return NULL;

	set_table_entry(&table[0], "min_interval", &sd->min_interval, sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[1], "max_interval", &sd->max_interval, sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[2], "busy_factor", &sd->busy_factor, sizeof(int), 0644, proc_dointvec_minmax);
	set_table_entry(&table[3], "imbalance_pct", &sd->imbalance_pct, sizeof(int), 0644, proc_dointvec_minmax);
	set_table_entry(&table[4], "cache_nice_tries", &sd->cache_nice_tries, sizeof(int), 0644, proc_dointvec_minmax);
	set_table_entry(&table[5], "flags", &sd->flags, sizeof(int), 0444, proc_dointvec_minmax);
	set_table_entry(&table[6], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[7], "name", sd->name, CORENAME_MAX_SIZE, 0444, proc_dostring);
	/* &table[8] is terminator */

	return table;
}
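
/*
 * Note: eight live entries plus one zeroed slot; the all-zero
 * terminator (mode == 0) is what stops the scan in sd_free_ctl_entry().
 * "flags" and "name" are deliberately read-only (0444), the remaining
 * tunables are 0644.
 */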

static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
	struct ctl_table *entry, *table;
	struct sched_domain *sd;
	int domain_num = 0, i;
	char buf[32];

	for_each_domain(cpu, sd)
		domain_num++;
	entry = table = sd_alloc_ctl_entry(domain_num + 1);
	if (table == NULL)
		return NULL;

	i = 0;
	for_each_domain(cpu, sd) {
		snprintf(buf, 32, "domain%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_domain_table(sd);
		entry++;
		i++;
	}
	return table;
}
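
/*
 * Resulting hierarchy (illustrative, for a CPU with two domains):
 *
 *   /proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 *   /proc/sys/kernel/sched_domain/cpu0/domain0/imbalance_pct
 *   /proc/sys/kernel/sched_domain/cpu0/domain1/...
 */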

static cpumask_var_t sd_sysctl_cpus;
static struct ctl_table_header *sd_sysctl_header;

void register_sched_domain_sysctl(void)
{
	static struct ctl_table *cpu_entries;
	static struct ctl_table **cpu_idx;
	static bool init_done = false;
	char buf[32];
	int i;

	if (!cpu_entries) {
		cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
		if (!cpu_entries)
			return;

		WARN_ON(sd_ctl_dir[0].child);
		sd_ctl_dir[0].child = cpu_entries;
	}

	if (!cpu_idx) {
		struct ctl_table *e = cpu_entries;

		cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
		if (!cpu_idx)
			return;

		/* deal with sparse possible map */
		for_each_possible_cpu(i) {
			cpu_idx[i] = e;
			e++;
		}
	}

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;
	}

	if (!init_done) {
		init_done = true;
		/* init to possible to not have holes in @cpu_entries */
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	for_each_cpu(i, sd_sysctl_cpus) {
		struct ctl_table *e = cpu_idx[i];

		if (e->child)
			sd_free_ctl_entry(&e->child);

		if (!e->procname) {
			snprintf(buf, 32, "cpu%d", i);
			e->procname = kstrdup(buf, GFP_KERNEL);
		}
		e->mode = 0555;
		e->child = sd_alloc_ctl_cpu_table(i);

		__cpumask_clear_cpu(i, sd_sysctl_cpus);
	}

	WARN_ON(sd_sysctl_header);
	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}
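
/*
 * Lifecycle sketch: when the domain topology changes (e.g. CPU
 * hotplug or cpuset rebuild), callers mark affected CPUs with
 * dirty_sched_domain_sysctl() and re-register; the loop above then
 * rebuilds only the entries still set in sd_sysctl_cpus.
 */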

void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

/* may be called multiple times per register */
void unregister_sched_domain_sysctl(void)
{
	unregister_sysctl_table(sd_sysctl_header);
	sd_sysctl_header = NULL;
}
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)schedstat_val(F))
#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);

	if (schedstat_enabled()) {
		PN_SCHEDSTAT(se->statistics.wait_start);
		PN_SCHEDSTAT(se->statistics.sleep_start);
		PN_SCHEDSTAT(se->statistics.block_start);
		PN_SCHEDSTAT(se->statistics.sleep_max);
		PN_SCHEDSTAT(se->statistics.block_max);
		PN_SCHEDSTAT(se->statistics.exec_max);
		PN_SCHEDSTAT(se->statistics.slice_max);
		PN_SCHEDSTAT(se->statistics.wait_max);
		PN_SCHEDSTAT(se->statistics.wait_sum);
		P_SCHEDSTAT(se->statistics.wait_count);
	}

	P(se->load.weight);
	P(se->runnable_weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_load_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static DEFINE_SPINLOCK(sched_debug_lock);
static char group_path[PATH_MAX];

static void task_group_path(struct task_group *tg, char *path, int plen)
{
	if (autogroup_path(tg, path, plen))
		return;

	cgroup_path(tg->css.cgroup, path, plen);
}

/*
 * Only one SEQ_printf_task_group_path() caller can use the full length
 * group_path[] for the cgroup path. Other simultaneous callers will have
 * to use a shorter stack buffer. A "..." suffix is appended at the end
 * of the stack buffer so that it will show up in case the output length
 * matches the given buffer size to indicate possible path name truncation.
 */
#define SEQ_printf_task_group_path(m, tg, fmt...)			\
{									\
	if (spin_trylock(&sched_debug_lock)) {				\
		task_group_path(tg, group_path, sizeof(group_path));	\
		SEQ_printf(m, fmt, group_path);				\
		spin_unlock(&sched_debug_lock);				\
	} else {							\
		char buf[128];						\
		char *bufend = buf + sizeof(buf) - 3;			\
		task_group_path(tg, buf, bufend - buf);			\
		strcpy(bufend - 1, "...");				\
		SEQ_printf(m, fmt, buf);				\
	}								\
}
#endif
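
/*
 * Truncation arithmetic (worked example): buf is 128 bytes and bufend
 * is buf + 125, so task_group_path() writes at most a 125-byte string.
 * strcpy(bufend - 1, "...") then overwrites offset 124 onward with
 * "..." and a NUL at offset 127, flagging a possibly truncated path.
 */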

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf_task_group_path(m, task_group(p), " %s")
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "runnable tasks:\n");
	SEQ_printf(m, " S           task   PID         tree-key  switches  prio"
		   "     wait-time             sum-exec        sum-sleep\n");
	SEQ_printf(m, "-------------------------------------------------------"
		   "----------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (rb_first_cached(&cfs_rq->tasks_timeline))
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
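	/*
	 * MIN_vruntime and max_vruntime keep their -1 sentinel when the
	 * rbtree is empty; spread0 below compares this runqueue's
	 * min_vruntime against CPU 0's as a rough cross-CPU drift check.
	 */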
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %ld\n", "runnable_weight", cfs_rq->runnable_weight);
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_load_avg",
			cfs_rq->avg.runnable_load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
			cfs_rq->avg.util_est.enqueued);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
			cfs_rq->removed.load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
			cfs_rq->removed.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_sum",
			cfs_rq->removed.runnable_sum);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);
#ifdef CONFIG_SMP
	PU(rt_nr_migratory);
#endif
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	PU(dl_nr_migratory);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	P(nr_switches);
	P(nr_load_updates);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu)
		print_cpu(NULL, cpu);
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}
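
/*
 * Worked example (assuming CPUs 0-3 are online): *offset == 0 yields
 * the header token (void *)1; *offset == 1 maps to CPU 0 and returns
 * (void *)2, which sched_debug_show() unbiases with (v - 2). Once the
 * mask runs out, cpumask_next() returns >= nr_cpu_ids and the walk
 * ends with NULL.
 */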

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};

static int __init init_sched_debug_procfs(void)
{
	if (!proc_create_seq("sched_debug", 0444, NULL, &sched_debug_sops))
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);
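
/*
 * Usage sketch: `cat /proc/sched_debug` drives the seq_operations
 * above, printing the global header followed by one print_cpu() block
 * per online CPU; per-task detail is exposed separately through
 * /proc/<pid>/sched via proc_sched_show_task() below.
 */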

#define __P(F) SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))


#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif


static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
	mpol_put(pol);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
						  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");
#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define P_SCHEDSTAT(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)schedstat_val(p->F))
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
#define PN_SCHEDSTAT(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(p->F)))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
		PN_SCHEDSTAT(se.statistics.wait_start);
		PN_SCHEDSTAT(se.statistics.sleep_start);
		PN_SCHEDSTAT(se.statistics.block_start);
		PN_SCHEDSTAT(se.statistics.sleep_max);
		PN_SCHEDSTAT(se.statistics.block_max);
		PN_SCHEDSTAT(se.statistics.exec_max);
		PN_SCHEDSTAT(se.statistics.slice_max);
		PN_SCHEDSTAT(se.statistics.wait_max);
		PN_SCHEDSTAT(se.statistics.wait_sum);
		P_SCHEDSTAT(se.statistics.wait_count);
		PN_SCHEDSTAT(se.statistics.iowait_sum);
		P_SCHEDSTAT(se.statistics.iowait_count);
		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
		P_SCHEDSTAT(se.statistics.nr_wakeups);
		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}
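		/*
		 * avg_atom: mean runtime per scheduling "atom", i.e. total
		 * runtime divided by context switches; avg_per_cpu: mean
		 * runtime between migrations. -1 is the sentinel when the
		 * denominator is zero.
		 */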

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}

	__P(nr_switches);
	SEQ_printf(m, "%-45s:%21Ld\n",
		"nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-45s:%21Ld\n",
		"nr_involuntary_switches", (long long)p->nivcsw);

	P(se.load.weight);
	P(se.runnable_weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.runnable_load_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.runnable_load_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
	P(se.avg.util_est.ewma);
	P(se.avg.util_est.enqueued);
#endif
	P(policy);
	P(prio);
	if (task_has_dl_policy(p)) {
		P(dl.runtime);
		P(dl.deadline);
	}
#undef PN_SCHEDSTAT
#undef PN
#undef __PN
#undef P_SCHEDSTAT
#undef P
#undef __P

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		SEQ_printf(m, "%-45s:%21Ld\n",
			"clock-delta", (long long)(t1-t0));
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}