// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

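/*
 * Per-CPU state for the output path: the last pid printed (to detect
 * context switches), the current call depth, the depth at which irq
 * code was entered, and the entry functions saved so that a return
 * event can be matched back to its call.
 */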
struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

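/* Set via the tracefs 'max_graph_depth' file; 0 means no depth limit. */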
unsigned int fgraph_max_depth;

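/*
 * The output options below are toggled at runtime through the tracefs
 * 'trace_options' file, e.g. "echo funcgraph-proc > trace_options".
 */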
static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	/* Include sleep time (scheduled out) between entry and return */
	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
	/* Include time within nested functions */
	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
	.opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs; the
 * following values are used by print_graph_irq and others
 * to fill in space in the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags);

/* Add a function return address to the trace stack on the thread info. */
static int
ftrace_push_return_trace(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * The curr_ret_stack is an index into the current task's ftrace
	 * return stack. Its value should be in [0, FTRACE_RETFUNC_DEPTH)
	 * when the function graph tracer is used. To support filtering
	 * out specific functions, the index is made negative by
	 * subtracting a huge value (FTRACE_NOTRACE_DEPTH), so that when
	 * ftrace sees a negative index it ignores the record. The index
	 * is recovered when returning from the filtered function by
	 * adding FTRACE_NOTRACE_DEPTH back, and recording then continues
	 * normally.
	 *
	 * The curr_ret_stack is initialized to -1 and gets increased
	 * in this function. So it can be less than -1 only if it was
	 * filtered out via ftrace_graph_notrace_addr(), which can be
	 * set from the set_graph_notrace file in tracefs by the user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
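	/*
	 * barrier() is a compiler barrier: it keeps the index update
	 * above from being reordered with the ret_stack[] writes below.
	 */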
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	current->ret_stack[index].fp = frame_pointer;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	current->ret_stack[index].retp = retp;
#endif
	return 0;
}

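/*
 * Called from the architecture's function entry hook (for example,
 * prepare_ftrace_return() on x86) for each traced function. On
 * success (return value 0) the arch code redirects the function's
 * return address to return_to_handler so that
 * ftrace_return_to_handler() runs when the function returns.
 */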
int function_graph_enter(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp)
{
	struct ftrace_graph_ent trace;

	trace.func = func;
	trace.depth = ++current->curr_ret_depth;

	if (ftrace_push_return_trace(ret, func,
				     frame_pointer, retp))
		goto out;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace))
		goto out_ret;

	return 0;
 out_ret:
	current->curr_ret_stack--;
 out:
	current->curr_ret_depth--;
	return -EBUSY;
}

/* Retrieve a function return address from the trace stack on the thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function. Recover the index to get the original
	 * return address. See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 compiled with -Os (optimize for size) makes
	 * recent gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = current->curr_ret_depth--;
	/*
	 * We still want to trace interrupts coming in if
	 * max_depth is set to 1. Make sure the decrement is
	 * seen before ftrace_graph_return.
	 */
	barrier();
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
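 *
 * Called by the arch's return_to_handler trampoline when a traced
 * function returns, to find out where to really return to.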
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	ftrace_graph_return(&trace);
	/*
	 * The ftrace_graph_return() may still access the current
	 * ret_stack structure, so we need to make sure the update
	 * of curr_ret_stack happens after that.
	 */
	barrier();
	current->curr_ret_stack--;
	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

/**
 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 *			   to its original value
 *
 * This function can be called by stack unwinding code to convert a found stack
 * return address ('ret') to its original value, in case the function graph
 * tracer has modified it to be 'return_to_handler'. If the address hasn't
 * been modified, the unchanged value of 'ret' is returned.
 *
 * 'idx' is a state variable which should be initialized by the caller to zero
 * before the first call.
 *
 * 'retp' is a pointer to the return address on the stack. It's ignored if
 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
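 *
 * A typical unwinder keeps a single 'idx' for the whole unwind and
 * calls this for every return address it finds on the stack.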
 */
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int index = task->curr_ret_stack;
	int i;

	if (ret != (unsigned long)return_to_handler)
		return ret;

	if (index < -1)
		index += FTRACE_NOTRACE_DEPTH;

	if (index < 0)
		return ret;

	for (i = 0; i <= index; i++)
		if (task->ret_stack[i].retp == retp)
			return task->ret_stack[i].ret;

	return ret;
}
#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int task_idx;

	if (ret != (unsigned long)return_to_handler)
		return ret;

	task_idx = task->curr_ret_stack;

	if (!task->ret_stack || task_idx < *idx)
		return ret;

	task_idx -= *idx;
	(*idx)++;

	return task->ret_stack[task_idx].ret;
}
#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */

int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(tr))
		return 0;

	if (ftrace_graph_ignore_func(trace))
		return 0;

	if (ftrace_graph_ignore_irqs())
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of the ret stack negative to indicate that it
	 * should ignore further functions. But it needs its own ret
	 * stack entry to recover the original index in order to continue
	 * tracing after returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	/*
	 * Stop here if tracing_thresh is set. We only write function
	 * return events to the ring buffer.
	 */
	if (tracing_thresh)
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

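/*
 * Record a single function as a zero-duration entry/return pair.
 * This is used by the latency tracers (via trace_graph_function())
 * to show plain function hits in graph form.
 */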
static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	ftrace_graph_addr_finish(trace);

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */

	smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	ftrace_graph_addr_finish(trace);

	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

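/*
 * Print "comm-pid" centered in a TRACE_GRAPH_PROCINFO_LENGTH (14)
 * wide column, with comm truncated to 7 characters,
 * e.g. "  bash-2015   ".
 */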
static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}


static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

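/*
 * Look ahead in the ring buffer: if the event following @curr is the
 * matching return of the same function on the same pid, the pair can
 * be printed as a single "func();" leaf line. Returns the matching
 * return entry, or NULL if @curr is not a leaf.
 */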
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}

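/* Print the timestamp as seconds.microseconds, e.g. "  105.123456 |  " */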
static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			(unsigned long)t, usecs_rem);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 numbers) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an overhead of the execution time to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int cpu = iter->cpu;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps();\n", (void *)call->func);

	print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
			cpu, iter->ent->pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps() {\n", (void *)call->func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);

	return;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - the funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - the funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the entry's return.
	 * Don't trace it and clear the entry depth, since we are now
	 * out of the irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(trace->depth < 0)) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
		trace_seq_puts(s, "}\n");
	else
		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %lu)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPUTS:
		ret = trace_print_bputs_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}


enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * so it can safely be saved on the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                " /* 16 spaces */
		"    " /* 4 spaces */
		"                 "; /* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "  TASK/PID       ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   |    |        ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}

static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

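/*
 * The tracer itself. A minimal usage sketch from the shell (paths
 * assume tracefs is mounted at /sys/kernel/tracing):
 *
 *   echo function_graph > /sys/kernel/tracing/current_tracer
 *   cat /sys/kernel/tracing/trace
 */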
static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};


static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	fgraph_max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", fgraph_max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

static __init int init_graph_tracefs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);