// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
};

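/*
 * Each trace instance gets its own ftrace_ops so its function filters
 * can be managed independently. The top level trace array is the
 * exception: it reuses the boot-time "global_ops", so nothing needs to
 * be allocated (or freed) for it.
 */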
int ftrace_allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	/* The top level array uses the "global_ops" */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non-stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;

	return 0;
}

void ftrace_free_ftrace_ops(struct trace_array *tr)
{
	kfree(tr->ops);
	tr->ops = NULL;
}

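/*
 * Hook the instance's ftrace_ops up to the filter control files
 * (set_ftrace_filter and set_ftrace_notrace) in its tracefs directory.
 * For the top level array those files are created on boot up, so there
 * is nothing to do here.
 */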
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	if (!tr->ops)
		return -EINVAL;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	ftrace_free_ftrace_ops(tr);
}

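/*
 * Called when this tracer is selected, typically via tracefs:
 *
 *	echo function > /sys/kernel/tracing/current_tracer
 *
 * If the func_stack_trace option is already set, the top level instance
 * starts with the stack tracing callback instead.
 */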
static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/*
	 * Instance trace_arrays get their ops allocated at instance
	 * creation, unless that allocation failed.
	 */
	if (!tr->ops)
		return -ENOMEM;

	/* Currently only the global instance can do stack tracing */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
	    func_flags.val & TRACE_FUNC_OPT_STACK)
		func = function_stack_trace_call;
	else
		func = function_trace_call;

	ftrace_init_array_ops(tr, func);

	tr->array_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->array_buffer);
}

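/*
 * The callback registered with ftrace and invoked on every traced
 * function entry. It disables preemption, uses the recursion protection
 * to bail out if it is re-entered from within the tracing code, and
 * then records the call in this CPU's ring buffer.
 */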
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);

 out:
	preempt_enable_notrace();
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 2:
 *
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 2
#else
/*
 * Skip 3:
 *
 *   __trace_stack()
 *   function_stack_trace_call()
 *   ftrace_call()
 */
#define STACK_SKIP 3
#endif

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		__trace_stack(tr, flags, STACK_SKIP, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

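/*
 * With the func_stack_trace option set, the function tracer also
 * records a stack trace for every function entry, e.g.:
 *
 *	echo 1 > /sys/kernel/tracing/options/func_stack_trace
 *
 * Note that function_trace_init() only selects the stack variant for
 * the top level instance.
 */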
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;

static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		/* We can change this flag when not running. */
		if (tr->current_trace != &function_trace)
			break;

		unregister_ftrace_function(tr->ops);

		if (set) {
			tr->ops->func = function_stack_trace_call;
			register_ftrace_function(tr->ops);
		} else {
			tr->ops->func = function_trace_call;
			register_ftrace_function(tr->ops);
		}

		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
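/*
 * Everything below implements the function probe commands that can be
 * attached to individual functions through set_ftrace_filter, e.g.:
 *
 *	echo 'schedule:traceoff' > set_ftrace_filter
 *	echo 'wake_up_process:traceon:5' > set_ftrace_filter
 *
 * An optional trailing count limits how many times the action fires;
 * without it the probe is "unlimited".
 */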
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next the tracer is set to the state we want (disabled or enabled)
	 * then a write memory barrier is used to make sure that
	 * the new state is visible before changing the counter by
	 * one minus the old counter. This guarantees that another CPU
	 * executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}

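/*
 * Probe bodies for traceon/traceoff. The _count variants go through
 * update_traceon_count() to honor a per-ip counter; the plain variants
 * simply flip tracing if it is not already in the requested state.
 */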
static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}

#ifdef CONFIG_UNWINDER_ORC
/*
 * Skip 3:
 *
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 3
#else
/*
 * Skip 5:
 *
 *   __trace_stack()
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_assist_func()
 *   ftrace_call()
 */
#define FTRACE_STACK_SKIP 5
#endif

static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned long flags;
	int pc;

	local_save_flags(flags);
	pc = preempt_count();

	__trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}

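/*
 * Shared helper for the dump probes: returns 1 if the probe should
 * still fire, decrementing the per-ip counter when one exists. No
 * counter (a NULL mapper or an unknown ip) means "unlimited".
 */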
static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}

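/*
 * Format one registered probe for listing (seen when reading
 * set_ftrace_filter), producing lines such as:
 *
 *	schedule:traceoff:count=4
 *	wake_up_process:stacktrace:unlimited
 */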
static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
		  struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}

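/*
 * Lifecycle of the per-probe counter storage: the ftrace_func_mapper is
 * allocated lazily on the first init call and maps each probed ip to
 * its remaining count. On free, an ip of 0 means "tear down the whole
 * mapper".
 */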
static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};

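/*
 * Common parsing for all of the probe commands above. The input format
 * is
 *
 *	<function glob>:<command>[:<count>]
 *
 * where a leading '!' removes the probe. By the time this is called the
 * glob and command have already been split out; only the optional count
 * remains in @param.
 */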
static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
		     char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};

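/*
 * Register all five probe commands, unwinding in reverse order if any
 * registration fails so a partial failure leaves nothing registered.
 */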
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}