// SPDX-License-Identifier: GPL-2.0
/*
 * uprobes-based tracing events
 *
 * Copyright (C) IBM Corporation, 2010-2012
 * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
 */
#define pr_fmt(fmt)	"trace_uprobe: " fmt

#include <linux/security.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/rculist.h>

#include "trace_dynevent.h"
#include "trace_probe.h"
#include "trace_probe_tmpl.h"

#define UPROBE_EVENT_SYSTEM	"uprobes"

struct uprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		vaddr[];
};

#define SIZEOF_TRACE_ENTRY(is_return)			\
	(sizeof(struct uprobe_trace_entry_head) +	\
	 sizeof(unsigned long) * (is_return ? 2 : 1))

#define DATAOF_TRACE_ENTRY(entry, is_return)		\
	((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return))
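
/*
 * Editor's note, derived from the two macros above: a non-return entry
 * carries one vaddr slot (the probed instruction pointer), a return
 * entry carries two (function entry address and return address), and
 * the fetched argument data starts right after the last slot.
 */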

static int trace_uprobe_create(int argc, const char **argv);
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev);
static int trace_uprobe_release(struct dyn_event *ev);
static bool trace_uprobe_is_busy(struct dyn_event *ev);
static bool trace_uprobe_match(const char *system, const char *event,
			int argc, const char **argv, struct dyn_event *ev);

static struct dyn_event_operations trace_uprobe_ops = {
	.create = trace_uprobe_create,
	.show = trace_uprobe_show,
	.is_busy = trace_uprobe_is_busy,
	.free = trace_uprobe_release,
	.match = trace_uprobe_match,
};

/*
 * uprobe event core functions
 */
struct trace_uprobe {
	struct dyn_event		devent;
	struct uprobe_consumer		consumer;
	struct path			path;
	struct inode			*inode;
	char				*filename;
	unsigned long			offset;
	unsigned long			ref_ctr_offset;
	unsigned long			nhit;
	struct trace_probe		tp;
};

static bool is_trace_uprobe(struct dyn_event *ev)
{
	return ev->ops == &trace_uprobe_ops;
}

static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
{
	return container_of(ev, struct trace_uprobe, devent);
}

/**
 * for_each_trace_uprobe - iterate over the trace_uprobe list
 * @pos:	the struct trace_uprobe * for each entry
 * @dpos:	the struct dyn_event * to use as a loop cursor
 */
#define for_each_trace_uprobe(pos, dpos)	\
	for_each_dyn_event(dpos)		\
		if (is_trace_uprobe(dpos) && (pos = to_trace_uprobe(dpos)))
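
/*
 * Usage sketch (editor's note; find_probe_event() below is a real user):
 *
 *	struct dyn_event *pos;
 *	struct trace_uprobe *tu;
 *
 *	for_each_trace_uprobe(tu, pos)
 *		pr_info("%s\n", trace_probe_name(&tu->tp));
 */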

#define SIZEOF_TRACE_UPROBE(n)				\
	(offsetof(struct trace_uprobe, tp.args) +	\
	(sizeof(struct probe_arg) * (n)))

static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);

struct uprobe_dispatch_data {
	struct trace_uprobe	*tu;
	unsigned long		bp_addr;
};

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs);

#ifdef CONFIG_STACK_GROWSUP
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr - (n * sizeof(long));
}
#else
static unsigned long adjust_stack_addr(unsigned long addr, unsigned int n)
{
	return addr + (n * sizeof(long));
}
#endif

static unsigned long get_user_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long ret;
	unsigned long addr = user_stack_pointer(regs);

	addr = adjust_stack_addr(addr, n);

	if (copy_from_user(&ret, (void __force __user *) addr, sizeof(ret)))
		return 0;

	return ret;
}
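
/*
 * Editor's note: a fault while reading the user stack is folded into a
 * return value of 0 above, so callers cannot distinguish a stack slot
 * holding 0 from one that could not be read.
 */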

/*
 * Uprobes-specific fetch functions
 */
static nokprobe_inline int
probe_mem_read(void *dest, void *src, size_t size)
{
	void __user *vaddr = (void __force __user *)src;

	return copy_from_user(dest, vaddr, size) ? -EFAULT : 0;
}

static nokprobe_inline int
probe_mem_read_user(void *dest, void *src, size_t size)
{
	return probe_mem_read(dest, src, size);
}

/*
 * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max
 * length and relative data location.
 */
static nokprobe_inline int
fetch_store_string(unsigned long addr, void *dest, void *base)
{
	long ret;
	u32 loc = *(u32 *)dest;
	int maxlen = get_loc_len(loc);
	u8 *dst = get_loc_data(dest, base);
	void __user *src = (void __force __user *) addr;

	if (unlikely(!maxlen))
		return -ENOMEM;

	if (addr == FETCH_TOKEN_COMM)
		ret = strlcpy(dst, current->comm, maxlen);
	else
		ret = strncpy_from_user(dst, src, maxlen);
	if (ret >= 0) {
		if (ret == maxlen)
			dst[ret - 1] = '\0';
		else
			/*
			 * Include the terminating null byte. In this case it
			 * was copied by strncpy_from_user but not accounted
			 * for in ret.
			 */
			ret++;
		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
	}

	return ret;
}
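
/*
 * Editor's note: on success the u32 at *dest now packs the copied length
 * and the offset of the string data relative to @base (make_data_loc()),
 * i.e. the data_loc format trace events use for dynamic strings.
 */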

static nokprobe_inline int
fetch_store_string_user(unsigned long addr, void *dest, void *base)
{
	return fetch_store_string(addr, dest, base);
}

/* Return the length of the string -- including the terminating null byte */
static nokprobe_inline int
fetch_store_strlen(unsigned long addr)
{
	int len;
	void __user *vaddr = (void __force __user *) addr;

	if (addr == FETCH_TOKEN_COMM)
		len = strlen(current->comm) + 1;
	else
		len = strnlen_user(vaddr, MAX_STRING_SIZE);

	return (len > MAX_STRING_SIZE) ? 0 : len;
}

static nokprobe_inline int
fetch_store_strlen_user(unsigned long addr)
{
	return fetch_store_strlen(addr);
}

static unsigned long translate_user_vaddr(unsigned long file_offset)
{
	unsigned long base_addr;
	struct uprobe_dispatch_data *udd;

	udd = (void *) current->utask->vaddr;

	base_addr = udd->bp_addr - udd->tu->offset;
	return base_addr + file_offset;
}

/* Note that we don't verify it, since the code does not come from user space */
static int
process_fetch_insn(struct fetch_insn *code, struct pt_regs *regs, void *dest,
		   void *base)
{
	unsigned long val;

	/* 1st stage: get value from context */
	switch (code->op) {
	case FETCH_OP_REG:
		val = regs_get_register(regs, code->param);
		break;
	case FETCH_OP_STACK:
		val = get_user_stack_nth(regs, code->param);
		break;
	case FETCH_OP_STACKP:
		val = user_stack_pointer(regs);
		break;
	case FETCH_OP_RETVAL:
		val = regs_return_value(regs);
		break;
	case FETCH_OP_IMM:
		val = code->immediate;
		break;
	case FETCH_OP_COMM:
		val = FETCH_TOKEN_COMM;
		break;
	case FETCH_OP_DATA:
		val = (unsigned long)code->data;
		break;
	case FETCH_OP_FOFFS:
		val = translate_user_vaddr(code->immediate);
		break;
	default:
		return -EILSEQ;
	}
	code++;

	return process_fetch_insn_bottom(code, val, dest, base);
}
NOKPROBE_SYMBOL(process_fetch_insn)

static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter)
{
	rwlock_init(&filter->rwlock);
	filter->nr_systemwide = 0;
	INIT_LIST_HEAD(&filter->perf_events);
}

static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter)
{
	return !filter->nr_systemwide && list_empty(&filter->perf_events);
}

static inline bool is_ret_probe(struct trace_uprobe *tu)
{
	return tu->consumer.ret_handler != NULL;
}

static bool trace_uprobe_is_busy(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return trace_probe_is_enabled(&tu->tp);
}

static bool trace_uprobe_match_command_head(struct trace_uprobe *tu,
					    int argc, const char **argv)
{
	char buf[MAX_ARGSTR_LEN + 1];
	int len;

	if (!argc)
		return true;

	len = strlen(tu->filename);
	if (strncmp(tu->filename, argv[0], len) || argv[0][len] != ':')
		return false;

	if (tu->ref_ctr_offset == 0)
		snprintf(buf, sizeof(buf), "0x%0*lx",
			 (int)(sizeof(void *) * 2), tu->offset);
	else
		snprintf(buf, sizeof(buf), "0x%0*lx(0x%lx)",
			 (int)(sizeof(void *) * 2), tu->offset,
			 tu->ref_ctr_offset);
	if (strcmp(buf, &argv[0][len + 1]))
		return false;

	argc--; argv++;

	return trace_probe_match_command_args(&tu->tp, argc, argv);
}

static bool trace_uprobe_match(const char *system, const char *event,
			       int argc, const char **argv, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return strcmp(trace_probe_name(&tu->tp), event) == 0 &&
	    (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0) &&
	    trace_uprobe_match_command_head(tu, argc, argv);
}

static nokprobe_inline struct trace_uprobe *
trace_uprobe_primary_from_call(struct trace_event_call *call)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return NULL;

	return container_of(tp, struct trace_uprobe, tp);
}

/*
 * Allocate new trace_uprobe and initialize it (including uprobes).
 */
static struct trace_uprobe *
alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
{
	struct trace_uprobe *tu;
	int ret;

	tu = kzalloc(SIZEOF_TRACE_UPROBE(nargs), GFP_KERNEL);
	if (!tu)
		return ERR_PTR(-ENOMEM);

	ret = trace_probe_init(&tu->tp, event, group, true);
	if (ret < 0)
		goto error;

	dyn_event_init(&tu->devent, &trace_uprobe_ops);
	tu->consumer.handler = uprobe_dispatcher;
	if (is_ret)
		tu->consumer.ret_handler = uretprobe_dispatcher;
	init_trace_uprobe_filter(tu->tp.event->filter);
	return tu;

error:
	kfree(tu);

	return ERR_PTR(ret);
}

static void free_trace_uprobe(struct trace_uprobe *tu)
{
	if (!tu)
		return;

	path_put(&tu->path);
	trace_probe_cleanup(&tu->tp);
	kfree(tu->filename);
	kfree(tu);
}

static struct trace_uprobe *find_probe_event(const char *event, const char *group)
{
	struct dyn_event *pos;
	struct trace_uprobe *tu;

	for_each_trace_uprobe(tu, pos)
		if (strcmp(trace_probe_name(&tu->tp), event) == 0 &&
		    strcmp(trace_probe_group_name(&tu->tp), group) == 0)
			return tu;

	return NULL;
}

/* Unregister a trace_uprobe and probe_event */
static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
	int ret;

	if (trace_probe_has_sibling(&tu->tp))
		goto unreg;

	ret = unregister_uprobe_event(tu);
	if (ret)
		return ret;

unreg:
	dyn_event_remove(&tu->devent);
	trace_probe_unlink(&tu->tp);
	free_trace_uprobe(tu);
	return 0;
}

static bool trace_uprobe_has_same_uprobe(struct trace_uprobe *orig,
					 struct trace_uprobe *comp)
{
	struct trace_probe_event *tpe = orig->tp.event;
	struct trace_probe *pos;
	struct inode *comp_inode = d_real_inode(comp->path.dentry);
	int i;

	list_for_each_entry(pos, &tpe->probes, list) {
		orig = container_of(pos, struct trace_uprobe, tp);
		if (comp_inode != d_real_inode(orig->path.dentry) ||
		    comp->offset != orig->offset)
			continue;

		/*
		 * trace_probe_compare_arg_type() ensured that nr_args and
		 * each argument's name and type are the same. Let's compare
		 * comm.
		 */
		for (i = 0; i < orig->tp.nr_args; i++) {
			if (strcmp(orig->tp.args[i].comm,
				   comp->tp.args[i].comm))
				break;
		}

		if (i == orig->tp.nr_args)
			return true;
	}

	return false;
}

static int append_trace_uprobe(struct trace_uprobe *tu, struct trace_uprobe *to)
{
	int ret;

	ret = trace_probe_compare_arg_type(&tu->tp, &to->tp);
	if (ret) {
		/* Note that arguments start at index 2 */
		trace_probe_log_set_index(ret + 1);
		trace_probe_log_err(0, DIFF_ARG_TYPE);
		return -EEXIST;
	}
	if (trace_uprobe_has_same_uprobe(to, tu)) {
		trace_probe_log_set_index(0);
		trace_probe_log_err(0, SAME_PROBE);
		return -EEXIST;
	}

	/* Append to existing event */
	ret = trace_probe_append(&tu->tp, &to->tp);
	if (!ret)
		dyn_event_add(&tu->devent);

	return ret;
}

/*
 * A uprobe with multiple reference counters is not allowed, i.e. if the
 * inode and offset match, the reference counter offset *must* match as
 * well. There is one exception though: if the user is replacing an old
 * trace_uprobe with a new one (same group/event), then we allow the same
 * uprobe with a new reference counter, as long as the new one does not
 * conflict with any other existing ones.
 */
static int validate_ref_ctr_offset(struct trace_uprobe *new)
{
	struct dyn_event *pos;
	struct trace_uprobe *tmp;
	struct inode *new_inode = d_real_inode(new->path.dentry);

	for_each_trace_uprobe(tmp, pos) {
		if (new_inode == d_real_inode(tmp->path.dentry) &&
		    new->offset == tmp->offset &&
		    new->ref_ctr_offset != tmp->ref_ctr_offset) {
			pr_warn("Reference counter offset mismatch.");
			return -EINVAL;
		}
	}
	return 0;
}

/* Register a trace_uprobe and probe_event */
static int register_trace_uprobe(struct trace_uprobe *tu)
{
	struct trace_uprobe *old_tu;
	int ret;

	mutex_lock(&event_mutex);

	ret = validate_ref_ctr_offset(tu);
	if (ret)
		goto end;

	/* register as an event */
	old_tu = find_probe_event(trace_probe_name(&tu->tp),
				  trace_probe_group_name(&tu->tp));
	if (old_tu) {
		if (is_ret_probe(tu) != is_ret_probe(old_tu)) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, DIFF_PROBE_TYPE);
			ret = -EEXIST;
		} else {
			ret = append_trace_uprobe(tu, old_tu);
		}
		goto end;
	}

	ret = register_uprobe_event(tu);
	if (ret) {
		if (ret == -EEXIST) {
			trace_probe_log_set_index(0);
			trace_probe_log_err(0, EVENT_EXIST);
		} else
			pr_warn("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	dyn_event_add(&tu->devent);

end:
	mutex_unlock(&event_mutex);

	return ret;
}

/*
 * Argument syntax:
 *  - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS]
 */
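/*
 * For example (an editor's sketch; the event name and offset below are
 * hypothetical, in the style of Documentation/trace/uprobetracer.rst):
 *
 *	echo 'p:bash_readline /bin/bash:0x4245c0 %ip %ax' \
 *		>> /sys/kernel/debug/tracing/uprobe_events
 *
 * creates an event uprobes/bash_readline that fires whenever /bin/bash
 * executes the instruction at file offset 0x4245c0. Appending
 * "(REF_CTR_OFFSET)" to the offset wires up a reference counter, as
 * parsed below.
 */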
static int trace_uprobe_create(int argc, const char **argv)
{
	struct trace_uprobe *tu;
	const char *event = NULL, *group = UPROBE_EVENT_SYSTEM;
	char *arg, *filename, *rctr, *rctr_end, *tmp;
	char buf[MAX_EVENT_NAME_LEN];
	struct path path;
	unsigned long offset, ref_ctr_offset;
	bool is_return = false;
	int i, ret;

	ret = 0;
	ref_ctr_offset = 0;

	switch (argv[0][0]) {
	case 'r':
		is_return = true;
		break;
	case 'p':
		break;
	default:
		return -ECANCELED;
	}

	if (argc < 2)
		return -ECANCELED;

	if (argv[0][1] == ':')
		event = &argv[0][2];

	if (!strchr(argv[1], '/'))
		return -ECANCELED;

	filename = kstrdup(argv[1], GFP_KERNEL);
	if (!filename)
		return -ENOMEM;

	/* Find the last occurrence, in case the path contains ':' too. */
	arg = strrchr(filename, ':');
	if (!arg || !isdigit(arg[1])) {
		kfree(filename);
		return -ECANCELED;
	}

	trace_probe_log_init("trace_uprobe", argc, argv);
	trace_probe_log_set_index(1);	/* filename is the 2nd argument */

	*arg++ = '\0';
	ret = kern_path(filename, LOOKUP_FOLLOW, &path);
	if (ret) {
		trace_probe_log_err(0, FILE_NOT_FOUND);
		kfree(filename);
		trace_probe_log_clear();
		return ret;
	}
	if (!d_is_reg(path.dentry)) {
		trace_probe_log_err(0, NO_REGULAR_FILE);
		ret = -EINVAL;
		goto fail_address_parse;
	}

	/* Parse reference counter offset if specified. */
	rctr = strchr(arg, '(');
	if (rctr) {
		rctr_end = strchr(rctr, ')');
		if (!rctr_end) {
			ret = -EINVAL;
			rctr_end = rctr + strlen(rctr);
			trace_probe_log_err(rctr_end - filename,
					    REFCNT_OPEN_BRACE);
			goto fail_address_parse;
		} else if (rctr_end[1] != '\0') {
			ret = -EINVAL;
			trace_probe_log_err(rctr_end + 1 - filename,
					    BAD_REFCNT_SUFFIX);
			goto fail_address_parse;
		}

		*rctr++ = '\0';
		*rctr_end = '\0';
		ret = kstrtoul(rctr, 0, &ref_ctr_offset);
		if (ret) {
			trace_probe_log_err(rctr - filename, BAD_REFCNT);
			goto fail_address_parse;
		}
	}

	/* Parse uprobe offset. */
	ret = kstrtoul(arg, 0, &offset);
	if (ret) {
		trace_probe_log_err(arg - filename, BAD_UPROBE_OFFS);
		goto fail_address_parse;
	}

	/* setup a probe */
	trace_probe_log_set_index(0);
	if (event) {
		ret = traceprobe_parse_event_name(&event, &group, buf,
						  event - argv[0]);
		if (ret)
			goto fail_address_parse;
	} else {
		char *tail;
		char *ptr;

		tail = kstrdup(kbasename(filename), GFP_KERNEL);
		if (!tail) {
			ret = -ENOMEM;
			goto fail_address_parse;
		}

		ptr = strpbrk(tail, ".-_");
		if (ptr)
			*ptr = '\0';

		snprintf(buf, MAX_EVENT_NAME_LEN, "%c_%s_0x%lx", 'p', tail, offset);
		event = buf;
		kfree(tail);
	}
	argc -= 2;
	argv += 2;

	tu = alloc_trace_uprobe(group, event, argc, is_return);
	if (IS_ERR(tu)) {
		ret = PTR_ERR(tu);
		/* This must return -ENOMEM otherwise there is a bug */
		WARN_ON_ONCE(ret != -ENOMEM);
		goto fail_address_parse;
	}
	tu->offset = offset;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->path = path;
	tu->filename = filename;

	/* parse arguments */
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		tmp = kstrdup(argv[i], GFP_KERNEL);
		if (!tmp) {
			ret = -ENOMEM;
			goto error;
		}

		trace_probe_log_set_index(i + 2);
		ret = traceprobe_parse_probe_arg(&tu->tp, i, tmp,
					is_return ? TPARG_FL_RETURN : 0);
		kfree(tmp);
		if (ret)
			goto error;
	}

	ret = traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu));
	if (ret < 0)
		goto error;

	ret = register_trace_uprobe(tu);
	if (!ret)
		goto out;

error:
	free_trace_uprobe(tu);
out:
	trace_probe_log_clear();
	return ret;

fail_address_parse:
	trace_probe_log_clear();
	path_put(&path);
	kfree(filename);

	return ret;
}

static int create_or_delete_trace_uprobe(int argc, char **argv)
{
	int ret;

	if (argv[0][0] == '-')
		return dyn_event_release(argc, argv, &trace_uprobe_ops);

	ret = trace_uprobe_create(argc, (const char **)argv);
	return ret == -ECANCELED ? -EINVAL : ret;
}
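
/*
 * Editor's note: deletion goes through dyn_event_release() above, e.g.
 * (for the hypothetical event from the sketch before trace_uprobe_create()):
 *
 *	echo '-:uprobes/bash_readline' >> /sys/kernel/debug/tracing/uprobe_events
 */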

static int trace_uprobe_release(struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);

	return unregister_trace_uprobe(tu);
}

/* Probes listing interfaces */
static int trace_uprobe_show(struct seq_file *m, struct dyn_event *ev)
{
	struct trace_uprobe *tu = to_trace_uprobe(ev);
	char c = is_ret_probe(tu) ? 'r' : 'p';
	int i;

	seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, trace_probe_group_name(&tu->tp),
			trace_probe_name(&tu->tp), tu->filename,
			(int)(sizeof(void *) * 2), tu->offset);

	if (tu->ref_ctr_offset)
		seq_printf(m, "(0x%lx)", tu->ref_ctr_offset);

	for (i = 0; i < tu->tp.nr_args; i++)
		seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);

	seq_putc(m, '\n');
	return 0;
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;

	if (!is_trace_uprobe(ev))
		return 0;

	return trace_uprobe_show(m, ev);
}

static const struct seq_operations probes_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		ret = dyn_events_release_all(&trace_uprobe_ops);
		if (ret)
			return ret;
	}

	return seq_open(file, &probes_seq_op);
}

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
					create_or_delete_trace_uprobe);
}

static const struct file_operations uprobe_events_ops = {
	.owner		= THIS_MODULE,
	.open		= probes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.write		= probes_write,
};

/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct dyn_event *ev = v;
	struct trace_uprobe *tu;

	if (!is_trace_uprobe(ev))
		return 0;

	tu = to_trace_uprobe(ev);
	seq_printf(m, " %s %-44s %15lu\n", tu->filename,
			trace_probe_name(&tu->tp), tu->nhit);
	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start	= dyn_event_seq_start,
	.next	= dyn_event_seq_next,
	.stop	= dyn_event_seq_stop,
	.show	= probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	return seq_open(file, &profile_seq_op);
}

static const struct file_operations uprobe_profile_ops = {
	.owner		= THIS_MODULE,
	.open		= profile_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

struct uprobe_cpu_buffer {
	struct mutex mutex;
	void *buf;
};
static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
static int uprobe_buffer_refcnt;

static int uprobe_buffer_init(void)
{
	int cpu, err_cpu;

	uprobe_cpu_buffer = alloc_percpu(struct uprobe_cpu_buffer);
	if (uprobe_cpu_buffer == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct page *p = alloc_pages_node(cpu_to_node(cpu),
						  GFP_KERNEL, 0);
		if (p == NULL) {
			err_cpu = cpu;
			goto err;
		}
		per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);
		mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex);
	}

	return 0;

err:
	for_each_possible_cpu(cpu) {
		if (cpu == err_cpu)
			break;
		free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf);
	}

	free_percpu(uprobe_cpu_buffer);
	return -ENOMEM;
}

static int uprobe_buffer_enable(void)
{
	int ret = 0;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (uprobe_buffer_refcnt++ == 0) {
		ret = uprobe_buffer_init();
		if (ret < 0)
			uprobe_buffer_refcnt--;
	}

	return ret;
}

static void uprobe_buffer_disable(void)
{
	int cpu;

	BUG_ON(!mutex_is_locked(&event_mutex));

	if (--uprobe_buffer_refcnt == 0) {
		for_each_possible_cpu(cpu)
			free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer,
							     cpu)->buf);

		free_percpu(uprobe_cpu_buffer);
		uprobe_cpu_buffer = NULL;
	}
}

static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
{
	struct uprobe_cpu_buffer *ucb;
	int cpu;

	cpu = raw_smp_processor_id();
	ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu);

	/*
	 * Use per-cpu buffers for fastest access, but we might migrate
	 * so the mutex makes sure we have sole access to it.
	 */
	mutex_lock(&ucb->mutex);

	return ucb;
}

static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
{
	mutex_unlock(&ucb->mutex);
}

static void __uprobe_trace_func(struct trace_uprobe *tu,
				unsigned long func, struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize,
				struct trace_event_file *trace_file)
{
	struct uprobe_trace_entry_head *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	void *data;
	int size, esize;
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);

	WARN_ON(call != trace_file->event_call);

	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
	size = esize + tu->tp.size + dsize;
	event = trace_event_buffer_lock_reserve(&buffer, trace_file,
						call->event.type, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
}

/* uprobe handler */
static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
			     struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	if (is_ret_probe(tu))
		return 0;

	rcu_read_lock();
	trace_probe_for_each_link_rcu(link, &tu->tp)
		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
	rcu_read_unlock();

	return 0;
}

static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
				 struct pt_regs *regs,
				 struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct event_file_link *link;

	rcu_read_lock();
	trace_probe_for_each_link_rcu(link, &tu->tp)
		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
	rcu_read_unlock();
}

/* Event entry printers */
static enum print_line_t
print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event)
{
	struct uprobe_trace_entry_head *entry;
	struct trace_seq *s = &iter->seq;
	struct trace_uprobe *tu;
	u8 *data;

	entry = (struct uprobe_trace_entry_head *)iter->ent;
	tu = trace_uprobe_primary_from_call(
		container_of(event, struct trace_event_call, event));
	if (unlikely(!tu))
		goto out;

	if (is_ret_probe(tu)) {
		trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
				 trace_probe_name(&tu->tp),
				 entry->vaddr[1], entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		trace_seq_printf(s, "%s: (0x%lx)",
				 trace_probe_name(&tu->tp),
				 entry->vaddr[0]);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	if (print_probe_args(s, tu->tp.args, tu->tp.nr_args, data, entry) < 0)
		goto out;

	trace_seq_putc(s, '\n');

 out:
	return trace_handle_return(s);
}

typedef bool (*filter_func_t)(struct uprobe_consumer *self,
				enum uprobe_filter_ctx ctx,
				struct mm_struct *mm);

static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
{
	int ret;

	tu->consumer.filter = filter;
	tu->inode = d_real_inode(tu->path.dentry);

	if (tu->ref_ctr_offset)
		ret = uprobe_register_refctr(tu->inode, tu->offset,
				tu->ref_ctr_offset, &tu->consumer);
	else
		ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);

	if (ret)
		tu->inode = NULL;

	return ret;
}

static void __probe_event_disable(struct trace_probe *tp)
{
	struct trace_probe *pos;
	struct trace_uprobe *tu;

	tu = container_of(tp, struct trace_uprobe, tp);
	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tu = container_of(pos, struct trace_uprobe, tp);
		if (!tu->inode)
			continue;

		uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
		tu->inode = NULL;
	}
}

static int probe_event_enable(struct trace_event_call *call,
			struct trace_event_file *file, filter_func_t filter)
{
	struct trace_probe *pos, *tp;
	struct trace_uprobe *tu;
	bool enabled;
	int ret;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;
	enabled = trace_probe_is_enabled(tp);

	/* This may also change "enabled" state */
	if (file) {
		if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
			return -EINTR;

		ret = trace_probe_add_file(tp, file);
		if (ret < 0)
			return ret;
	} else {
		if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
			return -EINTR;

		trace_probe_set_flag(tp, TP_FLAG_PROFILE);
	}

	tu = container_of(tp, struct trace_uprobe, tp);
	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));

	if (enabled)
		return 0;

	ret = uprobe_buffer_enable();
	if (ret)
		goto err_flags;

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tu = container_of(pos, struct trace_uprobe, tp);
		ret = trace_uprobe_enable(tu, filter);
		if (ret) {
			__probe_event_disable(tp);
			goto err_buffer;
		}
	}

	return 0;

 err_buffer:
	uprobe_buffer_disable();

 err_flags:
	if (file)
		trace_probe_remove_file(tp, file);
	else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	return ret;
}

static void probe_event_disable(struct trace_event_call *call,
				struct trace_event_file *file)
{
	struct trace_probe *tp;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return;

	if (!trace_probe_is_enabled(tp))
		return;

	if (file) {
		if (trace_probe_remove_file(tp, file) < 0)
			return;

		if (trace_probe_is_enabled(tp))
			return;
	} else
		trace_probe_clear_flag(tp, TP_FLAG_PROFILE);

	__probe_event_disable(tp);
	uprobe_buffer_disable();
}

static int uprobe_event_define_fields(struct trace_event_call *event_call)
{
	int ret, size;
	struct uprobe_trace_entry_head field;
	struct trace_uprobe *tu;

	tu = trace_uprobe_primary_from_call(event_call);
	if (unlikely(!tu))
		return -ENODEV;

	if (is_ret_probe(tu)) {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
		DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0);
		size = SIZEOF_TRACE_ENTRY(true);
	} else {
		DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0);
		size = SIZEOF_TRACE_ENTRY(false);
	}

	return traceprobe_define_arg_fields(event_call, size, &tu->tp);
}

#ifdef CONFIG_PERF_EVENTS
static bool
__uprobe_perf_filter(struct trace_uprobe_filter *filter, struct mm_struct *mm)
{
	struct perf_event *event;

	if (filter->nr_systemwide)
		return true;

	list_for_each_entry(event, &filter->perf_events, hw.tp_list) {
		if (event->hw.target->mm == mm)
			return true;
	}

	return false;
}

static inline bool
trace_uprobe_filter_event(struct trace_uprobe_filter *filter,
			  struct perf_event *event)
{
	return __uprobe_perf_filter(filter, event->hw.target->mm);
}

static bool trace_uprobe_filter_remove(struct trace_uprobe_filter *filter,
				       struct perf_event *event)
{
	bool done;

	write_lock(&filter->rwlock);
	if (event->hw.target) {
		list_del(&event->hw.tp_list);
		done = filter->nr_systemwide ||
			(event->hw.target->flags & PF_EXITING) ||
			trace_uprobe_filter_event(filter, event);
	} else {
		filter->nr_systemwide--;
		done = filter->nr_systemwide;
	}
	write_unlock(&filter->rwlock);

	return done;
}

/* This returns true if the filter always covers target mm */
static bool trace_uprobe_filter_add(struct trace_uprobe_filter *filter,
				    struct perf_event *event)
{
	bool done;

	write_lock(&filter->rwlock);
	if (event->hw.target) {
		/*
		 * event->parent != NULL means copy_process(), we can avoid
		 * uprobe_apply(). current->mm must be probed and we can rely
		 * on dup_mmap() which preserves the already installed bp's.
		 *
		 * attr.enable_on_exec means that exec/mmap will install the
		 * breakpoints we need.
		 */
		done = filter->nr_systemwide ||
			event->parent || event->attr.enable_on_exec ||
			trace_uprobe_filter_event(filter, event);
		list_add(&event->hw.tp_list, &filter->perf_events);
	} else {
		done = filter->nr_systemwide;
		filter->nr_systemwide++;
	}
	write_unlock(&filter->rwlock);

	return done;
}

static int uprobe_perf_close(struct trace_event_call *call,
			     struct perf_event *event)
{
	struct trace_probe *pos, *tp;
	struct trace_uprobe *tu;
	int ret = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	tu = container_of(tp, struct trace_uprobe, tp);
	if (trace_uprobe_filter_remove(tu->tp.event->filter, event))
		return 0;

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tu = container_of(pos, struct trace_uprobe, tp);
		ret = uprobe_apply(tu->inode, tu->offset, &tu->consumer, false);
		if (ret)
			break;
	}

	return ret;
}

static int uprobe_perf_open(struct trace_event_call *call,
			    struct perf_event *event)
{
	struct trace_probe *pos, *tp;
	struct trace_uprobe *tu;
	int err = 0;

	tp = trace_probe_primary_from_call(call);
	if (WARN_ON_ONCE(!tp))
		return -ENODEV;

	tu = container_of(tp, struct trace_uprobe, tp);
	if (trace_uprobe_filter_add(tu->tp.event->filter, event))
		return 0;

	list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
		tu = container_of(pos, struct trace_uprobe, tp);
		err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
		if (err) {
			uprobe_perf_close(call, event);
			break;
		}
	}

	return err;
}

static bool uprobe_perf_filter(struct uprobe_consumer *uc,
				enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct trace_uprobe_filter *filter;
	struct trace_uprobe *tu;
	int ret;

	tu = container_of(uc, struct trace_uprobe, consumer);
	filter = tu->tp.event->filter;

	read_lock(&filter->rwlock);
	ret = __uprobe_perf_filter(filter, mm);
	read_unlock(&filter->rwlock);

	return ret;
}

static void __uprobe_perf_func(struct trace_uprobe *tu,
			       unsigned long func, struct pt_regs *regs,
			       struct uprobe_cpu_buffer *ucb, int dsize)
{
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
	struct uprobe_trace_entry_head *entry;
	struct hlist_head *head;
	void *data;
	int size, esize;
	int rctx;

	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
		return;

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	size = esize + tu->tp.size + dsize;
	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
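	/*
	 * Editor's note: perf prefixes raw sample data with a u32 size
	 * field, so the payload is rounded up here such that header plus
	 * payload end on a u64 boundary; the memset below zeroes the
	 * resulting padding so no uninitialized bytes reach user space.
	 */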
	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
		return;

	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;

	entry = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!entry)
		goto out;

	if (is_ret_probe(tu)) {
		entry->vaddr[0] = func;
		entry->vaddr[1] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, true);
	} else {
		entry->vaddr[0] = instruction_pointer(regs);
		data = DATAOF_TRACE_ENTRY(entry, false);
	}

	memcpy(data, ucb->buf, tu->tp.size + dsize);

	if (size - esize > tu->tp.size + dsize) {
		int len = tu->tp.size + dsize;

		memset(data + len, 0, size - esize - len);
	}

	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
			      head, NULL);
 out:
	preempt_enable();
}

/* uprobe profile handler */
static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
			    struct uprobe_cpu_buffer *ucb, int dsize)
{
	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
		return UPROBE_HANDLER_REMOVE;

	if (!is_ret_probe(tu))
		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
	return 0;
}

static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
				struct pt_regs *regs,
				struct uprobe_cpu_buffer *ucb, int dsize)
{
	__uprobe_perf_func(tu, func, regs, ucb, dsize);
}

int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
			const char **filename, u64 *probe_offset,
			bool perf_type_tracepoint)
{
	const char *pevent = trace_event_name(event->tp_event);
	const char *group = event->tp_event->class->system;
	struct trace_uprobe *tu;

	if (perf_type_tracepoint)
		tu = find_probe_event(pevent, group);
	else
		tu = trace_uprobe_primary_from_call(event->tp_event);
	if (!tu)
		return -EINVAL;

	*fd_type = is_ret_probe(tu) ? BPF_FD_TYPE_URETPROBE
				    : BPF_FD_TYPE_UPROBE;
	*filename = tu->filename;
	*probe_offset = tu->offset;
	return 0;
}
#endif	/* CONFIG_PERF_EVENTS */

static int
trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
		      void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return probe_event_enable(event, file, NULL);

	case TRACE_REG_UNREGISTER:
		probe_event_disable(event, file);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return probe_event_enable(event, NULL, uprobe_perf_filter);

	case TRACE_REG_PERF_UNREGISTER:
		probe_event_disable(event, NULL);
		return 0;

	case TRACE_REG_PERF_OPEN:
		return uprobe_perf_open(event, data);

	case TRACE_REG_PERF_CLOSE:
		return uprobe_perf_close(event, data);

#endif
	default:
		return 0;
	}
	return 0;
}

static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;
	int ret = 0;

	tu = container_of(con, struct trace_uprobe, consumer);
	tu->nhit++;

	udd.tu = tu;
	udd.bp_addr = instruction_pointer(regs);

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
		ret |= uprobe_trace_func(tu, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return ret;
}

static int uretprobe_dispatcher(struct uprobe_consumer *con,
				unsigned long func, struct pt_regs *regs)
{
	struct trace_uprobe *tu;
	struct uprobe_dispatch_data udd;
	struct uprobe_cpu_buffer *ucb;
	int dsize, esize;

	tu = container_of(con, struct trace_uprobe, consumer);

	udd.tu = tu;
	udd.bp_addr = func;

	current->utask->vaddr = (unsigned long) &udd;

	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
		return 0;

	dsize = __get_data_size(&tu->tp, regs);
	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

	ucb = uprobe_buffer_get();
	store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

	if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
		uretprobe_trace_func(tu, func, regs, ucb, dsize);

#ifdef CONFIG_PERF_EVENTS
	if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
		uretprobe_perf_func(tu, func, regs, ucb, dsize);
#endif
	uprobe_buffer_put(ucb);
	return 0;
}

static struct trace_event_functions uprobe_funcs = {
	.trace		= print_uprobe_event
};

static inline void init_trace_event_call(struct trace_uprobe *tu)
{
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);

	call->event.funcs = &uprobe_funcs;
	call->class->define_fields = uprobe_event_define_fields;

	call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
	call->class->reg = trace_uprobe_register;
}

static int register_uprobe_event(struct trace_uprobe *tu)
{
	init_trace_event_call(tu);

	return trace_probe_register_event_call(&tu->tp);
}

static int unregister_uprobe_event(struct trace_uprobe *tu)
{
	return trace_probe_unregister_event_call(&tu->tp);
}

#ifdef CONFIG_PERF_EVENTS
struct trace_event_call *
create_local_trace_uprobe(char *name, unsigned long offs,
			  unsigned long ref_ctr_offset, bool is_return)
{
	struct trace_uprobe *tu;
	struct path path;
	int ret;

	ret = kern_path(name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	if (!d_is_reg(path.dentry)) {
		path_put(&path);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * Local trace_uprobes are not added to dyn_event, so they are never
	 * searched in find_probe_event(). Therefore, there is no concern of
	 * duplicated name "DUMMY_EVENT" here.
	 */
	tu = alloc_trace_uprobe(UPROBE_EVENT_SYSTEM, "DUMMY_EVENT", 0,
				is_return);

	if (IS_ERR(tu)) {
		pr_info("Failed to allocate trace_uprobe.(%d)\n",
			(int)PTR_ERR(tu));
		path_put(&path);
		return ERR_CAST(tu);
	}

	tu->offset = offs;
	tu->path = path;
	tu->ref_ctr_offset = ref_ctr_offset;
	tu->filename = kstrdup(name, GFP_KERNEL);
	init_trace_event_call(tu);

	if (traceprobe_set_print_fmt(&tu->tp, is_ret_probe(tu)) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	return trace_probe_event_call(&tu->tp);
error:
	free_trace_uprobe(tu);
	return ERR_PTR(ret);
}

void destroy_local_trace_uprobe(struct trace_event_call *event_call)
{
	struct trace_uprobe *tu;

	tu = trace_uprobe_primary_from_call(event_call);

	free_trace_uprobe(tu);
}
#endif	/* CONFIG_PERF_EVENTS */

/* Make a trace interface for controlling probe points */
static __init int init_uprobe_trace(void)
{
	struct dentry *d_tracer;
	int ret;

	ret = dyn_event_register(&trace_uprobe_ops);
	if (ret)
		return ret;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("uprobe_events", 0644, d_tracer,
				    NULL, &uprobe_events_ops);
	/* Profile interface */
	trace_create_file("uprobe_profile", 0444, d_tracer,
				    NULL, &uprobe_profile_ops);
	return 0;
}

fs_initcall(init_uprobe_trace);