// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>

#include <asm/tlb.h>

#include "trace_probe.h"
#include "trace.h"

#define bpf_event_rcu_dereference(p) \
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */

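/*
 * Raw declarations of the stack helpers, which are defined in
 * kernel/bpf/stackmap.c, so that the tracepoint wrappers below can
 * invoke them directly with remapped pt_regs arguments.
 */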
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	preempt_disable();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we did a bpf_prog_array_valid() there to check
	 * whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If bpf_prog_array_valid() fetched a non-NULL prog_array, we enter
	 * trace_call_bpf() and do the proper rcu_dereference() under the
	 * RCU lock. If it turns out that prog_array is NULL, we bail out.
	 * For the opposite case, if the fetched pointer was NULL, we skip
	 * the prog_array with the risk of missing out on events that were
	 * updated in between this check and the rcu_dereference(), which
	 * is an accepted risk.
	 */
	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);

 out:
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);

#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func = bpf_override_return,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
};
#endif

BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_BPF_READ);
	if (ret < 0)
		goto out;

	ret = probe_kernel_read(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
out:
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_proto = {
	.func = bpf_probe_read,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an interim
	 * state, when the task or mm are switched. This is specifically
	 * required to prevent the use of temporary mm.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	return probe_user_write(unsafe_ptr, src, size);
}

static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func = bpf_probe_write_user,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE,
};

static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}

/*
 * Only limited trace_printk() conversion specifiers allowed:
 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s
 */
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	bool str_seen = false;
	int mod[3] = {};
	int fmt_cnt = 0;
	u64 unsafe_addr;
	char buf[64];
	int i;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		if (fmt[i] != '%')
			continue;

		if (fmt_cnt >= 3)
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		i++;
		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		} else if (fmt[i] == 'p' || fmt[i] == 's') {
			mod[fmt_cnt]++;
			/* disallow any further format extensions */
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))
				return -EINVAL;
			fmt_cnt++;
			if (fmt[i] == 's') {
				if (str_seen)
					/* allow only one '%s' per fmt string */
					return -EINVAL;
				str_seen = true;

				switch (fmt_cnt) {
				case 1:
					unsafe_addr = arg1;
					arg1 = (long) buf;
					break;
				case 2:
					unsafe_addr = arg2;
					arg2 = (long) buf;
					break;
				case 3:
					unsafe_addr = arg3;
					arg3 = (long) buf;
					break;
				}
				buf[0] = 0;
				strncpy_from_unsafe(buf,
						    (void *) (long) unsafe_addr,
						    sizeof(buf));
			}
			continue;
		}

		if (fmt[i] == 'l') {
			mod[fmt_cnt]++;
			i++;
		}

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
		fmt_cnt++;
	}

/* Horrid workaround for getting va_list handling working with different
 * argument type combinations generically for 32 and 64 bit archs.
 */
#define __BPF_TP_EMIT() __BPF_ARG3_TP()
#define __BPF_TP(...) \
	__trace_printk(0 /* Fake ip */, \
		       fmt, ##__VA_ARGS__)

#define __BPF_ARG1_TP(...) \
	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64)) \
	  ? __BPF_TP(arg1, ##__VA_ARGS__) \
	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32)) \
	      ? __BPF_TP((long)arg1, ##__VA_ARGS__) \
	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))

#define __BPF_ARG2_TP(...) \
	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64)) \
	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__) \
	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32)) \
	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__) \
	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))

#define __BPF_ARG3_TP(...) \
	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64)) \
	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__) \
	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32)) \
	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__) \
	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))

	return __BPF_TP_EMIT();
}

static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func = bpf_trace_printk,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_MEM,
	.arg2_type = ARG_CONST_SIZE,
};

const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * this program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}

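/*
 * Common backend of bpf_perf_event_read() and bpf_perf_event_read_value():
 * look up the perf event stored in the given BPF_MAP_TYPE_PERF_EVENT_ARRAY
 * slot (BPF_F_CURRENT_CPU selects this CPU's slot) and read its counter
 * locally via perf_event_read_local().
 */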
static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}

BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func = bpf_perf_event_read,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func = bpf_perf_event_read_value,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_PTR_TO_UNINIT_MEM,
	.arg4_type = ARG_CONST_SIZE,
};

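/*
 * Common output path of the bpf_perf_event_output() flavours below: look up
 * the perf event in the BPF_MAP_TYPE_PERF_EVENT_ARRAY, check that it is a
 * PERF_COUNT_SW_BPF_OUTPUT event bound to this CPU, and emit the prepared
 * sample into its ring buffer.
 */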
static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	return perf_event_output(event, sd, regs);
}

/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output
 */
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};
	struct perf_sample_data *sd;
	int err;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
		err = -EBUSY;
		goto out;
	}

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
		err = -EINVAL;
		goto out;
	}

	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	err = __bpf_perf_event_output(regs, map, flags, sd);

out:
	this_cpu_dec(bpf_trace_nest_level);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func = bpf_perf_event_output,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

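/*
 * bpf_event_output() below is the raw-context counterpart used from outside
 * this file (e.g. by the skb/xdp event output helpers); like
 * bpf_perf_event_output() above, it keeps a per-CPU nest level so that
 * normal, irq, and nmi context each get their own pt_regs and sample data.
 */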
static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
	struct perf_raw_frag frag = {
		.copy = ctx_copy,
		.size = ctx_size,
		.data = ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next = ctx_size ? &frag : NULL,
			},
			.size = meta_size,
			.data = meta,
		},
	};
	struct perf_sample_data *sd;
	struct pt_regs *regs;
	u64 ret;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
		ret = -EBUSY;
		goto out;
	}
	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	ret = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_event_output_nest_level);
	return ret;
}

BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

static const struct bpf_func_proto bpf_get_current_task_proto = {
	.func = bpf_get_current_task,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
};

BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func = bpf_current_task_under_cgroup,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_BPF_READ);
	if (ret < 0)
		goto out;

	/*
	 * The strncpy_from_unsafe() call will likely not fill the entire
	 * buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in error case, so that improper users ignoring return
	 * code altogether don't copy garbage; otherwise length of string
	 * is returned that can be used for bpf_perf_event_output() et al.
	 */
	ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
out:
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_str_proto = {
	.func = bpf_probe_read_str,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_UNINIT_MEM,
	.arg2_type = ARG_CONST_SIZE_OR_ZERO,
	.arg3_type = ARG_ANYTHING,
};

struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);

static void do_bpf_send_signal(struct irq_work *entry)
{
	struct send_signal_irq_work *work;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, PIDTYPE_TGID);
}

BPF_CALL_1(bpf_send_signal, u32, sig)
{
	struct send_signal_irq_work *work = NULL;

	/* Similar to bpf_probe_write_user, task needs to be
	 * in a sound condition and kernel memory access be
	 * permitted in order to send signal to the current
	 * task.
	 */
	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	if (irqs_disabled()) {
		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */
		if (unlikely(!valid_signal(sig)))
			return -EINVAL;

		work = this_cpu_ptr(&send_signal_work);
		if (work->irq_work.flags & IRQ_WORK_BUSY)
			return -EBUSY;

		/* Add the current task, which is the target of sending signal,
		 * to the irq_work. The current task may change when queued
		 * irq works get executed.
		 */
		work->task = current;
		work->sig = sig;
		irq_work_queue(&work->irq_work);
		return 0;
	}

	return group_send_sig_info(sig, SEND_SIG_PRIV, current, PIDTYPE_TGID);
}

static const struct bpf_func_proto bpf_send_signal_proto = {
	.func = bpf_send_signal,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
};

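/*
 * Helpers available to every tracing program type; the kprobe, tracepoint,
 * raw tracepoint, and perf event proto lookups below all fall back to this
 * set for anything they do not handle themselves.
 */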
static const struct bpf_func_proto *
tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_read_str:
		return &bpf_probe_read_str_proto;
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
#endif
	case BPF_FUNC_send_signal:
		return &bpf_send_signal_proto;
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
	case BPF_FUNC_override_return:
		return &bpf_override_return_proto;
#endif
	default:
		return tracing_func_proto(func_id, prog);
	}
}

/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

const struct bpf_verifier_ops kprobe_verifier_ops = {
	.get_func_proto = kprobe_prog_func_proto,
	.is_valid_access = kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};

BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func = bpf_perf_event_output_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func = bpf_get_stackid_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_tp = {
	.func = bpf_get_stack_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_UNINIT_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};

static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	default:
		return tracing_func_proto(func_id, prog);
	}
}

static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_verifier_ops = {
	.get_func_proto = tp_prog_func_proto,
	.is_valid_access = tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};

BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
	.func = bpf_perf_prog_read_value,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_UNINIT_MEM,
	.arg3_type = ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	case BPF_FUNC_perf_prog_read_value:
		return &bpf_perf_prog_read_value_proto;
	default:
		return tracing_func_proto(func_id, prog);
	}
}

/*
 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 * to avoid potential recursive reuse issue when/if tracepoints are added
 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
 *
 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
 * in normal, irq, and nmi context.
 */
struct bpf_raw_tp_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
static struct pt_regs *get_bpf_raw_tp_regs(void)
{
	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
		this_cpu_dec(bpf_raw_tp_nest_level);
		return ERR_PTR(-EBUSY);
	}

	return &tp_regs->regs[nest_level - 1];
}

static void put_bpf_raw_tp_regs(void)
{
	this_cpu_dec(bpf_raw_tp_nest_level);
}

BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = ____bpf_perf_event_output(regs, map, flags, data, size);

	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
	.func = bpf_perf_event_output_raw_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			      flags, 0, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
	.func = bpf_get_stackid_raw_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   void *, buf, u32, size, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			    (unsigned long) size, flags, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
	.func = bpf_get_stack_raw_tp,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE_OR_ZERO,
	.arg4_type = ARG_ANYTHING,
};

static const struct bpf_func_proto *
raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_raw_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_raw_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_raw_tp;
	default:
		return tracing_func_proto(func_id, prog);
	}
}

static bool raw_tp_prog_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	/* largest tracepoint in the kernel has 12 args */
	if (off < 0 || off >= sizeof(__u64) * 12)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;
	return true;
}

const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
	.get_func_proto = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {
};

static bool raw_tp_writable_prog_is_valid_access(int off, int size,
						 enum bpf_access_type type,
						 const struct bpf_prog *prog,
						 struct bpf_insn_access_aux *info)
{
	if (off == 0) {
		if (size != sizeof(u64) || type != BPF_READ)
			return false;
		info->reg_type = PTR_TO_TP_BUFFER;
	}
	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
}

const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
	.get_func_proto = raw_tp_prog_func_proto,
	.is_valid_access = raw_tp_writable_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
};

static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_u64 = sizeof(u64);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0) {
		if (sizeof(unsigned long) != 4)
			return false;
		if (size != 8)
			return false;
		if (off % size != 4)
			return false;
	}

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	case bpf_ctx_range(struct bpf_perf_event_data, addr):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}

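/*
 * Rewrite loads from the user-visible struct bpf_perf_event_data into loads
 * through the in-kernel struct bpf_perf_event_data_kern: sample_period and
 * addr are fetched via the perf_sample_data pointer, everything else is
 * treated as a pt_regs field and fetched via the regs pointer.
 */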
static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	case offsetof(struct bpf_perf_event_data, addr):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, addr, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops perf_event_verifier_ops = {
	.get_func_proto = pe_prog_func_proto,
	.is_valid_access = pe_prog_is_valid_access,
	.convert_ctx_access = pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};

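/* Serializes attach/detach/query of BPF programs on a perf event */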
static DEFINE_MUTEX(bpf_event_mutex);

#define BPF_TRACE_MAX_PROGS 64

int perf_event_attach_bpf_prog(struct perf_event *event,
			       struct bpf_prog *prog)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret = -EEXIST;

	/*
	 * Kprobe override only works if they are on the function entry,
	 * and only if they are on the opt-in list.
	 */
	if (prog->kprobe_override &&
	    (!trace_kprobe_on_func_entry(event->tp_event) ||
	     !trace_kprobe_error_injectable(event->tp_event)))
		return -EINVAL;

	mutex_lock(&bpf_event_mutex);

	if (event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	if (old_array &&
	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
		ret = -E2BIG;
		goto unlock;
	}

	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
	if (ret < 0)
		goto unlock;

	/* set the new array to event->tp_event and set event->prog */
	event->prog = prog;
	rcu_assign_pointer(event->tp_event->prog_array, new_array);
	bpf_prog_array_free(old_array);

unlock:
	mutex_unlock(&bpf_event_mutex);
	return ret;
}

void perf_event_detach_bpf_prog(struct perf_event *event)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret;

	mutex_lock(&bpf_event_mutex);

	if (!event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
	if (ret == -ENOENT)
		goto unlock;
	if (ret < 0) {
		bpf_prog_array_delete_safe(old_array, event->prog);
	} else {
		rcu_assign_pointer(event->tp_event->prog_array, new_array);
		bpf_prog_array_free(old_array);
	}

	bpf_prog_put(event->prog);
	event->prog = NULL;

unlock:
	mutex_unlock(&bpf_event_mutex);
}

int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	struct perf_event_query_bpf __user *uquery = info;
	struct perf_event_query_bpf query = {};
	struct bpf_prog_array *progs;
	u32 *ids, prog_cnt, ids_len;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;
	if (copy_from_user(&query, uquery, sizeof(query)))
		return -EFAULT;

	ids_len = query.ids_len;
	if (ids_len > BPF_TRACE_MAX_PROGS)
		return -E2BIG;
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	/*
	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
	 * is required when user only wants to check for uquery->prog_cnt.
	 * There is no need to check for it since the case is handled
	 * gracefully in bpf_prog_array_copy_info.
	 */

	mutex_lock(&bpf_event_mutex);
	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
	return ret;
}

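/* Boundaries of the built-in __bpf_raw_tp section, emitted by the linker */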
extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

	for (; btp < __stop__bpf_raw_tp; btp++) {
		if (!strcmp(btp->tp->name, name))
			return btp;
	}

	return bpf_get_raw_tracepoint_module(name);
}

void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
	struct module *mod;

	preempt_disable();
	mod = __module_address((unsigned long)btp);
	module_put(mod);
	preempt_enable();
}

static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
	rcu_read_lock();
	preempt_disable();
	(void) BPF_PROG_RUN(prog, args);
	preempt_enable();
	rcu_read_unlock();
}

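/*
 * Preprocessor machinery that expands BPF_TRACE_DEFN_x(x) into
 * bpf_trace_run1() ... bpf_trace_run12(): each copies its x u64 arguments
 * into an on-stack array and hands that array to __bpf_trace_run().
 */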
#define UNPACK(...) __VA_ARGS__
#define REPEAT_1(FN, DL, X, ...) FN(X)
#define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X) u64 arg##X
#define COPY(X) args[X] = arg##X

#define __DL_COM (,)
#define __DL_SEM (;)

#define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11

#define BPF_TRACE_DEFN_x(x) \
	void bpf_trace_run##x(struct bpf_prog *prog, \
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \
	{ \
		u64 args[x]; \
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \
		__bpf_trace_run(prog, args); \
	} \
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);

static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	struct tracepoint *tp = btp->tp;

	/*
	 * check that program doesn't access arguments beyond what's
	 * available in this tracepoint
	 */
	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
		return -EINVAL;

	if (prog->aux->max_tp_access > btp->writable_size)
		return -EINVAL;

	return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
						   prog);
}

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return __bpf_probe_register(btp, prog);
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
}

int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr)
{
	bool is_tracepoint, is_syscall_tp;
	struct bpf_prog *prog;
	int flags, err = 0;

	prog = event->prog;
	if (!prog)
		return -ENOENT;

	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
		return -EOPNOTSUPP;

	*prog_id = prog->aux->id;
	flags = event->tp_event->flags;
	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
	is_syscall_tp = is_syscall_trace_event(event->tp_event);

	if (is_tracepoint || is_syscall_tp) {
		*buf = is_tracepoint ? event->tp_event->tp->name
				     : event->tp_event->name;
		*fd_type = BPF_FD_TYPE_TRACEPOINT;
		*probe_offset = 0x0;
		*probe_addr = 0x0;
	} else {
		/* kprobe/uprobe */
		err = -EOPNOTSUPP;
#ifdef CONFIG_KPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_KPROBE)
			err = bpf_get_kprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_UPROBE)
			err = bpf_get_uprobe_info(event, fd_type, buf,
						  probe_offset,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
	}

	return err;
}

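/*
 * Initialize the per-CPU irq_work that bpf_send_signal() queues when it is
 * invoked with interrupts disabled.
 */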
static int __init send_signal_irq_work_init(void)
{
	int cpu;
	struct send_signal_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&send_signal_work, cpu);
		init_irq_work(&work->irq_work, do_bpf_send_signal);
	}
	return 0;
}

subsys_initcall(send_signal_irq_work_init);

#ifdef CONFIG_MODULES
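/*
 * Keep bpf_trace_modules in sync with module load/unload so that
 * bpf_get_raw_tracepoint_module() can search the raw tracepoints of
 * every live module.
 */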
static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
			    void *module)
{
	struct bpf_trace_module *btm, *tmp;
	struct module *mod = module;

	if (mod->num_bpf_raw_events == 0 ||
	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
		return 0;

	mutex_lock(&bpf_module_mutex);

	switch (op) {
	case MODULE_STATE_COMING:
		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
		if (btm) {
			btm->module = module;
			list_add(&btm->list, &bpf_trace_modules);
		}
		break;
	case MODULE_STATE_GOING:
		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
			if (btm->module == module) {
				list_del(&btm->list);
				kfree(btm);
				break;
			}
		}
		break;
	}

	mutex_unlock(&bpf_module_mutex);

	return 0;
}

static struct notifier_block bpf_module_nb = {
	.notifier_call = bpf_event_notify,
};

static int __init bpf_event_init(void)
{
	register_module_notifier(&bpf_module_nb);
	return 0;
}

fs_initcall(bpf_event_init);
#endif /* CONFIG_MODULES */