/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 */
8#ifndef _UAPI__LINUX_BPF_H__
9#define _UAPI__LINUX_BPF_H__
10
11#include <linux/types.h>
12#include <linux/bpf_common.h>
13
14/* Extended instruction set based on top of classic BPF */
15
16/* instruction classes */
17#define BPF_ALU64 0x07 /* alu mode in double word width */
18
19/* ld/ldx fields */
20#define BPF_DW 0x18 /* double word (64-bit) */
21#define BPF_XADD 0xc0 /* exclusive add */
22
23/* alu/jmp fields */
24#define BPF_MOV 0xb0 /* mov reg to reg */
25#define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */
26
27/* change endianness of a register */
28#define BPF_END 0xd0 /* flags for endianness conversion: */
29#define BPF_TO_LE 0x00 /* convert to little-endian */
30#define BPF_TO_BE 0x08 /* convert to big-endian */
31#define BPF_FROM_LE BPF_TO_LE
32#define BPF_FROM_BE BPF_TO_BE
33
34/* jmp encodings */
35#define BPF_JNE 0x50 /* jump != */
36#define BPF_JLT 0xa0 /* LT is unsigned, '<' */
37#define BPF_JLE 0xb0 /* LE is unsigned, '<=' */
38#define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */
39#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
40#define BPF_JSLT 0xc0 /* SLT is signed, '<' */
41#define BPF_JSLE 0xd0 /* SLE is signed, '<=' */
42#define BPF_CALL 0x80 /* function call */
43#define BPF_EXIT 0x90 /* function return */
44
45/* Register numbers */
46enum {
47 BPF_REG_0 = 0,
48 BPF_REG_1,
49 BPF_REG_2,
50 BPF_REG_3,
51 BPF_REG_4,
52 BPF_REG_5,
53 BPF_REG_6,
54 BPF_REG_7,
55 BPF_REG_8,
56 BPF_REG_9,
57 BPF_REG_10,
58 __MAX_BPF_REG,
59};
60
/* BPF has 10 general purpose 64-bit registers and a stack frame. */
62#define MAX_BPF_REG __MAX_BPF_REG
63
64struct bpf_insn {
65 __u8 code; /* opcode */
66 __u8 dst_reg:4; /* dest register */
67 __u8 src_reg:4; /* source register */
68 __s16 off; /* signed offset */
69 __s32 imm; /* signed immediate constant */
70};
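
/* Editorial illustration (not part of the UAPI): a single hand-encoded
 * instruction that copies register R1 into R0 ("BPF_MOV64 r0, r1") could be
 * written as the initializer below; BPF_X (register source operand) comes
 * from <linux/bpf_common.h>.
 *
 *     struct bpf_insn mov_r0_r1 = {
 *             .code    = BPF_ALU64 | BPF_MOV | BPF_X,
 *             .dst_reg = BPF_REG_0,
 *             .src_reg = BPF_REG_1,
 *             .off     = 0,
 *             .imm     = 0,
 *     };
 */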
71
/* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
73struct bpf_lpm_trie_key {
74 __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */
75 __u8 data[0]; /* Arbitrary size */
76};
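
/* Editorial illustration (not part of the UAPI): a lookup key for the IPv4
 * prefix 192.168.1.0/24 is commonly built by declaring a structure with the
 * same layout, the address bytes following the prefix length (the struct
 * name below is made up):
 *
 *     struct ipv4_lpm_key {
 *             __u32 prefixlen;
 *             __u8  data[4];
 *     };
 *
 *     struct ipv4_lpm_key key = {
 *             .prefixlen = 24,
 *             .data = { 192, 168, 1, 0 },
 *     };
 */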
77
78struct bpf_cgroup_storage_key {
79 __u64 cgroup_inode_id; /* cgroup inode id */
80 __u32 attach_type; /* program attach type */
81};
82
83/* BPF syscall commands, see bpf(2) man-page for details. */
84enum bpf_cmd {
85 BPF_MAP_CREATE,
86 BPF_MAP_LOOKUP_ELEM,
87 BPF_MAP_UPDATE_ELEM,
88 BPF_MAP_DELETE_ELEM,
89 BPF_MAP_GET_NEXT_KEY,
90 BPF_PROG_LOAD,
91 BPF_OBJ_PIN,
92 BPF_OBJ_GET,
93 BPF_PROG_ATTACH,
94 BPF_PROG_DETACH,
95 BPF_PROG_TEST_RUN,
96 BPF_PROG_GET_NEXT_ID,
97 BPF_MAP_GET_NEXT_ID,
98 BPF_PROG_GET_FD_BY_ID,
99 BPF_MAP_GET_FD_BY_ID,
100 BPF_OBJ_GET_INFO_BY_FD,
101 BPF_PROG_QUERY,
102 BPF_RAW_TRACEPOINT_OPEN,
103 BPF_BTF_LOAD,
104 BPF_BTF_GET_FD_BY_ID,
105 BPF_TASK_FD_QUERY,
106};
107
108enum bpf_map_type {
109 BPF_MAP_TYPE_UNSPEC,
110 BPF_MAP_TYPE_HASH,
111 BPF_MAP_TYPE_ARRAY,
112 BPF_MAP_TYPE_PROG_ARRAY,
113 BPF_MAP_TYPE_PERF_EVENT_ARRAY,
114 BPF_MAP_TYPE_PERCPU_HASH,
115 BPF_MAP_TYPE_PERCPU_ARRAY,
116 BPF_MAP_TYPE_STACK_TRACE,
117 BPF_MAP_TYPE_CGROUP_ARRAY,
118 BPF_MAP_TYPE_LRU_HASH,
119 BPF_MAP_TYPE_LRU_PERCPU_HASH,
120 BPF_MAP_TYPE_LPM_TRIE,
121 BPF_MAP_TYPE_ARRAY_OF_MAPS,
122 BPF_MAP_TYPE_HASH_OF_MAPS,
123 BPF_MAP_TYPE_DEVMAP,
124 BPF_MAP_TYPE_SOCKMAP,
125 BPF_MAP_TYPE_CPUMAP,
126 BPF_MAP_TYPE_XSKMAP,
127 BPF_MAP_TYPE_SOCKHASH,
128 BPF_MAP_TYPE_CGROUP_STORAGE,
129 BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
130};
131
132enum bpf_prog_type {
133 BPF_PROG_TYPE_UNSPEC,
134 BPF_PROG_TYPE_SOCKET_FILTER,
135 BPF_PROG_TYPE_KPROBE,
136 BPF_PROG_TYPE_SCHED_CLS,
137 BPF_PROG_TYPE_SCHED_ACT,
138 BPF_PROG_TYPE_TRACEPOINT,
139 BPF_PROG_TYPE_XDP,
140 BPF_PROG_TYPE_PERF_EVENT,
141 BPF_PROG_TYPE_CGROUP_SKB,
142 BPF_PROG_TYPE_CGROUP_SOCK,
143 BPF_PROG_TYPE_LWT_IN,
144 BPF_PROG_TYPE_LWT_OUT,
145 BPF_PROG_TYPE_LWT_XMIT,
146 BPF_PROG_TYPE_SOCK_OPS,
147 BPF_PROG_TYPE_SK_SKB,
148 BPF_PROG_TYPE_CGROUP_DEVICE,
149 BPF_PROG_TYPE_SK_MSG,
150 BPF_PROG_TYPE_RAW_TRACEPOINT,
151 BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
152 BPF_PROG_TYPE_LWT_SEG6LOCAL,
153 BPF_PROG_TYPE_LIRC_MODE2,
154 BPF_PROG_TYPE_SK_REUSEPORT,
155};
156
157enum bpf_attach_type {
158 BPF_CGROUP_INET_INGRESS,
159 BPF_CGROUP_INET_EGRESS,
160 BPF_CGROUP_INET_SOCK_CREATE,
161 BPF_CGROUP_SOCK_OPS,
162 BPF_SK_SKB_STREAM_PARSER,
163 BPF_SK_SKB_STREAM_VERDICT,
164 BPF_CGROUP_DEVICE,
165 BPF_SK_MSG_VERDICT,
166 BPF_CGROUP_INET4_BIND,
167 BPF_CGROUP_INET6_BIND,
168 BPF_CGROUP_INET4_CONNECT,
169 BPF_CGROUP_INET6_CONNECT,
170 BPF_CGROUP_INET4_POST_BIND,
171 BPF_CGROUP_INET6_POST_BIND,
172 BPF_CGROUP_UDP4_SENDMSG,
173 BPF_CGROUP_UDP6_SENDMSG,
174 BPF_LIRC_MODE2,
175 __MAX_BPF_ATTACH_TYPE
176};
177
178#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
179
180/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
181 *
182 * NONE(default): No further bpf programs allowed in the subtree.
183 *
184 * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program,
185 * the program in this cgroup yields to sub-cgroup program.
186 *
187 * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program,
188 * that cgroup program gets run in addition to the program in this cgroup.
189 *
 * Only one program is allowed to be attached to a cgroup with
 * the NONE or BPF_F_ALLOW_OVERRIDE flag.
 * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will
 * release the old program and attach the new one. Attach flags have to match.
194 *
 * Multiple programs are allowed to be attached to a cgroup with
 * the BPF_F_ALLOW_MULTI flag. They are executed in FIFO order
 * (those that were attached first run first).
 * The programs of the sub-cgroup are executed first, then the programs of
 * this cgroup, and then the programs of the parent cgroup.
 * When a child program makes a decision (like picking TCP CA or sock bind),
 * the parent program has a chance to override it.
202 *
203 * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
204 * A cgroup with NONE doesn't allow any programs in sub-cgroups.
205 * Ex1:
206 * cgrp1 (MULTI progs A, B) ->
207 * cgrp2 (OVERRIDE prog C) ->
208 * cgrp3 (MULTI prog D) ->
209 * cgrp4 (OVERRIDE prog E) ->
210 * cgrp5 (NONE prog F)
211 * the event in cgrp5 triggers execution of F,D,A,B in that order.
212 * if prog F is detached, the execution is E,D,A,B
213 * if prog F and D are detached, the execution is E,A,B
214 * if prog F, E and D are detached, the execution is C,A,B
215 *
216 * All eligible programs are executed regardless of return code from
217 * earlier programs.
218 */
219#define BPF_F_ALLOW_OVERRIDE (1U << 0)
220#define BPF_F_ALLOW_MULTI (1U << 1)
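
/* Editorial illustration (not part of the UAPI): attaching an already loaded
 * program to a cgroup with BPF_F_ALLOW_MULTI from user space, assuming
 * prog_fd and cgroup_fd were obtained earlier (union bpf_attr and the bpf(2)
 * syscall are described further below):
 *
 *     union bpf_attr attr;
 *     int err;
 *
 *     memset(&attr, 0, sizeof(attr));
 *     attr.target_fd     = cgroup_fd;
 *     attr.attach_bpf_fd = prog_fd;
 *     attr.attach_type   = BPF_CGROUP_INET_INGRESS;
 *     attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *     err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */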
221
222/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
223 * verifier will perform strict alignment checking as if the kernel
224 * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set,
225 * and NET_IP_ALIGN defined to 2.
226 */
227#define BPF_F_STRICT_ALIGNMENT (1U << 0)
228
229/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
230#define BPF_PSEUDO_MAP_FD 1
231
232/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
233 * offset to another bpf function
234 */
235#define BPF_PSEUDO_CALL 1
236
237/* flags for BPF_MAP_UPDATE_ELEM command */
238#define BPF_ANY 0 /* create new element or update existing */
239#define BPF_NOEXIST 1 /* create new element if it didn't exist */
240#define BPF_EXIST 2 /* update existing element */
241
242/* flags for BPF_MAP_CREATE command */
243#define BPF_F_NO_PREALLOC (1U << 0)
244/* Instead of having one common LRU list in the
245 * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
246 * which can scale and perform better.
247 * Note, the LRU nodes (including free nodes) cannot be moved
248 * across different LRU lists.
249 */
250#define BPF_F_NO_COMMON_LRU (1U << 1)
251/* Specify numa node during map creation */
252#define BPF_F_NUMA_NODE (1U << 2)
253
254/* flags for BPF_PROG_QUERY */
255#define BPF_F_QUERY_EFFECTIVE (1U << 0)
256
257#define BPF_OBJ_NAME_LEN 16U
258
259/* Flags for accessing BPF object */
260#define BPF_F_RDONLY (1U << 3)
261#define BPF_F_WRONLY (1U << 4)
262
263/* Flag for stack_map, store build_id+offset instead of pointer */
264#define BPF_F_STACK_BUILD_ID (1U << 5)
265
266enum bpf_stack_build_id_status {
	/* user space needs an empty entry to identify the end of a trace */
268 BPF_STACK_BUILD_ID_EMPTY = 0,
269 /* with valid build_id and offset */
270 BPF_STACK_BUILD_ID_VALID = 1,
271 /* couldn't get build_id, fallback to ip */
272 BPF_STACK_BUILD_ID_IP = 2,
273};
274
275#define BPF_BUILD_ID_SIZE 20
276struct bpf_stack_build_id {
277 __s32 status;
278 unsigned char build_id[BPF_BUILD_ID_SIZE];
279 union {
280 __u64 offset;
281 __u64 ip;
282 };
283};
284
285union bpf_attr {
286 struct { /* anonymous struct used by BPF_MAP_CREATE command */
287 __u32 map_type; /* one of enum bpf_map_type */
288 __u32 key_size; /* size of key in bytes */
289 __u32 value_size; /* size of value in bytes */
290 __u32 max_entries; /* max number of entries in a map */
291 __u32 map_flags; /* BPF_MAP_CREATE related
292 * flags defined above.
293 */
294 __u32 inner_map_fd; /* fd pointing to the inner map */
295 __u32 numa_node; /* numa node (effective only if
296 * BPF_F_NUMA_NODE is set).
297 */
298 char map_name[BPF_OBJ_NAME_LEN];
299 __u32 map_ifindex; /* ifindex of netdev to create on */
300 __u32 btf_fd; /* fd pointing to a BTF type data */
301 __u32 btf_key_type_id; /* BTF type_id of the key */
302 __u32 btf_value_type_id; /* BTF type_id of the value */
303 };
304
305 struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
306 __u32 map_fd;
307 __aligned_u64 key;
308 union {
309 __aligned_u64 value;
310 __aligned_u64 next_key;
311 };
312 __u64 flags;
313 };
314
315 struct { /* anonymous struct used by BPF_PROG_LOAD command */
316 __u32 prog_type; /* one of enum bpf_prog_type */
317 __u32 insn_cnt;
318 __aligned_u64 insns;
319 __aligned_u64 license;
320 __u32 log_level; /* verbosity level of verifier */
321 __u32 log_size; /* size of user buffer */
322 __aligned_u64 log_buf; /* user supplied buffer */
323 __u32 kern_version; /* checked when prog_type=kprobe */
324 __u32 prog_flags;
325 char prog_name[BPF_OBJ_NAME_LEN];
326 __u32 prog_ifindex; /* ifindex of netdev to prep for */
327 /* For some prog types expected attach type must be known at
328 * load time to verify attach type specific parts of prog
329 * (context accesses, allowed helpers, etc).
330 */
331 __u32 expected_attach_type;
332 };
333
334 struct { /* anonymous struct used by BPF_OBJ_* commands */
335 __aligned_u64 pathname;
336 __u32 bpf_fd;
337 __u32 file_flags;
338 };
339
340 struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
341 __u32 target_fd; /* container object to attach to */
342 __u32 attach_bpf_fd; /* eBPF program to attach */
343 __u32 attach_type;
344 __u32 attach_flags;
345 };
346
347 struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
348 __u32 prog_fd;
349 __u32 retval;
350 __u32 data_size_in;
351 __u32 data_size_out;
352 __aligned_u64 data_in;
353 __aligned_u64 data_out;
354 __u32 repeat;
355 __u32 duration;
356 } test;
357
358 struct { /* anonymous struct used by BPF_*_GET_*_ID */
359 union {
360 __u32 start_id;
361 __u32 prog_id;
362 __u32 map_id;
363 __u32 btf_id;
364 };
365 __u32 next_id;
366 __u32 open_flags;
367 };
368
369 struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
370 __u32 bpf_fd;
371 __u32 info_len;
372 __aligned_u64 info;
373 } info;
374
375 struct { /* anonymous struct used by BPF_PROG_QUERY command */
376 __u32 target_fd; /* container object to query */
377 __u32 attach_type;
378 __u32 query_flags;
379 __u32 attach_flags;
380 __aligned_u64 prog_ids;
381 __u32 prog_cnt;
382 } query;
383
384 struct {
385 __u64 name;
386 __u32 prog_fd;
387 } raw_tracepoint;
388
389 struct { /* anonymous struct for BPF_BTF_LOAD */
390 __aligned_u64 btf;
391 __aligned_u64 btf_log_buf;
392 __u32 btf_size;
393 __u32 btf_log_size;
394 __u32 btf_log_level;
395 };
396
397 struct {
398 __u32 pid; /* input: pid */
399 __u32 fd; /* input: fd */
400 __u32 flags; /* input: flags */
401 __u32 buf_len; /* input/output: buf len */
402 __aligned_u64 buf; /* input/output:
403 * tp_name for tracepoint
404 * symbol for kprobe
405 * filename for uprobe
406 */
		__u32		prog_id;	/* output: prog_id */
408 __u32 fd_type; /* output: BPF_FD_TYPE_* */
409 __u64 probe_offset; /* output: probe_offset */
410 __u64 probe_addr; /* output: probe_addr */
411 } task_fd_query;
412} __attribute__((aligned(8)));
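
/* Editorial illustration (not part of the UAPI): from user space, every
 * command above is driven by filling the relevant part of union bpf_attr
 * and invoking the bpf(2) syscall. Creating a small hash map, for example
 * (the raw syscall(2) wrapper is used because glibc does not provide a
 * bpf() wrapper; the function name is made up):
 *
 *     #include <string.h>
 *     #include <unistd.h>
 *     #include <sys/syscall.h>
 *
 *     static int create_hash_map(void)
 *     {
 *             union bpf_attr attr;
 *
 *             memset(&attr, 0, sizeof(attr));
 *             attr.map_type    = BPF_MAP_TYPE_HASH;
 *             attr.key_size    = sizeof(__u32);
 *             attr.value_size  = sizeof(__u64);
 *             attr.max_entries = 1024;
 *
 *             // Returns a new map fd, or a negative value on error.
 *             return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *     }
 */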
413
414/* The description below is an attempt at providing documentation to eBPF
415 * developers about the multiple available eBPF helper functions. It can be
416 * parsed and used to produce a manual page. The workflow is the following,
417 * and requires the rst2man utility:
418 *
419 * $ ./scripts/bpf_helpers_doc.py \
420 * --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst
421 * $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7
422 * $ man /tmp/bpf-helpers.7
423 *
424 * Note that in order to produce this external documentation, some RST
425 * formatting is used in the descriptions to get "bold" and "italics" in
426 * manual pages. Also note that the few trailing white spaces are
427 * intentional, removing them would break paragraphs for rst2man.
428 *
429 * Start of BPF helper function descriptions:
430 *
431 * void *bpf_map_lookup_elem(struct bpf_map *map, const void *key)
432 * Description
433 * Perform a lookup in *map* for an entry associated to *key*.
434 * Return
435 * Map value associated to *key*, or **NULL** if no entry was
436 * found.
437 *
438 * int bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags)
439 * Description
440 * Add or update the value of the entry associated to *key* in
441 * *map* with *value*. *flags* is one of:
442 *
443 * **BPF_NOEXIST**
444 * The entry for *key* must not exist in the map.
445 * **BPF_EXIST**
446 * The entry for *key* must already exist in the map.
447 * **BPF_ANY**
448 * No condition on the existence of the entry for *key*.
449 *
 * Flag value **BPF_NOEXIST** cannot be used for maps of types
 * **BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY** (all
 * elements always exist); the helper would return an error.
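 *
 * A minimal usage sketch (editorial illustration, not part of the
 * original description), assuming a map named *counters* with
 * **__u32** keys and **__u64** values is declared elsewhere in the
 * program:
 *
 * ::
 *
 *     __u32 key = 0;
 *     __u64 init = 1, *val;
 *
 *     val = bpf_map_lookup_elem(&counters, &key);
 *     if (val)
 *             __sync_fetch_and_add(val, 1);
 *     else
 *             bpf_map_update_elem(&counters, &key, &init, BPF_NOEXIST);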
453 * Return
454 * 0 on success, or a negative error in case of failure.
455 *
456 * int bpf_map_delete_elem(struct bpf_map *map, const void *key)
457 * Description
458 * Delete entry with *key* from *map*.
459 * Return
460 * 0 on success, or a negative error in case of failure.
461 *
462 * int bpf_probe_read(void *dst, u32 size, const void *src)
463 * Description
464 * For tracing programs, safely attempt to read *size* bytes from
465 * address *src* and store the data in *dst*.
466 * Return
467 * 0 on success, or a negative error in case of failure.
468 *
469 * u64 bpf_ktime_get_ns(void)
470 * Description
471 * Return the time elapsed since system boot, in nanoseconds.
472 * Return
473 * Current *ktime*.
474 *
475 * int bpf_trace_printk(const char *fmt, u32 fmt_size, ...)
476 * Description
477 * This helper is a "printk()-like" facility for debugging. It
478 * prints a message defined by format *fmt* (of size *fmt_size*)
479 * to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if
480 * available. It can take up to three additional **u64**
 * arguments (as for any eBPF helper, the total number of arguments is
482 * limited to five).
483 *
484 * Each time the helper is called, it appends a line to the trace.
485 * The format of the trace is customizable, and the exact output
486 * one will get depends on the options set in
487 * *\/sys/kernel/debug/tracing/trace_options* (see also the
488 * *README* file under the same directory). However, it usually
489 * defaults to something like:
490 *
491 * ::
492 *
493 * telnet-470 [001] .N.. 419421.045894: 0x00000001: <formatted msg>
494 *
495 * In the above:
496 *
497 * * ``telnet`` is the name of the current task.
498 * * ``470`` is the PID of the current task.
499 * * ``001`` is the CPU number on which the task is
500 * running.
501 * * In ``.N..``, each character refers to a set of
502 * options (whether irqs are enabled, scheduling
503 * options, whether hard/softirqs are running, level of
504 * preempt_disabled respectively). **N** means that
505 * **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED**
506 * are set.
507 * * ``419421.045894`` is a timestamp.
508 * * ``0x00000001`` is a fake value used by BPF for the
509 * instruction pointer register.
510 * * ``<formatted msg>`` is the message formatted with
511 * *fmt*.
512 *
513 * The conversion specifiers supported by *fmt* are similar, but
514 * more limited than for printk(). They are **%d**, **%i**,
515 * **%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**, **%lld**,
516 * **%lli**, **%llu**, **%llx**, **%p**, **%s**. No modifier (size
517 * of field, padding with zeroes, etc.) is available, and the
518 * helper will return **-EINVAL** (but print nothing) if it
519 * encounters an unknown specifier.
520 *
 * Also, note that **bpf_trace_printk**\ () is slow, and should
 * only be used for debugging purposes. For this reason, a notice
 * block (spanning several lines) stating that the helper should
 * not be used "for production use" is printed to kernel logs the
 * first time this helper is used (or more precisely, when
 * **trace_printk**\ () buffers are allocated). For passing values
 * to user space, perf events should be preferred.
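 *
 * A minimal debugging sketch (editorial illustration), assuming a
 * program type with an *skb* context; the format string must live in
 * the program itself, e.g. on its stack:
 *
 * ::
 *
 *     char fmt[] = "skb len: %u\n";
 *
 *     bpf_trace_printk(fmt, sizeof(fmt), skb->len);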
528 * Return
529 * The number of bytes written to the buffer, or a negative error
530 * in case of failure.
531 *
532 * u32 bpf_get_prandom_u32(void)
533 * Description
534 * Get a pseudo-random number.
535 *
536 * From a security point of view, this helper uses its own
537 * pseudo-random internal state, and cannot be used to infer the
538 * seed of other random functions in the kernel. However, it is
539 * essential to note that the generator used by the helper is not
540 * cryptographically secure.
541 * Return
542 * A random 32-bit unsigned value.
543 *
544 * u32 bpf_get_smp_processor_id(void)
545 * Description
546 * Get the SMP (symmetric multiprocessing) processor id. Note that
547 * all programs run with preemption disabled, which means that the
548 * SMP processor id is stable during all the execution of the
549 * program.
550 * Return
551 * The SMP id of the processor running the program.
552 *
553 * int bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
554 * Description
555 * Store *len* bytes from address *from* into the packet
556 * associated to *skb*, at *offset*. *flags* are a combination of
557 * **BPF_F_RECOMPUTE_CSUM** (automatically recompute the
558 * checksum for the packet after storing the bytes) and
559 * **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\
560 * **->swhash** and *skb*\ **->l4hash** to 0).
561 *
 * A call to this helper is susceptible to change the underlying
563 * packet buffer. Therefore, at load time, all checks on pointers
564 * previously done by the verifier are invalidated and must be
565 * performed again, if the helper is used in combination with
566 * direct packet access.
567 * Return
568 * 0 on success, or a negative error in case of failure.
569 *
570 * int bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size)
571 * Description
572 * Recompute the layer 3 (e.g. IP) checksum for the packet
573 * associated to *skb*. Computation is incremental, so the helper
574 * must know the former value of the header field that was
575 * modified (*from*), the new value of this field (*to*), and the
576 * number of bytes (2 or 4) for this field, stored in *size*.
577 * Alternatively, it is possible to store the difference between
578 * the previous and the new values of the header field in *to*, by
579 * setting *from* and *size* to 0. For both methods, *offset*
580 * indicates the location of the IP checksum within the packet.
581 *
582 * This helper works in combination with **bpf_csum_diff**\ (),
583 * which does not update the checksum in-place, but offers more
584 * flexibility and can handle sizes larger than 2 or 4 for the
585 * checksum to update.
586 *
 * A call to this helper is susceptible to change the underlying
588 * packet buffer. Therefore, at load time, all checks on pointers
589 * previously done by the verifier are invalidated and must be
590 * performed again, if the helper is used in combination with
591 * direct packet access.
592 * Return
593 * 0 on success, or a negative error in case of failure.
594 *
595 * int bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags)
596 * Description
597 * Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the
598 * packet associated to *skb*. Computation is incremental, so the
599 * helper must know the former value of the header field that was
600 * modified (*from*), the new value of this field (*to*), and the
601 * number of bytes (2 or 4) for this field, stored on the lowest
602 * four bits of *flags*. Alternatively, it is possible to store
603 * the difference between the previous and the new values of the
604 * header field in *to*, by setting *from* and the four lowest
605 * bits of *flags* to 0. For both methods, *offset* indicates the
 * location of the IP checksum within the packet. In addition to
 * the size of the field, actual flags can be added (bitwise OR) to
 * *flags*. With **BPF_F_MARK_MANGLED_0**, a null checksum is left
609 * untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and
610 * for updates resulting in a null checksum the value is set to
611 * **CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates
612 * the checksum is to be computed against a pseudo-header.
613 *
614 * This helper works in combination with **bpf_csum_diff**\ (),
615 * which does not update the checksum in-place, but offers more
616 * flexibility and can handle sizes larger than 2 or 4 for the
617 * checksum to update.
618 *
 * A call to this helper is susceptible to change the underlying
620 * packet buffer. Therefore, at load time, all checks on pointers
621 * previously done by the verifier are invalidated and must be
622 * performed again, if the helper is used in combination with
623 * direct packet access.
624 * Return
625 * 0 on success, or a negative error in case of failure.
626 *
627 * int bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index)
628 * Description
629 * This special helper is used to trigger a "tail call", or in
630 * other words, to jump into another eBPF program. The same stack
631 * frame is used (but values on stack and in registers for the
632 * caller are not accessible to the callee). This mechanism allows
 * for program chaining, either to raise the maximum number of
634 * available eBPF instructions, or to execute given programs in
635 * conditional blocks. For security reasons, there is an upper
636 * limit to the number of successive tail calls that can be
637 * performed.
638 *
639 * Upon call of this helper, the program attempts to jump into a
640 * program referenced at index *index* in *prog_array_map*, a
641 * special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes
642 * *ctx*, a pointer to the context.
643 *
644 * If the call succeeds, the kernel immediately runs the first
645 * instruction of the new program. This is not a function call,
646 * and it never returns to the previous program. If the call
647 * fails, then the helper has no effect, and the caller continues
648 * to run its subsequent instructions. A call can fail if the
 * destination program for the jump does not exist (i.e. *index*
 * is greater than or equal to the number of entries in *prog_array_map*), or
651 * if the maximum number of tail calls has been reached for this
652 * chain of programs. This limit is defined in the kernel by the
653 * macro **MAX_TAIL_CALL_CNT** (not accessible to user space),
654 * which is currently set to 32.
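 *
 * Usage sketch (editorial illustration), assuming an XDP program and a
 * **BPF_MAP_TYPE_PROG_ARRAY** map named *jmp_table* populated from
 * user space:
 *
 * ::
 *
 *     bpf_tail_call(ctx, &jmp_table, 2);
 *
 *     // Only reached if the tail call failed, e.g. because slot 2
 *     // of jmp_table is empty or the tail call limit was reached.
 *     return XDP_PASS;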
655 * Return
656 * 0 on success, or a negative error in case of failure.
657 *
658 * int bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags)
659 * Description
660 * Clone and redirect the packet associated to *skb* to another
661 * net device of index *ifindex*. Both ingress and egress
662 * interfaces can be used for redirection. The **BPF_F_INGRESS**
663 * value in *flags* is used to make the distinction (ingress path
664 * is selected if the flag is present, egress path otherwise).
665 * This is the only flag supported for now.
666 *
667 * In comparison with **bpf_redirect**\ () helper,
668 * **bpf_clone_redirect**\ () has the associated cost of
669 * duplicating the packet buffer, but this can be executed out of
670 * the eBPF program. Conversely, **bpf_redirect**\ () is more
671 * efficient, but it is handled through an action code where the
672 * redirection happens only after the eBPF program has returned.
673 *
 * A call to this helper is susceptible to change the underlying
675 * packet buffer. Therefore, at load time, all checks on pointers
676 * previously done by the verifier are invalidated and must be
677 * performed again, if the helper is used in combination with
678 * direct packet access.
679 * Return
680 * 0 on success, or a negative error in case of failure.
681 *
682 * u64 bpf_get_current_pid_tgid(void)
683 * Return
684 * A 64-bit integer containing the current tgid and pid, and
685 * created as such:
686 * *current_task*\ **->tgid << 32 \|**
687 * *current_task*\ **->pid**.
688 *
689 * u64 bpf_get_current_uid_gid(void)
690 * Return
691 * A 64-bit integer containing the current GID and UID, and
692 * created as such: *current_gid* **<< 32 \|** *current_uid*.
693 *
694 * int bpf_get_current_comm(char *buf, u32 size_of_buf)
695 * Description
696 * Copy the **comm** attribute of the current task into *buf* of
697 * *size_of_buf*. The **comm** attribute contains the name of
698 * the executable (excluding the path) for the current task. The
699 * *size_of_buf* must be strictly positive. On success, the
700 * helper makes sure that the *buf* is NUL-terminated. On failure,
701 * it is filled with zeroes.
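 *
 * Typical usage sketch (editorial illustration):
 *
 * ::
 *
 *     char comm[16];
 *
 *     bpf_get_current_comm(comm, sizeof(comm));
 *     // comm now holds the NUL-terminated task name, e.g. for use
 *     // as a map key.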
702 * Return
703 * 0 on success, or a negative error in case of failure.
704 *
705 * u32 bpf_get_cgroup_classid(struct sk_buff *skb)
706 * Description
707 * Retrieve the classid for the current task, i.e. for the net_cls
708 * cgroup to which *skb* belongs.
709 *
710 * This helper can be used on TC egress path, but not on ingress.
711 *
712 * The net_cls cgroup provides an interface to tag network packets
713 * based on a user-provided identifier for all traffic coming from
714 * the tasks belonging to the related cgroup. See also the related
715 * kernel documentation, available from the Linux sources in file
716 * *Documentation/cgroup-v1/net_cls.txt*.
717 *
718 * The Linux kernel has two versions for cgroups: there are
719 * cgroups v1 and cgroups v2. Both are available to users, who can
720 * use a mixture of them, but note that the net_cls cgroup is for
721 * cgroup v1 only. This makes it incompatible with BPF programs
722 * run on cgroups, which is a cgroup-v2-only feature (a socket can
723 * only hold data for one version of cgroups at a time).
724 *
 * This helper is only available if the kernel was compiled with
726 * the **CONFIG_CGROUP_NET_CLASSID** configuration option set to
727 * "**y**" or to "**m**".
728 * Return
729 * The classid, or 0 for the default unconfigured classid.
730 *
731 * int bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
732 * Description
733 * Push a *vlan_tci* (VLAN tag control information) of protocol
734 * *vlan_proto* to the packet associated to *skb*, then update
735 * the checksum. Note that if *vlan_proto* is different from
736 * **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to
737 * be **ETH_P_8021Q**.
738 *
 * A call to this helper is susceptible to change the underlying
740 * packet buffer. Therefore, at load time, all checks on pointers
741 * previously done by the verifier are invalidated and must be
742 * performed again, if the helper is used in combination with
743 * direct packet access.
744 * Return
745 * 0 on success, or a negative error in case of failure.
746 *
747 * int bpf_skb_vlan_pop(struct sk_buff *skb)
748 * Description
749 * Pop a VLAN header from the packet associated to *skb*.
750 *
 * A call to this helper is susceptible to change the underlying
752 * packet buffer. Therefore, at load time, all checks on pointers
753 * previously done by the verifier are invalidated and must be
754 * performed again, if the helper is used in combination with
755 * direct packet access.
756 * Return
757 * 0 on success, or a negative error in case of failure.
758 *
759 * int bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
760 * Description
761 * Get tunnel metadata. This helper takes a pointer *key* to an
762 * empty **struct bpf_tunnel_key** of **size**, that will be
763 * filled with tunnel metadata for the packet associated to *skb*.
764 * The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which
765 * indicates that the tunnel is based on IPv6 protocol instead of
766 * IPv4.
767 *
768 * The **struct bpf_tunnel_key** is an object that generalizes the
769 * principal parameters used by various tunneling protocols into a
770 * single struct. This way, it can be used to easily make a
771 * decision based on the contents of the encapsulation header,
772 * "summarized" in this struct. In particular, it holds the IP
773 * address of the remote end (IPv4 or IPv6, depending on the case)
774 * in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also,
775 * this struct exposes the *key*\ **->tunnel_id**, which is
776 * generally mapped to a VNI (Virtual Network Identifier), making
777 * it programmable together with the **bpf_skb_set_tunnel_key**\
778 * () helper.
779 *
780 * Let's imagine that the following code is part of a program
781 * attached to the TC ingress interface, on one end of a GRE
782 * tunnel, and is supposed to filter out all messages coming from
783 * remote ends with IPv4 address other than 10.0.0.1:
784 *
785 * ::
786 *
787 * int ret;
788 * struct bpf_tunnel_key key = {};
789 *
790 * ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
791 * if (ret < 0)
792 * return TC_ACT_SHOT; // drop packet
793 *
794 * if (key.remote_ipv4 != 0x0a000001)
795 * return TC_ACT_SHOT; // drop packet
796 *
797 * return TC_ACT_OK; // accept packet
798 *
799 * This interface can also be used with all encapsulation devices
800 * that can operate in "collect metadata" mode: instead of having
801 * one network device per specific configuration, the "collect
802 * metadata" mode only requires a single device where the
803 * configuration can be extracted from this helper.
804 *
805 * This can be used together with various tunnels such as VXLan,
806 * Geneve, GRE or IP in IP (IPIP).
807 * Return
808 * 0 on success, or a negative error in case of failure.
809 *
810 * int bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
811 * Description
 * Populate tunnel metadata for the packet associated to *skb*. The
813 * tunnel metadata is set to the contents of *key*, of *size*. The
814 * *flags* can be set to a combination of the following values:
815 *
816 * **BPF_F_TUNINFO_IPV6**
817 * Indicate that the tunnel is based on IPv6 protocol
818 * instead of IPv4.
819 * **BPF_F_ZERO_CSUM_TX**
820 * For IPv4 packets, add a flag to tunnel metadata
821 * indicating that checksum computation should be skipped
822 * and checksum set to zeroes.
823 * **BPF_F_DONT_FRAGMENT**
824 * Add a flag to tunnel metadata indicating that the
825 * packet should not be fragmented.
826 * **BPF_F_SEQ_NUMBER**
827 * Add a flag to tunnel metadata indicating that a
828 * sequence number should be added to tunnel header before
829 * sending the packet. This flag was added for GRE
830 * encapsulation, but might be used with other protocols
831 * as well in the future.
832 *
833 * Here is a typical usage on the transmit path:
834 *
835 * ::
836 *
837 * struct bpf_tunnel_key key;
838 * populate key ...
839 * bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
840 * bpf_clone_redirect(skb, vxlan_dev_ifindex, 0);
841 *
842 * See also the description of the **bpf_skb_get_tunnel_key**\ ()
843 * helper for additional information.
844 * Return
845 * 0 on success, or a negative error in case of failure.
846 *
847 * u64 bpf_perf_event_read(struct bpf_map *map, u64 flags)
848 * Description
849 * Read the value of a perf event counter. This helper relies on a
850 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of
851 * the perf event counter is selected when *map* is updated with
852 * perf event file descriptors. The *map* is an array whose size
853 * is the number of available CPUs, and each cell contains a value
854 * relative to one CPU. The value to retrieve is indicated by
855 * *flags*, that contains the index of the CPU to look up, masked
856 * with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
857 * **BPF_F_CURRENT_CPU** to indicate that the value for the
858 * current CPU should be retrieved.
859 *
 * Note that before Linux 4.13, only hardware perf events can be
861 * retrieved.
862 *
863 * Also, be aware that the newer helper
864 * **bpf_perf_event_read_value**\ () is recommended over
865 * **bpf_perf_event_read**\ () in general. The latter has some ABI
866 * quirks where error and counter value are used as a return code
867 * (which is wrong to do since ranges may overlap). This issue is
868 * fixed with **bpf_perf_event_read_value**\ (), which at the same
869 * time provides more features over the **bpf_perf_event_read**\
870 * () interface. Please refer to the description of
871 * **bpf_perf_event_read_value**\ () for details.
872 * Return
873 * The value of the perf event counter read from the map, or a
874 * negative error code in case of failure.
875 *
876 * int bpf_redirect(u32 ifindex, u64 flags)
877 * Description
878 * Redirect the packet to another net device of index *ifindex*.
879 * This helper is somewhat similar to **bpf_clone_redirect**\
880 * (), except that the packet is not cloned, which provides
881 * increased performance.
882 *
883 * Except for XDP, both ingress and egress interfaces can be used
884 * for redirection. The **BPF_F_INGRESS** value in *flags* is used
885 * to make the distinction (ingress path is selected if the flag
886 * is present, egress path otherwise). Currently, XDP only
887 * supports redirection to the egress interface, and accepts no
888 * flag at all.
889 *
890 * The same effect can be attained with the more generic
891 * **bpf_redirect_map**\ (), which requires specific maps to be
892 * used but offers better performance.
893 * Return
894 * For XDP, the helper returns **XDP_REDIRECT** on success or
895 * **XDP_ABORTED** on error. For other program types, the values
896 * are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on
897 * error.
898 *
899 * u32 bpf_get_route_realm(struct sk_buff *skb)
900 * Description
 * Retrieve the realm of the route, that is to say the
 * **tclassid** field of the destination for the *skb*. The
 * identifier retrieved is a user-provided tag, similar to the
904 * one used with the net_cls cgroup (see description for
905 * **bpf_get_cgroup_classid**\ () helper), but here this tag is
906 * held by a route (a destination entry), not by a task.
907 *
908 * Retrieving this identifier works with the clsact TC egress hook
909 * (see also **tc-bpf(8)**), or alternatively on conventional
910 * classful egress qdiscs, but not on TC ingress path. In case of
911 * clsact TC egress hook, this has the advantage that, internally,
912 * the destination entry has not been dropped yet in the transmit
913 * path. Therefore, the destination entry does not need to be
914 * artificially held via **netif_keep_dst**\ () for a classful
915 * qdisc until the *skb* is freed.
916 *
917 * This helper is available only if the kernel was compiled with
918 * **CONFIG_IP_ROUTE_CLASSID** configuration option.
919 * Return
920 * The realm of the route for the packet associated to *skb*, or 0
921 * if none was found.
922 *
 * int bpf_perf_event_output(struct pt_regs *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
924 * Description
925 * Write raw *data* blob into a special BPF perf event held by
926 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
927 * event must have the following attributes: **PERF_SAMPLE_RAW**
928 * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
929 * **PERF_COUNT_SW_BPF_OUTPUT** as **config**.
930 *
931 * The *flags* are used to indicate the index in *map* for which
932 * the value must be put, masked with **BPF_F_INDEX_MASK**.
933 * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
934 * to indicate that the index of the current CPU core should be
935 * used.
936 *
937 * The value to write, of *size*, is passed through eBPF stack and
938 * pointed by *data*.
939 *
940 * The context of the program *ctx* needs also be passed to the
941 * helper.
942 *
943 * On user space, a program willing to read the values needs to
944 * call **perf_event_open**\ () on the perf event (either for
945 * one or for all CPUs) and to store the file descriptor into the
946 * *map*. This must be done before the eBPF program can send data
947 * into it. An example is available in file
948 * *samples/bpf/trace_output_user.c* in the Linux kernel source
949 * tree (the eBPF program counterpart is in
950 * *samples/bpf/trace_output_kern.c*).
951 *
952 * **bpf_perf_event_output**\ () achieves better performance
953 * than **bpf_trace_printk**\ () for sharing data with user
 * space, and is much better suited for streaming data from eBPF
955 * programs.
956 *
957 * Note that this helper is not restricted to tracing use cases
958 * and can be used with programs attached to TC or XDP as well,
959 * where it allows for passing data to user space listeners. Data
960 * can be:
961 *
962 * * Only custom structs,
963 * * Only the packet payload, or
964 * * A combination of both.
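 *
 * Sketch of the eBPF side (editorial illustration; the struct and map
 * names are made up), pushing a small custom struct to a
 * **BPF_MAP_TYPE_PERF_EVENT_ARRAY** map named *events*:
 *
 * ::
 *
 *     struct event {
 *             __u32 pid;
 *             __u64 ts;
 *     } e;
 *
 *     e.pid = bpf_get_current_pid_tgid() >> 32;
 *     e.ts  = bpf_ktime_get_ns();
 *     bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *                           &e, sizeof(e));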
965 * Return
966 * 0 on success, or a negative error in case of failure.
967 *
968 * int bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len)
969 * Description
970 * This helper was provided as an easy way to load data from a
971 * packet. It can be used to load *len* bytes from *offset* from
972 * the packet associated to *skb*, into the buffer pointed by
973 * *to*.
974 *
975 * Since Linux 4.7, usage of this helper has mostly been replaced
976 * by "direct packet access", enabling packet data to be
977 * manipulated with *skb*\ **->data** and *skb*\ **->data_end**
978 * pointing respectively to the first byte of packet data and to
979 * the byte after the last byte of packet data. However, it
980 * remains useful if one wishes to read large quantities of data
981 * at once from a packet into the eBPF stack.
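 *
 * Sketch (editorial illustration) of loading the Ethernet header,
 * **struct ethhdr** from *<linux/if_ether.h>*, into a stack buffer
 * instead of using direct packet access:
 *
 * ::
 *
 *     struct ethhdr eth;
 *
 *     if (bpf_skb_load_bytes(skb, 0, &eth, sizeof(eth)) < 0)
 *             return TC_ACT_SHOT; // truncated packet, drop it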
982 * Return
983 * 0 on success, or a negative error in case of failure.
984 *
 * int bpf_get_stackid(struct pt_regs *ctx, struct bpf_map *map, u64 flags)
986 * Description
987 * Walk a user or a kernel stack and return its id. To achieve
988 * this, the helper needs *ctx*, which is a pointer to the context
989 * on which the tracing program is executed, and a pointer to a
990 * *map* of type **BPF_MAP_TYPE_STACK_TRACE**.
991 *
992 * The last argument, *flags*, holds the number of stack frames to
993 * skip (from 0 to 255), masked with
994 * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
995 * a combination of the following flags:
996 *
997 * **BPF_F_USER_STACK**
998 * Collect a user space stack instead of a kernel stack.
999 * **BPF_F_FAST_STACK_CMP**
1000 * Compare stacks by hash only.
1001 * **BPF_F_REUSE_STACKID**
1002 * If two different stacks hash into the same *stackid*,
1003 * discard the old one.
1004 *
1005 * The stack id retrieved is a 32 bit long integer handle which
1006 * can be further combined with other data (including other stack
1007 * ids) and used as a key into maps. This can be useful for
1008 * generating a variety of graphs (such as flame graphs or off-cpu
1009 * graphs).
1010 *
1011 * For walking a stack, this helper is an improvement over
1012 * **bpf_probe_read**\ (), which can be used with unrolled loops
1013 * but is not efficient and consumes a lot of eBPF instructions.
1014 * Instead, **bpf_get_stackid**\ () can collect up to
1015 * **PERF_MAX_STACK_DEPTH** both kernel and user frames. Note that
1016 * this limit can be controlled with the **sysctl** program, and
1017 * that it should be manually increased in order to profile long
1018 * user stacks (such as stacks for Java programs). To do so, use:
1019 *
1020 * ::
1021 *
1022 * # sysctl kernel.perf_event_max_stack=<new value>
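 *
 * Usage sketch (editorial illustration), assuming a
 * **BPF_MAP_TYPE_STACK_TRACE** map named *stack_traces* is declared
 * elsewhere:
 *
 * ::
 *
 *     int stackid;
 *
 *     stackid = bpf_get_stackid(ctx, &stack_traces, BPF_F_USER_STACK);
 *     if (stackid < 0)
 *             return 0; // could not capture the stack
 *     // stackid can now be used as a key into other maps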
1023 * Return
1024 * The positive or null stack id on success, or a negative error
1025 * in case of failure.
1026 *
1027 * s64 bpf_csum_diff(__be32 *from, u32 from_size, __be32 *to, u32 to_size, __wsum seed)
1028 * Description
1029 * Compute a checksum difference, from the raw buffer pointed by
1030 * *from*, of length *from_size* (that must be a multiple of 4),
1031 * towards the raw buffer pointed by *to*, of size *to_size*
1032 * (same remark). An optional *seed* can be added to the value
1033 * (this can be cascaded, the seed may come from a previous call
1034 * to the helper).
1035 *
1036 * This is flexible enough to be used in several ways:
1037 *
1038 * * With *from_size* == 0, *to_size* > 0 and *seed* set to
1039 * checksum, it can be used when pushing new data.
1040 * * With *from_size* > 0, *to_size* == 0 and *seed* set to
1041 * checksum, it can be used when removing data from a packet.
1042 * * With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it
1043 * can be used to compute a diff. Note that *from_size* and
1044 * *to_size* do not need to be equal.
1045 *
1046 * This helper can be used in combination with
1047 * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to
1048 * which one can feed in the difference computed with
1049 * **bpf_csum_diff**\ ().
1050 * Return
1051 * The checksum result, or a negative error code in case of
1052 * failure.
1053 *
1054 * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size)
1055 * Description
1056 * Retrieve tunnel options metadata for the packet associated to
1057 * *skb*, and store the raw tunnel option data to the buffer *opt*
1058 * of *size*.
1059 *
1060 * This helper can be used with encapsulation devices that can
1061 * operate in "collect metadata" mode (please refer to the related
1062 * note in the description of **bpf_skb_get_tunnel_key**\ () for
1063 * more details). A particular example where this can be used is
1064 * in combination with the Geneve encapsulation protocol, where it
 * allows for pushing (with the **bpf_skb_set_tunnel_opt**\ () helper)
1066 * and retrieving arbitrary TLVs (Type-Length-Value headers) from
1067 * the eBPF program. This allows for full customization of these
1068 * headers.
1069 * Return
1070 * The size of the option data retrieved.
1071 *
1072 * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size)
1073 * Description
1074 * Set tunnel options metadata for the packet associated to *skb*
1075 * to the option data contained in the raw buffer *opt* of *size*.
1076 *
1077 * See also the description of the **bpf_skb_get_tunnel_opt**\ ()
1078 * helper for additional information.
1079 * Return
1080 * 0 on success, or a negative error in case of failure.
1081 *
1082 * int bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags)
1083 * Description
1084 * Change the protocol of the *skb* to *proto*. Currently
1085 * supported are transition from IPv4 to IPv6, and from IPv6 to
1086 * IPv4. The helper takes care of the groundwork for the
1087 * transition, including resizing the socket buffer. The eBPF
1088 * program is expected to fill the new headers, if any, via
1089 * **skb_store_bytes**\ () and to recompute the checksums with
1090 * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\
1091 * (). The main case for this helper is to perform NAT64
1092 * operations out of an eBPF program.
1093 *
1094 * Internally, the GSO type is marked as dodgy so that headers are
1095 * checked and segments are recalculated by the GSO/GRO engine.
1096 * The size for GSO target is adapted as well.
1097 *
1098 * All values for *flags* are reserved for future usage, and must
1099 * be left at zero.
1100 *
 * A call to this helper is susceptible to change the underlying
1102 * packet buffer. Therefore, at load time, all checks on pointers
1103 * previously done by the verifier are invalidated and must be
1104 * performed again, if the helper is used in combination with
1105 * direct packet access.
1106 * Return
1107 * 0 on success, or a negative error in case of failure.
1108 *
1109 * int bpf_skb_change_type(struct sk_buff *skb, u32 type)
1110 * Description
1111 * Change the packet type for the packet associated to *skb*. This
1112 * comes down to setting *skb*\ **->pkt_type** to *type*, except
 * the eBPF program does not have write access to *skb*\
 * **->pkt_type** besides this helper. Using a helper here allows
1115 * for graceful handling of errors.
1116 *
1117 * The major use case is to change incoming *skb*s to
1118 * **PACKET_HOST** in a programmatic way instead of having to
1119 * recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for
1120 * example.
1121 *
1122 * Note that *type* only allows certain values. At this time, they
1123 * are:
1124 *
1125 * **PACKET_HOST**
1126 * Packet is for us.
1127 * **PACKET_BROADCAST**
1128 * Send packet to all.
1129 * **PACKET_MULTICAST**
1130 * Send packet to group.
1131 * **PACKET_OTHERHOST**
1132 * Send packet to someone else.
1133 * Return
1134 * 0 on success, or a negative error in case of failure.
1135 *
1136 * int bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index)
1137 * Description
1138 * Check whether *skb* is a descendant of the cgroup2 held by
1139 * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
1140 * Return
1141 * The return value depends on the result of the test, and can be:
1142 *
1143 * * 0, if the *skb* failed the cgroup2 descendant test.
1144 * * 1, if the *skb* succeeded the cgroup2 descendant test.
1145 * * A negative error code, if an error occurred.
1146 *
1147 * u32 bpf_get_hash_recalc(struct sk_buff *skb)
1148 * Description
1149 * Retrieve the hash of the packet, *skb*\ **->hash**. If it is
1150 * not set, in particular if the hash was cleared due to mangling,
1151 * recompute this hash. Later accesses to the hash can be done
1152 * directly with *skb*\ **->hash**.
1153 *
1154 * Calling **bpf_set_hash_invalid**\ (), changing a packet
1155 * prototype with **bpf_skb_change_proto**\ (), or calling
1156 * **bpf_skb_store_bytes**\ () with the
1157 * **BPF_F_INVALIDATE_HASH** are actions susceptible to clear
1158 * the hash and to trigger a new computation for the next call to
1159 * **bpf_get_hash_recalc**\ ().
1160 * Return
1161 * The 32-bit hash.
1162 *
1163 * u64 bpf_get_current_task(void)
1164 * Return
1165 * A pointer to the current task struct.
1166 *
1167 * int bpf_probe_write_user(void *dst, const void *src, u32 len)
1168 * Description
1169 * Attempt in a safe way to write *len* bytes from the buffer
1170 * *src* to *dst* in memory. It only works for threads that are in
1171 * user context, and *dst* must be a valid user space address.
1172 *
1173 * This helper should not be used to implement any kind of
1174 * security mechanism because of TOC-TOU attacks, but rather to
1175 * debug, divert, and manipulate execution of semi-cooperative
1176 * processes.
1177 *
1178 * Keep in mind that this feature is meant for experiments, and it
1179 * has a risk of crashing the system and running programs.
1180 * Therefore, when an eBPF program using this helper is attached,
1181 * a warning including PID and process name is printed to kernel
1182 * logs.
1183 * Return
1184 * 0 on success, or a negative error in case of failure.
1185 *
1186 * int bpf_current_task_under_cgroup(struct bpf_map *map, u32 index)
1187 * Description
 * Check whether the probe is being run in the context of a given
1189 * subset of the cgroup2 hierarchy. The cgroup2 to test is held by
1190 * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
1191 * Return
1192 * The return value depends on the result of the test, and can be:
1193 *
 * * 1, if current task belongs to the cgroup2.
 * * 0, if current task does not belong to the cgroup2.
1196 * * A negative error code, if an error occurred.
1197 *
1198 * int bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
1199 * Description
1200 * Resize (trim or grow) the packet associated to *skb* to the
1201 * new *len*. The *flags* are reserved for future usage, and must
1202 * be left at zero.
1203 *
1204 * The basic idea is that the helper performs the needed work to
1205 * change the size of the packet, then the eBPF program rewrites
1206 * the rest via helpers like **bpf_skb_store_bytes**\ (),
1207 * **bpf_l3_csum_replace**\ (), **bpf_l3_csum_replace**\ ()
1208 * and others. This helper is a slow path utility intended for
1209 * replies with control messages. And because it is targeted for
1210 * slow path, the helper itself can afford to be slow: it
1211 * implicitly linearizes, unclones and drops offloads from the
1212 * *skb*.
1213 *
 * A call to this helper is susceptible to change the underlying
1215 * packet buffer. Therefore, at load time, all checks on pointers
1216 * previously done by the verifier are invalidated and must be
1217 * performed again, if the helper is used in combination with
1218 * direct packet access.
1219 * Return
1220 * 0 on success, or a negative error in case of failure.
1221 *
1222 * int bpf_skb_pull_data(struct sk_buff *skb, u32 len)
1223 * Description
1224 * Pull in non-linear data in case the *skb* is non-linear and not
1225 * all of *len* are part of the linear section. Make *len* bytes
1226 * from *skb* readable and writable. If a zero value is passed for
1227 * *len*, then the whole length of the *skb* is pulled.
1228 *
1229 * This helper is only needed for reading and writing with direct
1230 * packet access.
1231 *
1232 * For direct packet access, testing that offsets to access
1233 * are within packet boundaries (test on *skb*\ **->data_end**) is
1234 * susceptible to fail if offsets are invalid, or if the requested
1235 * data is in non-linear parts of the *skb*. On failure the
1236 * program can just bail out, or in the case of a non-linear
1237 * buffer, use a helper to make the data available. The
1238 * **bpf_skb_load_bytes**\ () helper is a first solution to access
 * the data. Another one consists in using **bpf_skb_pull_data**\ ()
 * to pull in the non-linear parts once, then retesting and
 * eventually accessing the data.
1242 *
1243 * At the same time, this also makes sure the *skb* is uncloned,
1244 * which is a necessary condition for direct write. As this needs
1245 * to be an invariant for the write part only, the verifier
1246 * detects writes and adds a prologue that is calling
1247 * **bpf_skb_pull_data()** to effectively unclone the *skb* from
1248 * the very beginning in case it is indeed cloned.
1249 *
 * A call to this helper is susceptible to change the underlying
1251 * packet buffer. Therefore, at load time, all checks on pointers
1252 * previously done by the verifier are invalidated and must be
1253 * performed again, if the helper is used in combination with
1254 * direct packet access.
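 *
 * Typical retest pattern (editorial illustration), assuming *data* and
 * *data_end* were initialized from *skb*\ **->data** and *skb*\
 * **->data_end**, and **ETH_HLEN** (14) comes from
 * *<linux/if_ether.h>*:
 *
 * ::
 *
 *     if (data + ETH_HLEN > data_end) {
 *             if (bpf_skb_pull_data(skb, ETH_HLEN) < 0)
 *                     return TC_ACT_SHOT;
 *             data = (void *)(long)skb->data;
 *             data_end = (void *)(long)skb->data_end;
 *             if (data + ETH_HLEN > data_end)
 *                     return TC_ACT_SHOT;
 *     }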
1255 * Return
1256 * 0 on success, or a negative error in case of failure.
1257 *
1258 * s64 bpf_csum_update(struct sk_buff *skb, __wsum csum)
1259 * Description
1260 * Add the checksum *csum* into *skb*\ **->csum** in case the
1261 * driver has supplied a checksum for the entire packet into that
1262 * field. Return an error otherwise. This helper is intended to be
1263 * used in combination with **bpf_csum_diff**\ (), in particular
1264 * when the checksum needs to be updated after data has been
1265 * written into the packet through direct packet access.
1266 * Return
1267 * The checksum on success, or a negative error code in case of
1268 * failure.
1269 *
1270 * void bpf_set_hash_invalid(struct sk_buff *skb)
1271 * Description
1272 * Invalidate the current *skb*\ **->hash**. It can be used after
1273 * mangling on headers through direct packet access, in order to
1274 * indicate that the hash is outdated and to trigger a
1275 * recalculation the next time the kernel tries to access this
1276 * hash or when the **bpf_get_hash_recalc**\ () helper is called.
1277 *
1278 * int bpf_get_numa_node_id(void)
1279 * Description
1280 * Return the id of the current NUMA node. The primary use case
1281 * for this helper is the selection of sockets for the local NUMA
1282 * node, when the program is attached to sockets using the
1283 * **SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**),
1284 * but the helper is also available to other eBPF program types,
1285 * similarly to **bpf_get_smp_processor_id**\ ().
1286 * Return
1287 * The id of current NUMA node.
1288 *
1289 * int bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags)
1290 * Description
1291 * Grows headroom of packet associated to *skb* and adjusts the
1292 * offset of the MAC header accordingly, adding *len* bytes of
1293 * space. It automatically extends and reallocates memory as
1294 * required.
1295 *
1296 * This helper can be used on a layer 3 *skb* to push a MAC header
1297 * for redirection into a layer 2 device.
1298 *
1299 * All values for *flags* are reserved for future usage, and must
1300 * be left at zero.
1301 *
 * A call to this helper is susceptible to change the underlying
1303 * packet buffer. Therefore, at load time, all checks on pointers
1304 * previously done by the verifier are invalidated and must be
1305 * performed again, if the helper is used in combination with
1306 * direct packet access.
1307 * Return
1308 * 0 on success, or a negative error in case of failure.
1309 *
1310 * int bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta)
1311 * Description
1312 * Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that
1313 * it is possible to use a negative value for *delta*. This helper
1314 * can be used to prepare the packet for pushing or popping
1315 * headers.
1316 *
 * A call to this helper is susceptible to change the underlying
1318 * packet buffer. Therefore, at load time, all checks on pointers
1319 * previously done by the verifier are invalidated and must be
1320 * performed again, if the helper is used in combination with
1321 * direct packet access.
1322 * Return
1323 * 0 on success, or a negative error in case of failure.
1324 *
1325 * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
1326 * Description
1327 * Copy a NUL terminated string from an unsafe address
1328 * *unsafe_ptr* to *dst*. The *size* should include the
1329 * terminating NUL byte. In case the string length is smaller than
1330 * *size*, the target is not padded with further NUL bytes. If the
1331 * string length is larger than *size*, just *size*-1 bytes are
1332 * copied and the last byte is set to NUL.
1333 *
1334 * On success, the length of the copied string is returned. This
1335 * makes this helper useful in tracing programs for reading
1336 * strings, and more importantly to get its length at runtime. See
1337 * the following snippet:
1338 *
1339 * ::
1340 *
1341 * SEC("kprobe/sys_open")
1342 * void bpf_sys_open(struct pt_regs *ctx)
1343 * {
1344 * char buf[PATHLEN]; // PATHLEN is defined to 256
1345 * int res = bpf_probe_read_str(buf, sizeof(buf),
1346 * ctx->di);
1347 *
1348 * // Consume buf, for example push it to
1349 * // userspace via bpf_perf_event_output(); we
1350 * // can use res (the string length) as event
1351 * // size, after checking its boundaries.
1352 * }
1353 *
 * In comparison, using the **bpf_probe_read**\ () helper here instead
 * to read the string would require estimating the length at
1356 * compile time, and would often result in copying more memory
1357 * than necessary.
1358 *
1359 * Another useful use case is when parsing individual process
1360 * arguments or individual environment variables navigating
1361 * *current*\ **->mm->arg_start** and *current*\
1362 * **->mm->env_start**: using this helper and the return value,
1363 * one can quickly iterate at the right offset of the memory area.
1364 * Return
1365 * On success, the strictly positive length of the string,
1366 * including the trailing NUL character. On error, a negative
1367 * value.
1368 *
1369 * u64 bpf_get_socket_cookie(struct sk_buff *skb)
1370 * Description
1371 * If the **struct sk_buff** pointed by *skb* has a known socket,
1372 * retrieve the cookie (generated by the kernel) of this socket.
1373 * If no cookie has been set yet, generate a new cookie. Once
1374 * generated, the socket cookie remains stable for the life of the
1375 * socket. This helper can be useful for monitoring per socket
1376 * networking traffic statistics as it provides a unique socket
1377 * identifier per namespace.
1378 * Return
1379 * An 8-byte long non-decreasing number on success, or 0 if the
1380 * socket field is missing inside *skb*.
1381 *
1382 * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
1383 * Description
1384 * Equivalent to bpf_get_socket_cookie() helper that accepts
1385 * *skb*, but gets socket from **struct bpf_sock_addr** context.
1386 * Return
1387 * An 8-byte long non-decreasing number.
1388 *
1389 * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
1390 * Description
1391 * Equivalent to bpf_get_socket_cookie() helper that accepts
1392 * *skb*, but gets socket from **struct bpf_sock_ops** context.
1393 * Return
1394 * An 8-byte long non-decreasing number.
1395 *
1396 * u32 bpf_get_socket_uid(struct sk_buff *skb)
1397 * Return
1398 * The owner UID of the socket associated to *skb*. If the socket
1399 * is **NULL**, or if it is not a full socket (i.e. if it is a
1400 * time-wait or a request socket instead), **overflowuid** value
1401 * is returned (note that **overflowuid** might also be the actual
1402 * UID value for the socket).
1403 *
1404 * u32 bpf_set_hash(struct sk_buff *skb, u32 hash)
1405 * Description
1406 * Set the full hash for *skb* (set the field *skb*\ **->hash**)
1407 * to value *hash*.
1408 * Return
1409 * 0
1410 *
1411 * int bpf_setsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen)
1412 * Description
1413 * Emulate a call to **setsockopt()** on the socket associated to
1414 * *bpf_socket*, which must be a full socket. The *level* at
1415 * which the option resides and the name *optname* of the option
1416 * must be specified, see **setsockopt(2)** for more information.
1417 * The option value of length *optlen* is pointed by *optval*.
1418 *
1419 * This helper actually implements a subset of **setsockopt()**.
1420 * It supports the following *level*\ s:
1421 *
1422 * * **SOL_SOCKET**, which supports the following *optname*\ s:
1423 * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**,
1424 * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**.
1425 * * **IPPROTO_TCP**, which supports the following *optname*\ s:
1426 * **TCP_CONGESTION**, **TCP_BPF_IW**,
1427 * **TCP_BPF_SNDCWND_CLAMP**.
1428 * * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
1429 * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
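 *
 * As an illustrative sketch (not part of this file; SEC() and the
 * IPPROTO_TCP/TCP_CONGESTION constants are assumed to come from the
 * usual example and system headers), a sock_ops program could switch
 * new active connections to another congestion control algorithm:
 *
 * ::
 *
 *	SEC("sockops")
 *	int set_cc(struct bpf_sock_ops *skops)
 *	{
 *		char cc[] = "cubic";
 *
 *		if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
 *			bpf_setsockopt(skops, IPPROTO_TCP, TCP_CONGESTION,
 *				       cc, sizeof(cc));
 *		return 1;
 *	}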
1430 * Return
1431 * 0 on success, or a negative error in case of failure.
1432 *
1433 * int bpf_skb_adjust_room(struct sk_buff *skb, u32 len_diff, u32 mode, u64 flags)
1434 * Description
1435 * Grow or shrink the room for data in the packet associated to
1436 * *skb* by *len_diff*, and according to the selected *mode*.
1437 *
1438 * There is a single supported mode at this time:
1439 *
1440 * * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer
1441 * (room space is added or removed below the layer 3 header).
1442 *
1443 * All values for *flags* are reserved for future usage, and must
1444 * be left at zero.
1445 *
1446 * A call to this helper may change the underlying
1447 * packet buffer. Therefore, at load time, all checks on pointers
1448 * previously done by the verifier are invalidated and must be
1449 * performed again, if the helper is used in combination with
1450 * direct packet access.
1451 * Return
1452 * 0 on success, or a negative error in case of failure.
1453 *
1454 * int bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags)
1455 * Description
1456 * Redirect the packet to the endpoint referenced by *map* at
1457 * index *key*. Depending on its type, this *map* can contain
1458 * references to net devices (for forwarding packets through other
1459 * ports), or to CPUs (for redirecting XDP frames to another CPU;
1460 * but this is only implemented for native XDP (with driver
1461 * support) as of this writing).
1462 *
1463 * All values for *flags* are reserved for future usage, and must
1464 * be left at zero.
1465 *
1466 * When used to redirect packets to net devices, this helper
1467 * provides a high performance increase over **bpf_redirect**\ ().
1468 * This is due to various implementation details of the underlying
1469 * mechanisms, one of which is the fact that **bpf_redirect_map**\
1470 * () tries to send packets as a "bulk" to the device.
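 *
 * A minimal sketch (the map definition below follows the convention
 * of the kernel samples, it is not defined in this file): an XDP
 * program forwarding every packet to the interface stored at index 0
 * of a **BPF_MAP_TYPE_DEVMAP**:
 *
 * ::
 *
 *	struct bpf_map_def SEC("maps") tx_port = {
 *		.type = BPF_MAP_TYPE_DEVMAP,
 *		.key_size = sizeof(int),
 *		.value_size = sizeof(int),
 *		.max_entries = 1,
 *	};
 *
 *	SEC("xdp")
 *	int xdp_redirect_0(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&tx_port, 0, 0);
 *	}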
1471 * Return
1472 * **XDP_REDIRECT** on success, or **XDP_ABORTED** on error.
1473 *
1474 * int bpf_sk_redirect_map(struct bpf_map *map, u32 key, u64 flags)
1475 * Description
1476 * Redirect the packet to the socket referenced by *map* (of type
1477 * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
1478 * egress interfaces can be used for redirection. The
1479 * **BPF_F_INGRESS** value in *flags* is used to make the
1480 * distinction (ingress path is selected if the flag is present,
1481 * egress path otherwise). This is the only flag supported for now.
1482 * Return
1483 * **SK_PASS** on success, or **SK_DROP** on error.
1484 *
1485 * int bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
1486 * Description
1487 * Add an entry to, or update a *map* referencing sockets. The
1488 * *skops* is used as a new value for the entry associated to
1489 * *key*. *flags* is one of:
1490 *
1491 * **BPF_NOEXIST**
1492 * The entry for *key* must not exist in the map.
1493 * **BPF_EXIST**
1494 * The entry for *key* must already exist in the map.
1495 * **BPF_ANY**
1496 * No condition on the existence of the entry for *key*.
1497 *
1498 * If the *map* has eBPF programs (parser and verdict), those will
1499 * be inherited by the socket being added. If the socket is
1500 * already attached to eBPF programs, this results in an error.
1501 * Return
1502 * 0 on success, or a negative error in case of failure.
1503 *
1504 * int bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta)
1505 * Description
1506 * Adjust the address pointed by *xdp_md*\ **->data_meta** by
1507 * *delta* (which can be positive or negative). Note that this
1508 * operation modifies the address stored in *xdp_md*\ **->data**,
1509 * so the latter must be loaded only after the helper has been
1510 * called.
1511 *
1512 * The use of *xdp_md*\ **->data_meta** is optional and programs
1513 * are not required to use it. The rationale is that when the
1514 * packet is processed with XDP (e.g. as a DoS filter), it is
1515 * possible to push further meta data along with it before passing
1516 * to the stack, and to give the guarantee that an ingress eBPF
1517 * program attached as a TC classifier on the same device can pick
1518 * this up for further post-processing. Since TC works with socket
1519 * buffers, it remains possible to set from XDP the **mark** or
1520 * **priority** values, or other fields of the socket buffer.
1521 * Having this scratch space generic and programmable allows for
1522 * more flexibility as the user is free to store whatever meta
1523 * data they need.
1524 *
1525 * A call to this helper may change the underlying
1526 * packet buffer. Therefore, at load time, all checks on pointers
1527 * previously done by the verifier are invalidated and must be
1528 * performed again, if the helper is used in combination with
1529 * direct packet access.
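 *
 * For instance (illustrative sketch only), an XDP program could
 * reserve four bytes of metadata and store a mark there for a TC
 * classifier to read back from *skb*\ **->data_meta**:
 *
 * ::
 *
 *	SEC("xdp")
 *	int xdp_set_meta(struct xdp_md *ctx)
 *	{
 *		void *data_meta, *data;
 *
 *		if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(__u32)))
 *			return XDP_ABORTED;
 *
 *		data_meta = (void *)(long)ctx->data_meta;
 *		data = (void *)(long)ctx->data;
 *		if (data_meta + sizeof(__u32) > data)
 *			return XDP_ABORTED;
 *
 *		*(__u32 *)data_meta = 42;	// arbitrary mark
 *		return XDP_PASS;
 *	}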
1530 * Return
1531 * 0 on success, or a negative error in case of failure.
1532 *
1533 * int bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size)
1534 * Description
1535 * Read the value of a perf event counter, and store it into *buf*
1536 * of size *buf_size*. This helper relies on a *map* of type
1537 * **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event
1538 * counter is selected when *map* is updated with perf event file
1539 * descriptors. The *map* is an array whose size is the number of
1540 * available CPUs, and each cell contains a value relative to one
1541 * CPU. The value to retrieve is indicated by *flags*, that
1542 * contains the index of the CPU to look up, masked with
1543 * **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to
1544 * **BPF_F_CURRENT_CPU** to indicate that the value for the
1545 * current CPU should be retrieved.
1546 *
1547 * This helper behaves similarly to the
1548 * **bpf_perf_event_read**\ () helper, except that instead of
1549 * just returning the value observed, it fills the *buf*
1550 * structure. This allows for additional data to be retrieved: in
1551 * particular, the enabled and running times (in *buf*\
1552 * **->enabled** and *buf*\ **->running**, respectively) are
1553 * copied. In general, **bpf_perf_event_read_value**\ () is
1554 * recommended over **bpf_perf_event_read**\ (), which has some
1555 * ABI issues and provides fewer functionalities.
1556 *
1557 * These values are interesting because hardware PMU (Performance
1558 * Monitoring Unit) counters are limited resources. When there are
1559 * more PMU-based perf events opened than available counters, the
1560 * kernel multiplexes these events so that each event gets a
1561 * certain percentage (but not all) of the PMU time. When such
1562 * multiplexing happens, the number of samples or the counter
1563 * value does not reflect what it would be without multiplexing.
1564 * This makes comparisons between different runs difficult.
1565 * Typically, the counter value should be normalized before
1566 * comparing to other experiments. The usual normalization is done
1567 * as follows.
1568 *
1569 * ::
1570 *
1571 * normalized_counter = counter * t_enabled / t_running
1572 *
1573 * Where *t_enabled* is the time enabled for the event and
1574 * *t_running* is the time the event has been running since the
1575 * last normalization. Both are accumulated since the perf event
1576 * was opened. To compute the scaling factor between two
1577 * invocations of an eBPF program, users can use the CPU id as the
1578 * key (typical for the perf array usage model) to remember the
1579 * previous value and do the calculation inside the eBPF program.
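 *
 * A short usage sketch (*counters* stands for a
 * **BPF_MAP_TYPE_PERF_EVENT_ARRAY** map that user space has already
 * populated with perf event file descriptors; the name is an
 * assumption):
 *
 * ::
 *
 *	struct bpf_perf_event_value val = {};
 *	__u64 normalized = 0;
 *
 *	if (!bpf_perf_event_read_value(&counters, BPF_F_CURRENT_CPU,
 *				       &val, sizeof(val)) && val.running)
 *		normalized = val.counter * val.enabled / val.running;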
1580 * Return
1581 * 0 on success, or a negative error in case of failure.
1582 *
1583 * int bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
1584 * Description
1585 * For an eBPF program attached to a perf event, retrieve the
1586 * value of the event counter associated to *ctx* and store it in
1587 * the structure pointed by *buf* and of size *buf_size*. Enabled
1588 * and running times are also stored in the structure (see
1589 * description of helper **bpf_perf_event_read_value**\ () for
1590 * more details).
1591 * Return
1592 * 0 on success, or a negative error in case of failure.
1593 *
1594 * int bpf_getsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen)
1595 * Description
1596 * Emulate a call to **getsockopt()** on the socket associated to
1597 * *bpf_socket*, which must be a full socket. The *level* at
1598 * which the option resides and the name *optname* of the option
1599 * must be specified, see **getsockopt(2)** for more information.
1600 * The retrieved value is stored in the structure pointed by
1601 * *optval* and of length *optlen*.
1602 *
1603 * This helper actually implements a subset of **getsockopt()**.
1604 * It supports the following *level*\ s:
1605 *
1606 * * **IPPROTO_TCP**, which supports *optname*
1607 * **TCP_CONGESTION**.
1608 * * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
1609 * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
1610 * Return
1611 * 0 on success, or a negative error in case of failure.
1612 *
1613 * int bpf_override_return(struct pt_reg *regs, u64 rc)
1614 * Description
1615 * Used for error injection, this helper uses kprobes to override
1616 * the return value of the probed function, and to set it to *rc*.
1617 * The first argument is the context *regs* on which the kprobe
1618 * works.
1619 *
1620 * This helper works by setting the PC (program counter)
1621 * to an override function which is run in place of the original
1622 * probed function. This means the probed function is not run at
1623 * all. The replacement function just returns with the required
1624 * value.
1625 *
1626 * This helper has security implications, and thus is subject to
1627 * restrictions. It is only available if the kernel was compiled
1628 * with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration
1629 * option, and in this case it only works on functions tagged with
1630 * **ALLOW_ERROR_INJECTION** in the kernel code.
1631 *
1632 * Also, the helper is only available for the architectures having
1633 * the CONFIG_FUNCTION_ERROR_INJECTION option. As of this writing,
1634 * x86 architecture is the only one to support this feature.
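 *
 * An illustrative sketch (the probed function name is hypothetical;
 * it must be tagged with **ALLOW_ERROR_INJECTION** in the kernel
 * source):
 *
 * ::
 *
 *	SEC("kprobe/some_injectable_function")
 *	int inject_failure(struct pt_regs *ctx)
 *	{
 *		// Make the probed function return -ENOMEM (-12)
 *		// without running its body.
 *		bpf_override_return(ctx, (__u64)-12);
 *		return 0;
 *	}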
1635 * Return
1636 * 0
1637 *
1638 * int bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval)
1639 * Description
1640 * Attempt to set the value of the **bpf_sock_ops_cb_flags** field
1641 * for the full TCP socket associated to *bpf_sock* to
1642 * *argval*.
1643 *
1644 * The primary use of this field is to determine if there should
1645 * be calls to eBPF programs of type
1646 * **BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP
1647 * code. A program of the same type can change its value, per
1648 * connection and as necessary, when the connection is
1649 * established. This field is directly accessible for reading, but
1650 * this helper must be used for updates in order to return an
1651 * error if an eBPF program tries to set a callback that is not
1652 * supported in the current kernel.
1653 *
1654 * The supported callback values that *argval* can combine are:
1655 *
1656 * * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out)
1657 * * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission)
1658 * * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change)
1659 *
1660 * Here are some examples of where one could call such eBPF
1661 * program:
1662 *
1663 * * When RTO fires.
1664 * * When a packet is retransmitted.
1665 * * When the connection terminates.
1666 * * When a packet is sent.
1667 * * When a packet is received.
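 *
 * For example (sketch only, *skops* being the **struct bpf_sock_ops**
 * context of the program), enabling the RTO and retransmission
 * callbacks once a connection is established:
 *
 * ::
 *
 *	if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
 *		bpf_sock_ops_cb_flags_set(skops,
 *					  BPF_SOCK_OPS_RTO_CB_FLAG |
 *					  BPF_SOCK_OPS_RETRANS_CB_FLAG);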
1668 * Return
1669 * Code **-EINVAL** if the socket is not a full TCP socket;
1670 * otherwise, a positive number containing the bits that could not
1671 * be set is returned (which comes down to 0 if all bits were set
1672 * as required).
1673 *
1674 * int bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags)
1675 * Description
1676 * This helper is used in programs implementing policies at the
1677 * socket level. If the message *msg* is allowed to pass (i.e. if
1678 * the verdict eBPF program returns **SK_PASS**), redirect it to
1679 * the socket referenced by *map* (of type
1680 * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
1681 * egress interfaces can be used for redirection. The
1682 * **BPF_F_INGRESS** value in *flags* is used to make the
1683 * distinction (ingress path is selected if the flag is present,
1684 * egress path otherwise). This is the only flag supported for now.
1685 * Return
1686 * **SK_PASS** on success, or **SK_DROP** on error.
1687 *
1688 * int bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes)
1689 * Description
1690 * For socket policies, apply the verdict of the eBPF program to
1691 * the next *bytes* (number of bytes) of message *msg*.
1692 *
1693 * For example, this helper can be used in the following cases:
1694 *
1695 * * A single **sendmsg**\ () or **sendfile**\ () system call
1696 * contains multiple logical messages that the eBPF program is
1697 * supposed to read and for which it should apply a verdict.
1698 * * An eBPF program only needs to read the first *bytes* of a
1699 * *msg*. If the message has a large payload, then setting up
1700 * and calling the eBPF program repeatedly for all bytes, even
1701 * though the verdict is already known, would create unnecessary
1702 * overhead.
1703 *
1704 * When called from within an eBPF program, the helper sets a
1705 * counter internal to the BPF infrastructure, that is used to
1706 * apply the last verdict to the next *bytes*. If *bytes* is
1707 * smaller than the current data being processed from a
1708 * **sendmsg**\ () or **sendfile**\ () system call, the first
1709 * *bytes* will be sent and the eBPF program will be re-run with
1710 * the pointer for start of data pointing to byte number *bytes*
1711 * **+ 1**. If *bytes* is larger than the current data being
1712 * processed, then the eBPF verdict will be applied to multiple
1713 * **sendmsg**\ () or **sendfile**\ () calls until *bytes* are
1714 * consumed.
1715 *
1716 * Note that if a socket closes with the internal counter holding
1717 * a non-zero value, this is not a problem because data is not
1718 * being buffered for *bytes* and is sent as it is received.
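 *
 * A minimal sketch of a **BPF_PROG_TYPE_SK_MSG** verdict program
 * (SEC() and helper declarations are assumed from the example
 * headers):
 *
 * ::
 *
 *	SEC("sk_msg")
 *	int msg_verdict(struct sk_msg_md *msg)
 *	{
 *		// Apply this verdict to the next 4096 bytes so the
 *		// program is not re-run for every chunk of the message.
 *		bpf_msg_apply_bytes(msg, 4096);
 *		return SK_PASS;
 *	}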
1719 * Return
1720 * 0
1721 *
1722 * int bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes)
1723 * Description
1724 * For socket policies, prevent the execution of the verdict eBPF
1725 * program for message *msg* until *bytes* (byte number) have been
1726 * accumulated.
1727 *
1728 * This can be used when one needs a specific number of bytes
1729 * before a verdict can be assigned, even if the data spans
1730 * multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme
1731 * case would be a user calling **sendmsg**\ () repeatedly with
1732 * 1-byte long message segments. Obviously, this is bad for
1733 * performance, but it is still valid. If the eBPF program needs
1734 * *bytes* bytes to validate a header, this helper can be used to
1735 * prevent the eBPF program from being called again until *bytes*
1736 * have been accumulated.
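 *
 * For example (sketch only, from within a verdict program such as the
 * one shown above), asking for at least an 8-byte application header
 * before the verdict runs again:
 *
 * ::
 *
 *	bpf_msg_cork_bytes(msg, 8);
 *	return SK_PASS;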
1737 * Return
1738 * 0
1739 *
1740 * int bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags)
1741 * Description
1742 * For socket policies, pull in non-linear data from user space
1743 * for *msg* and set pointers *msg*\ **->data** and *msg*\
1744 * **->data_end** to *start* and *end* bytes offsets into *msg*,
1745 * respectively.
1746 *
1747 * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
1748 * *msg* it can only parse data that the (**data**, **data_end**)
1749 * pointers have already consumed. For **sendmsg**\ () hooks this
1750 * is likely the first scatterlist element. But for calls relying
1751 * on the **sendpage** handler (e.g. **sendfile**\ ()) this will
1752 * be the range (**0**, **0**) because the data is shared with
1753 * user space and by default the objective is to avoid allowing
1754 * user space to modify data while (or after) eBPF verdict is
1755 * being decided. This helper can be used to pull in data and to
1756 * set the start and end pointer to given values. Data will be
1757 * copied if necessary (i.e. if data was not linear and if start
1758 * and end pointers do not point to the same chunk).
1759 *
1760 * A call to this helper may change the underlying
1761 * packet buffer. Therefore, at load time, all checks on pointers
1762 * previously done by the verifier are invalidated and must be
1763 * performed again, if the helper is used in combination with
1764 * direct packet access.
1765 *
1766 * All values for *flags* are reserved for future usage, and must
1767 * be left at zero.
1768 * Return
1769 * 0 on success, or a negative error in case of failure.
1770 *
1771 * int bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len)
1772 * Description
1773 * Bind the socket associated to *ctx* to the address pointed by
1774 * *addr*, of length *addr_len*. This allows for making outgoing
1775 * connections from the desired IP address, which can be useful,
1776 * for example, when all processes inside a cgroup should use a
1777 * single IP address on a host that has multiple IPs configured.
1778 *
1779 * This helper works for IPv4 and IPv6, TCP and UDP sockets. The
1780 * domain (*addr*\ **->sa_family**) must be **AF_INET** (or
1781 * **AF_INET6**). Looking for a free port to bind to can be
1782 * expensive, therefore binding to port is not permitted by the
1783 * helper: *addr*\ **->sin_port** (or **sin6_port**, respectively)
1784 * must be set to zero.
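 *
 * An illustrative sketch for a **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**
 * program attached to **connect4** (the source address is an
 * arbitrary example; **struct sockaddr_in** and **bpf_htonl**\ ()
 * are assumed to come from the usual system and example headers):
 *
 * ::
 *
 *	SEC("cgroup/connect4")
 *	int connect_v4(struct bpf_sock_addr *ctx)
 *	{
 *		struct sockaddr_in sa = {};
 *
 *		sa.sin_family = AF_INET;
 *		sa.sin_port = 0;	// no specific source port
 *		sa.sin_addr.s_addr = bpf_htonl(0x0a000001);	// 10.0.0.1
 *
 *		if (bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)))
 *			return 0;	// reject the connect() on failure
 *		return 1;
 *	}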
1785 * Return
1786 * 0 on success, or a negative error in case of failure.
1787 *
1788 * int bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta)
1789 * Description
1790 * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is
1791 * only possible to shrink the packet as of this writing,
1792 * therefore *delta* must be a negative integer.
1793 *
1794 * A call to this helper may change the underlying
1795 * packet buffer. Therefore, at load time, all checks on pointers
1796 * previously done by the verifier are invalidated and must be
1797 * performed again, if the helper is used in combination with
1798 * direct packet access.
1799 * Return
1800 * 0 on success, or a negative error in case of failure.
1801 *
1802 * int bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags)
1803 * Description
1804 * Retrieve the XFRM state (IP transform framework, see also
1805 * **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*.
1806 *
1807 * The retrieved value is stored in the **struct bpf_xfrm_state**
1808 * pointed by *xfrm_state* and of length *size*.
1809 *
1810 * All values for *flags* are reserved for future usage, and must
1811 * be left at zero.
1812 *
1813 * This helper is available only if the kernel was compiled with
1814 * the **CONFIG_XFRM** configuration option.
1815 * Return
1816 * 0 on success, or a negative error in case of failure.
1817 *
1818 * int bpf_get_stack(struct pt_regs *regs, void *buf, u32 size, u64 flags)
1819 * Description
1820 * Return a user or a kernel stack in bpf program provided buffer.
1821 * To achieve this, the helper needs *ctx*, which is a pointer
1822 * to the context on which the tracing program is executed.
1823 * To store the stacktrace, the bpf program provides *buf* with
1824 * a nonnegative *size*.
1825 *
1826 * The last argument, *flags*, holds the number of stack frames to
1827 * skip (from 0 to 255), masked with
1828 * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
1829 * the following flags:
1830 *
1831 * **BPF_F_USER_STACK**
1832 * Collect a user space stack instead of a kernel stack.
1833 * **BPF_F_USER_BUILD_ID**
1834 * Collect buildid+offset instead of ips for user stack,
1835 * only valid if **BPF_F_USER_STACK** is also specified.
1836 *
1837 * **bpf_get_stack**\ () can collect up to
1838 * **PERF_MAX_STACK_DEPTH** kernel and user frames, subject to a
1839 * sufficiently large buffer size. Note that
1840 * this limit can be controlled with the **sysctl** program, and
1841 * that it should be manually increased in order to profile long
1842 * user stacks (such as stacks for Java programs). To do so, use:
1843 *
1844 * ::
1845 *
1846 * # sysctl kernel.perf_event_max_stack=<new value>
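 *
 * A minimal usage sketch from a tracing program (*ctx* being the
 * program's context pointer; the buffer size is arbitrary but must
 * fit within the eBPF stack limit):
 *
 * ::
 *
 *	__u64 stack[32];
 *	long len;
 *
 *	// On success, len is the number of bytes of user-space
 *	// instruction pointers written into stack[].
 *	len = bpf_get_stack(ctx, stack, sizeof(stack),
 *			    BPF_F_USER_STACK);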
1847 * Return
1848 * A non-negative value equal to or less than *size* on success,
1849 * or a negative error in case of failure.
1850 *
1851 * int bpf_skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header)
1852 * Description
1853 * This helper is similar to **bpf_skb_load_bytes**\ () in that
1854 * it provides an easy way to load *len* bytes from *offset*
1855 * from the packet associated to *skb*, into the buffer pointed
1856 * by *to*. The difference to **bpf_skb_load_bytes**\ () is that
1857 * a fifth argument *start_header* exists in order to select a
1858 * base offset to start from. *start_header* can be one of:
1859 *
1860 * **BPF_HDR_START_MAC**
1861 * Base offset to load data from is *skb*'s mac header.
1862 * **BPF_HDR_START_NET**
1863 * Base offset to load data from is *skb*'s network header.
1864 *
1865 * In general, "direct packet access" is the preferred method to
1866 * access packet data; however, this helper is particularly useful
1867 * in socket filters where *skb*\ **->data** does not always point
1868 * to the start of the mac header and where "direct packet access"
1869 * is not available.
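 *
 * For example (sketch for a socket filter; **struct iphdr** comes
 * from the usual kernel headers and the packet is assumed to be
 * IPv4), loading the IP header relative to the network header:
 *
 * ::
 *
 *	struct iphdr iph;
 *
 *	if (bpf_skb_load_bytes_relative(skb, 0, &iph, sizeof(iph),
 *					BPF_HDR_START_NET))
 *		return 0;	// could not load the header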
1870 * Return
1871 * 0 on success, or a negative error in case of failure.
1872 *
1873 * int bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags)
1874 * Description
1875 * Do FIB lookup in kernel tables using parameters in *params*.
1876 * If lookup is successful and result shows packet is to be
1877 * forwarded, the neighbor tables are searched for the nexthop.
1878 * If successful (i.e., FIB lookup shows forwarding and nexthop
1879 * is resolved), the nexthop address is returned in ipv4_dst
1880 * or ipv6_dst based on family, smac is set to mac address of
1881 * egress device, dmac is set to nexthop mac address, rt_metric
1882 * is set to metric from route (IPv4/IPv6 only), and ifindex
1883 * is set to the device index of the nexthop from the FIB lookup.
1884 *
1885 * The *plen* argument is the size of the passed-in struct.
1886 * *flags* argument can be a combination of one or more of the
1887 * following values:
1888 *
1889 * **BPF_FIB_LOOKUP_DIRECT**
1890 * Do a direct table lookup vs full lookup using FIB
1891 * rules.
1892 * **BPF_FIB_LOOKUP_OUTPUT**
1893 * Perform lookup from an egress perspective (default is
1894 * ingress).
1895 *
1896 * *ctx* is either **struct xdp_md** for XDP programs or
1897 * **struct sk_buff** for tc cls_act programs.
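 *
 * A condensed sketch for XDP (a real program must first parse the
 * packet and fill in all relevant fields of *params*; the omitted
 * parsing and the **AF_INET** constant are assumptions here):
 *
 * ::
 *
 *	struct bpf_fib_lookup params = {};
 *	int rc;
 *
 *	params.family = AF_INET;
 *	params.ifindex = ctx->ingress_ifindex;
 *	// ... fill tot_len, tos, ipv4_src and ipv4_dst from the packet
 *
 *	rc = bpf_fib_lookup(ctx, &params, sizeof(params), 0);
 *	if (rc == BPF_FIB_LKUP_RET_SUCCESS)
 *		// Rewrite the MAC addresses with params.smac/params.dmac,
 *		// then redirect to the egress device:
 *		return bpf_redirect(params.ifindex, 0);
 *	return XDP_PASS;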
1898 * Return
1899 * * < 0 if any input argument is invalid
1900 * * 0 on success (packet is forwarded, nexthop neighbor exists)
1901 * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
1902 * packet is not forwarded or needs assist from full stack
1903 *
1904 * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags)
1905 * Description
1906 * Add an entry to, or update a sockhash *map* referencing sockets.
1907 * The *skops* is used as a new value for the entry associated to
1908 * *key*. *flags* is one of:
1909 *
1910 * **BPF_NOEXIST**
1911 * The entry for *key* must not exist in the map.
1912 * **BPF_EXIST**
1913 * The entry for *key* must already exist in the map.
1914 * **BPF_ANY**
1915 * No condition on the existence of the entry for *key*.
1916 *
1917 * If the *map* has eBPF programs (parser and verdict), those will
1918 * be inherited by the socket being added. If the socket is
1919 * already attached to eBPF programs, this results in an error.
1920 * Return
1921 * 0 on success, or a negative error in case of failure.
1922 *
1923 * int bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags)
1924 * Description
1925 * This helper is used in programs implementing policies at the
1926 * socket level. If the message *msg* is allowed to pass (i.e. if
1927 * the verdict eBPF program returns **SK_PASS**), redirect it to
1928 * the socket referenced by *map* (of type
1929 * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
1930 * egress interfaces can be used for redirection. The
1931 * **BPF_F_INGRESS** value in *flags* is used to make the
1932 * distinction (ingress path is selected if the flag is present,
1933 * egress path otherwise). This is the only flag supported for now.
1934 * Return
1935 * **SK_PASS** on success, or **SK_DROP** on error.
1936 *
1937 * int bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags)
1938 * Description
1939 * This helper is used in programs implementing policies at the
1940 * skb socket level. If the sk_buff *skb* is allowed to pass (i.e.
1941 * if the verdict eBPF program returns **SK_PASS**), redirect it
1942 * to the socket referenced by *map* (of type
1943 * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
1944 * egress interfaces can be used for redirection. The
1945 * **BPF_F_INGRESS** value in *flags* is used to make the
1946 * distinction (ingress path is selected if the flag is present,
1947 * egress otherwise). This is the only flag supported for now.
1948 * Return
1949 * **SK_PASS** on success, or **SK_DROP** on error.
1950 *
1951 * int bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
1952 * Description
1953 * Encapsulate the packet associated to *skb* within a Layer 3
1954 * protocol header. This header is provided in the buffer at
1955 * address *hdr*, with *len* its size in bytes. *type* indicates
1956 * the protocol of the header and can be one of:
1957 *
1958 * **BPF_LWT_ENCAP_SEG6**
1959 * IPv6 encapsulation with Segment Routing Header
1960 * (**struct ipv6_sr_hdr**). *hdr* only contains the SRH,
1961 * the IPv6 header is computed by the kernel.
1962 * **BPF_LWT_ENCAP_SEG6_INLINE**
1963 * Only works if *skb* contains an IPv6 packet. Insert a
1964 * Segment Routing Header (**struct ipv6_sr_hdr**) inside
1965 * the IPv6 header.
1966 *
1967 * A call to this helper may change the underlying
1968 * packet buffer. Therefore, at load time, all checks on pointers
1969 * previously done by the verifier are invalidated and must be
1970 * performed again, if the helper is used in combination with
1971 * direct packet access.
1972 * Return
1973 * 0 on success, or a negative error in case of failure.
1974 *
1975 * int bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len)
1976 * Description
1977 * Store *len* bytes from address *from* into the packet
1978 * associated to *skb*, at *offset*. Only the flags, tag and TLVs
1979 * inside the outermost IPv6 Segment Routing Header can be
1980 * modified through this helper.
1981 *
1982 * A call to this helper may change the underlying
1983 * packet buffer. Therefore, at load time, all checks on pointers
1984 * previously done by the verifier are invalidated and must be
1985 * performed again, if the helper is used in combination with
1986 * direct packet access.
1987 * Return
1988 * 0 on success, or a negative error in case of failure.
1989 *
1990 * int bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta)
1991 * Description
1992 * Adjust the size allocated to TLVs in the outermost IPv6
1993 * Segment Routing Header contained in the packet associated to
1994 * *skb*, at position *offset* by *delta* bytes. Only offsets
1995 * after the segments are accepted. *delta* can be either
1996 * positive (growing) or negative (shrinking).
1997 *
1998 * A call to this helper may change the underlying
1999 * packet buffer. Therefore, at load time, all checks on pointers
2000 * previously done by the verifier are invalidated and must be
2001 * performed again, if the helper is used in combination with
2002 * direct packet access.
2003 * Return
2004 * 0 on success, or a negative error in case of failure.
2005 *
2006 * int bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len)
2007 * Description
2008 * Apply an IPv6 Segment Routing action of type *action* to the
2009 * packet associated to *skb*. Each action takes a parameter
2010 * contained at address *param*, and of length *param_len* bytes.
2011 * *action* can be one of:
2012 *
2013 * **SEG6_LOCAL_ACTION_END_X**
2014 * End.X action: Endpoint with Layer-3 cross-connect.
2015 * Type of *param*: **struct in6_addr**.
2016 * **SEG6_LOCAL_ACTION_END_T**
2017 * End.T action: Endpoint with specific IPv6 table lookup.
2018 * Type of *param*: **int**.
2019 * **SEG6_LOCAL_ACTION_END_B6**
2020 * End.B6 action: Endpoint bound to an SRv6 policy.
2021 * Type of *param*: **struct ipv6_sr_hdr**.
2022 * **SEG6_LOCAL_ACTION_END_B6_ENCAP**
2023 * End.B6.Encap action: Endpoint bound to an SRv6
2024 * encapsulation policy.
2025 * Type of *param*: **struct ipv6_sr_hdr**.
2026 *
2027 * A call to this helper may change the underlying
2028 * packet buffer. Therefore, at load time, all checks on pointers
2029 * previously done by the verifier are invalidated and must be
2030 * performed again, if the helper is used in combination with
2031 * direct packet access.
2032 * Return
2033 * 0 on success, or a negative error in case of failure.
2034 *
2035 * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
2036 * Description
2037 * This helper is used in programs implementing IR decoding, to
2038 * report a successfully decoded key press with *scancode* and
2039 * *toggle* value in the given *protocol*. The scancode will be
2040 * translated to a keycode using the rc keymap, and reported as
2041 * an input key down event. After a period a key up event is
2042 * generated. This period can be extended by calling either
2043 * **bpf_rc_keydown**\ () again with the same values, or calling
2044 * **bpf_rc_repeat**\ ().
2045 *
2046 * Some protocols include a toggle bit, in case the button was
2047 * released and pressed again between consecutive scancodes.
2048 *
2049 * The *ctx* should point to the lirc sample as passed into
2050 * the program.
2051 *
2052 * The *protocol* is the decoded protocol number (see
2053 * **enum rc_proto** for some predefined values).
2054 *
2055 * This helper is only available if the kernel was compiled with
2056 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
2057 * "**y**".
2058 * Return
2059 * 0
2060 *
2061 * int bpf_rc_repeat(void *ctx)
2062 * Description
2063 * This helper is used in programs implementing IR decoding, to
2064 * report a successfully decoded repeat key message. This delays
2065 * the generation of a key up event for previously generated
2066 * key down event.
2067 *
2068 * Some IR protocols like NEC have a special IR message for
2069 * repeating the last button, for when a button is held down.
2070 *
2071 * The *ctx* should point to the lirc sample as passed into
2072 * the program.
2073 *
2074 * This helper is only available if the kernel was compiled with
2075 * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
2076 * "**y**".
2077 * Return
2078 * 0
2079 *
2080 * uint64_t bpf_skb_cgroup_id(struct sk_buff *skb)
2081 * Description
2082 * Return the cgroup v2 id of the socket associated with the *skb*.
2083 * This is roughly similar to the **bpf_get_cgroup_classid**\ ()
2084 * helper for cgroup v1, providing a tag (or identifier) that
2085 * can be matched on or used for map lookups, e.g. to implement
2086 * policy. The cgroup v2 id of a given path in the hierarchy is
2087 * exposed in user space through the f_handle API in order to get
2088 * to the same 64-bit id.
2089 *
2090 * This helper can be used on TC egress path, but not on ingress,
2091 * and is available only if the kernel was compiled with the
2092 * **CONFIG_SOCK_CGROUP_DATA** configuration option.
2093 * Return
2094 * The id is returned or 0 in case the id could not be retrieved.
2095 *
2096 * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
2097 * Description
2098 * Return id of cgroup v2 that is ancestor of cgroup associated
2099 * with the *skb* at the *ancestor_level*. The root cgroup is at
2100 * *ancestor_level* zero and each step down the hierarchy
2101 * increments the level. If *ancestor_level* equals the level of
2102 * the cgroup associated with *skb*, the return value is the same
2103 * as that of **bpf_skb_cgroup_id**\ ().
2104 *
2105 * The helper is useful to implement policies based on cgroups
2106 * that are higher in the hierarchy than the immediate cgroup
2107 * associated with *skb*.
2108 *
2109 * The format of the returned id and the helper limitations are
2110 * the same as for **bpf_skb_cgroup_id**\ ().
2111 * Return
2112 * The id is returned or 0 in case the id could not be retrieved.
2113 *
2114 * u64 bpf_get_current_cgroup_id(void)
2115 * Return
2116 * A 64-bit integer containing the current cgroup id based
2117 * on the cgroup within which the current task is running.
2118 *
2119 * void *bpf_get_local_storage(void *map, u64 flags)
2120 * Description
2121 * Get the pointer to the local storage area.
2122 * The type and the size of the local storage are defined
2123 * by the *map* argument.
2124 * The *flags* meaning is specific for each map type,
2125 * and has to be 0 for cgroup local storage.
2126 *
2127 * Depending on the bpf program type, a local storage area
2128 * can be shared between multiple instances of the bpf program,
2129 * running simultaneously.
2130 *
2131 * The user is responsible for synchronization, for example by
2132 * using the **BPF_STX_XADD** instruction to alter the shared
2133 * data.
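 *
 * A short sketch for a cgroup program using a
 * **BPF_MAP_TYPE_CGROUP_STORAGE** map (the map name is an
 * assumption, the map itself being declared elsewhere in the
 * program):
 *
 * ::
 *
 *	__u64 *counter;
 *
 *	counter = bpf_get_local_storage(&cgroup_counter_map, 0);
 *	// Atomic add: the storage may be shared by several running
 *	// instances of the program (compiles to BPF_STX_XADD).
 *	__sync_fetch_and_add(counter, 1);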
2134 * Return
2135 * Pointer to the local storage area.
2136 *
2137 * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
2138 * Description
2139 * Select a **SO_REUSEPORT** socket from a
2140 * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*. It checks that the
2141 * selected socket matches the incoming request in the *skb*.
2142 * Return
2143 * 0 on success, or a negative error in case of failure.
2144 */
2145#define __BPF_FUNC_MAPPER(FN) \
2146 FN(unspec), \
2147 FN(map_lookup_elem), \
2148 FN(map_update_elem), \
2149 FN(map_delete_elem), \
2150 FN(probe_read), \
2151 FN(ktime_get_ns), \
2152 FN(trace_printk), \
2153 FN(get_prandom_u32), \
2154 FN(get_smp_processor_id), \
2155 FN(skb_store_bytes), \
2156 FN(l3_csum_replace), \
2157 FN(l4_csum_replace), \
2158 FN(tail_call), \
2159 FN(clone_redirect), \
2160 FN(get_current_pid_tgid), \
2161 FN(get_current_uid_gid), \
2162 FN(get_current_comm), \
2163 FN(get_cgroup_classid), \
2164 FN(skb_vlan_push), \
2165 FN(skb_vlan_pop), \
2166 FN(skb_get_tunnel_key), \
2167 FN(skb_set_tunnel_key), \
2168 FN(perf_event_read), \
2169 FN(redirect), \
2170 FN(get_route_realm), \
2171 FN(perf_event_output), \
2172 FN(skb_load_bytes), \
2173 FN(get_stackid), \
2174 FN(csum_diff), \
2175 FN(skb_get_tunnel_opt), \
2176 FN(skb_set_tunnel_opt), \
2177 FN(skb_change_proto), \
2178 FN(skb_change_type), \
2179 FN(skb_under_cgroup), \
2180 FN(get_hash_recalc), \
2181 FN(get_current_task), \
2182 FN(probe_write_user), \
2183 FN(current_task_under_cgroup), \
2184 FN(skb_change_tail), \
2185 FN(skb_pull_data), \
2186 FN(csum_update), \
2187 FN(set_hash_invalid), \
2188 FN(get_numa_node_id), \
2189 FN(skb_change_head), \
2190 FN(xdp_adjust_head), \
2191 FN(probe_read_str), \
2192 FN(get_socket_cookie), \
2193 FN(get_socket_uid), \
2194 FN(set_hash), \
2195 FN(setsockopt), \
2196 FN(skb_adjust_room), \
2197 FN(redirect_map), \
2198 FN(sk_redirect_map), \
2199 FN(sock_map_update), \
2200 FN(xdp_adjust_meta), \
2201 FN(perf_event_read_value), \
2202 FN(perf_prog_read_value), \
2203 FN(getsockopt), \
2204 FN(override_return), \
2205 FN(sock_ops_cb_flags_set), \
2206 FN(msg_redirect_map), \
2207 FN(msg_apply_bytes), \
2208 FN(msg_cork_bytes), \
2209 FN(msg_pull_data), \
2210 FN(bind), \
2211 FN(xdp_adjust_tail), \
2212 FN(skb_get_xfrm_state), \
2213 FN(get_stack), \
2214 FN(skb_load_bytes_relative), \
2215 FN(fib_lookup), \
2216 FN(sock_hash_update), \
2217 FN(msg_redirect_hash), \
2218 FN(sk_redirect_hash), \
2219 FN(lwt_push_encap), \
2220 FN(lwt_seg6_store_bytes), \
2221 FN(lwt_seg6_adjust_srh), \
2222 FN(lwt_seg6_action), \
2223 FN(rc_repeat), \
2224 FN(rc_keydown), \
2225 FN(skb_cgroup_id), \
2226 FN(get_current_cgroup_id), \
2227 FN(get_local_storage), \
2228 FN(sk_select_reuseport), \
2229 FN(skb_ancestor_cgroup_id),
2230
2231/* integer value in 'imm' field of BPF_CALL instruction selects which helper
2232 * function eBPF program intends to call
2233 */
2234#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
2235enum bpf_func_id {
2236 __BPF_FUNC_MAPPER(__BPF_ENUM_FN)
2237 __BPF_FUNC_MAX_ID,
2238};
2239#undef __BPF_ENUM_FN
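
/* For instance (illustrative only), a hand-assembled call to the
 * map_lookup_elem helper is encoded by putting its function id into
 * the 'imm' field of a BPF_CALL instruction:
 *
 *	struct bpf_insn call_insn = {
 *		.code = BPF_JMP | BPF_CALL,
 *		.imm  = BPF_FUNC_map_lookup_elem,
 *	};
 */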
2240
2241/* All flags used by eBPF helper functions, placed here. */
2242
2243/* BPF_FUNC_skb_store_bytes flags. */
2244#define BPF_F_RECOMPUTE_CSUM (1ULL << 0)
2245#define BPF_F_INVALIDATE_HASH (1ULL << 1)
2246
2247/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
2248 * First 4 bits are for passing the header field size.
2249 */
2250#define BPF_F_HDR_FIELD_MASK 0xfULL
2251
2252/* BPF_FUNC_l4_csum_replace flags. */
2253#define BPF_F_PSEUDO_HDR (1ULL << 4)
2254#define BPF_F_MARK_MANGLED_0 (1ULL << 5)
2255#define BPF_F_MARK_ENFORCE (1ULL << 6)
2256
2257/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
2258#define BPF_F_INGRESS (1ULL << 0)
2259
2260/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
2261#define BPF_F_TUNINFO_IPV6 (1ULL << 0)
2262
2263/* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */
2264#define BPF_F_SKIP_FIELD_MASK 0xffULL
2265#define BPF_F_USER_STACK (1ULL << 8)
2266/* flags used by BPF_FUNC_get_stackid only. */
2267#define BPF_F_FAST_STACK_CMP (1ULL << 9)
2268#define BPF_F_REUSE_STACKID (1ULL << 10)
2269/* flags used by BPF_FUNC_get_stack only. */
2270#define BPF_F_USER_BUILD_ID (1ULL << 11)
2271
2272/* BPF_FUNC_skb_set_tunnel_key flags. */
2273#define BPF_F_ZERO_CSUM_TX (1ULL << 1)
2274#define BPF_F_DONT_FRAGMENT (1ULL << 2)
2275#define BPF_F_SEQ_NUMBER (1ULL << 3)
2276
2277/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
2278 * BPF_FUNC_perf_event_read_value flags.
2279 */
2280#define BPF_F_INDEX_MASK 0xffffffffULL
2281#define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK
2282/* BPF_FUNC_perf_event_output for sk_buff input context. */
2283#define BPF_F_CTXLEN_MASK (0xfffffULL << 32)
2284
2285/* Mode for BPF_FUNC_skb_adjust_room helper. */
2286enum bpf_adj_room_mode {
2287 BPF_ADJ_ROOM_NET,
2288};
2289
2290/* Mode for BPF_FUNC_skb_load_bytes_relative helper. */
2291enum bpf_hdr_start_off {
2292 BPF_HDR_START_MAC,
2293 BPF_HDR_START_NET,
2294};
2295
2296/* Encapsulation type for BPF_FUNC_lwt_push_encap helper. */
2297enum bpf_lwt_encap_mode {
2298 BPF_LWT_ENCAP_SEG6,
2299 BPF_LWT_ENCAP_SEG6_INLINE
2300};
2301
2302/* user accessible mirror of in-kernel sk_buff.
2303 * new fields can only be added to the end of this structure
2304 */
2305struct __sk_buff {
2306 __u32 len;
2307 __u32 pkt_type;
2308 __u32 mark;
2309 __u32 queue_mapping;
2310 __u32 protocol;
2311 __u32 vlan_present;
2312 __u32 vlan_tci;
2313 __u32 vlan_proto;
2314 __u32 priority;
2315 __u32 ingress_ifindex;
2316 __u32 ifindex;
2317 __u32 tc_index;
2318 __u32 cb[5];
2319 __u32 hash;
2320 __u32 tc_classid;
2321 __u32 data;
2322 __u32 data_end;
2323 __u32 napi_id;
2324
2325	/* Accessed by BPF_PROG_TYPE_SK_SKB types from here to ... */
2326 __u32 family;
2327 __u32 remote_ip4; /* Stored in network byte order */
2328 __u32 local_ip4; /* Stored in network byte order */
2329 __u32 remote_ip6[4]; /* Stored in network byte order */
2330 __u32 local_ip6[4]; /* Stored in network byte order */
2331 __u32 remote_port; /* Stored in network byte order */
2332 __u32 local_port; /* stored in host byte order */
2333 /* ... here. */
2334
2335 __u32 data_meta;
2336};
2337
2338struct bpf_tunnel_key {
2339 __u32 tunnel_id;
2340 union {
2341 __u32 remote_ipv4;
2342 __u32 remote_ipv6[4];
2343 };
2344 __u8 tunnel_tos;
2345 __u8 tunnel_ttl;
2346 __u16 tunnel_ext; /* Padding, future use. */
2347 __u32 tunnel_label;
2348};
2349
2350/* user accessible mirror of in-kernel xfrm_state.
2351 * new fields can only be added to the end of this structure
2352 */
2353struct bpf_xfrm_state {
2354 __u32 reqid;
2355 __u32 spi; /* Stored in network byte order */
2356 __u16 family;
2357 __u16 ext; /* Padding, future use. */
2358 union {
2359 __u32 remote_ipv4; /* Stored in network byte order */
2360 __u32 remote_ipv6[4]; /* Stored in network byte order */
2361 };
2362};
2363
2364/* Generic BPF return codes which all BPF program types may support.
2365 * The values are binary compatible with their TC_ACT_* counter-part to
2366 * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT
2367 * programs.
2368 *
2369 * XDP is handled separately, see XDP_*.
2370 */
2371enum bpf_ret_code {
2372 BPF_OK = 0,
2373 /* 1 reserved */
2374 BPF_DROP = 2,
2375 /* 3-6 reserved */
2376 BPF_REDIRECT = 7,
2377 /* >127 are reserved for prog type specific return codes */
2378};
2379
2380struct bpf_sock {
2381 __u32 bound_dev_if;
2382 __u32 family;
2383 __u32 type;
2384 __u32 protocol;
2385 __u32 mark;
2386 __u32 priority;
2387 __u32 src_ip4; /* Allows 1,2,4-byte read.
2388 * Stored in network byte order.
2389 */
2390 __u32 src_ip6[4]; /* Allows 1,2,4-byte read.
2391 * Stored in network byte order.
2392 */
2393 __u32 src_port; /* Allows 4-byte read.
2394 * Stored in host byte order
2395 */
2396};
2397
2398#define XDP_PACKET_HEADROOM 256
2399
2400/* User return codes for XDP prog type.
2401 * A valid XDP program must return one of these defined values. All other
2402 * return codes are reserved for future use. Unknown return codes will
2403 * result in packet drops and a warning via bpf_warn_invalid_xdp_action().
2404 */
2405enum xdp_action {
2406 XDP_ABORTED = 0,
2407 XDP_DROP,
2408 XDP_PASS,
2409 XDP_TX,
2410 XDP_REDIRECT,
2411};
2412
2413/* user accessible metadata for XDP packet hook
2414 * new fields must be added to the end of this structure
2415 */
2416struct xdp_md {
2417 __u32 data;
2418 __u32 data_end;
2419 __u32 data_meta;
2420 /* Below access go through struct xdp_rxq_info */
2421 __u32 ingress_ifindex; /* rxq->dev->ifindex */
2422 __u32 rx_queue_index; /* rxq->queue_index */
2423};
2424
2425enum sk_action {
2426 SK_DROP = 0,
2427 SK_PASS,
2428};
2429
2430/* user accessible metadata for SK_MSG packet hook, new fields must
2431 * be added to the end of this structure
2432 */
2433struct sk_msg_md {
2434 void *data;
2435 void *data_end;
2436
2437 __u32 family;
2438 __u32 remote_ip4; /* Stored in network byte order */
2439 __u32 local_ip4; /* Stored in network byte order */
2440 __u32 remote_ip6[4]; /* Stored in network byte order */
2441 __u32 local_ip6[4]; /* Stored in network byte order */
2442 __u32 remote_port; /* Stored in network byte order */
2443 __u32 local_port; /* stored in host byte order */
2444};
2445
2446struct sk_reuseport_md {
2447 /*
2448 * Start of directly accessible data. It begins from
2449 * the tcp/udp header.
2450 */
2451 void *data;
2452 void *data_end; /* End of directly accessible data */
2453 /*
2454 * Total length of packet (starting from the tcp/udp header).
2455 * Note that the directly accessible bytes (data_end - data)
2456 * could be less than this "len". Those bytes could be
2457 * indirectly read by a helper "bpf_skb_load_bytes()".
2458 */
2459 __u32 len;
2460 /*
2461 * Eth protocol in the mac header (network byte order). e.g.
2462 * ETH_P_IP(0x0800) and ETH_P_IPV6(0x86DD)
2463 */
2464 __u32 eth_protocol;
2465 __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */
2466 __u32 bind_inany; /* Is sock bound to an INANY address? */
2467 __u32 hash; /* A hash of the packet 4 tuples */
2468};
2469
2470#define BPF_TAG_SIZE 8
2471
2472struct bpf_prog_info {
2473 __u32 type;
2474 __u32 id;
2475 __u8 tag[BPF_TAG_SIZE];
2476 __u32 jited_prog_len;
2477 __u32 xlated_prog_len;
2478 __aligned_u64 jited_prog_insns;
2479 __aligned_u64 xlated_prog_insns;
2480 __u64 load_time; /* ns since boottime */
2481 __u32 created_by_uid;
2482 __u32 nr_map_ids;
2483 __aligned_u64 map_ids;
2484 char name[BPF_OBJ_NAME_LEN];
2485 __u32 ifindex;
2486 __u32 gpl_compatible:1;
2487 __u64 netns_dev;
2488 __u64 netns_ino;
2489 __u32 nr_jited_ksyms;
2490 __u32 nr_jited_func_lens;
2491 __aligned_u64 jited_ksyms;
2492 __aligned_u64 jited_func_lens;
2493} __attribute__((aligned(8)));
2494
2495struct bpf_map_info {
2496 __u32 type;
2497 __u32 id;
2498 __u32 key_size;
2499 __u32 value_size;
2500 __u32 max_entries;
2501 __u32 map_flags;
2502 char name[BPF_OBJ_NAME_LEN];
2503 __u32 ifindex;
2504 __u32 :32;
2505 __u64 netns_dev;
2506 __u64 netns_ino;
2507 __u32 btf_id;
2508 __u32 btf_key_type_id;
2509 __u32 btf_value_type_id;
2510} __attribute__((aligned(8)));
2511
2512struct bpf_btf_info {
2513 __aligned_u64 btf;
2514 __u32 btf_size;
2515 __u32 id;
2516} __attribute__((aligned(8)));
2517
2518/* User bpf_sock_addr struct to access socket fields and sockaddr struct passed
2519 * by user and intended to be used by socket (e.g. to bind to, depends on
2520 * attach type).
2521 */
2522struct bpf_sock_addr {
2523 __u32 user_family; /* Allows 4-byte read, but no write. */
2524 __u32 user_ip4; /* Allows 1,2,4-byte read and 4-byte write.
2525 * Stored in network byte order.
2526 */
2527	__u32 user_ip6[4];	/* Allows 1,2,4-byte read and 4-byte write.
2528 * Stored in network byte order.
2529 */
2530 __u32 user_port; /* Allows 4-byte read and write.
2531 * Stored in network byte order
2532 */
2533 __u32 family; /* Allows 4-byte read, but no write */
2534 __u32 type; /* Allows 4-byte read, but no write */
2535 __u32 protocol; /* Allows 4-byte read, but no write */
2536	__u32 msg_src_ip4;	/* Allows 1,2,4-byte read and 4-byte write.
2537 * Stored in network byte order.
2538 */
2539	__u32 msg_src_ip6[4];	/* Allows 1,2,4-byte read and 4-byte write.
2540 * Stored in network byte order.
2541 */
2542};
2543
2544/* User bpf_sock_ops struct to access socket values and specify request ops
2545 * and their replies.
2546 * Some of these fields are in network (big-endian) byte order and may need
2547 * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h).
2548 * New fields can only be added at the end of this structure
2549 */
2550struct bpf_sock_ops {
2551 __u32 op;
2552 union {
2553 __u32 args[4]; /* Optionally passed to bpf program */
2554 __u32 reply; /* Returned by bpf program */
2555 __u32 replylong[4]; /* Optionally returned by bpf prog */
2556 };
2557 __u32 family;
2558 __u32 remote_ip4; /* Stored in network byte order */
2559 __u32 local_ip4; /* Stored in network byte order */
2560 __u32 remote_ip6[4]; /* Stored in network byte order */
2561 __u32 local_ip6[4]; /* Stored in network byte order */
2562 __u32 remote_port; /* Stored in network byte order */
2563 __u32 local_port; /* stored in host byte order */
2564 __u32 is_fullsock; /* Some TCP fields are only valid if
2565 * there is a full socket. If not, the
2566 * fields read as zero.
2567 */
2568 __u32 snd_cwnd;
2569 __u32 srtt_us; /* Averaged RTT << 3 in usecs */
2570 __u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */
2571 __u32 state;
2572 __u32 rtt_min;
2573 __u32 snd_ssthresh;
2574 __u32 rcv_nxt;
2575 __u32 snd_nxt;
2576 __u32 snd_una;
2577 __u32 mss_cache;
2578 __u32 ecn_flags;
2579 __u32 rate_delivered;
2580 __u32 rate_interval_us;
2581 __u32 packets_out;
2582 __u32 retrans_out;
2583 __u32 total_retrans;
2584 __u32 segs_in;
2585 __u32 data_segs_in;
2586 __u32 segs_out;
2587 __u32 data_segs_out;
2588 __u32 lost_out;
2589 __u32 sacked_out;
2590 __u32 sk_txhash;
2591 __u64 bytes_received;
2592 __u64 bytes_acked;
2593};
2594
2595/* Definitions for bpf_sock_ops_cb_flags */
2596#define BPF_SOCK_OPS_RTO_CB_FLAG (1<<0)
2597#define BPF_SOCK_OPS_RETRANS_CB_FLAG (1<<1)
2598#define BPF_SOCK_OPS_STATE_CB_FLAG (1<<2)
2599#define BPF_SOCK_OPS_ALL_CB_FLAGS 0x7 /* Mask of all currently
2600 * supported cb flags
2601 */
2602
2603/* List of known BPF sock_ops operators.
2604 * New entries can only be added at the end
2605 */
2606enum {
2607 BPF_SOCK_OPS_VOID,
2608 BPF_SOCK_OPS_TIMEOUT_INIT, /* Should return SYN-RTO value to use or
2609 * -1 if default value should be used
2610 */
2611 BPF_SOCK_OPS_RWND_INIT, /* Should return initial advertized
2612 * window (in packets) or -1 if default
2613 * value should be used
2614 */
2615 BPF_SOCK_OPS_TCP_CONNECT_CB, /* Calls BPF program right before an
2616 * active connection is initialized
2617 */
2618 BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, /* Calls BPF program when an
2619 * active connection is
2620 * established
2621 */
2622 BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, /* Calls BPF program when a
2623 * passive connection is
2624 * established
2625 */
2626 BPF_SOCK_OPS_NEEDS_ECN, /* If connection's congestion control
2627 * needs ECN
2628 */
2629 BPF_SOCK_OPS_BASE_RTT, /* Get base RTT. The correct value is
2630 * based on the path and may be
2631 * dependent on the congestion control
2632 * algorithm. In general it indicates
2633 * a congestion threshold. RTTs above
2634 * this indicate congestion
2635 */
2636 BPF_SOCK_OPS_RTO_CB, /* Called when an RTO has triggered.
2637 * Arg1: value of icsk_retransmits
2638 * Arg2: value of icsk_rto
2639 * Arg3: whether RTO has expired
2640 */
2641 BPF_SOCK_OPS_RETRANS_CB, /* Called when skb is retransmitted.
2642 * Arg1: sequence number of 1st byte
2643 * Arg2: # segments
2644 * Arg3: return value of
2645 * tcp_transmit_skb (0 => success)
2646 */
2647 BPF_SOCK_OPS_STATE_CB, /* Called when TCP changes state.
2648 * Arg1: old_state
2649 * Arg2: new_state
2650 */
2651 BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after
2652 * socket transition to LISTEN state.
2653 */
2654};
2655
2656/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
2657 * changes between the TCP and BPF versions. Ideally this should never happen.
2658 * If it does, we need to add code to convert them before calling
2659 * the BPF sock_ops function.
2660 */
2661enum {
2662 BPF_TCP_ESTABLISHED = 1,
2663 BPF_TCP_SYN_SENT,
2664 BPF_TCP_SYN_RECV,
2665 BPF_TCP_FIN_WAIT1,
2666 BPF_TCP_FIN_WAIT2,
2667 BPF_TCP_TIME_WAIT,
2668 BPF_TCP_CLOSE,
2669 BPF_TCP_CLOSE_WAIT,
2670 BPF_TCP_LAST_ACK,
2671 BPF_TCP_LISTEN,
2672 BPF_TCP_CLOSING, /* Now a valid state */
2673 BPF_TCP_NEW_SYN_RECV,
2674
2675 BPF_TCP_MAX_STATES /* Leave at the end! */
2676};
2677
2678#define TCP_BPF_IW 1001 /* Set TCP initial congestion window */
2679#define TCP_BPF_SNDCWND_CLAMP 1002 /* Set sndcwnd_clamp */
2680
2681struct bpf_perf_event_value {
2682 __u64 counter;
2683 __u64 enabled;
2684 __u64 running;
2685};
2686
2687#define BPF_DEVCG_ACC_MKNOD (1ULL << 0)
2688#define BPF_DEVCG_ACC_READ (1ULL << 1)
2689#define BPF_DEVCG_ACC_WRITE (1ULL << 2)
2690
2691#define BPF_DEVCG_DEV_BLOCK (1ULL << 0)
2692#define BPF_DEVCG_DEV_CHAR (1ULL << 1)
2693
2694struct bpf_cgroup_dev_ctx {
2695 /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
2696 __u32 access_type;
2697 __u32 major;
2698 __u32 minor;
2699};
2700
2701struct bpf_raw_tracepoint_args {
2702 __u64 args[0];
2703};
2704
2705/* DIRECT: Skip the FIB rules and go to FIB table associated with device
2706 * OUTPUT: Do lookup from egress perspective; default is ingress
2707 */
2708#define BPF_FIB_LOOKUP_DIRECT BIT(0)
2709#define BPF_FIB_LOOKUP_OUTPUT BIT(1)
2710
2711enum {
2712 BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */
2713 BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */
2714 BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */
2715 BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */
2716 BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */
2717 BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */
2718 BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */
2719 BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */
2720 BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */
2721};
2722
2723struct bpf_fib_lookup {
2724 /* input: network family for lookup (AF_INET, AF_INET6)
2725 * output: network family of egress nexthop
2726 */
2727 __u8 family;
2728
2729 /* set if lookup is to consider L4 data - e.g., FIB rules */
2730 __u8 l4_protocol;
2731 __be16 sport;
2732 __be16 dport;
2733
2734 /* total length of packet from network header - used for MTU check */
2735 __u16 tot_len;
2736
2737 /* input: L3 device index for lookup
2738 * output: device index from FIB lookup
2739 */
2740 __u32 ifindex;
2741
2742 union {
2743 /* inputs to lookup */
2744 __u8 tos; /* AF_INET */
2745 __be32 flowinfo; /* AF_INET6, flow_label + priority */
2746
2747 /* output: metric of fib result (IPv4/IPv6 only) */
2748 __u32 rt_metric;
2749 };
2750
2751 union {
2752 __be32 ipv4_src;
2753 __u32 ipv6_src[4]; /* in6_addr; network order */
2754 };
2755
2756 /* input to bpf_fib_lookup, ipv{4,6}_dst is destination address in
2757 * network header. output: bpf_fib_lookup sets to gateway address
2758 * if FIB lookup returns gateway route
2759 */
2760 union {
2761 __be32 ipv4_dst;
2762 __u32 ipv6_dst[4]; /* in6_addr; network order */
2763 };
2764
2765 /* output */
2766 __be16 h_vlan_proto;
2767 __be16 h_vlan_TCI;
2768 __u8 smac[6]; /* ETH_ALEN */
2769 __u8 dmac[6]; /* ETH_ALEN */
2770};
2771
2772enum bpf_task_fd_type {
2773 BPF_FD_TYPE_RAW_TRACEPOINT, /* tp name */
2774 BPF_FD_TYPE_TRACEPOINT, /* tp name */
2775 BPF_FD_TYPE_KPROBE, /* (symbol + offset) or addr */
2776 BPF_FD_TYPE_KRETPROBE, /* (symbol + offset) or addr */
2777 BPF_FD_TYPE_UPROBE, /* filename + offset */
2778 BPF_FD_TYPE_URETPROBE, /* filename + offset */
2779};
2780
2781#endif /* _UAPI__LINUX_BPF_H__ */