/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 */
#ifndef __LIBBPF_LIBBPF_H
#define __LIBBPF_LIBBPF_H

#include <stdarg.h>
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <sys/types.h>  // for size_t
#include <linux/bpf.h>

#include "libbpf_common.h"

#ifdef __cplusplus
extern "C" {
#endif

enum libbpf_errno {
	__LIBBPF_ERRNO__START = 4000,

	/* Something wrong in libelf */
	LIBBPF_ERRNO__LIBELF = __LIBBPF_ERRNO__START,
	LIBBPF_ERRNO__FORMAT,	/* BPF object format invalid */
	LIBBPF_ERRNO__KVERSION,	/* Incorrect or no 'version' section */
	LIBBPF_ERRNO__ENDIAN,	/* Endian mismatch */
	LIBBPF_ERRNO__INTERNAL,	/* Internal error in libbpf */
	LIBBPF_ERRNO__RELOC,	/* Relocation failed */
	LIBBPF_ERRNO__LOAD,	/* Load program failure for unknown reason */
	LIBBPF_ERRNO__VERIFY,	/* Kernel verifier blocks program loading */
	LIBBPF_ERRNO__PROG2BIG,	/* Program too big */
	LIBBPF_ERRNO__KVER,	/* Incorrect kernel version */
	LIBBPF_ERRNO__PROGTYPE,	/* Kernel doesn't support this program type */
	LIBBPF_ERRNO__WRNGPID,	/* Wrong pid in netlink message */
	LIBBPF_ERRNO__INVSEQ,	/* Invalid netlink sequence */
	LIBBPF_ERRNO__NLPARSE,	/* netlink parsing error */
	__LIBBPF_ERRNO__END,
};

LIBBPF_API int libbpf_strerror(int err, char *buf, size_t size);

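/*
 * Example (illustrative sketch, not part of the API): convert a libbpf
 * error code into a human-readable message. "err" is assumed to be the
 * return value of a failed libbpf call.
 *
 *	char buf[128];
 *
 *	libbpf_strerror(err, buf, sizeof(buf));
 *	fprintf(stderr, "libbpf call failed: %s\n", buf);
 */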
enum libbpf_print_level {
	LIBBPF_WARN,
	LIBBPF_INFO,
	LIBBPF_DEBUG,
};

typedef int (*libbpf_print_fn_t)(enum libbpf_print_level level,
				 const char *, va_list ap);

LIBBPF_API libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn);

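/*
 * Example (illustrative sketch): install a custom logging callback that
 * suppresses debug output. The function name "my_print" is an assumption
 * made for the example; any function matching libbpf_print_fn_t works.
 *
 *	static int my_print(enum libbpf_print_level level,
 *			    const char *fmt, va_list args)
 *	{
 *		if (level == LIBBPF_DEBUG)
 *			return 0;
 *		return vfprintf(stderr, fmt, args);
 *	}
 *
 *	libbpf_set_print(my_print);
 */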
/* Hide internal to user */
struct bpf_object;

struct bpf_object_open_attr {
	const char *file;
	enum bpf_prog_type prog_type;
};

struct bpf_object_open_opts {
	/* size of this struct, for forward/backward compatibility */
	size_t sz;
	/* object name override, if provided:
	 * - for object open from file, this will override setting object
	 *   name from file path's base name;
	 * - for object open from memory buffer, this will specify an object
	 *   name and will override default "<addr>-<buf-size>" name;
	 */
	const char *object_name;
	/* parse map definitions non-strictly, allowing extra attributes/data */
	bool relaxed_maps;
	/* DEPRECATED: handle CO-RE relocations non-strictly, allowing failures.
	 * Value is ignored. Relocations are always processed non-strictly.
	 * Non-relocatable instructions are replaced with invalid ones to
	 * prevent accidental errors.
	 */
	bool relaxed_core_relocs;
	/* maps that set the 'pinning' attribute in their definition will have
	 * their pin_path attribute set to a file in this directory, and be
	 * auto-pinned to that path on load; defaults to "/sys/fs/bpf".
	 */
	const char *pin_root_path;
	__u32 attach_prog_fd;
	/* Additional kernel config content that augments and overrides
	 * system Kconfig for CONFIG_xxx externs.
	 */
	const char *kconfig;
};
#define bpf_object_open_opts__last_field kconfig

LIBBPF_API struct bpf_object *bpf_object__open(const char *path);
LIBBPF_API struct bpf_object *
bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts);
LIBBPF_API struct bpf_object *
bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
		     const struct bpf_object_open_opts *opts);

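/*
 * Example (illustrative sketch): open a BPF object file with custom
 * options, then load it into the kernel. The object path "prog.bpf.o"
 * and the pin directory are hypothetical; error handling is abbreviated.
 *
 *	struct bpf_object_open_opts opts = {
 *		.sz = sizeof(opts),
 *		.pin_root_path = "/sys/fs/bpf/myapp",
 *	};
 *	struct bpf_object *obj;
 *
 *	obj = bpf_object__open_file("prog.bpf.o", &opts);
 *	if (libbpf_get_error(obj))
 *		return -1;
 *	if (bpf_object__load(obj)) {
 *		bpf_object__close(obj);
 *		return -1;
 *	}
 */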
/* deprecated bpf_object__open variants */
LIBBPF_API struct bpf_object *
bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
			const char *name);
LIBBPF_API struct bpf_object *
bpf_object__open_xattr(struct bpf_object_open_attr *attr);

enum libbpf_pin_type {
	LIBBPF_PIN_NONE,
	/* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
	LIBBPF_PIN_BY_NAME,
};

/* pin_maps and unpin_maps can both be called with a NULL path, in which case
 * they will use the pin_path attribute of each map (and ignore all maps that
 * don't have a pin_path set).
 */
LIBBPF_API int bpf_object__pin_maps(struct bpf_object *obj, const char *path);
LIBBPF_API int bpf_object__unpin_maps(struct bpf_object *obj,
				      const char *path);
LIBBPF_API int bpf_object__pin_programs(struct bpf_object *obj,
					const char *path);
LIBBPF_API int bpf_object__unpin_programs(struct bpf_object *obj,
					  const char *path);
LIBBPF_API int bpf_object__pin(struct bpf_object *object, const char *path);
LIBBPF_API void bpf_object__close(struct bpf_object *object);

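/*
 * Example (illustrative sketch): pin maps using the per-map pin_path set
 * during open (NULL path), and pin programs under a hypothetical bpffs
 * directory.
 *
 *	if (bpf_object__pin_maps(obj, NULL))
 *		return -1;
 *	if (bpf_object__pin_programs(obj, "/sys/fs/bpf/myapp"))
 *		return -1;
 */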
struct bpf_object_load_attr {
	struct bpf_object *obj;
	int log_level;
	const char *target_btf_path;
};

/* Load/unload object into/from kernel */
LIBBPF_API int bpf_object__load(struct bpf_object *obj);
LIBBPF_API int bpf_object__load_xattr(struct bpf_object_load_attr *attr);
LIBBPF_API int bpf_object__unload(struct bpf_object *obj);

LIBBPF_API const char *bpf_object__name(const struct bpf_object *obj);
LIBBPF_API unsigned int bpf_object__kversion(const struct bpf_object *obj);

struct btf;
LIBBPF_API struct btf *bpf_object__btf(const struct bpf_object *obj);
LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj);

LIBBPF_API struct bpf_program *
bpf_object__find_program_by_title(const struct bpf_object *obj,
				  const char *title);
LIBBPF_API struct bpf_program *
bpf_object__find_program_by_name(const struct bpf_object *obj,
				 const char *name);

LIBBPF_API struct bpf_object *bpf_object__next(struct bpf_object *prev);
#define bpf_object__for_each_safe(pos, tmp)			\
	for ((pos) = bpf_object__next(NULL),		\
		(tmp) = bpf_object__next(pos);		\
	     (pos) != NULL;				\
	     (pos) = (tmp), (tmp) = bpf_object__next(tmp))

typedef void (*bpf_object_clear_priv_t)(struct bpf_object *, void *);
LIBBPF_API int bpf_object__set_priv(struct bpf_object *obj, void *priv,
				    bpf_object_clear_priv_t clear_priv);
LIBBPF_API void *bpf_object__priv(const struct bpf_object *prog);

LIBBPF_API int
libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
			 enum bpf_attach_type *expected_attach_type);
LIBBPF_API int libbpf_attach_type_by_name(const char *name,
					  enum bpf_attach_type *attach_type);
LIBBPF_API int libbpf_find_vmlinux_btf_id(const char *name,
					  enum bpf_attach_type attach_type);

/* Accessors of bpf_program */
struct bpf_program;
LIBBPF_API struct bpf_program *bpf_program__next(struct bpf_program *prog,
						 const struct bpf_object *obj);

#define bpf_object__for_each_program(pos, obj)		\
	for ((pos) = bpf_program__next(NULL, (obj));	\
	     (pos) != NULL;				\
	     (pos) = bpf_program__next((pos), (obj)))

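/*
 * Example (illustrative sketch): walk all programs of an already opened
 * object and print their names.
 *
 *	struct bpf_program *prog;
 *
 *	bpf_object__for_each_program(prog, obj)
 *		printf("found program: %s\n", bpf_program__name(prog));
 */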
LIBBPF_API struct bpf_program *bpf_program__prev(struct bpf_program *prog,
						 const struct bpf_object *obj);

typedef void (*bpf_program_clear_priv_t)(struct bpf_program *, void *);

LIBBPF_API int bpf_program__set_priv(struct bpf_program *prog, void *priv,
				     bpf_program_clear_priv_t clear_priv);

LIBBPF_API void *bpf_program__priv(const struct bpf_program *prog);
LIBBPF_API void bpf_program__set_ifindex(struct bpf_program *prog,
					 __u32 ifindex);

LIBBPF_API const char *bpf_program__name(const struct bpf_program *prog);
LIBBPF_API const char *bpf_program__section_name(const struct bpf_program *prog);
LIBBPF_API LIBBPF_DEPRECATED("BPF program title is confusing term; please use bpf_program__section_name() instead")
const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy);
LIBBPF_API bool bpf_program__autoload(const struct bpf_program *prog);
LIBBPF_API int bpf_program__set_autoload(struct bpf_program *prog, bool autoload);

/* returns program size in bytes */
LIBBPF_API size_t bpf_program__size(const struct bpf_program *prog);

LIBBPF_API int bpf_program__load(struct bpf_program *prog, char *license,
				 __u32 kern_version);
LIBBPF_API int bpf_program__fd(const struct bpf_program *prog);
LIBBPF_API int bpf_program__pin_instance(struct bpf_program *prog,
					 const char *path,
					 int instance);
LIBBPF_API int bpf_program__unpin_instance(struct bpf_program *prog,
					   const char *path,
					   int instance);
LIBBPF_API int bpf_program__pin(struct bpf_program *prog, const char *path);
LIBBPF_API int bpf_program__unpin(struct bpf_program *prog, const char *path);
LIBBPF_API void bpf_program__unload(struct bpf_program *prog);

struct bpf_link;

LIBBPF_API struct bpf_link *bpf_link__open(const char *path);
LIBBPF_API int bpf_link__fd(const struct bpf_link *link);
LIBBPF_API const char *bpf_link__pin_path(const struct bpf_link *link);
LIBBPF_API int bpf_link__pin(struct bpf_link *link, const char *path);
LIBBPF_API int bpf_link__unpin(struct bpf_link *link);
LIBBPF_API int bpf_link__update_program(struct bpf_link *link,
					struct bpf_program *prog);
LIBBPF_API void bpf_link__disconnect(struct bpf_link *link);
LIBBPF_API int bpf_link__detach(struct bpf_link *link);
LIBBPF_API int bpf_link__destroy(struct bpf_link *link);

LIBBPF_API struct bpf_link *
bpf_program__attach(struct bpf_program *prog);
LIBBPF_API struct bpf_link *
bpf_program__attach_perf_event(struct bpf_program *prog, int pfd);
LIBBPF_API struct bpf_link *
bpf_program__attach_kprobe(struct bpf_program *prog, bool retprobe,
			   const char *func_name);
LIBBPF_API struct bpf_link *
bpf_program__attach_uprobe(struct bpf_program *prog, bool retprobe,
			   pid_t pid, const char *binary_path,
			   size_t func_offset);
LIBBPF_API struct bpf_link *
bpf_program__attach_tracepoint(struct bpf_program *prog,
			       const char *tp_category,
			       const char *tp_name);
LIBBPF_API struct bpf_link *
bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
				   const char *tp_name);
LIBBPF_API struct bpf_link *
bpf_program__attach_trace(struct bpf_program *prog);
LIBBPF_API struct bpf_link *
bpf_program__attach_lsm(struct bpf_program *prog);
LIBBPF_API struct bpf_link *
bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd);
LIBBPF_API struct bpf_link *
bpf_program__attach_netns(struct bpf_program *prog, int netns_fd);
LIBBPF_API struct bpf_link *
bpf_program__attach_xdp(struct bpf_program *prog, int ifindex);
LIBBPF_API struct bpf_link *
bpf_program__attach_freplace(struct bpf_program *prog,
			     int target_fd, const char *attach_func_name);

struct bpf_map;

LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map);

struct bpf_iter_attach_opts {
	size_t sz; /* size of this struct for forward/backward compatibility */
	union bpf_iter_link_info *link_info;
	__u32 link_info_len;
};
#define bpf_iter_attach_opts__last_field link_info_len

LIBBPF_API struct bpf_link *
bpf_program__attach_iter(struct bpf_program *prog,
			 const struct bpf_iter_attach_opts *opts);

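/*
 * Example (illustrative sketch): attach a program to a kprobe and detach
 * it again. The traced kernel function "do_sys_open" is only an example;
 * error handling is abbreviated.
 *
 *	struct bpf_link *link;
 *
 *	// retprobe == false, i.e. attach at function entry
 *	link = bpf_program__attach_kprobe(prog, false, "do_sys_open");
 *	if (libbpf_get_error(link))
 *		return -1;
 *	// ... program runs ...
 *	bpf_link__destroy(link);
 */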
struct bpf_insn;

/*
 * Libbpf allows callers to adjust BPF programs before they are loaded
 * into the kernel. One program in an object file can be transformed into
 * multiple variants to be attached to different hooks.
 *
 * bpf_program_prep_t, bpf_program__set_prep and bpf_program__nth_fd
 * form an API for this purpose.
 *
 * - bpf_program_prep_t:
 *   Defines a 'preprocessor', which is a caller defined function
 *   passed to libbpf through bpf_program__set_prep(), and will be
 *   called before the program is loaded. The preprocessor should adjust
 *   the program one time for each instance according to the instance id
 *   passed to it.
 *
 * - bpf_program__set_prep:
 *   Attaches a preprocessor to a BPF program. The number of instances
 *   that should be created is also passed through this function.
 *
 * - bpf_program__nth_fd:
 *   After the program is loaded, get the resulting FD of a given instance
 *   of the BPF program.
 *
 * If bpf_program__set_prep() is not used, the program is loaded
 * without adjustment during bpf_object__load(). The program has only
 * one instance. In this case bpf_program__fd(prog) is equal to
 * bpf_program__nth_fd(prog, 0).
 */

struct bpf_prog_prep_result {
	/*
	 * If not NULL, load new instruction array.
	 * If set to NULL, don't load this instance.
	 */
	struct bpf_insn *new_insn_ptr;
	int new_insn_cnt;

	/* If not NULL, result FD is written to it. */
	int *pfd;
};

/*
 * Parameters of bpf_program_prep_t:
 *  - prog:	The bpf_program being loaded.
 *  - n:	Index of instance being generated.
 *  - insns:	BPF instructions array.
 *  - insns_cnt: Number of instructions in insns.
 *  - res:	Output parameter, result of transformation.
 *
 * Return value:
 *  - Zero:	pre-processing success.
 *  - Non-zero:	pre-processing error, stop loading.
 */
typedef int (*bpf_program_prep_t)(struct bpf_program *prog, int n,
				  struct bpf_insn *insns, int insns_cnt,
				  struct bpf_prog_prep_result *res);

LIBBPF_API int bpf_program__set_prep(struct bpf_program *prog, int nr_instance,
				     bpf_program_prep_t prep);

LIBBPF_API int bpf_program__nth_fd(const struct bpf_program *prog, int n);

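/*
 * Example (illustrative sketch): a preprocessor that loads every instance
 * with the unmodified instruction array. "my_prep" is a hypothetical name.
 *
 *	static int my_prep(struct bpf_program *prog, int n,
 *			   struct bpf_insn *insns, int insns_cnt,
 *			   struct bpf_prog_prep_result *res)
 *	{
 *		// load instance 'n' with the original instructions
 *		res->new_insn_ptr = insns;
 *		res->new_insn_cnt = insns_cnt;
 *		res->pfd = NULL;
 *		return 0;
 *	}
 *
 *	bpf_program__set_prep(prog, 2, my_prep);
 */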
/*
 * Adjust type of BPF program. Default is kprobe.
 */
LIBBPF_API int bpf_program__set_socket_filter(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_tracepoint(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_raw_tracepoint(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_kprobe(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_lsm(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_sched_cls(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_sched_act(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_xdp(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_perf_event(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_tracing(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_struct_ops(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_extension(struct bpf_program *prog);
LIBBPF_API int bpf_program__set_sk_lookup(struct bpf_program *prog);

LIBBPF_API enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog);
LIBBPF_API void bpf_program__set_type(struct bpf_program *prog,
				      enum bpf_prog_type type);

LIBBPF_API enum bpf_attach_type
bpf_program__get_expected_attach_type(struct bpf_program *prog);
LIBBPF_API void
bpf_program__set_expected_attach_type(struct bpf_program *prog,
				      enum bpf_attach_type type);

LIBBPF_API int
bpf_program__set_attach_target(struct bpf_program *prog, int attach_prog_fd,
			       const char *attach_func_name);

LIBBPF_API bool bpf_program__is_socket_filter(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_tracepoint(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_raw_tracepoint(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_kprobe(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_lsm(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_sched_cls(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_sched_act(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_xdp(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_perf_event(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_tracing(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_struct_ops(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_extension(const struct bpf_program *prog);
LIBBPF_API bool bpf_program__is_sk_lookup(const struct bpf_program *prog);

/*
 * No need for __attribute__((packed)), all members of 'bpf_map_def'
 * are aligned. In addition, using __attribute__((packed))
 * would trigger a -Wpacked warning message, and lead to an error
 * if -Werror is set.
 */
struct bpf_map_def {
	unsigned int type;
	unsigned int key_size;
	unsigned int value_size;
	unsigned int max_entries;
	unsigned int map_flags;
};

/*
 * The 'struct bpf_map' in include/linux/bpf.h is internal to the kernel,
 * so no need to worry about a name clash.
 */
LIBBPF_API struct bpf_map *
bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name);

LIBBPF_API int
bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name);

/*
 * Get bpf_map through the offset of corresponding struct bpf_map_def
 * in the BPF object file.
 */
LIBBPF_API struct bpf_map *
bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset);

LIBBPF_API struct bpf_map *
bpf_map__next(const struct bpf_map *map, const struct bpf_object *obj);
#define bpf_object__for_each_map(pos, obj)		\
	for ((pos) = bpf_map__next(NULL, (obj));	\
	     (pos) != NULL;				\
	     (pos) = bpf_map__next((pos), (obj)))
#define bpf_map__for_each bpf_object__for_each_map

LIBBPF_API struct bpf_map *
bpf_map__prev(const struct bpf_map *map, const struct bpf_object *obj);

/* get/set map FD */
LIBBPF_API int bpf_map__fd(const struct bpf_map *map);
LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd);
/* get map definition */
LIBBPF_API const struct bpf_map_def *bpf_map__def(const struct bpf_map *map);
/* get map name */
LIBBPF_API const char *bpf_map__name(const struct bpf_map *map);
/* get/set map type */
LIBBPF_API enum bpf_map_type bpf_map__type(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type);
/* get/set map size (max_entries) */
LIBBPF_API __u32 bpf_map__max_entries(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries);
LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries);
/* get/set map flags */
LIBBPF_API __u32 bpf_map__map_flags(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags);
/* get/set map NUMA node */
LIBBPF_API __u32 bpf_map__numa_node(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node);
/* get/set map key size */
LIBBPF_API __u32 bpf_map__key_size(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_key_size(struct bpf_map *map, __u32 size);
/* get/set map value size */
LIBBPF_API __u32 bpf_map__value_size(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_value_size(struct bpf_map *map, __u32 size);
/* get map key/value BTF type IDs */
LIBBPF_API __u32 bpf_map__btf_key_type_id(const struct bpf_map *map);
LIBBPF_API __u32 bpf_map__btf_value_type_id(const struct bpf_map *map);
/* get/set map ifindex */
LIBBPF_API __u32 bpf_map__ifindex(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);

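/*
 * Example (illustrative sketch): look up a map by name and tune it before
 * bpf_object__load(). The map name "events" is hypothetical.
 *
 *	struct bpf_map *map;
 *
 *	map = bpf_object__find_map_by_name(obj, "events");
 *	if (!map)
 *		return -1;
 *	bpf_map__set_max_entries(map, 4096);
 *	// after bpf_object__load(), the kernel FD becomes available:
 *	// int fd = bpf_map__fd(map);
 */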
typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *);
LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv,
				 bpf_map_clear_priv_t clear_priv);
LIBBPF_API void *bpf_map__priv(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map,
					  const void *data, size_t size);
LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map);
LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_pin_path(struct bpf_map *map, const char *path);
LIBBPF_API const char *bpf_map__get_pin_path(const struct bpf_map *map);
LIBBPF_API bool bpf_map__is_pinned(const struct bpf_map *map);
LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path);
LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path);

LIBBPF_API int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd);

LIBBPF_API long libbpf_get_error(const void *ptr);

struct bpf_prog_load_attr {
	const char *file;
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	int ifindex;
	int log_level;
	int prog_flags;
};

LIBBPF_API int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
				   struct bpf_object **pobj, int *prog_fd);
LIBBPF_API int bpf_prog_load(const char *file, enum bpf_prog_type type,
			     struct bpf_object **pobj, int *prog_fd);

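/*
 * Example (illustrative sketch): load an XDP program with
 * bpf_prog_load_xattr(). The object file name is hypothetical.
 *
 *	struct bpf_prog_load_attr load_attr = {
 *		.file = "xdp_prog.bpf.o",
 *		.prog_type = BPF_PROG_TYPE_XDP,
 *	};
 *	struct bpf_object *obj;
 *	int prog_fd;
 *
 *	if (bpf_prog_load_xattr(&load_attr, &obj, &prog_fd))
 *		return -1;
 */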
struct xdp_link_info {
	__u32 prog_id;
	__u32 drv_prog_id;
	__u32 hw_prog_id;
	__u32 skb_prog_id;
	__u8 attach_mode;
};

struct bpf_xdp_set_link_opts {
	size_t sz;
	int old_fd;
	size_t :0;
};
#define bpf_xdp_set_link_opts__last_field old_fd

LIBBPF_API int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags);
LIBBPF_API int bpf_set_link_xdp_fd_opts(int ifindex, int fd, __u32 flags,
					const struct bpf_xdp_set_link_opts *opts);
LIBBPF_API int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags);
LIBBPF_API int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
				     size_t info_size, __u32 flags);

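/*
 * Example (illustrative sketch): attach the program FD obtained above to
 * a network interface and detach it again. XDP_FLAGS_UPDATE_IF_NOEXIST
 * comes from <linux/if_link.h>, which is not included by this header.
 *
 *	if (bpf_set_link_xdp_fd(ifindex, prog_fd, XDP_FLAGS_UPDATE_IF_NOEXIST))
 *		return -1;
 *	// ... later, detach by installing an invalid FD ...
 *	bpf_set_link_xdp_fd(ifindex, -1, 0);
 */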
/* Ring buffer APIs */
struct ring_buffer;

typedef int (*ring_buffer_sample_fn)(void *ctx, void *data, size_t size);

struct ring_buffer_opts {
	size_t sz; /* size of this struct, for forward/backward compatibility */
};

#define ring_buffer_opts__last_field sz

LIBBPF_API struct ring_buffer *
ring_buffer__new(int map_fd, ring_buffer_sample_fn sample_cb, void *ctx,
		 const struct ring_buffer_opts *opts);
LIBBPF_API void ring_buffer__free(struct ring_buffer *rb);
LIBBPF_API int ring_buffer__add(struct ring_buffer *rb, int map_fd,
				ring_buffer_sample_fn sample_cb, void *ctx);
LIBBPF_API int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms);
LIBBPF_API int ring_buffer__consume(struct ring_buffer *rb);

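/*
 * Example (illustrative sketch): consume records from a BPF_MAP_TYPE_RINGBUF
 * map. "handle_event" is a hypothetical callback; "map_fd" is the ring
 * buffer map's FD.
 *
 *	static int handle_event(void *ctx, void *data, size_t size)
 *	{
 *		// 'data' points to one record submitted by the BPF program
 *		return 0;
 *	}
 *
 *	struct ring_buffer *rb;
 *
 *	rb = ring_buffer__new(map_fd, handle_event, NULL, NULL);
 *	if (!rb)
 *		return -1;
 *	while (ring_buffer__poll(rb, 100) >= 0)
 *		;	// 100ms timeout per iteration
 *	ring_buffer__free(rb);
 */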
/* Perf buffer APIs */
struct perf_buffer;

typedef void (*perf_buffer_sample_fn)(void *ctx, int cpu,
				      void *data, __u32 size);
typedef void (*perf_buffer_lost_fn)(void *ctx, int cpu, __u64 cnt);

/* common use perf buffer options */
struct perf_buffer_opts {
	/* if specified, sample_cb is called for each sample */
	perf_buffer_sample_fn sample_cb;
	/* if specified, lost_cb is called for each batch of lost samples */
	perf_buffer_lost_fn lost_cb;
	/* ctx is provided to sample_cb and lost_cb */
	void *ctx;
};

LIBBPF_API struct perf_buffer *
perf_buffer__new(int map_fd, size_t page_cnt,
		 const struct perf_buffer_opts *opts);

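/*
 * Example (illustrative sketch): set up a perf buffer over a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map with 8 pages per CPU. The callback
 * names are hypothetical.
 *
 *	static void handle_sample(void *ctx, int cpu, void *data, __u32 size)
 *	{
 *		// one sample pushed by bpf_perf_event_output()
 *	}
 *
 *	static void handle_lost(void *ctx, int cpu, __u64 cnt)
 *	{
 *		// 'cnt' samples were dropped on this CPU
 *	}
 *
 *	struct perf_buffer_opts pb_opts = {
 *		.sample_cb = handle_sample,
 *		.lost_cb = handle_lost,
 *	};
 *	struct perf_buffer *pb;
 *
 *	pb = perf_buffer__new(map_fd, 8, &pb_opts);
 *	if (libbpf_get_error(pb))
 *		return -1;
 *	while (perf_buffer__poll(pb, 100) >= 0)
 *		;
 *	perf_buffer__free(pb);
 */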
enum bpf_perf_event_ret {
	LIBBPF_PERF_EVENT_DONE	= 0,
	LIBBPF_PERF_EVENT_ERROR	= -1,
	LIBBPF_PERF_EVENT_CONT	= -2,
};

struct perf_event_header;

typedef enum bpf_perf_event_ret
(*perf_buffer_event_fn)(void *ctx, int cpu, struct perf_event_header *event);

/* raw perf buffer options, giving most power and control */
struct perf_buffer_raw_opts {
	/* perf event attrs passed directly into perf_event_open() */
	struct perf_event_attr *attr;
	/* raw event callback */
	perf_buffer_event_fn event_cb;
	/* ctx is provided to event_cb */
	void *ctx;
	/* if cpu_cnt == 0, open all on all possible CPUs (up to the number of
	 * max_entries of given PERF_EVENT_ARRAY map)
	 */
	int cpu_cnt;
	/* if cpu_cnt > 0, cpus is an array of CPUs to open ring buffers on */
	int *cpus;
	/* if cpu_cnt > 0, map_keys specify map keys to set per-CPU FDs for */
	int *map_keys;
};

LIBBPF_API struct perf_buffer *
perf_buffer__new_raw(int map_fd, size_t page_cnt,
		     const struct perf_buffer_raw_opts *opts);

LIBBPF_API void perf_buffer__free(struct perf_buffer *pb);
LIBBPF_API int perf_buffer__epoll_fd(const struct perf_buffer *pb);
LIBBPF_API int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms);
LIBBPF_API int perf_buffer__consume(struct perf_buffer *pb);
LIBBPF_API int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx);
LIBBPF_API size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb);
LIBBPF_API int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx);

typedef enum bpf_perf_event_ret
	(*bpf_perf_event_print_t)(struct perf_event_header *hdr,
				  void *private_data);
LIBBPF_API enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
			   void **copy_mem, size_t *copy_size,
			   bpf_perf_event_print_t fn, void *private_data);

struct bpf_prog_linfo;
struct bpf_prog_info;

LIBBPF_API void bpf_prog_linfo__free(struct bpf_prog_linfo *prog_linfo);
LIBBPF_API struct bpf_prog_linfo *
bpf_prog_linfo__new(const struct bpf_prog_info *info);
LIBBPF_API const struct bpf_line_info *
bpf_prog_linfo__lfind_addr_func(const struct bpf_prog_linfo *prog_linfo,
				__u64 addr, __u32 func_idx, __u32 nr_skip);
LIBBPF_API const struct bpf_line_info *
bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo,
		      __u32 insn_off, __u32 nr_skip);

/*
 * Probe for supported system features
 *
 * Note that running many of these probes in a short amount of time can cause
 * the kernel to reach the maximal size of lockable memory allowed for the
 * user, causing subsequent probes to fail. In this case, the caller may want
 * to adjust that limit with setrlimit().
 */
LIBBPF_API bool bpf_probe_prog_type(enum bpf_prog_type prog_type,
				    __u32 ifindex);
LIBBPF_API bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex);
LIBBPF_API bool bpf_probe_helper(enum bpf_func_id id,
				 enum bpf_prog_type prog_type, __u32 ifindex);
LIBBPF_API bool bpf_probe_large_insn_limit(__u32 ifindex);

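/*
 * Example (illustrative sketch): check whether the running kernel supports
 * a program type and a map type before relying on them. An ifindex of 0
 * means "no device offload".
 *
 *	bool have_xdp = bpf_probe_prog_type(BPF_PROG_TYPE_XDP, 0);
 *	bool have_ringbuf = bpf_probe_map_type(BPF_MAP_TYPE_RINGBUF, 0);
 */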
/*
 * Get bpf_prog_info in continuous memory
 *
 * struct bpf_prog_info has multiple arrays. The user has the option to choose
 * which arrays to fetch from the kernel. The following APIs provide a uniform
 * way to fetch this data. All arrays in bpf_prog_info are stored in a single
 * continuous memory region. This makes it easy to store the info in a
 * file.
 *
 * Before writing bpf_prog_info_linear to files, it is necessary to
 * translate pointers in bpf_prog_info to offsets. Helper functions
 * bpf_program__bpil_addr_to_offs() and bpf_program__bpil_offs_to_addr()
 * are introduced to switch between pointers and offsets.
 *
 * Examples:
 *   # To fetch map_ids and prog_tags:
 *   __u64 arrays = (1UL << BPF_PROG_INFO_MAP_IDS) |
 *		    (1UL << BPF_PROG_INFO_PROG_TAGS);
 *   struct bpf_prog_info_linear *info_linear =
 *	bpf_program__get_prog_info_linear(fd, arrays);
 *
 *   # To save data in file
 *   bpf_program__bpil_addr_to_offs(info_linear);
 *   write(f, info_linear, sizeof(*info_linear) + info_linear->data_len);
 *
 *   # To read data from file
 *   read(f, info_linear, <proper_size>);
 *   bpf_program__bpil_offs_to_addr(info_linear);
 */
enum bpf_prog_info_array {
	BPF_PROG_INFO_FIRST_ARRAY = 0,
	BPF_PROG_INFO_JITED_INSNS = 0,
	BPF_PROG_INFO_XLATED_INSNS,
	BPF_PROG_INFO_MAP_IDS,
	BPF_PROG_INFO_JITED_KSYMS,
	BPF_PROG_INFO_JITED_FUNC_LENS,
	BPF_PROG_INFO_FUNC_INFO,
	BPF_PROG_INFO_LINE_INFO,
	BPF_PROG_INFO_JITED_LINE_INFO,
	BPF_PROG_INFO_PROG_TAGS,
	BPF_PROG_INFO_LAST_ARRAY,
};

struct bpf_prog_info_linear {
	/* size of struct bpf_prog_info, when the tool is compiled */
	__u32			info_len;
	/* total bytes allocated for data, round up to 8 bytes */
	__u32			data_len;
	/* which arrays are included in data */
	__u64			arrays;
	struct bpf_prog_info	info;
	__u8			data[];
};

LIBBPF_API struct bpf_prog_info_linear *
bpf_program__get_prog_info_linear(int fd, __u64 arrays);

LIBBPF_API void
bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear);

LIBBPF_API void
bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear);

/*
 * A helper function to get the number of possible CPUs before looking up
 * per-CPU maps. A negative errno is returned on failure.
 *
 * Example usage:
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *	if (ncpus < 0) {
 *		// error handling
 *	}
 *	long values[ncpus];
 *	bpf_map_lookup_elem(per_cpu_map_fd, key, values);
 */
LIBBPF_API int libbpf_num_possible_cpus(void);

struct bpf_map_skeleton {
	const char *name;
	struct bpf_map **map;
	void **mmaped;
};

struct bpf_prog_skeleton {
	const char *name;
	struct bpf_program **prog;
	struct bpf_link **link;
};

struct bpf_object_skeleton {
	size_t sz; /* size of this struct, for forward/backward compatibility */

	const char *name;
	void *data;
	size_t data_sz;

	struct bpf_object **obj;

	int map_cnt;
	int map_skel_sz; /* sizeof(struct bpf_map_skeleton) */
	struct bpf_map_skeleton *maps;

	int prog_cnt;
	int prog_skel_sz; /* sizeof(struct bpf_prog_skeleton) */
	struct bpf_prog_skeleton *progs;
};

LIBBPF_API int
bpf_object__open_skeleton(struct bpf_object_skeleton *s,
			  const struct bpf_object_open_opts *opts);
LIBBPF_API int bpf_object__load_skeleton(struct bpf_object_skeleton *s);
LIBBPF_API int bpf_object__attach_skeleton(struct bpf_object_skeleton *s);
LIBBPF_API void bpf_object__detach_skeleton(struct bpf_object_skeleton *s);
LIBBPF_API void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s);

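/*
 * Example (illustrative sketch): the open/load/attach life cycle of a
 * skeleton. In practice 's' is filled in by code generated with
 * "bpftool gen skeleton"; only the call sequence is shown here.
 *
 *	struct bpf_object_skeleton *s = ...;	// from generated code
 *	int err;
 *
 *	err = bpf_object__open_skeleton(s, NULL);
 *	if (!err)
 *		err = bpf_object__load_skeleton(s);
 *	if (!err)
 *		err = bpf_object__attach_skeleton(s);
 *	// ... run ...
 *	bpf_object__detach_skeleton(s);
 *	bpf_object__destroy_skeleton(s);
 */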
enum libbpf_tristate {
	TRI_NO = 0,
	TRI_YES = 1,
	TRI_MODULE = 2,
};

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* __LIBBPF_LIBBPF_H */