/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_H
#define _LINUX_BPF_H 1

#include <uapi/linux/bpf.h>

#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/percpu.h>
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/u64_stats_sync.h>
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/capability.h>
#include <linux/percpu-refcount.h>

struct bpf_verifier_env;
struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
struct bpf_prog_aux;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
struct exception_table_entry;
struct seq_operations;
struct bpf_iter_aux_info;
struct bpf_local_storage;
struct bpf_local_storage_map;

extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;

typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
					struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
struct bpf_iter_seq_info {
	const struct seq_operations *seq_ops;
	bpf_iter_init_seq_priv_t init_seq_private;
	bpf_iter_fini_seq_priv_t fini_seq_private;
	u32 seq_priv_size;
};

/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
	/* funcs callable from userspace (via syscall) */
	int (*map_alloc_check)(union bpf_attr *attr);
	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
	void (*map_release)(struct bpf_map *map, struct file *map_file);
	void (*map_free)(struct bpf_map *map);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
	void (*map_release_uref)(struct bpf_map *map);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
	int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_lookup_and_delete_batch)(struct bpf_map *map,
					   const union bpf_attr *attr,
					   union bpf_attr __user *uattr);
	int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);
	int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
				union bpf_attr __user *uattr);

	/* funcs callable from userspace and from eBPF programs */
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);
	int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
	int (*map_pop_elem)(struct bpf_map *map, void *value);
	int (*map_peek_elem)(struct bpf_map *map, void *value);

	/* funcs called by prog_array and perf_event_array map */
	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
				int fd);
	void (*map_fd_put_ptr)(void *ptr);
	int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
	u32 (*map_fd_sys_lookup_elem)(void *ptr);
	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
				  struct seq_file *m);
	int (*map_check_btf)(const struct bpf_map *map,
			     const struct btf *btf,
			     const struct btf_type *key_type,
			     const struct btf_type *value_type);

	/* Prog poke tracking helpers. */
	int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
	void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
			     struct bpf_prog *new);

	/* Direct value access helpers. */
	int (*map_direct_value_addr)(const struct bpf_map *map,
				     u64 *imm, u32 off);
	int (*map_direct_value_meta)(const struct bpf_map *map,
				     u64 imm, u32 *off);
	int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
	__poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
			     struct poll_table_struct *pts);

	/* Functions called by bpf_local_storage maps */
	int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
					void *owner, u32 size);
	void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
					   void *owner, u32 size);
	struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);

	/* map_meta_equal must be implemented for maps that can be
	 * used as an inner map.  It is a runtime check to ensure
	 * an inner map can be inserted into an outer map.
	 *
	 * Some properties of the inner map have been used during
	 * verification time.  When inserting an inner map at runtime,
	 * map_meta_equal has to ensure the inserted map has the same
	 * properties that the verifier has used earlier.
	 */
	bool (*map_meta_equal)(const struct bpf_map *meta0,
			       const struct bpf_map *meta1);

	/* BTF name and id of struct allocated by map_alloc */
	const char * const map_btf_name;
	int *map_btf_id;

	/* bpf_iter info used to open a seq_file */
	const struct bpf_iter_seq_info *iter_seq_info;
};

struct bpf_map_memory {
	u32 pages;
	struct user_struct *user;
};

struct bpf_map {
	/* The first two cachelines with read-mostly members of which some
	 * are also accessed in fast-path (e.g. ops, max_entries).
	 */
	const struct bpf_map_ops *ops ____cacheline_aligned;
	struct bpf_map *inner_map_meta;
#ifdef CONFIG_SECURITY
	void *security;
#endif
	enum bpf_map_type map_type;
	u32 key_size;
	u32 value_size;
	u32 max_entries;
	u32 map_flags;
	int spin_lock_off; /* >=0 valid offset, <0 error */
	u32 id;
	int numa_node;
	u32 btf_key_type_id;
	u32 btf_value_type_id;
	struct btf *btf;
	struct bpf_map_memory memory;
	char name[BPF_OBJ_NAME_LEN];
	u32 btf_vmlinux_value_type_id;
	bool bypass_spec_v1;
	bool frozen; /* write-once; write-protected by freeze_mutex */
	/* 22 bytes hole */

	/* The 3rd and 4th cacheline with misc members to avoid false sharing
	 * particularly with refcounting.
	 */
	atomic64_t refcnt ____cacheline_aligned;
	atomic64_t usercnt;
	struct work_struct work;
	struct mutex freeze_mutex;
	atomic64_t writecnt;
};

static inline bool map_value_has_spin_lock(const struct bpf_map *map)
{
	return map->spin_lock_off >= 0;
}

static inline void check_and_init_map_lock(struct bpf_map *map, void *dst)
{
	if (likely(!map_value_has_spin_lock(map)))
		return;
	*(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
		(struct bpf_spin_lock){};
}

/* copy everything but bpf_spin_lock */
static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
{
	if (unlikely(map_value_has_spin_lock(map))) {
		u32 off = map->spin_lock_off;

		memcpy(dst, src, off);
		memcpy(dst + off + sizeof(struct bpf_spin_lock),
		       src + off + sizeof(struct bpf_spin_lock),
		       map->value_size - off - sizeof(struct bpf_spin_lock));
	} else {
		memcpy(dst, src, map->value_size);
	}
}
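
/* Illustrative sketch (not code from this header): for a map value whose
 * BTF hypothetically looks like
 *
 *	struct val {
 *		u64 a;			// bytes [0, 8)
 *		struct bpf_spin_lock l;	// skipped by copy_map_value()
 *		u64 b;			// copied as the tail chunk
 *	};
 *
 * spin_lock_off would be 8, so copy_map_value() copies bytes [0, 8) and
 * [8 + sizeof(struct bpf_spin_lock), value_size) while leaving the lock
 * word in @dst untouched; check_and_init_map_lock() is what zeroes it.
 */
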
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
			   bool lock_src);
int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);

struct bpf_offload_dev;
struct bpf_offloaded_map;

struct bpf_map_dev_ops {
	int (*map_get_next_key)(struct bpf_offloaded_map *map,
				void *key, void *next_key);
	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value);
	int (*map_update_elem)(struct bpf_offloaded_map *map,
			       void *key, void *value, u64 flags);
	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};

struct bpf_offloaded_map {
	struct bpf_map map;
	struct net_device *netdev;
	const struct bpf_map_dev_ops *dev_ops;
	void *dev_priv;
	struct list_head offloads;
};

static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
	return container_of(map, struct bpf_offloaded_map, map);
}

static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
	return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
		map->ops->map_seq_show_elem;
}

int map_check_no_btf(const struct bpf_map *map,
		     const struct btf *btf,
		     const struct btf_type *key_type,
		     const struct btf_type *value_type);

bool bpf_map_meta_equal(const struct bpf_map *meta0,
			const struct bpf_map *meta1);

extern const struct bpf_map_ops bpf_map_offload_ops;

/* function argument constraints */
enum bpf_arg_type {
	ARG_DONTCARE = 0,	/* unused argument in helper function */

	/* the following constraints used to prototype
	 * bpf_map_lookup/update/delete_elem() functions
	 */
	ARG_CONST_MAP_PTR,	/* const argument used as pointer to bpf_map */
	ARG_PTR_TO_MAP_KEY,	/* pointer to stack used as map key */
	ARG_PTR_TO_MAP_VALUE,	/* pointer to stack used as map value */
	ARG_PTR_TO_UNINIT_MAP_VALUE,	/* pointer to valid memory used to store a map value */
	ARG_PTR_TO_MAP_VALUE_OR_NULL,	/* pointer to stack used as map value or NULL */

	/* the following constraints used to prototype bpf_memcmp() and other
	 * functions that access data on eBPF program stack
	 */
	ARG_PTR_TO_MEM,		/* pointer to valid memory (stack, packet, map value) */
	ARG_PTR_TO_MEM_OR_NULL,	/* pointer to valid memory or NULL */
	ARG_PTR_TO_UNINIT_MEM,	/* pointer to memory does not need to be initialized,
				 * helper function must fill all bytes or clear
				 * them in error case.
				 */

	ARG_CONST_SIZE,		/* number of bytes accessed from memory */
	ARG_CONST_SIZE_OR_ZERO,	/* number of bytes accessed from memory or 0 */

	ARG_PTR_TO_CTX,		/* pointer to context */
	ARG_PTR_TO_CTX_OR_NULL,	/* pointer to context or NULL */
	ARG_ANYTHING,		/* any (initialized) argument is ok */
	ARG_PTR_TO_SPIN_LOCK,	/* pointer to bpf_spin_lock */
	ARG_PTR_TO_SOCK_COMMON,	/* pointer to sock_common */
	ARG_PTR_TO_INT,		/* pointer to int */
	ARG_PTR_TO_LONG,	/* pointer to long */
	ARG_PTR_TO_SOCKET,	/* pointer to bpf_sock (fullsock) */
	ARG_PTR_TO_SOCKET_OR_NULL,	/* pointer to bpf_sock (fullsock) or NULL */
	ARG_PTR_TO_BTF_ID,	/* pointer to in-kernel struct */
	ARG_PTR_TO_ALLOC_MEM,	/* pointer to dynamically allocated memory */
	ARG_PTR_TO_ALLOC_MEM_OR_NULL,	/* pointer to dynamically allocated memory or NULL */
	ARG_CONST_ALLOC_SIZE_OR_ZERO,	/* number of allocated bytes requested */
	ARG_PTR_TO_BTF_ID_SOCK_COMMON,	/* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
	ARG_PTR_TO_PERCPU_BTF_ID,	/* pointer to in-kernel percpu type */
	__BPF_ARG_TYPE_MAX,
};

/* type of values returned from helper functions */
enum bpf_return_type {
	RET_INTEGER,			/* function returns integer */
	RET_VOID,			/* function doesn't return anything */
	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
	RET_PTR_TO_MAP_VALUE_OR_NULL,	/* returns a pointer to map elem value or NULL */
	RET_PTR_TO_SOCKET_OR_NULL,	/* returns a pointer to a socket or NULL */
	RET_PTR_TO_TCP_SOCK_OR_NULL,	/* returns a pointer to a tcp_sock or NULL */
	RET_PTR_TO_SOCK_COMMON_OR_NULL,	/* returns a pointer to a sock_common or NULL */
	RET_PTR_TO_ALLOC_MEM_OR_NULL,	/* returns a pointer to dynamically allocated memory or NULL */
	RET_PTR_TO_BTF_ID_OR_NULL,	/* returns a pointer to a btf_id or NULL */
	RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL, /* returns a pointer to a valid memory or a btf_id or NULL */
	RET_PTR_TO_MEM_OR_BTF_ID,	/* returns a pointer to a valid memory or a btf_id */
};

/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
 * instructions after verifying
 */
struct bpf_func_proto {
	u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
	bool gpl_only;
	bool pkt_access;
	enum bpf_return_type ret_type;
	union {
		struct {
			enum bpf_arg_type arg1_type;
			enum bpf_arg_type arg2_type;
			enum bpf_arg_type arg3_type;
			enum bpf_arg_type arg4_type;
			enum bpf_arg_type arg5_type;
		};
		enum bpf_arg_type arg_type[5];
	};
	union {
		struct {
			u32 *arg1_btf_id;
			u32 *arg2_btf_id;
			u32 *arg3_btf_id;
			u32 *arg4_btf_id;
			u32 *arg5_btf_id;
		};
		u32 *arg_btf_id[5];
	};
	int *ret_btf_id; /* return value btf_id */
	bool (*allowed)(const struct bpf_prog *prog);
};
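
/* A minimal sketch of how a helper advertises itself to the verifier; this
 * mirrors the pattern used for real helpers such as bpf_map_lookup_elem in
 * kernel/bpf/helpers.c and is shown here only as an illustration:
 *
 *	const struct bpf_func_proto bpf_map_lookup_elem_proto = {
 *		.func		= bpf_map_lookup_elem,
 *		.gpl_only	= false,
 *		.pkt_access	= true,
 *		.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
 *		.arg1_type	= ARG_CONST_MAP_PTR,
 *		.arg2_type	= ARG_PTR_TO_MAP_KEY,
 *	};
 *
 * The verifier uses ret_type/argN_type to type-check each BPF_CALL site,
 * and ->func becomes the call target patched into the program.
 */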

/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
 * the first argument to eBPF programs.
 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
 */
struct bpf_context;

enum bpf_access_type {
	BPF_READ = 1,
	BPF_WRITE = 2
};

/* types of values stored in eBPF registers */
/* Pointer types represent:
 * pointer
 * pointer + imm
 * pointer + (u16) var
 * pointer + (u16) var + imm
 * if (range > 0) then [ptr, ptr + range - off) is safe to access
 * if (id > 0) means that some 'var' was added
 * if (off > 0) means that 'imm' was added
 */
enum bpf_reg_type {
	NOT_INIT = 0,		 /* nothing was written into register */
	SCALAR_VALUE,		 /* reg doesn't contain a valid pointer */
	PTR_TO_CTX,		 /* reg points to bpf_context */
	CONST_PTR_TO_MAP,	 /* reg points to struct bpf_map */
	PTR_TO_MAP_VALUE,	 /* reg points to map element value */
	PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */
	PTR_TO_STACK,		 /* reg == frame_pointer + offset */
	PTR_TO_PACKET_META,	 /* skb->data - meta_len */
	PTR_TO_PACKET,		 /* reg points to skb->data */
	PTR_TO_PACKET_END,	 /* skb->data + headlen */
	PTR_TO_FLOW_KEYS,	 /* reg points to bpf_flow_keys */
	PTR_TO_SOCKET,		 /* reg points to struct bpf_sock */
	PTR_TO_SOCKET_OR_NULL,	 /* reg points to struct bpf_sock or NULL */
	PTR_TO_SOCK_COMMON,	 /* reg points to sock_common */
	PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
	PTR_TO_TCP_SOCK,	 /* reg points to struct tcp_sock */
	PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
	PTR_TO_TP_BUFFER,	 /* reg points to a writable raw tp's buffer */
	PTR_TO_XDP_SOCK,	 /* reg points to struct xdp_sock */
	/* PTR_TO_BTF_ID points to a kernel struct that does not need
	 * to be null checked by the BPF program. This does not imply the
	 * pointer is _not_ null and in practice this can easily be a null
	 * pointer when reading pointer chains. The assumption is program
	 * context will handle null pointer dereference typically via fault
	 * handling. The verifier must keep this in mind and can make no
	 * assumptions about null or non-null when doing branch analysis.
	 * Further, when passed into helpers the helpers can not, without
	 * additional context, assume the value is non-null.
	 */
	PTR_TO_BTF_ID,
	/* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
	 * been checked for null. Used primarily to inform the verifier
	 * an explicit null check is required for this struct.
	 */
	PTR_TO_BTF_ID_OR_NULL,
	PTR_TO_MEM,		 /* reg points to valid memory region */
	PTR_TO_MEM_OR_NULL,	 /* reg points to valid memory region or NULL */
	PTR_TO_RDONLY_BUF,	 /* reg points to a readonly buffer */
	PTR_TO_RDONLY_BUF_OR_NULL, /* reg points to a readonly buffer or NULL */
	PTR_TO_RDWR_BUF,	 /* reg points to a read/write buffer */
	PTR_TO_RDWR_BUF_OR_NULL, /* reg points to a read/write buffer or NULL */
	PTR_TO_PERCPU_BTF_ID,	 /* reg points to a percpu kernel variable */
};

/* The information passed from prog-specific *_is_valid_access
 * back to the verifier.
 */
struct bpf_insn_access_aux {
	enum bpf_reg_type reg_type;
	union {
		int ctx_field_size;
		u32 btf_id;
	};
	struct bpf_verifier_log *log; /* for verbose logs */
};

static inline void
bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
{
	aux->ctx_field_size = size;
}

struct bpf_prog_ops {
	int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
			union bpf_attr __user *uattr);
};

struct bpf_verifier_ops {
	/* return eBPF function prototype for verification */
	const struct bpf_func_proto *
	(*get_func_proto)(enum bpf_func_id func_id,
			  const struct bpf_prog *prog);

	/* return true if 'size' wide access at offset 'off' within bpf_context
	 * with 'type' (read or write) is allowed
	 */
	bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info);
	int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
			    const struct bpf_prog *prog);
	int (*gen_ld_abs)(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf);
	u32 (*convert_ctx_access)(enum bpf_access_type type,
				  const struct bpf_insn *src,
				  struct bpf_insn *dst,
				  struct bpf_prog *prog, u32 *target_size);
	int (*btf_struct_access)(struct bpf_verifier_log *log,
				 const struct btf_type *t, int off, int size,
				 enum bpf_access_type atype,
				 u32 *next_btf_id);
};

struct bpf_prog_offload_ops {
	/* verifier basic callbacks */
	int (*insn_hook)(struct bpf_verifier_env *env,
			 int insn_idx, int prev_insn_idx);
	int (*finalize)(struct bpf_verifier_env *env);
	/* verifier optimization callbacks (called after .finalize) */
	int (*replace_insn)(struct bpf_verifier_env *env, u32 off,
			    struct bpf_insn *insn);
	int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt);
	/* program management callbacks */
	int (*prepare)(struct bpf_prog *prog);
	int (*translate)(struct bpf_prog *prog);
	void (*destroy)(struct bpf_prog *prog);
};

struct bpf_prog_offload {
	struct bpf_prog *prog;
	struct net_device *netdev;
	struct bpf_offload_dev *offdev;
	void *dev_priv;
	struct list_head offloads;
	bool dev_state;
	bool opt_failed;
	void *jited_image;
	u32 jited_len;
};

enum bpf_cgroup_storage_type {
	BPF_CGROUP_STORAGE_SHARED,
	BPF_CGROUP_STORAGE_PERCPU,
	__BPF_CGROUP_STORAGE_MAX
};

#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX

/* The longest tracepoint has 12 args.
 * See include/trace/bpf_probe.h
 */
#define MAX_BPF_FUNC_ARGS 12

struct bpf_prog_stats {
	u64 cnt;
	u64 nsecs;
	struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));

struct btf_func_model {
	u8 ret_size;
	u8 nr_args;
	u8 arg_size[MAX_BPF_FUNC_ARGS];
};

/* Restore arguments before returning from trampoline to let original function
 * continue executing. This flag is used for fentry progs when there are no
 * fexit progs.
 */
#define BPF_TRAMP_F_RESTORE_REGS	BIT(0)
/* Call original function after fentry progs, but before fexit progs.
 * Makes sense for fentry/fexit, normal calls and indirect calls.
 */
#define BPF_TRAMP_F_CALL_ORIG		BIT(1)
/* Skip current frame and return to parent. Makes sense for fentry/fexit
 * programs only. Should not be used with normal calls and indirect calls.
 */
#define BPF_TRAMP_F_SKIP_FRAME		BIT(2)
/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
#define BPF_TRAMP_F_RET_FENTRY_RET	BIT(4)

/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
 * bytes on x86.  Pick a number to fit into BPF_IMAGE_SIZE / 2
 */
#define BPF_MAX_TRAMP_PROGS 40

struct bpf_tramp_progs {
	struct bpf_prog *progs[BPF_MAX_TRAMP_PROGS];
	int nr_progs;
};

/* Different use cases for BPF trampoline:
 * 1. replace nop at the function entry (kprobe equivalent)
 *    flags = BPF_TRAMP_F_RESTORE_REGS
 *    fentry = a set of programs to run before returning from trampoline
 *
 * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
 *    flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
 *    orig_call = fentry_ip + MCOUNT_INSN_SIZE
 *    fentry = a set of programs to run before calling original function
 *    fexit = a set of programs to run after original function
 *
 * 3. replace direct call instruction anywhere in the function body
 *    or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
 *    With flags = 0
 *      fentry = a set of programs to run before returning from trampoline
 *    With flags = BPF_TRAMP_F_CALL_ORIG
 *      orig_call = original callback addr or direct function addr
 *      fentry = a set of programs to run before calling original function
 *      fexit = a set of programs to run after original function
 */
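
/* For example (an illustrative sketch of use case 2 above, not code from
 * this header): a fentry+fexit attachment would be built roughly as
 *
 *	u32 flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;
 *
 *	err = arch_prepare_bpf_trampoline(im, image, image_end,
 *					  &tr->func.model, flags, tprogs,
 *					  orig_call);
 *
 * where tprogs carries the fentry and fexit program sets and orig_call is
 * fentry_ip + MCOUNT_INSN_SIZE, so fentry progs run, the original function
 * body executes, and fexit progs see its return value before the
 * trampoline returns to the original caller.
 */
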
struct bpf_tramp_image;
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
				const struct btf_func_model *m, u32 flags,
				struct bpf_tramp_progs *tprogs,
				void *orig_call);
/* these functions are called from generated trampoline */
u64 notrace __bpf_prog_enter(void);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
void notrace __bpf_prog_enter_sleepable(void);
void notrace __bpf_prog_exit_sleepable(void);
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);

struct bpf_ksym {
	unsigned long start;
	unsigned long end;
	char name[KSYM_NAME_LEN];
	struct list_head lnode;
	struct latch_tree_node tnode;
	bool prog;
};

enum bpf_tramp_prog_type {
	BPF_TRAMP_FENTRY,
	BPF_TRAMP_FEXIT,
	BPF_TRAMP_MODIFY_RETURN,
	BPF_TRAMP_MAX,
	BPF_TRAMP_REPLACE, /* more than MAX */
};

struct bpf_tramp_image {
	void *image;
	struct bpf_ksym ksym;
	struct percpu_ref pcref;
	void *ip_after_call;
	void *ip_epilogue;
	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
};

struct bpf_trampoline {
	/* hlist for trampoline_table */
	struct hlist_node hlist;
	/* serializes access to fields of this trampoline */
	struct mutex mutex;
	refcount_t refcnt;
	u64 key;
	struct {
		struct btf_func_model model;
		void *addr;
		bool ftrace_managed;
	} func;
	/* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
	 * program by replacing one of its functions. func.addr is the address
	 * of the function it replaced.
	 */
	struct bpf_prog *extension_prog;
	/* list of BPF programs using this trampoline */
	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
	/* Number of attached programs. A counter per kind. */
	int progs_cnt[BPF_TRAMP_MAX];
	/* Executable image of trampoline */
	struct bpf_tramp_image *cur_image;
	u64 selector;
};

struct bpf_attach_target_info {
	struct btf_func_model fmodel;
	long tgt_addr;
	const char *tgt_name;
	const struct btf_type *tgt_type;
};

#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */

struct bpf_dispatcher_prog {
	struct bpf_prog *prog;
	refcount_t users;
};

struct bpf_dispatcher {
	/* dispatcher mutex */
	struct mutex mutex;
	void *func;
	struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
	int num_progs;
	void *image;
	u32 image_off;
	struct bpf_ksym ksym;
};

static __always_inline unsigned int bpf_dispatcher_nop_func(
	const void *ctx,
	const struct bpf_insn *insnsi,
	unsigned int (*bpf_func)(const void *,
				 const struct bpf_insn *))
{
	return bpf_func(ctx, insnsi);
}
#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs);
#define BPF_DISPATCHER_INIT(_name) {				\
	.mutex = __MUTEX_INITIALIZER(_name.mutex),		\
	.func = &_name##_func,					\
	.progs = {},						\
	.num_progs = 0,						\
	.image = NULL,						\
	.image_off = 0,						\
	.ksym = {						\
		.name = #_name,					\
		.lnode = LIST_HEAD_INIT(_name.ksym.lnode),	\
	},							\
}

#define DEFINE_BPF_DISPATCHER(name)					\
	noinline unsigned int bpf_dispatcher_##name##_func(		\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *))	\
	{								\
		return bpf_func(ctx, insnsi);				\
	}								\
	EXPORT_SYMBOL(bpf_dispatcher_##name##_func);			\
	struct bpf_dispatcher bpf_dispatcher_##name =			\
		BPF_DISPATCHER_INIT(bpf_dispatcher_##name);
#define DECLARE_BPF_DISPATCHER(name)					\
	unsigned int bpf_dispatcher_##name##_func(			\
		const void *ctx,					\
		const struct bpf_insn *insnsi,				\
		unsigned int (*bpf_func)(const void *,			\
					 const struct bpf_insn *));	\
	extern struct bpf_dispatcher bpf_dispatcher_##name;
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func
#define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name)
void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
				struct bpf_prog *to);
/* Called only from JIT-enabled code, so there's no need for stubs. */
void *bpf_jit_alloc_exec_page(void);
void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
int bpf_jit_charge_modmem(u32 pages);
void bpf_jit_uncharge_modmem(u32 pages);
#else
static inline int bpf_trampoline_link_prog(struct bpf_prog *prog,
					   struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog,
					     struct bpf_trampoline *tr)
{
	return -ENOTSUPP;
}
static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
							struct bpf_attach_target_info *tgt_info)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
#define DECLARE_BPF_DISPATCHER(name)
#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func
#define BPF_DISPATCHER_PTR(name) NULL
static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
					      struct bpf_prog *from,
					      struct bpf_prog *to) {}
static inline bool is_bpf_image_address(unsigned long address)
{
	return false;
}
#endif
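
/* Sketch of the intended dispatcher usage (the XDP dispatcher in
 * net/core/filter.c follows this shape; the names here are illustrative):
 *
 *	DEFINE_BPF_DISPATCHER(my_dispatcher)	// in one .c file
 *	DECLARE_BPF_DISPATCHER(my_dispatcher)	// in a shared header
 *
 *	// fast path: the noinline thunk's body is live-patched into a
 *	// JITed binary-search dispatch over the installed programs
 *	ret = BPF_DISPATCHER_FUNC(my_dispatcher)(ctx, prog->insnsi,
 *						 prog->bpf_func);
 *
 *	// slow path: install/replace a program in the dispatch table
 *	bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(my_dispatcher),
 *				   old_prog, new_prog);
 */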

struct bpf_func_info_aux {
	u16 linkage;
	bool unreliable;
};

enum bpf_jit_poke_reason {
	BPF_POKE_REASON_TAIL_CALL,
};

/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
	void *tailcall_target;
	void *tailcall_bypass;
	void *bypass_addr;
	void *aux;
	union {
		struct {
			struct bpf_map *map;
			u32 key;
		} tail_call;
	};
	bool tailcall_target_stable;
	u8 adj_off;
	u16 reason;
	u32 insn_idx;
};

/* reg_type info for ctx arguments */
struct bpf_ctx_arg_aux {
	u32 offset;
	enum bpf_reg_type reg_type;
	u32 btf_id;
};

struct bpf_prog_aux {
	atomic64_t refcnt;
	u32 used_map_cnt;
	u32 max_ctx_offset;
	u32 max_pkt_offset;
	u32 max_tp_access;
	u32 stack_depth;
	u32 id;
	u32 func_cnt; /* used by non-func prog as the number of func progs */
	u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
	u32 attach_btf_id; /* in-kernel BTF type id to attach to */
	u32 ctx_arg_info_size;
	u32 max_rdonly_access;
	u32 max_rdwr_access;
	const struct bpf_ctx_arg_aux *ctx_arg_info;
	struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
	struct bpf_prog *dst_prog;
	struct bpf_trampoline *dst_trampoline;
	enum bpf_prog_type saved_dst_prog_type;
	enum bpf_attach_type saved_dst_attach_type;
	bool verifier_zext; /* Zero extensions have been inserted by verifier. */
	bool offload_requested;
	bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
	bool func_proto_unreliable;
	bool sleepable;
	bool tail_call_reachable;
	struct hlist_node tramp_hlist;
	/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
	const struct btf_type *attach_func_proto;
	/* function name for valid attach_btf_id */
	const char *attach_func_name;
	struct bpf_prog **func;
	void *jit_data; /* JIT specific data. arch dependent */
	struct bpf_jit_poke_descriptor *poke_tab;
	u32 size_poke_tab;
	struct bpf_ksym ksym;
	const struct bpf_prog_ops *ops;
	struct bpf_map **used_maps;
	struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
	struct bpf_prog *prog;
	struct user_struct *user;
	u64 load_time; /* ns since boottime */
	struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
	void *security;
#endif
	struct bpf_prog_offload *offload;
	struct btf *btf;
	struct bpf_func_info *func_info;
	struct bpf_func_info_aux *func_info_aux;
	/* bpf_line_info loaded from userspace. linfo->insn_off
	 * has the xlated insn offset.
	 * Both the main and sub prog share the same linfo.
	 * The subprog can access its first linfo by
	 * using the linfo_idx.
	 */
	struct bpf_line_info *linfo;
	/* jited_linfo is the jited addr of the linfo. It has a
	 * one to one mapping to linfo:
	 * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
	 * Both the main and sub prog share the same jited_linfo.
	 * The subprog can access its first jited_linfo by
	 * using the linfo_idx.
	 */
	void **jited_linfo;
	u32 func_info_cnt;
	u32 nr_linfo;
	/* subprog can use linfo_idx to access its first linfo and
	 * jited_linfo.
	 * main prog always has linfo_idx == 0
	 */
	u32 linfo_idx;
	u32 num_exentries;
	struct exception_table_entry *extable;
	struct bpf_prog_stats __percpu *stats;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

struct bpf_array_aux {
	/* 'Ownership' of prog array is claimed by the first program that
	 * is going to use this map or by the first program whose FD is
	 * stored in the map to make sure that all callers and callees have
	 * the same prog type and JITed flag.
	 */
	struct {
		spinlock_t lock;
		enum bpf_prog_type type;
		bool jited;
	} owner;
	/* Programs with direct jumps into programs part of this array. */
	struct list_head poke_progs;
	struct bpf_map *map;
	struct mutex poke_mutex;
	struct work_struct work;
};

struct bpf_link {
	atomic64_t refcnt;
	u32 id;
	enum bpf_link_type type;
	const struct bpf_link_ops *ops;
	struct bpf_prog *prog;
	struct work_struct work;
};

struct bpf_link_ops {
	void (*release)(struct bpf_link *link);
	void (*dealloc)(struct bpf_link *link);
	int (*detach)(struct bpf_link *link);
	int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
			   struct bpf_prog *old_prog);
	void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
	int (*fill_link_info)(const struct bpf_link *link,
			      struct bpf_link_info *info);
};

struct bpf_link_primer {
	struct bpf_link *link;
	struct file *file;
	int fd;
	u32 id;
};

struct bpf_struct_ops_value;
struct btf_type;
struct btf_member;

#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
struct bpf_struct_ops {
	const struct bpf_verifier_ops *verifier_ops;
	int (*init)(struct btf *btf);
	int (*check_member)(const struct btf_type *t,
			    const struct btf_member *member);
	int (*init_member)(const struct btf_type *t,
			   const struct btf_member *member,
			   void *kdata, const void *udata);
	int (*reg)(void *kdata);
	void (*unreg)(void *kdata);
	const struct btf_type *type;
	const struct btf_type *value_type;
	const char *name;
	struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
	u32 type_id;
	u32 value_id;
};

#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
bool bpf_struct_ops_get(const void *kdata);
void bpf_struct_ops_put(const void *kdata);
int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		return bpf_struct_ops_get(data);
	else
		return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	if (owner == BPF_MODULE_OWNER)
		bpf_struct_ops_put(data);
	else
		module_put(owner);
}
#else
static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
{
	return NULL;
}
static inline void bpf_struct_ops_init(struct btf *btf,
				       struct bpf_verifier_log *log)
{
}
static inline bool bpf_try_module_get(const void *data, struct module *owner)
{
	return try_module_get(owner);
}
static inline void bpf_module_put(const void *data, struct module *owner)
{
	module_put(owner);
}
static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
						     void *key,
						     void *value)
{
	return -EINVAL;
}
#endif

struct bpf_array {
	struct bpf_map map;
	u32 elem_size;
	u32 index_mask;
	struct bpf_array_aux *aux;
	union {
		char value[0] __aligned(8);
		void *ptrs[0] __aligned(8);
		void __percpu *pptrs[0] __aligned(8);
	};
};

#define BPF_COMPLEXITY_LIMIT_INSNS	1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 32

#define BPF_F_ACCESS_MASK	(BPF_F_RDONLY |		\
				 BPF_F_RDONLY_PROG |	\
				 BPF_F_WRONLY |		\
				 BPF_F_WRONLY_PROG)

#define BPF_MAP_CAN_READ  BIT(0)
#define BPF_MAP_CAN_WRITE BIT(1)

static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
{
	u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);

	/* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
	 * not possible.
	 */
	if (access_flags & BPF_F_RDONLY_PROG)
		return BPF_MAP_CAN_READ;
	else if (access_flags & BPF_F_WRONLY_PROG)
		return BPF_MAP_CAN_WRITE;
	else
		return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
}

static inline bool bpf_map_flags_access_ok(u32 access_flags)
{
	return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
	       (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
}
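
/* Worked example (illustrative): a map created with BPF_F_RDONLY_PROG in
 * map_flags is readable but not writable from program context, so
 * bpf_map_flags_to_cap() returns BPF_MAP_CAN_READ only; with neither
 * BPF_F_RDONLY_PROG nor BPF_F_WRONLY_PROG set, it returns
 * BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE.  bpf_map_flags_access_ok()
 * rejects only the contradictory combination of both flags:
 *
 *	bpf_map_flags_access_ok(BPF_F_RDONLY_PROG);			// true
 *	bpf_map_flags_access_ok(BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);	// false
 */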

struct bpf_event_entry {
	struct perf_event *event;
	struct file *perf_file;
	struct file *map_file;
	struct rcu_head rcu;
};

bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);
const char *kernel_type_name(u32 btf_type_id);

const struct bpf_func_proto *bpf_get_trace_printk_proto(void);

typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
					unsigned long off, unsigned long len);
typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
					const struct bpf_insn *src,
					struct bpf_insn *dst,
					struct bpf_prog *prog,
					u32 *target_size);

u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);

/* an array of programs to be executed under rcu_lock.
 *
 * Typical usage:
 * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
 * The user has to keep refcnt on the program and make sure the program
 * is removed from the array before bpf_prog_put().
 * The 'struct bpf_prog_array *' should only be replaced with xchg()
 * since other cpus are walking the array of pointers in parallel.
 */
struct bpf_prog_array_item {
	struct bpf_prog *prog;
	struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array {
	struct rcu_head rcu;
	struct bpf_prog_array_item items[];
};

struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array *progs);
int bpf_prog_array_length(struct bpf_prog_array *progs);
bool bpf_prog_array_is_empty(struct bpf_prog_array *array);
int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
				__u32 __user *prog_ids, u32 cnt);

void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
				struct bpf_prog *old_prog);
int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
			     struct bpf_prog *prog);
int bpf_prog_array_copy_info(struct bpf_prog_array *array,
			     u32 *prog_ids, u32 request_cnt,
			     u32 *prog_cnt);
int bpf_prog_array_copy(struct bpf_prog_array *old_array,
			struct bpf_prog *exclude_prog,
			struct bpf_prog *include_prog,
			struct bpf_prog_array **new_array);
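
/* Sketch of the copy-and-swap update pattern implied by the comment above
 * (cgroup attach code follows this shape; 'owner->effective' and the
 * locking assertion are illustrative, and error handling is elided):
 *
 *	struct bpf_prog_array *old_array, *new_array;
 *
 *	old_array = rcu_dereference_protected(owner->effective, 1);
 *	err = bpf_prog_array_copy(old_array, NULL, new_prog, &new_array);
 *	if (err)
 *		return err;
 *	rcu_assign_pointer(owner->effective, new_array);
 *	bpf_prog_array_free(old_array);	// freeing is RCU-deferred
 *
 * Readers run the array via BPF_PROG_RUN_ARRAY() under rcu_read_lock(),
 * so the old array must stay valid until they are done.
 */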

#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null, set_cg_storage)	\
	({						\
		struct bpf_prog_array_item *_item;	\
		struct bpf_prog *_prog;			\
		struct bpf_prog_array *_array;		\
		u32 _ret = 1;				\
		migrate_disable();			\
		rcu_read_lock();			\
		_array = rcu_dereference(array);	\
		if (unlikely(check_non_null && !_array))\
			goto _out;			\
		_item = &_array->items[0];		\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			if (!set_cg_storage) {			\
				_ret &= func(_prog, ctx);	\
			} else {				\
				if (unlikely(bpf_cgroup_storage_set(_item->cgroup_storage)))	\
					break;			\
				_ret &= func(_prog, ctx);	\
				bpf_cgroup_storage_unset();	\
			}				\
			_item++;			\
		}					\
_out:							\
		rcu_read_unlock();			\
		migrate_enable();			\
		_ret;					\
	 })

/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs
 * so BPF programs can request cwr for TCP packets.
 *
 * Current cgroup skb programs can only return 0 or 1 (0 to drop the
 * packet).  This macro changes the behavior so the low order bit
 * indicates whether the packet should be dropped (0) or not (1)
 * and the next bit is a congestion notification bit. This could be
 * used by TCP to call tcp_enter_cwr()
 *
 * Hence, new allowed return values of CGROUP EGRESS BPF programs are:
 *   0: drop packet
 *   1: keep packet
 *   2: drop packet and cn
 *   3: keep packet and cn
 *
 * This macro then converts it to one of the NET_XMIT or an error
 * code that is then interpreted as drop packet (and no cn):
 *   0: NET_XMIT_SUCCESS  skb should be transmitted
 *   1: NET_XMIT_DROP     skb should be dropped and cn
 *   2: NET_XMIT_CN       skb should be transmitted and cn
 *   3: -EPERM            skb should be dropped
 */
#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func)		\
	({						\
		struct bpf_prog_array_item *_item;	\
		struct bpf_prog *_prog;			\
		struct bpf_prog_array *_array;		\
		u32 ret;				\
		u32 _ret = 1;				\
		u32 _cn = 0;				\
		migrate_disable();			\
		rcu_read_lock();			\
		_array = rcu_dereference(array);	\
		_item = &_array->items[0];		\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			if (unlikely(bpf_cgroup_storage_set(_item->cgroup_storage)))	\
				break;			\
			ret = func(_prog, ctx);		\
			bpf_cgroup_storage_unset();	\
			_ret &= (ret & 1);		\
			_cn |= (ret & 2);		\
			_item++;			\
		}					\
		rcu_read_unlock();			\
		migrate_enable();			\
		if (_ret)				\
			_ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS);	\
		else					\
			_ret = (_cn ? NET_XMIT_DROP : -EPERM);		\
		_ret;					\
	})

#define BPF_PROG_RUN_ARRAY(array, ctx, func)		\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, false, true)

#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func)	\
	__BPF_PROG_RUN_ARRAY(array, ctx, func, true, false)

#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
extern struct mutex bpf_stats_enabled_mutex;

/*
 * Block execution of BPF programs attached to instrumentation (perf,
 * kprobes, tracepoints) to prevent deadlocks on map operations as any of
 * these events can happen inside a region which holds a map bucket lock
 * and can deadlock on it.
 *
 * Use the preemption safe inc/dec variants on RT because migrate disable
 * is preemptible on RT and preemption in the middle of the RMW operation
 * might lead to inconsistent state. Use the raw variants for non RT
 * kernels as migrate_disable() maps to preempt_disable() so the slightly
 * more expensive save operation can be avoided.
 */
static inline void bpf_disable_instrumentation(void)
{
	migrate_disable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		this_cpu_inc(bpf_prog_active);
	else
		__this_cpu_inc(bpf_prog_active);
}

static inline void bpf_enable_instrumentation(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		this_cpu_dec(bpf_prog_active);
	else
		__this_cpu_dec(bpf_prog_active);
	migrate_enable();
}
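
/* Typical guard pattern (a sketch; the syscall-side map update path in
 * kernel/bpf/syscall.c uses this shape around its update helper):
 *
 *	bpf_disable_instrumentation();
 *	err = bpf_map_update_value(map, f, key, value, attr->flags);
 *	bpf_enable_instrumentation();
 *
 * A tracing program firing on this CPU in between sees bpf_prog_active
 * elevated and backs off instead of recursing into map internals.
 */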
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001205
1206extern const struct file_operations bpf_map_fops;
1207extern const struct file_operations bpf_prog_fops;
Olivier Deprez157378f2022-04-04 15:47:50 +02001208extern const struct file_operations bpf_iter_fops;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001209
Olivier Deprez157378f2022-04-04 15:47:50 +02001210#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001211 extern const struct bpf_prog_ops _name ## _prog_ops; \
1212 extern const struct bpf_verifier_ops _name ## _verifier_ops;
1213#define BPF_MAP_TYPE(_id, _ops) \
1214 extern const struct bpf_map_ops _ops;
Olivier Deprez157378f2022-04-04 15:47:50 +02001215#define BPF_LINK_TYPE(_id, _name)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001216#include <linux/bpf_types.h>
1217#undef BPF_PROG_TYPE
1218#undef BPF_MAP_TYPE
Olivier Deprez157378f2022-04-04 15:47:50 +02001219#undef BPF_LINK_TYPE
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001220
1221extern const struct bpf_prog_ops bpf_offload_prog_ops;
1222extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;
1223extern const struct bpf_verifier_ops xdp_analyzer_ops;
1224
1225struct bpf_prog *bpf_prog_get(u32 ufd);
1226struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
1227 bool attach_drv);
Olivier Deprez157378f2022-04-04 15:47:50 +02001228void bpf_prog_add(struct bpf_prog *prog, int i);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001229void bpf_prog_sub(struct bpf_prog *prog, int i);
Olivier Deprez157378f2022-04-04 15:47:50 +02001230void bpf_prog_inc(struct bpf_prog *prog);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001231struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
1232void bpf_prog_put(struct bpf_prog *prog);
1233int __bpf_prog_charge(struct user_struct *user, u32 pages);
1234void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
1235
1236void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
1237void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
1238
Olivier Deprez157378f2022-04-04 15:47:50 +02001239struct bpf_map *bpf_map_get(u32 ufd);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001240struct bpf_map *bpf_map_get_with_uref(u32 ufd);
1241struct bpf_map *__bpf_map_get(struct fd f);
Olivier Deprez157378f2022-04-04 15:47:50 +02001242void bpf_map_inc(struct bpf_map *map);
1243void bpf_map_inc_with_uref(struct bpf_map *map);
1244struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001245void bpf_map_put_with_uref(struct bpf_map *map);
1246void bpf_map_put(struct bpf_map *map);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001247int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
1248void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
David Brazdil0f672f62019-12-10 10:32:29 +00001249int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size);
1250void bpf_map_charge_finish(struct bpf_map_memory *mem);
1251void bpf_map_charge_move(struct bpf_map_memory *dst,
1252 struct bpf_map_memory *src);
1253void *bpf_map_area_alloc(u64 size, int numa_node);
Olivier Deprez157378f2022-04-04 15:47:50 +02001254void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001255void bpf_map_area_free(void *base);
Olivier Deprez157378f2022-04-04 15:47:50 +02001256bool bpf_map_write_active(const struct bpf_map *map);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001257void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
Olivier Deprez157378f2022-04-04 15:47:50 +02001258int generic_map_lookup_batch(struct bpf_map *map,
1259 const union bpf_attr *attr,
1260 union bpf_attr __user *uattr);
1261int generic_map_update_batch(struct bpf_map *map,
1262 const union bpf_attr *attr,
1263 union bpf_attr __user *uattr);
1264int generic_map_delete_batch(struct bpf_map *map,
1265 const union bpf_attr *attr,
1266 union bpf_attr __user *uattr);
1267struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
1268struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001269
1270extern int sysctl_unprivileged_bpf_disabled;
1271
Olivier Deprez157378f2022-04-04 15:47:50 +02001272static inline bool bpf_allow_ptr_leaks(void)
1273{
1274 return perfmon_capable();
1275}
1276
1277static inline bool bpf_allow_uninit_stack(void)
1278{
1279 return perfmon_capable();
1280}
1281
1282static inline bool bpf_allow_ptr_to_map_access(void)
1283{
1284 return perfmon_capable();
1285}
1286
1287static inline bool bpf_bypass_spec_v1(void)
1288{
1289 return perfmon_capable();
1290}
1291
1292static inline bool bpf_bypass_spec_v4(void)
1293{
1294 return perfmon_capable();
1295}
1296
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001297int bpf_map_new_fd(struct bpf_map *map, int flags);
1298int bpf_prog_new_fd(struct bpf_prog *prog);
1299
Olivier Deprez157378f2022-04-04 15:47:50 +02001300void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
1301 const struct bpf_link_ops *ops, struct bpf_prog *prog);
1302int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
1303int bpf_link_settle(struct bpf_link_primer *primer);
1304void bpf_link_cleanup(struct bpf_link_primer *primer);
1305void bpf_link_inc(struct bpf_link *link);
1306void bpf_link_put(struct bpf_link *link);
1307int bpf_link_new_fd(struct bpf_link *link);
1308struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
1309struct bpf_link *bpf_link_get_from_fd(u32 ufd);
1310
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001311int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
1312int bpf_obj_get_user(const char __user *pathname, int flags);
1313
Olivier Deprez157378f2022-04-04 15:47:50 +02001314#define BPF_ITER_FUNC_PREFIX "bpf_iter_"
1315#define DEFINE_BPF_ITER_FUNC(target, args...) \
1316 extern int bpf_iter_ ## target(args); \
1317 int __init bpf_iter_ ## target(args) { return 0; }
1318
1319struct bpf_iter_aux_info {
1320 struct bpf_map *map;
1321};
1322
1323typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
1324 union bpf_iter_link_info *linfo,
1325 struct bpf_iter_aux_info *aux);
1326typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
1327typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
1328 struct seq_file *seq);
1329typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
1330 struct bpf_link_info *info);
1331
1332#define BPF_ITER_CTX_ARG_MAX 2
1333struct bpf_iter_reg {
1334 const char *target;
1335 bpf_iter_attach_target_t attach_target;
1336 bpf_iter_detach_target_t detach_target;
1337 bpf_iter_show_fdinfo_t show_fdinfo;
1338 bpf_iter_fill_link_info_t fill_link_info;
1339 u32 ctx_arg_info_size;
1340 struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
1341 const struct bpf_iter_seq_info *seq_info;
1342};
1343
1344struct bpf_iter_meta {
1345 __bpf_md_ptr(struct seq_file *, seq);
1346 u64 session_id;
1347 u64 seq_num;
1348};
1349
1350struct bpf_iter__bpf_map_elem {
1351 __bpf_md_ptr(struct bpf_iter_meta *, meta);
1352 __bpf_md_ptr(struct bpf_map *, map);
1353 __bpf_md_ptr(void *, key);
1354 __bpf_md_ptr(void *, value);
1355};

int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
bool bpf_iter_prog_supported(struct bpf_prog *prog);
int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int bpf_iter_new_fd(struct bpf_link *link);
bool bpf_link_is_iter(struct bpf_link *link);
struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);
void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
			      struct seq_file *seq);
int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
				struct bpf_link_info *info);

int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
			   u64 flags);
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 flags);

int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);

int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags);
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
				void *key, void *value, u64 map_flags);
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);

int bpf_get_file_flag(int flags);
int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size,
			     size_t actual_size);

/* memcpy that is used with 8-byte aligned pointers, a size that is a
 * multiple of 8, and forced to use 'long' read/writes to try to atomically
 * copy long counters. Best-effort only. No barriers here, since it _will_
 * race with concurrent updates from BPF programs. Called from the bpf
 * syscall and mostly used with size 8 or 16 bytes, so ask the compiler to
 * inline it.
 */
static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
{
	const long *lsrc = src;
	long *ldst = dst;

	size /= sizeof(long);
	while (size--)
		*ldst++ = *lsrc++;
}
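/* Usage sketch (illustrative): copying every CPU's value slot of a
 * per-CPU map out to the syscall-side buffer, where both pointers are
 * 8-byte aligned and the value size has been rounded up to a multiple
 * of 8:
 *
 *	size = round_up(map->value_size, 8);
 *	for_each_possible_cpu(cpu) {
 *		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
 *		off += size;
 *	}
 */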

/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
	      union bpf_attr __user *uattr);

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
#endif

struct btf *bpf_get_btf_vmlinux(void);

/* Map specifics */
struct xdp_buff;
struct sk_buff;

struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
void __dev_flush(void);
int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog);
bool dev_map_can_have_prog(struct bpf_map *map);

struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_flush(void);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
		    struct net_device *dev_rx);
bool cpu_map_prog_allowed(struct bpf_map *map);

/* Return the map's NUMA node as specified by userspace, if any */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
	return (attr->map_flags & BPF_F_NUMA_NODE) ?
		attr->numa_node : NUMA_NO_NODE;
}
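/* e.g. a map created with BPF_F_NUMA_NODE set in attr->map_flags would
 * typically allocate its backing storage on that node (sketch modelled
 * on the array map allocator; names are illustrative):
 *
 *	array = bpf_map_area_alloc(array_size, bpf_map_attr_numa_node(attr));
 */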

struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
int array_map_alloc_check(union bpf_attr *attr);

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr);
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr);
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr);
int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
			     const union bpf_attr *kattr,
			     union bpf_attr __user *uattr);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
		    const struct bpf_prog *prog,
		    struct bpf_insn_access_aux *info);
int btf_struct_access(struct bpf_verifier_log *log,
		      const struct btf_type *t, int off, int size,
		      enum bpf_access_type atype,
		      u32 *next_btf_id);
bool btf_struct_ids_match(struct bpf_verifier_log *log,
			  int off, u32 id, u32 need_type_id);

int btf_distill_func_proto(struct bpf_verifier_log *log,
			   struct btf *btf,
			   const struct btf_type *func_proto,
			   const char *func_name,
			   struct btf_func_model *m);

struct bpf_reg_state;
int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
			     struct bpf_reg_state *regs);
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
			  struct bpf_reg_state *reg);
int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
			 struct btf *btf, const struct btf_type *t);

struct bpf_prog *bpf_prog_by_id(u32 id);
struct bpf_link *bpf_link_by_id(u32 id);

const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);

static inline bool unprivileged_ebpf_enabled(void)
{
	return !sysctl_unprivileged_bpf_disabled;
}

#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
						     enum bpf_prog_type type,
						     bool attach_drv)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_prog_add(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
{
}

static inline void bpf_prog_put(struct bpf_prog *prog)
{
}

static inline void bpf_prog_inc(struct bpf_prog *prog)
{
}

static inline struct bpf_prog *__must_check
bpf_prog_inc_not_zero(struct bpf_prog *prog)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	return 0;
}

static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
}

static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
				 const struct bpf_link_ops *ops,
				 struct bpf_prog *prog)
{
}

static inline int bpf_link_prime(struct bpf_link *link,
				 struct bpf_link_primer *primer)
{
	return -EOPNOTSUPP;
}

static inline int bpf_link_settle(struct bpf_link_primer *primer)
{
	return -EOPNOTSUPP;
}

static inline void bpf_link_cleanup(struct bpf_link_primer *primer)
{
}

static inline void bpf_link_inc(struct bpf_link *link)
{
}

static inline void bpf_link_put(struct bpf_link *link)
{
}

static inline int bpf_obj_get_user(const char __user *pathname, int flags)
{
	return -EOPNOTSUPP;
}

static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map,
						       u32 key)
{
	return NULL;
}

static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map,
							     u32 key)
{
	return NULL;
}
static inline bool dev_map_can_have_prog(struct bpf_map *map)
{
	return false;
}

static inline void __dev_flush(void)
{
}

struct xdp_buff;
struct bpf_dtab_netdev;

static inline
int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return 0;
}

static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	return 0;
}

struct sk_buff;

static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
					   struct sk_buff *skb,
					   struct bpf_prog *xdp_prog)
{
	return 0;
}

static inline
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	return NULL;
}

static inline void __cpu_map_flush(void)
{
}

static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
				  struct xdp_buff *xdp,
				  struct net_device *dev_rx)
{
	return 0;
}

static inline bool cpu_map_prog_allowed(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
						      enum bpf_prog_type type)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
					const union bpf_attr *kattr,
					union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog,
					    const union bpf_attr *kattr,
					    union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
						   const union bpf_attr *kattr,
						   union bpf_attr __user *uattr)
{
	return -ENOTSUPP;
}

static inline void bpf_map_put(struct bpf_map *map)
{
}

static inline struct bpf_prog *bpf_prog_by_id(u32 id)
{
	return ERR_PTR(-ENOTSUPP);
}

static inline const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	return NULL;
}

static inline bool unprivileged_ebpf_enabled(void)
{
	return false;
}

#endif /* CONFIG_BPF_SYSCALL */

static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
						 enum bpf_prog_type type)
{
	return bpf_prog_get_type_dev(ufd, type, false);
}

void __bpf_free_used_maps(struct bpf_prog_aux *aux,
			  struct bpf_map **used_maps, u32 len);

bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);

int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
			       struct bpf_prog *prog);

int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);

int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_map_offload_update_elem(struct bpf_map *map,
				void *key, void *value, u64 flags);
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
				 void *key, void *next_key);

bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);

struct bpf_offload_dev *
bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
				    struct net_device *netdev);
void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
				       struct net_device *netdev);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);

#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);

static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
{
	return aux->offload_requested;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return unlikely(map->ops == &bpf_map_offload_ops);
}

struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
					union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}

static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
	return false;
}

static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
	return false;
}

static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */

#if defined(CONFIG_BPF_STREAM_PARSER)
int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
			 struct bpf_prog *old, u32 which);
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
void sock_map_unhash(struct sock *sk);
void sock_map_close(struct sock *sk, long timeout);
#else
static inline int sock_map_prog_update(struct bpf_map *map,
				       struct bpf_prog *prog,
				       struct bpf_prog *old, u32 which)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_get_from_fd(const union bpf_attr *attr,
				       struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int sock_map_prog_detach(const union bpf_attr *attr,
				       enum bpf_prog_type ptype)
{
	return -EOPNOTSUPP;
}

static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
					   u64 flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_STREAM_PARSER */

#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
				       void *value);
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 map_flags);
#else
static inline void bpf_sk_reuseport_detach(struct sock *sk)
{
}

#ifdef CONFIG_BPF_SYSCALL
static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
						     void *key, void *value)
{
	return -EOPNOTSUPP;
}

static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
						     void *key, void *value,
						     u64 map_flags)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_BPF_SYSCALL */
#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */

/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
extern const struct bpf_func_proto bpf_map_delete_elem_proto;
extern const struct bpf_func_proto bpf_map_push_elem_proto;
extern const struct bpf_func_proto bpf_map_pop_elem_proto;
extern const struct bpf_func_proto bpf_map_peek_elem_proto;

extern const struct bpf_func_proto bpf_get_prandom_u32_proto;
extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
extern const struct bpf_func_proto bpf_tail_call_proto;
extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto;
extern const struct bpf_func_proto bpf_get_stack_proto;
extern const struct bpf_func_proto bpf_get_task_stack_proto;
extern const struct bpf_func_proto bpf_get_stackid_proto_pe;
extern const struct bpf_func_proto bpf_get_stack_proto_pe;
extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto;
extern const struct bpf_func_proto bpf_msg_redirect_hash_proto;
extern const struct bpf_func_proto bpf_msg_redirect_map_proto;
extern const struct bpf_func_proto bpf_sk_redirect_hash_proto;
extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
extern const struct bpf_func_proto bpf_jiffies64_proto;
extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
extern const struct bpf_func_proto bpf_event_output_data_proto;
extern const struct bpf_func_proto bpf_ringbuf_output_proto;
extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
extern const struct bpf_func_proto bpf_ringbuf_query_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
extern const struct bpf_func_proto bpf_copy_from_user_proto;
extern const struct bpf_func_proto bpf_snprintf_btf_proto;
extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;

const struct bpf_func_proto *bpf_tracing_func_proto(
	enum bpf_func_id func_id, const struct bpf_prog *prog);

const struct bpf_func_proto *tracing_prog_func_proto(
	enum bpf_func_id func_id, const struct bpf_prog *prog);

/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

#if defined(CONFIG_NET)
bool bpf_sock_common_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     struct bpf_insn_access_aux *info);
bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
			      struct bpf_insn_access_aux *info);
u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
				const struct bpf_insn *si,
				struct bpf_insn *insn_buf,
				struct bpf_prog *prog,
				u32 *target_size);
#else
static inline bool bpf_sock_common_is_valid_access(int off, int size,
						   enum bpf_access_type type,
						   struct bpf_insn_access_aux *info)
{
	return false;
}
static inline bool bpf_sock_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    struct bpf_insn_access_aux *info)
{
	return false;
}
static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
					      const struct bpf_insn *si,
					      struct bpf_insn *insn_buf,
					      struct bpf_prog *prog,
					      u32 *target_size)
{
	return 0;
}
#endif

#ifdef CONFIG_INET
struct sk_reuseport_kern {
	struct sk_buff *skb;
	struct sock *sk;
	struct sock *selected_sk;
	void *data_end;
	u32 hash;
	u32 reuseport_id;
	bool bind_inany;
};
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);

bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info);

u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog,
				    u32 *target_size);
#else
static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
						enum bpf_access_type type,
						struct bpf_insn_access_aux *info)
{
	return false;
}

static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
						  const struct bpf_insn *si,
						  struct bpf_insn *insn_buf,
						  struct bpf_prog *prog,
						  u32 *target_size)
{
	return 0;
}
#endif /* CONFIG_INET */

enum bpf_text_poke_type {
	BPF_MOD_CALL,
	BPF_MOD_JUMP,
};

int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
		       void *addr1, void *addr2);
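/* Calling-convention sketch (illustrative): addr1 is the old target
 * (NULL if the site currently holds a nop) and addr2 is the new one
 * (NULL to restore a nop). Attaching a trampoline at a fentry nop
 * could look like:
 *
 *	err = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
 *
 * while retargeting an existing call passes both addresses so the
 * arch code can verify what it is patching.
 */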

struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);

#endif /* _LINUX_BPF_H */