/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/percpu-refcount.h>
#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;
struct task_struct;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

#define BPF_CGROUP_STORAGE_NEST_MAX	8

struct bpf_cgroup_storage_info {
	struct task_struct *task;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

/* For each CPU, permit at most BPF_CGROUP_STORAGE_NEST_MAX tasks
 * to use bpf cgroup storage simultaneously.
 */
DECLARE_PER_CPU(struct bpf_cgroup_storage_info,
		bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]);

#define for_each_cgroup_storage_type(stype) \
	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[];
};

struct bpf_cgroup_storage {
	union {
		struct bpf_storage_buffer *buf;
		void __percpu *percpu_buf;
	};
	struct bpf_cgroup_storage_map *map;
	struct bpf_cgroup_storage_key key;
	struct list_head list_map;
	struct list_head list_cg;
	struct rb_node node;
	struct rcu_head rcu;
};

struct bpf_cgroup_link {
	struct bpf_link link;
	struct cgroup *cgroup;
	enum bpf_attach_type type;
};

struct bpf_prog_list {
	struct list_head node;
	struct bpf_prog *prog;
	struct bpf_cgroup_link *link;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array;

struct cgroup_bpf {
	/* array of effective progs in this cgroup */
	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

	/* progs attached to this cgroup and their attach flags;
	 * when flags is 0 or BPF_F_ALLOW_OVERRIDE the progs list holds
	 * either zero or one element,
	 * with BPF_F_ALLOW_MULTI it can hold up to BPF_CGROUP_MAX_PROGS
	 */
	struct list_head progs[MAX_BPF_ATTACH_TYPE];
	u32 flags[MAX_BPF_ATTACH_TYPE];

	/* list of cgroup shared storages */
	struct list_head storages;

	/* temp storage for effective prog array used by prog_attach/detach */
	struct bpf_prog_array *inactive;

	/* reference counter used to detach bpf programs after cgroup removal */
	struct percpu_ref refcnt;

	/* cgroup_bpf is released using a work queue */
	struct work_struct release_work;
};

int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp,
			struct bpf_prog *prog, struct bpf_prog *replace_prog,
			struct bpf_cgroup_link *link,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			struct bpf_cgroup_link *link,
			enum bpf_attach_type type);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr);

/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp,
		      struct bpf_prog *prog, struct bpf_prog *replace_prog,
		      struct bpf_cgroup_link *link, enum bpf_attach_type type,
		      u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr);
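
/* Example (illustrative sketch, not a definitive in-tree user): how a caller
 * might use the mutex-protected wrappers above to attach a program in
 * multi-prog mode and detach it again. The cgroup and program pointers are
 * assumed to come from the caller's own context; only the calling convention
 * is shown.
 *
 *	int err;
 *
 *	err = cgroup_bpf_attach(cgrp, prog, NULL, NULL,
 *				BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_MULTI);
 *	if (err)
 *		return err;
 *	...
 *	err = cgroup_bpf_detach(cgrp, prog, BPF_CGROUP_INET_EGRESS);
 */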

int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   char **buf, size_t *pcount, loff_t *ppos,
				   enum bpf_attach_type type);

int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval);
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, char __user *optval,
				       int __user *optlen, int max_optlen,
				       int retval);

static inline enum bpf_cgroup_storage_type cgroup_storage_type(
	struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return BPF_CGROUP_STORAGE_PERCPU;

	return BPF_CGROUP_STORAGE_SHARED;
}

static inline int bpf_cgroup_storage_set(struct bpf_cgroup_storage
					 *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
	enum bpf_cgroup_storage_type stype;
	int i, err = 0;

	preempt_disable();
	for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
		if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != NULL))
			continue;

		this_cpu_write(bpf_cgroup_storage_info[i].task, current);
		for_each_cgroup_storage_type(stype)
			this_cpu_write(bpf_cgroup_storage_info[i].storage[stype],
				       storage[stype]);
		goto out;
	}
	err = -EBUSY;
	WARN_ON_ONCE(1);

out:
	preempt_enable();
	return err;
}

static inline void bpf_cgroup_storage_unset(void)
{
	int i;

	for (i = BPF_CGROUP_STORAGE_NEST_MAX - 1; i >= 0; i--) {
		if (likely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
			continue;

		this_cpu_write(bpf_cgroup_storage_info[i].task, NULL);
		return;
	}
}
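
/* Example (illustrative sketch): bpf_cgroup_storage_set() and
 * bpf_cgroup_storage_unset() are meant to bracket a program run, so the
 * storage helpers can find the buffers that belong to the current task on
 * this CPU. The run step below is simplified for illustration; the real
 * call sites live in the BPF_PROG_RUN_ARRAY machinery.
 *
 *	if (unlikely(bpf_cgroup_storage_set(storage)))
 *		return -EBUSY;		// all NEST_MAX per-cpu slots are busy
 *	ret = BPF_PROG_RUN(prog, ctx);	// helpers may touch cgroup storage
 *	bpf_cgroup_storage_unset();	// release the slot we claimed
 */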

struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
		      void *key, bool locked);
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				     void *value, u64 flags);

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled)						\
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		\
						    BPF_CGROUP_INET_INGRESS); \
									\
	__ret;								\
})

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		\
		typeof(sk) __sk = sk_to_full_sk(sk);			\
		if (sk_fullsock(__sk))					\
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	\
						    BPF_CGROUP_INET_EGRESS); \
	}								\
	__ret;								\
})
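
/* Example (illustrative sketch): an egress path typically runs this hook
 * against the full socket that owns the skb and drops the packet on a
 * non-zero return. Function names below are hypothetical; only the calling
 * pattern is the point.
 *
 *	static int example_finish_output(struct net *net, struct sock *sk,
 *					 struct sk_buff *skb)
 *	{
 *		int ret;
 *
 *		ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
 *		if (ret) {
 *			kfree_skb(skb);
 *			return ret;
 *		}
 *		return example_transmit(net, sk, skb);
 *	}
 */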

#define BPF_CGROUP_RUN_SK_PROG(sk, type)				\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled) {					\
		__ret = __cgroup_bpf_run_filter_sk(sk, type);		\
	}								\
	__ret;								\
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				\
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk)			\
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_RELEASE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)				\
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)				\
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type)				\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled)						\
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
							  NULL);	\
	__ret;								\
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx)		\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled) {					\
		lock_sock(sk);						\
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
							  t_ctx);	\
		release_sock(sk);					\
	}								\
	__ret;								\
})

#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr)			\
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr)			\
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled &&	\
					    sk->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)			\
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)			\
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr)		\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)

/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
 * fullsock and its parent fullsock cannot be traced by
 * sk_to_full_sk().
 *
 * e.g. sock_ops->sk is a request_sock and it is under syncookie mode.
 * Its listener-sk is not attached to the rsk_listener.
 * In this case, the caller holds the listener-sk (unlocked),
 * sets sock_ops->sk to the req_sk, and calls this SOCK_OPS"_SK" macro with
 * the listener-sk so that the cgroup-bpf progs of the
 * listener-sk will be run.
 *
 * Regardless of syncookie mode or not,
 * calling bpf_setsockopt on the listener-sk would not make sense anyway,
 * so passing 'sock_ops->sk == req_sk' to the bpf prog is appropriate here.
 */
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk)			\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled)						\
		__ret = __cgroup_bpf_run_filter_sock_ops(sk,		\
							 sock_ops,	\
							 BPF_CGROUP_SOCK_OPS); \
	__ret;								\
})
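
/* Example (illustrative sketch of the pattern described above): under
 * syncookies a caller that only has the request_sock and the unlocked
 * listener can still run the listener's cgroup-bpf progs. Names here are
 * hypothetical; only the argument roles matter.
 *
 *	struct bpf_sock_ops_kern sock_ops = {};
 *
 *	sock_ops.op = example_op;		// whichever BPF_SOCK_OPS_* op applies
 *	sock_ops.sk = (struct sock *)req_sk;	// not a fullsock
 *	BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, listener_sk);
 */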

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled && (sock_ops)->sk) {			\
		typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk);	\
		if (__sk && sk_fullsock(__sk))				\
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	\
								 sock_ops, \
							 BPF_CGROUP_SOCK_OPS); \
	}								\
	__ret;								\
})

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access)	\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled)						\
		__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
							  access,	\
							  BPF_CGROUP_DEVICE); \
									\
	__ret;								\
})


#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled)						\
		__ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \
						       buf, count, pos,	\
						       BPF_CGROUP_SYSCTL); \
	__ret;								\
})

#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
				       kernel_optval)			\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled)						\
		__ret = __cgroup_bpf_run_filter_setsockopt(sock, level,	\
							   optname, optval, \
							   optlen,	\
							   kernel_optval); \
	__ret;								\
})

#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen)			\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled)						\
		get_user(__ret, optlen);				\
	__ret;								\
})

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen, \
				       max_optlen, retval)		\
({									\
	int __ret = retval;						\
	if (cgroup_bpf_enabled)						\
		__ret = __cgroup_bpf_run_filter_getsockopt(sock, level,	\
							   optname, optval, \
							   optlen, max_optlen, \
							   retval);	\
	__ret;								\
})
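
/* Example (illustrative sketch): the getsockopt hook wraps the syscall path,
 * so a caller first clamps the user-supplied optlen and afterwards lets the
 * cgroup programs observe or rewrite the result. This only shows the
 * ordering; example_do_getsockopt() is a hypothetical stand-in for the
 * protocol's getsockopt handler.
 *
 *	int max_optlen = BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen);
 *	int err;
 *
 *	err = example_do_getsockopt(sk, level, optname, optval, optlen);
 *	err = BPF_CGROUP_RUN_PROG_GETSOCKOPT(sk, level, optname, optval,
 *					     optlen, max_optlen, err);
 */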

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);
#else

struct bpf_prog;
struct cgroup_bpf {};
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline int bpf_cgroup_storage_set(
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) { return 0; }
static inline void bpf_cgroup_storage_unset(void) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
					    struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
						 void *value) {
	return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
						   void *key, void *value, u64 flags) {
	return 0;
}

#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
				       optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
				       kernel_optval) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */