// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_gate.h>
#include <net/flow_offload.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);
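
/* Illustrative sketch (not part of this file; the names below are made up):
 * a classifier typically embeds a struct rcu_work in its per-filter state and
 * uses tcf_queue_work() to defer freeing until an RCU grace period elapses,
 * mirroring the pattern used by the in-tree cls_* modules.
 *
 *	struct hypothetical_filter {
 *		struct rcu_work rwork;
 *		// ... match data ...
 *	};
 *
 *	static void hypothetical_filter_work(struct work_struct *work)
 *	{
 *		struct hypothetical_filter *f =
 *			container_of(to_rcu_work(work),
 *				     struct hypothetical_filter, rwork);
 *		kfree(f);
 *	}
 *
 *	// in the classifier's delete path:
 *	//	tcf_queue_work(&f->rwork, hypothetical_filter_work);
 */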

/* Select new prio value from the range, managed by kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}
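
/* Worked example (illustrative only): prio lives in the upper 16 bits of the
 * 32-bit value, which TC_H_MAJ() masks out. With no existing head filter
 * (tp == NULL) the auto-assigned prio is TC_H_MAJ(0xC0000000U) == 0xC000 << 16.
 * If the current head has prio 0xC000 << 16, then tp->prio - 1 is
 * 0xBFFFFFFF and TC_H_MAJ() of that is 0xBFFF << 16, so each auto-prio
 * filter slots in just below the previous head.
 */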

static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strlcpy(name, kind, IFNAMSIZ) >= IFNAMSIZ;
	memset(name, 0, IFNAMSIZ);
	return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)	\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry_rcu(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and user ought
	 * not know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);
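
/* Illustrative sketch only: a tc action with a "goto chain" control takes its
 * chain reference through this helper at init time and drops it with
 * tcf_chain_put_by_act() on release. The surrounding code below is a
 * hypothetical shape, not a quote of act_api.c.
 *
 *	chain = tcf_chain_get_by_act(block, chain_index);
 *	if (!chain)
 *		return -ENOMEM;
 *	// ... action holds the chain while it may jump to it ...
 *	tcf_chain_put_by_act(chain);
 */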

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	bool free_block = false;
	unsigned int refcnt;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	/* The last dropped non-action reference will trigger notification. */
	if (refcnt - chain->action_refcnt == 0 && !by_act) {
		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
				       block, NULL, 0, 0, false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tcf_block_offload_init(struct flow_block_offload *bo,
				   struct net_device *dev, struct Qdisc *sch,
				   enum flow_block_command command,
				   enum flow_block_binder_type binder_type,
				   struct flow_block *flow_block,
				   bool shared, struct netlink_ext_ack *extack)
{
	bo->net = dev_net(dev);
	bo->command = command;
	bo->binder_type = binder_type;
	bo->block = flow_block;
	bo->block_shared = shared;
	bo->extack = extack;
	bo->sch = sch;
	bo->cb_list_head = &flow_block->cb_list;
	INIT_LIST_HEAD(&bo->cb_list);
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo);

static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
{
	struct tcf_block *block = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct Qdisc *sch = block_cb->indr.sch;
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
			       block_cb->indr.binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       &extack);
	rtnl_lock();
	down_write(&block->cb_lock);
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	tcf_block_unbind(block, &bo);
	up_write(&block->cb_lock);
	rtnl_unlock();
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev, struct Qdisc *sch,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       extack);

	if (dev->netdev_ops->ndo_setup_tc) {
		int err;

		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
		if (err < 0) {
			if (err != -EOPNOTSUPP)
				NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
			return err;
		}

		return tcf_block_setup(block, &bo);
	}

	flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
				    tc_block_indr_cleanup);
	tcf_block_setup(block, &bo);

	return -EOPNOTSUPP;
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);

	/* If the tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, refuse to bind.
	 */
	if (dev->netdev_ops->ndo_setup_tc &&
	    !tc_can_offload(dev) &&
	    tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		goto err_unlock;

	err = 0;
	block->nooffloaddevcnt++;
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * block. It properly obtains block->lock and takes a reference to the chain
 * before returning it. Users of this function must be tolerant to concurrent
 * chain insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);
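
/* Typical usage (illustrative sketch only): callers feed the previous return
 * value back in, starting from NULL. The helper drops the reference on the
 * chain it was handed and returns the next one with a reference already held,
 * so the body of the loop may safely use the chain:
 *
 *	struct tcf_chain *chain;
 *
 *	for (chain = tcf_get_next_chain(block, NULL);
 *	     chain;
 *	     chain = tcf_get_next_chain(block, chain)) {
 *		// inspect chain; reference is held across the body
 *	}
 */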

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp,
		   bool rtnl_held)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, rtnl_held, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);
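
/* Illustrative sketch only: same feed-back-the-cursor pattern as
 * tcf_get_next_chain(), one level down, here assuming rtnl is held:
 *
 *	struct tcf_proto *tp;
 *
 *	for (tp = tcf_get_next_proto(chain, NULL, true);
 *	     tp;
 *	     tp = tcf_get_next_proto(chain, tp, true)) {
 *		// tp reference is held across the body
 *	}
 */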

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Look up the Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = rcu_dereference(dev->qdisc);
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take reference to block in order to support execution
		 * of rules update path of cls API without rtnl lock. Caller
		 * must release block when it is finished using it. 'if' block
		 * of this conditional obtain reference to block by calling
		 * tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
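
/* Illustrative sketch only (the struct and field names are hypothetical):
 * a classful qdisc typically obtains a block at init time and releases it on
 * destroy; the default chain0 head-change callback installed above keeps the
 * qdisc's cached filter-list pointer in sync with the chain head.
 *
 *	struct hypothetical_class {
 *		struct tcf_proto __rcu *filter_list;
 *		struct tcf_block *block;
 *	};
 *
 *	// in the qdisc's ->init():
 *	//	err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
 *	//	if (err)
 *	//		return err;
 *
 *	// in the qdisc's ->destroy():
 *	//	tcf_block_put(cl->block);
 */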

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	if (!block)
		return;
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}

EXPORT_SYMBOL(tcf_block_put);

static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	lockdep_assert_held(&block->cb_lock);

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
			     tp = __tcf_get_next_proto(chain, tp),
			     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
	}

	return 0;

err_playback_remove:
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}

static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt++;

		i++;
	}
	list_splice(&bo->cb_list, &block->flow_block.cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
			if (!bo->unlocked_driver_cb)
				block->lockeddevcnt--;
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt--;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
static inline int __tcf_classify(struct sk_buff *skb,
				 const struct tcf_proto *tp,
				 const struct tcf_proto *orig_tp,
				 struct tcf_result *res,
				 bool compat_mode,
				 u32 *last_executed_chain)
{
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 16;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = skb_protocol(skb, false);
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			*last_executed_chain = first_tp->chain->index;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode,
			      &last_executed_chain);
}
EXPORT_SYMBOL(tcf_classify);
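
/* Illustrative sketch only: a classful qdisc's ->enqueue() typically resolves
 * the target class by running tcf_classify() on its RCU-protected filter list
 * (the variable names below are hypothetical):
 *
 *	struct tcf_proto *fl = rcu_dereference_bh(cl->filter_list);
 *	struct tcf_result res;
 *	int result;
 *
 *	result = tcf_classify(skb, fl, &res, false);
 *	switch (result) {
 *	case TC_ACT_SHOT:
 *		return qdisc_drop(skb, sch, to_free);
 *	default:
 *		// map res.classid to a class and enqueue to it
 *		break;
 *	}
 */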
1591
Olivier Deprez157378f2022-04-04 15:47:50 +02001592int tcf_classify_ingress(struct sk_buff *skb,
1593 const struct tcf_block *ingress_block,
1594 const struct tcf_proto *tp,
1595 struct tcf_result *res, bool compat_mode)
1596{
1597#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
1598 u32 last_executed_chain = 0;
1599
1600 return __tcf_classify(skb, tp, tp, res, compat_mode,
1601 &last_executed_chain);
1602#else
1603 u32 last_executed_chain = tp ? tp->chain->index : 0;
1604 const struct tcf_proto *orig_tp = tp;
1605 struct tc_skb_ext *ext;
1606 int ret;
1607
1608 ext = skb_ext_find(skb, TC_SKB_EXT);
1609
1610 if (ext && ext->chain) {
1611 struct tcf_chain *fchain;
1612
1613 fchain = tcf_chain_lookup_rcu(ingress_block, ext->chain);
1614 if (!fchain)
1615 return TC_ACT_SHOT;
1616
1617 /* Consume, so cloned/redirect skbs won't inherit ext */
1618 skb_ext_del(skb, TC_SKB_EXT);
1619
1620 tp = rcu_dereference_bh(fchain->filter_chain);
1621 last_executed_chain = fchain->index;
1622 }
1623
1624 ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
1625 &last_executed_chain);
1626
1627 /* If we missed on some chain */
1628 if (ret == TC_ACT_UNSPEC && last_executed_chain) {
1629 ext = tc_skb_ext_alloc(skb);
1630 if (WARN_ON_ONCE(!ext))
1631 return TC_ACT_SHOT;
1632 ext->chain = last_executed_chain;
1633 ext->mru = qdisc_skb_cb(skb)->mru;
1634 }
1635
1636 return ret;
1637#endif
1638}
1639EXPORT_SYMBOL(tcf_classify_ingress);
1640
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001641struct tcf_chain_info {
1642 struct tcf_proto __rcu **pprev;
1643 struct tcf_proto __rcu *next;
1644};
1645
David Brazdil0f672f62019-12-10 10:32:29 +00001646static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
1647 struct tcf_chain_info *chain_info)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001648{
David Brazdil0f672f62019-12-10 10:32:29 +00001649 return tcf_chain_dereference(*chain_info->pprev, chain);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001650}
1651
David Brazdil0f672f62019-12-10 10:32:29 +00001652static int tcf_chain_tp_insert(struct tcf_chain *chain,
1653 struct tcf_chain_info *chain_info,
1654 struct tcf_proto *tp)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001655{
David Brazdil0f672f62019-12-10 10:32:29 +00001656 if (chain->flushing)
1657 return -EAGAIN;
1658
Olivier Deprez92d4c212022-12-06 15:05:30 +01001659 RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001660 if (*chain_info->pprev == chain->filter_chain)
1661 tcf_chain0_head_change(chain, tp);
David Brazdil0f672f62019-12-10 10:32:29 +00001662 tcf_proto_get(tp);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001663 rcu_assign_pointer(*chain_info->pprev, tp);
David Brazdil0f672f62019-12-10 10:32:29 +00001664
1665 return 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001666}
1667
1668static void tcf_chain_tp_remove(struct tcf_chain *chain,
1669 struct tcf_chain_info *chain_info,
1670 struct tcf_proto *tp)
1671{
David Brazdil0f672f62019-12-10 10:32:29 +00001672 struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001673
David Brazdil0f672f62019-12-10 10:32:29 +00001674 tcf_proto_mark_delete(tp);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001675 if (tp == chain->filter_chain)
1676 tcf_chain0_head_change(chain, next);
1677 RCU_INIT_POINTER(*chain_info->pprev, next);
David Brazdil0f672f62019-12-10 10:32:29 +00001678}
1679
1680static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1681 struct tcf_chain_info *chain_info,
1682 u32 protocol, u32 prio,
1683 bool prio_allocate);
1684
1685/* Try to insert a new proto.
1686 * If a proto with the specified priority already exists, free the new
1687 * proto and return the existing one.
1688 */
1689
1690static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1691 struct tcf_proto *tp_new,
1692 u32 protocol, u32 prio,
1693 bool rtnl_held)
1694{
1695 struct tcf_chain_info chain_info;
1696 struct tcf_proto *tp;
1697 int err = 0;
1698
1699 mutex_lock(&chain->filter_chain_lock);
1700
1701 if (tcf_proto_exists_destroying(chain, tp_new)) {
1702 mutex_unlock(&chain->filter_chain_lock);
1703 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1704 return ERR_PTR(-EAGAIN);
1705 }
1706
1707 tp = tcf_chain_tp_find(chain, &chain_info,
1708 protocol, prio, false);
1709 if (!tp)
1710 err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
1711 mutex_unlock(&chain->filter_chain_lock);
1712
1713 if (tp) {
1714 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1715 tp_new = tp;
1716 } else if (err) {
1717 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1718 tp_new = ERR_PTR(err);
1719 }
1720
1721 return tp_new;
1722}
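/* Illustrative sketch (compiled out): tcf_chain_tp_insert_unique() above is
 * an instance of the optimistic-creation pattern - build the object without
 * the lock, redo the lookup under the lock, and let the loser of the race
 * destroy its copy. A generic form, with hypothetical example_* helpers:
 */
#if 0
struct example_obj {
	u32 key;
	struct list_head node;
};

static struct example_obj *example_insert_unique(struct list_head *head,
						 struct mutex *lock,
						 struct example_obj *cand)
{
	struct example_obj *existing;

	mutex_lock(lock);
	existing = example_lookup(head, cand->key);	/* hypothetical */
	if (!existing)
		list_add(&cand->node, head);
	mutex_unlock(lock);

	if (existing) {
		example_destroy(cand);			/* hypothetical */
		return existing;
	}
	return cand;
}
#endif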
1723
1724static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
1725 struct tcf_proto *tp, bool rtnl_held,
1726 struct netlink_ext_ack *extack)
1727{
1728 struct tcf_chain_info chain_info;
1729 struct tcf_proto *tp_iter;
1730 struct tcf_proto **pprev;
1731 struct tcf_proto *next;
1732
1733 mutex_lock(&chain->filter_chain_lock);
1734
1735 /* Atomically find and remove tp from chain. */
1736 for (pprev = &chain->filter_chain;
1737 (tp_iter = tcf_chain_dereference(*pprev, chain));
1738 pprev = &tp_iter->next) {
1739 if (tp_iter == tp) {
1740 chain_info.pprev = pprev;
1741 chain_info.next = tp_iter->next;
1742 WARN_ON(tp_iter->deleting);
1743 break;
1744 }
1745 }
1746 /* Verify that tp still exists and no new filters were inserted
1747 * concurrently.
1748 * Mark tp for deletion if it is empty.
1749 */
Olivier Deprez0e641232021-09-23 10:07:05 +02001750 if (!tp_iter || !tcf_proto_check_delete(tp)) {
David Brazdil0f672f62019-12-10 10:32:29 +00001751 mutex_unlock(&chain->filter_chain_lock);
1752 return;
1753 }
1754
1755 tcf_proto_signal_destroying(chain, tp);
1756 next = tcf_chain_dereference(chain_info.next, chain);
1757 if (tp == chain->filter_chain)
1758 tcf_chain0_head_change(chain, next);
1759 RCU_INIT_POINTER(*chain_info.pprev, next);
1760 mutex_unlock(&chain->filter_chain_lock);
1761
1762 tcf_proto_put(tp, rtnl_held, extack);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001763}
1764
1765static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1766 struct tcf_chain_info *chain_info,
1767 u32 protocol, u32 prio,
1768 bool prio_allocate)
1769{
1770 struct tcf_proto **pprev;
1771 struct tcf_proto *tp;
1772
1773	/* Check the chain for an existing proto-tcf with this priority */
1774 for (pprev = &chain->filter_chain;
David Brazdil0f672f62019-12-10 10:32:29 +00001775 (tp = tcf_chain_dereference(*pprev, chain));
1776 pprev = &tp->next) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001777 if (tp->prio >= prio) {
1778 if (tp->prio == prio) {
1779 if (prio_allocate ||
1780 (tp->protocol != protocol && protocol))
1781 return ERR_PTR(-EINVAL);
1782 } else {
1783 tp = NULL;
1784 }
1785 break;
1786 }
1787 }
1788 chain_info->pprev = pprev;
David Brazdil0f672f62019-12-10 10:32:29 +00001789 if (tp) {
1790 chain_info->next = tp->next;
1791 tcf_proto_get(tp);
1792 } else {
1793 chain_info->next = NULL;
1794 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001795 return tp;
1796}
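/* Illustrative sketch (compiled out): tcf_chain_tp_find() above relies on
 * the filter chain being kept sorted by ascending priority, so the scan can
 * stop at the first entry whose prio is >= the requested one. The same
 * invariant on a plain (non-RCU) list, with hypothetical types:
 */
#if 0
struct example_node {
	u32 prio;
	struct example_node *next;
};

/* Return the slot where a node of this prio keeps the list ordered. */
static struct example_node **example_find_slot(struct example_node **pprev,
					       u32 prio)
{
	while (*pprev && (*pprev)->prio < prio)
		pprev = &(*pprev)->next;
	return pprev;
}
#endif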
1797
1798static int tcf_fill_node(struct net *net, struct sk_buff *skb,
1799 struct tcf_proto *tp, struct tcf_block *block,
1800 struct Qdisc *q, u32 parent, void *fh,
David Brazdil0f672f62019-12-10 10:32:29 +00001801 u32 portid, u32 seq, u16 flags, int event,
Olivier Deprez157378f2022-04-04 15:47:50 +02001802 bool terse_dump, bool rtnl_held)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001803{
1804 struct tcmsg *tcm;
1805 struct nlmsghdr *nlh;
1806 unsigned char *b = skb_tail_pointer(skb);
1807
1808 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1809 if (!nlh)
1810 goto out_nlmsg_trim;
1811 tcm = nlmsg_data(nlh);
1812 tcm->tcm_family = AF_UNSPEC;
1813 tcm->tcm__pad1 = 0;
1814 tcm->tcm__pad2 = 0;
1815 if (q) {
1816 tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1817 tcm->tcm_parent = parent;
1818 } else {
1819 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
1820 tcm->tcm_block_index = block->index;
1821 }
1822 tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
1823 if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
1824 goto nla_put_failure;
1825 if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
1826 goto nla_put_failure;
1827 if (!fh) {
1828 tcm->tcm_handle = 0;
Olivier Deprez157378f2022-04-04 15:47:50 +02001829 } else if (terse_dump) {
1830 if (tp->ops->terse_dump) {
1831 if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
1832 rtnl_held) < 0)
1833 goto nla_put_failure;
1834 } else {
1835 goto cls_op_not_supp;
1836 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001837 } else {
David Brazdil0f672f62019-12-10 10:32:29 +00001838 if (tp->ops->dump &&
1839 tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001840 goto nla_put_failure;
1841 }
1842 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1843 return skb->len;
1844
1845out_nlmsg_trim:
1846nla_put_failure:
Olivier Deprez157378f2022-04-04 15:47:50 +02001847cls_op_not_supp:
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001848 nlmsg_trim(skb, b);
1849 return -1;
1850}
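/* Illustrative sketch (compiled out): tcf_fill_node() above follows the
 * canonical netlink fill pattern - remember the tail pointer, emit the
 * header and attributes, and on any failure trim the message back so a
 * partially built record never escapes. A minimal form:
 */
#if 0
static int example_fill(struct sk_buff *skb, u32 portid, u32 seq, u32 chain)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, RTM_NEWTFILTER, 0, 0);
	if (!nlh)
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, chain))
		goto nla_put_failure;
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
#endif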
1851
1852static int tfilter_notify(struct net *net, struct sk_buff *oskb,
1853 struct nlmsghdr *n, struct tcf_proto *tp,
1854 struct tcf_block *block, struct Qdisc *q,
David Brazdil0f672f62019-12-10 10:32:29 +00001855 u32 parent, void *fh, int event, bool unicast,
1856 bool rtnl_held)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001857{
1858 struct sk_buff *skb;
1859 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
David Brazdil0f672f62019-12-10 10:32:29 +00001860 int err = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001861
1862 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1863 if (!skb)
1864 return -ENOBUFS;
1865
1866 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
David Brazdil0f672f62019-12-10 10:32:29 +00001867 n->nlmsg_seq, n->nlmsg_flags, event,
Olivier Deprez157378f2022-04-04 15:47:50 +02001868 false, rtnl_held) <= 0) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001869 kfree_skb(skb);
1870 return -EINVAL;
1871 }
1872
1873 if (unicast)
David Brazdil0f672f62019-12-10 10:32:29 +00001874 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
1875 else
1876 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1877 n->nlmsg_flags & NLM_F_ECHO);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001878
David Brazdil0f672f62019-12-10 10:32:29 +00001879 if (err > 0)
1880 err = 0;
1881 return err;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001882}
1883
1884static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
1885 struct nlmsghdr *n, struct tcf_proto *tp,
1886 struct tcf_block *block, struct Qdisc *q,
1887 u32 parent, void *fh, bool unicast, bool *last,
David Brazdil0f672f62019-12-10 10:32:29 +00001888 bool rtnl_held, struct netlink_ext_ack *extack)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001889{
1890 struct sk_buff *skb;
1891 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1892 int err;
1893
1894 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1895 if (!skb)
1896 return -ENOBUFS;
1897
1898 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
David Brazdil0f672f62019-12-10 10:32:29 +00001899 n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
Olivier Deprez157378f2022-04-04 15:47:50 +02001900 false, rtnl_held) <= 0) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001901 NL_SET_ERR_MSG(extack, "Failed to build del event notification");
1902 kfree_skb(skb);
1903 return -EINVAL;
1904 }
1905
David Brazdil0f672f62019-12-10 10:32:29 +00001906 err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001907 if (err) {
1908 kfree_skb(skb);
1909 return err;
1910 }
1911
1912 if (unicast)
David Brazdil0f672f62019-12-10 10:32:29 +00001913 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
1914 else
1915 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1916 n->nlmsg_flags & NLM_F_ECHO);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001917 if (err < 0)
1918 NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
David Brazdil0f672f62019-12-10 10:32:29 +00001919
1920 if (err > 0)
1921 err = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001922 return err;
1923}
1924
1925static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
1926 struct tcf_block *block, struct Qdisc *q,
1927 u32 parent, struct nlmsghdr *n,
David Brazdil0f672f62019-12-10 10:32:29 +00001928 struct tcf_chain *chain, int event,
1929 bool rtnl_held)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001930{
1931 struct tcf_proto *tp;
1932
David Brazdil0f672f62019-12-10 10:32:29 +00001933 for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
1934 tp; tp = tcf_get_next_proto(chain, tp, rtnl_held))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001935 tfilter_notify(net, oskb, n, tp, block,
David Brazdil0f672f62019-12-10 10:32:29 +00001936 q, parent, NULL, event, false, rtnl_held);
1937}
1938
1939static void tfilter_put(struct tcf_proto *tp, void *fh)
1940{
1941 if (tp->ops->put && fh)
1942 tp->ops->put(tp, fh);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001943}
1944
1945static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
1946 struct netlink_ext_ack *extack)
1947{
1948 struct net *net = sock_net(skb->sk);
1949 struct nlattr *tca[TCA_MAX + 1];
David Brazdil0f672f62019-12-10 10:32:29 +00001950 char name[IFNAMSIZ];
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001951 struct tcmsg *t;
1952 u32 protocol;
1953 u32 prio;
1954 bool prio_allocate;
1955 u32 parent;
1956 u32 chain_index;
Olivier Deprez157378f2022-04-04 15:47:50 +02001957 struct Qdisc *q;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001958 struct tcf_chain_info chain_info;
Olivier Deprez157378f2022-04-04 15:47:50 +02001959 struct tcf_chain *chain;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001960 struct tcf_block *block;
1961 struct tcf_proto *tp;
1962 unsigned long cl;
1963 void *fh;
1964 int err;
1965 int tp_created;
David Brazdil0f672f62019-12-10 10:32:29 +00001966 bool rtnl_held = false;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001967
1968 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1969 return -EPERM;
1970
1971replay:
1972 tp_created = 0;
1973
David Brazdil0f672f62019-12-10 10:32:29 +00001974 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
1975 rtm_tca_policy, extack);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001976 if (err < 0)
1977 return err;
1978
1979 t = nlmsg_data(n);
1980 protocol = TC_H_MIN(t->tcm_info);
1981 prio = TC_H_MAJ(t->tcm_info);
1982 prio_allocate = false;
1983 parent = t->tcm_parent;
David Brazdil0f672f62019-12-10 10:32:29 +00001984 tp = NULL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001985 cl = 0;
David Brazdil0f672f62019-12-10 10:32:29 +00001986 block = NULL;
Olivier Deprez157378f2022-04-04 15:47:50 +02001987 q = NULL;
1988 chain = NULL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00001989
1990 if (prio == 0) {
1991 /* If no priority is provided by the user,
1992 * we allocate one.
1993 */
1994 if (n->nlmsg_flags & NLM_F_CREATE) {
1995 prio = TC_H_MAKE(0x80000000U, 0U);
1996 prio_allocate = true;
1997 } else {
1998 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
1999 return -ENOENT;
2000 }
2001 }
2002
2003 /* Find head of filter chain. */
2004
David Brazdil0f672f62019-12-10 10:32:29 +00002005 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2006 if (err)
2007 return err;
2008
2009 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2010 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2011 err = -EINVAL;
2012 goto errout;
2013 }
2014
2015	/* Take the rtnl mutex if rtnl_held was set to true on a previous
2016	 * iteration, the block is shared (no qdisc found), the qdisc is not
2017	 * unlocked, the classifier type is unspecified, or the classifier is not unlocked.
2018	 */
2019 if (rtnl_held ||
2020 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2021 !tcf_proto_is_unlocked(name)) {
2022 rtnl_held = true;
2023 rtnl_lock();
2024 }
2025
2026 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2027 if (err)
2028 goto errout;
2029
2030 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2031 extack);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002032 if (IS_ERR(block)) {
2033 err = PTR_ERR(block);
2034 goto errout;
2035 }
Olivier Deprez0e641232021-09-23 10:07:05 +02002036 block->classid = parent;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002037
2038 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2039 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2040 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2041 err = -EINVAL;
2042 goto errout;
2043 }
2044 chain = tcf_chain_get(block, chain_index, true);
2045 if (!chain) {
2046 NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2047 err = -ENOMEM;
2048 goto errout;
2049 }
2050
David Brazdil0f672f62019-12-10 10:32:29 +00002051 mutex_lock(&chain->filter_chain_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002052 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2053 prio, prio_allocate);
2054 if (IS_ERR(tp)) {
2055 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2056 err = PTR_ERR(tp);
David Brazdil0f672f62019-12-10 10:32:29 +00002057 goto errout_locked;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002058 }
2059
2060 if (tp == NULL) {
David Brazdil0f672f62019-12-10 10:32:29 +00002061 struct tcf_proto *tp_new = NULL;
2062
2063 if (chain->flushing) {
2064 err = -EAGAIN;
2065 goto errout_locked;
2066 }
2067
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002068		/* Proto-tcf does not exist, create a new one */
2069
2070 if (tca[TCA_KIND] == NULL || !protocol) {
2071 NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2072 err = -EINVAL;
David Brazdil0f672f62019-12-10 10:32:29 +00002073 goto errout_locked;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002074 }
2075
2076 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2077 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2078 err = -ENOENT;
David Brazdil0f672f62019-12-10 10:32:29 +00002079 goto errout_locked;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002080 }
2081
2082 if (prio_allocate)
David Brazdil0f672f62019-12-10 10:32:29 +00002083 prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2084 &chain_info));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002085
David Brazdil0f672f62019-12-10 10:32:29 +00002086 mutex_unlock(&chain->filter_chain_lock);
Olivier Deprez0e641232021-09-23 10:07:05 +02002087 tp_new = tcf_proto_create(name, protocol, prio, chain,
2088 rtnl_held, extack);
David Brazdil0f672f62019-12-10 10:32:29 +00002089 if (IS_ERR(tp_new)) {
2090 err = PTR_ERR(tp_new);
2091 goto errout_tp;
2092 }
2093
2094 tp_created = 1;
2095 tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2096 rtnl_held);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002097 if (IS_ERR(tp)) {
2098 err = PTR_ERR(tp);
David Brazdil0f672f62019-12-10 10:32:29 +00002099 goto errout_tp;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002100 }
David Brazdil0f672f62019-12-10 10:32:29 +00002101 } else {
2102 mutex_unlock(&chain->filter_chain_lock);
2103 }
2104
2105 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002106 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2107 err = -EINVAL;
2108 goto errout;
2109 }
2110
2111 fh = tp->ops->get(tp, t->tcm_handle);
2112
2113 if (!fh) {
2114 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2115 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2116 err = -ENOENT;
2117 goto errout;
2118 }
2119 } else if (n->nlmsg_flags & NLM_F_EXCL) {
David Brazdil0f672f62019-12-10 10:32:29 +00002120 tfilter_put(tp, fh);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002121 NL_SET_ERR_MSG(extack, "Filter already exists");
2122 err = -EEXIST;
2123 goto errout;
2124 }
2125
2126 if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
Olivier Deprez92d4c212022-12-06 15:05:30 +01002127 tfilter_put(tp, fh);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002128 NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2129 err = -EINVAL;
2130 goto errout;
2131 }
2132
2133 err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2134 n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
David Brazdil0f672f62019-12-10 10:32:29 +00002135 rtnl_held, extack);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002136 if (err == 0) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002137 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
David Brazdil0f672f62019-12-10 10:32:29 +00002138 RTM_NEWTFILTER, false, rtnl_held);
2139 tfilter_put(tp, fh);
2140 /* q pointer is NULL for shared blocks */
2141 if (q)
2142 q->flags &= ~TCQ_F_CAN_BYPASS;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002143 }
2144
2145errout:
David Brazdil0f672f62019-12-10 10:32:29 +00002146 if (err && tp_created)
2147 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2148errout_tp:
2149 if (chain) {
2150 if (tp && !IS_ERR(tp))
2151 tcf_proto_put(tp, rtnl_held, NULL);
2152 if (!tp_created)
2153 tcf_chain_put(chain);
2154 }
2155 tcf_block_release(q, block, rtnl_held);
2156
2157 if (rtnl_held)
2158 rtnl_unlock();
2159
2160 if (err == -EAGAIN) {
2161 /* Take rtnl lock in case EAGAIN is caused by concurrent flush
2162 * of target chain.
2163 */
2164 rtnl_held = true;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002165 /* Replay the request. */
2166 goto replay;
David Brazdil0f672f62019-12-10 10:32:29 +00002167 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002168 return err;
David Brazdil0f672f62019-12-10 10:32:29 +00002169
2170errout_locked:
2171 mutex_unlock(&chain->filter_chain_lock);
2172 goto errout;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002173}
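/* Illustrative sketch (compiled out): the rtnl-lock decision that
 * tc_new_tfilter() spells out inline above, folded into one predicate.
 * The helper name is an assumption.
 */
#if 0
static bool example_need_rtnl(bool rtnl_held, const struct Qdisc *q,
			      const char *kind)
{
	return rtnl_held ||
	       (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	       !tcf_proto_is_unlocked(kind);
}
#endif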
2174
2175static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2176 struct netlink_ext_ack *extack)
2177{
2178 struct net *net = sock_net(skb->sk);
2179 struct nlattr *tca[TCA_MAX + 1];
David Brazdil0f672f62019-12-10 10:32:29 +00002180 char name[IFNAMSIZ];
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002181 struct tcmsg *t;
2182 u32 protocol;
2183 u32 prio;
2184 u32 parent;
2185 u32 chain_index;
2186 struct Qdisc *q = NULL;
2187 struct tcf_chain_info chain_info;
2188 struct tcf_chain *chain = NULL;
David Brazdil0f672f62019-12-10 10:32:29 +00002189 struct tcf_block *block = NULL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002190 struct tcf_proto *tp = NULL;
2191 unsigned long cl = 0;
2192 void *fh = NULL;
2193 int err;
David Brazdil0f672f62019-12-10 10:32:29 +00002194 bool rtnl_held = false;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002195
2196 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2197 return -EPERM;
2198
David Brazdil0f672f62019-12-10 10:32:29 +00002199 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2200 rtm_tca_policy, extack);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002201 if (err < 0)
2202 return err;
2203
2204 t = nlmsg_data(n);
2205 protocol = TC_H_MIN(t->tcm_info);
2206 prio = TC_H_MAJ(t->tcm_info);
2207 parent = t->tcm_parent;
2208
2209 if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2210 NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2211 return -ENOENT;
2212 }
2213
2214 /* Find head of filter chain. */
2215
David Brazdil0f672f62019-12-10 10:32:29 +00002216 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2217 if (err)
2218 return err;
2219
2220 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2221 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2222 err = -EINVAL;
2223 goto errout;
2224 }
2225	/* Take the rtnl mutex if flushing the whole chain, the block is shared
2226	 * (no qdisc found), the qdisc is not unlocked, the classifier type is
2227	 * unspecified, or the classifier is not unlocked.
2228	 */
2229 if (!prio ||
2230 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2231 !tcf_proto_is_unlocked(name)) {
2232 rtnl_held = true;
2233 rtnl_lock();
2234 }
2235
2236 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2237 if (err)
2238 goto errout;
2239
2240 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2241 extack);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002242 if (IS_ERR(block)) {
2243 err = PTR_ERR(block);
2244 goto errout;
2245 }
2246
2247 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2248 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2249 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2250 err = -EINVAL;
2251 goto errout;
2252 }
2253 chain = tcf_chain_get(block, chain_index, false);
2254 if (!chain) {
2255 /* User requested flush on non-existent chain. Nothing to do,
2256 * so just return success.
2257 */
2258 if (prio == 0) {
2259 err = 0;
2260 goto errout;
2261 }
2262 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2263 err = -ENOENT;
2264 goto errout;
2265 }
2266
2267 if (prio == 0) {
2268 tfilter_notify_chain(net, skb, block, q, parent, n,
David Brazdil0f672f62019-12-10 10:32:29 +00002269 chain, RTM_DELTFILTER, rtnl_held);
2270 tcf_chain_flush(chain, rtnl_held);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002271 err = 0;
2272 goto errout;
2273 }
2274
David Brazdil0f672f62019-12-10 10:32:29 +00002275 mutex_lock(&chain->filter_chain_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002276 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2277 prio, false);
2278 if (!tp || IS_ERR(tp)) {
2279 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2280 err = tp ? PTR_ERR(tp) : -ENOENT;
David Brazdil0f672f62019-12-10 10:32:29 +00002281 goto errout_locked;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002282 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2283 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2284 err = -EINVAL;
David Brazdil0f672f62019-12-10 10:32:29 +00002285 goto errout_locked;
2286 } else if (t->tcm_handle == 0) {
2287 tcf_proto_signal_destroying(chain, tp);
2288 tcf_chain_tp_remove(chain, &chain_info, tp);
2289 mutex_unlock(&chain->filter_chain_lock);
2290
2291 tcf_proto_put(tp, rtnl_held, NULL);
2292 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2293 RTM_DELTFILTER, false, rtnl_held);
2294 err = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002295 goto errout;
2296 }
David Brazdil0f672f62019-12-10 10:32:29 +00002297 mutex_unlock(&chain->filter_chain_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002298
2299 fh = tp->ops->get(tp, t->tcm_handle);
2300
2301 if (!fh) {
David Brazdil0f672f62019-12-10 10:32:29 +00002302 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2303 err = -ENOENT;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002304 } else {
2305 bool last;
2306
2307 err = tfilter_del_notify(net, skb, n, tp, block,
2308 q, parent, fh, false, &last,
David Brazdil0f672f62019-12-10 10:32:29 +00002309 rtnl_held, extack);
2310
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002311 if (err)
2312 goto errout;
David Brazdil0f672f62019-12-10 10:32:29 +00002313 if (last)
2314 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002315 }
2316
2317errout:
David Brazdil0f672f62019-12-10 10:32:29 +00002318 if (chain) {
2319 if (tp && !IS_ERR(tp))
2320 tcf_proto_put(tp, rtnl_held, NULL);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002321 tcf_chain_put(chain);
David Brazdil0f672f62019-12-10 10:32:29 +00002322 }
2323 tcf_block_release(q, block, rtnl_held);
2324
2325 if (rtnl_held)
2326 rtnl_unlock();
2327
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002328 return err;
David Brazdil0f672f62019-12-10 10:32:29 +00002329
2330errout_locked:
2331 mutex_unlock(&chain->filter_chain_lock);
2332 goto errout;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002333}
2334
2335static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2336 struct netlink_ext_ack *extack)
2337{
2338 struct net *net = sock_net(skb->sk);
2339 struct nlattr *tca[TCA_MAX + 1];
David Brazdil0f672f62019-12-10 10:32:29 +00002340 char name[IFNAMSIZ];
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002341 struct tcmsg *t;
2342 u32 protocol;
2343 u32 prio;
2344 u32 parent;
2345 u32 chain_index;
2346 struct Qdisc *q = NULL;
2347 struct tcf_chain_info chain_info;
2348 struct tcf_chain *chain = NULL;
David Brazdil0f672f62019-12-10 10:32:29 +00002349 struct tcf_block *block = NULL;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002350 struct tcf_proto *tp = NULL;
2351 unsigned long cl = 0;
2352 void *fh = NULL;
2353 int err;
David Brazdil0f672f62019-12-10 10:32:29 +00002354 bool rtnl_held = false;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002355
David Brazdil0f672f62019-12-10 10:32:29 +00002356 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2357 rtm_tca_policy, extack);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002358 if (err < 0)
2359 return err;
2360
2361 t = nlmsg_data(n);
2362 protocol = TC_H_MIN(t->tcm_info);
2363 prio = TC_H_MAJ(t->tcm_info);
2364 parent = t->tcm_parent;
2365
2366 if (prio == 0) {
2367 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2368 return -ENOENT;
2369 }
2370
2371 /* Find head of filter chain. */
2372
David Brazdil0f672f62019-12-10 10:32:29 +00002373 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2374 if (err)
2375 return err;
2376
2377 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2378 NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2379 err = -EINVAL;
2380 goto errout;
2381 }
2382	/* Take the rtnl mutex if the block is shared (no qdisc found), the
2383	 * qdisc is not unlocked, the classifier type is unspecified, or the
2384	 * classifier is not unlocked.
2385	 */
2386 if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2387 !tcf_proto_is_unlocked(name)) {
2388 rtnl_held = true;
2389 rtnl_lock();
2390 }
2391
2392 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2393 if (err)
2394 goto errout;
2395
2396 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2397 extack);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002398 if (IS_ERR(block)) {
2399 err = PTR_ERR(block);
2400 goto errout;
2401 }
2402
2403 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2404 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2405 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2406 err = -EINVAL;
2407 goto errout;
2408 }
2409 chain = tcf_chain_get(block, chain_index, false);
2410 if (!chain) {
2411 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2412 err = -EINVAL;
2413 goto errout;
2414 }
2415
David Brazdil0f672f62019-12-10 10:32:29 +00002416 mutex_lock(&chain->filter_chain_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002417 tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2418 prio, false);
David Brazdil0f672f62019-12-10 10:32:29 +00002419 mutex_unlock(&chain->filter_chain_lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002420 if (!tp || IS_ERR(tp)) {
2421 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2422 err = tp ? PTR_ERR(tp) : -ENOENT;
2423 goto errout;
2424 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2425 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2426 err = -EINVAL;
2427 goto errout;
2428 }
2429
2430 fh = tp->ops->get(tp, t->tcm_handle);
2431
2432 if (!fh) {
2433 NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2434 err = -ENOENT;
2435 } else {
2436 err = tfilter_notify(net, skb, n, tp, block, q, parent,
David Brazdil0f672f62019-12-10 10:32:29 +00002437 fh, RTM_NEWTFILTER, true, rtnl_held);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002438 if (err < 0)
2439 NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2440 }
2441
David Brazdil0f672f62019-12-10 10:32:29 +00002442 tfilter_put(tp, fh);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002443errout:
David Brazdil0f672f62019-12-10 10:32:29 +00002444 if (chain) {
2445 if (tp && !IS_ERR(tp))
2446 tcf_proto_put(tp, rtnl_held, NULL);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002447 tcf_chain_put(chain);
David Brazdil0f672f62019-12-10 10:32:29 +00002448 }
2449 tcf_block_release(q, block, rtnl_held);
2450
2451 if (rtnl_held)
2452 rtnl_unlock();
2453
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002454 return err;
2455}
2456
2457struct tcf_dump_args {
2458 struct tcf_walker w;
2459 struct sk_buff *skb;
2460 struct netlink_callback *cb;
2461 struct tcf_block *block;
2462 struct Qdisc *q;
2463 u32 parent;
Olivier Deprez157378f2022-04-04 15:47:50 +02002464 bool terse_dump;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002465};
2466
2467static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2468{
2469 struct tcf_dump_args *a = (void *)arg;
2470 struct net *net = sock_net(a->skb->sk);
2471
2472 return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2473 n, NETLINK_CB(a->cb->skb).portid,
2474 a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
Olivier Deprez157378f2022-04-04 15:47:50 +02002475 RTM_NEWTFILTER, a->terse_dump, true);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002476}
2477
2478static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2479 struct sk_buff *skb, struct netlink_callback *cb,
Olivier Deprez157378f2022-04-04 15:47:50 +02002480 long index_start, long *p_index, bool terse)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002481{
2482 struct net *net = sock_net(skb->sk);
2483 struct tcf_block *block = chain->block;
2484 struct tcmsg *tcm = nlmsg_data(cb->nlh);
David Brazdil0f672f62019-12-10 10:32:29 +00002485 struct tcf_proto *tp, *tp_prev;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002486 struct tcf_dump_args arg;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002487
David Brazdil0f672f62019-12-10 10:32:29 +00002488 for (tp = __tcf_get_next_proto(chain, NULL);
2489 tp;
2490 tp_prev = tp,
2491 tp = __tcf_get_next_proto(chain, tp),
2492 tcf_proto_put(tp_prev, true, NULL),
2493 (*p_index)++) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002494 if (*p_index < index_start)
2495 continue;
2496 if (TC_H_MAJ(tcm->tcm_info) &&
2497 TC_H_MAJ(tcm->tcm_info) != tp->prio)
2498 continue;
2499 if (TC_H_MIN(tcm->tcm_info) &&
2500 TC_H_MIN(tcm->tcm_info) != tp->protocol)
2501 continue;
2502 if (*p_index > index_start)
2503 memset(&cb->args[1], 0,
2504 sizeof(cb->args) - sizeof(cb->args[0]));
2505 if (cb->args[1] == 0) {
2506 if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2507 NETLINK_CB(cb->skb).portid,
2508 cb->nlh->nlmsg_seq, NLM_F_MULTI,
Olivier Deprez157378f2022-04-04 15:47:50 +02002509 RTM_NEWTFILTER, false, true) <= 0)
David Brazdil0f672f62019-12-10 10:32:29 +00002510 goto errout;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002511 cb->args[1] = 1;
2512 }
2513 if (!tp->ops->walk)
2514 continue;
2515 arg.w.fn = tcf_node_dump;
2516 arg.skb = skb;
2517 arg.cb = cb;
2518 arg.block = block;
2519 arg.q = q;
2520 arg.parent = parent;
2521 arg.w.stop = 0;
2522 arg.w.skip = cb->args[1] - 1;
2523 arg.w.count = 0;
2524 arg.w.cookie = cb->args[2];
Olivier Deprez157378f2022-04-04 15:47:50 +02002525 arg.terse_dump = terse;
David Brazdil0f672f62019-12-10 10:32:29 +00002526 tp->ops->walk(tp, &arg.w, true);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002527 cb->args[2] = arg.w.cookie;
2528 cb->args[1] = arg.w.count + 1;
2529 if (arg.w.stop)
David Brazdil0f672f62019-12-10 10:32:29 +00002530 goto errout;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002531 }
2532 return true;
David Brazdil0f672f62019-12-10 10:32:29 +00002533
2534errout:
2535 tcf_proto_put(tp, true, NULL);
2536 return false;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002537}
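/* Illustrative sketch (compiled out): tcf_chain_dump() above drives each
 * classifier's ->walk() op through a struct tcf_walker whose skip/count/
 * cookie fields make the dump resumable across netlink callbacks. A walker
 * callback typically looks like this; example_emit() is assumed:
 */
#if 0
static int example_walk_cb(struct tcf_proto *tp, void *fh,
			   struct tcf_walker *arg)
{
	/* A negative return stops the walk (arg->stop is set by the
	 * classifier); arg->count tracks visited filters so the next
	 * dump pass can skip them.
	 */
	if (example_emit(tp, fh) < 0)
		return -1;
	return 0;
}
#endif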
2538
Olivier Deprez157378f2022-04-04 15:47:50 +02002539static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
2540 [TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
2541};
2542
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002543/* called with RTNL */
2544static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2545{
David Brazdil0f672f62019-12-10 10:32:29 +00002546 struct tcf_chain *chain, *chain_prev;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002547 struct net *net = sock_net(skb->sk);
2548 struct nlattr *tca[TCA_MAX + 1];
2549 struct Qdisc *q = NULL;
2550 struct tcf_block *block;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002551 struct tcmsg *tcm = nlmsg_data(cb->nlh);
Olivier Deprez157378f2022-04-04 15:47:50 +02002552 bool terse_dump = false;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002553 long index_start;
2554 long index;
2555 u32 parent;
2556 int err;
2557
2558 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2559 return skb->len;
2560
David Brazdil0f672f62019-12-10 10:32:29 +00002561 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
Olivier Deprez157378f2022-04-04 15:47:50 +02002562 tcf_tfilter_dump_policy, cb->extack);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002563 if (err)
2564 return err;
2565
Olivier Deprez157378f2022-04-04 15:47:50 +02002566 if (tca[TCA_DUMP_FLAGS]) {
2567 struct nla_bitfield32 flags =
2568 nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);
2569
2570 terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
2571 }
2572
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002573 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
David Brazdil0f672f62019-12-10 10:32:29 +00002574 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002575 if (!block)
2576 goto out;
2577		/* If we work with a block index, q is NULL and the parent value
2578		 * will never be used in the following code. The check
2579		 * in tcf_fill_node prevents it. However, the compiler does not
2580		 * see that far, so set parent to zero to silence the warning
2581		 * about parent being uninitialized.
2582		 */
2583 parent = 0;
2584 } else {
2585 const struct Qdisc_class_ops *cops;
2586 struct net_device *dev;
2587 unsigned long cl = 0;
2588
2589 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2590 if (!dev)
2591 return skb->len;
2592
2593 parent = tcm->tcm_parent;
Olivier Deprez0e641232021-09-23 10:07:05 +02002594 if (!parent)
Olivier Deprez157378f2022-04-04 15:47:50 +02002595 q = rtnl_dereference(dev->qdisc);
Olivier Deprez0e641232021-09-23 10:07:05 +02002596 else
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002597 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002598 if (!q)
2599 goto out;
2600 cops = q->ops->cl_ops;
2601 if (!cops)
2602 goto out;
2603 if (!cops->tcf_block)
2604 goto out;
2605 if (TC_H_MIN(tcm->tcm_parent)) {
2606 cl = cops->find(q, tcm->tcm_parent);
2607 if (cl == 0)
2608 goto out;
2609 }
2610 block = cops->tcf_block(q, cl, NULL);
2611 if (!block)
2612 goto out;
Olivier Deprez0e641232021-09-23 10:07:05 +02002613 parent = block->classid;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002614 if (tcf_block_shared(block))
2615 q = NULL;
2616 }
2617
2618 index_start = cb->args[0];
2619 index = 0;
2620
David Brazdil0f672f62019-12-10 10:32:29 +00002621 for (chain = __tcf_get_next_chain(block, NULL);
2622 chain;
2623 chain_prev = chain,
2624 chain = __tcf_get_next_chain(block, chain),
2625 tcf_chain_put(chain_prev)) {
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002626 if (tca[TCA_CHAIN] &&
2627 nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2628 continue;
2629 if (!tcf_chain_dump(chain, q, parent, skb, cb,
Olivier Deprez157378f2022-04-04 15:47:50 +02002630 index_start, &index, terse_dump)) {
David Brazdil0f672f62019-12-10 10:32:29 +00002631 tcf_chain_put(chain);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002632 err = -EMSGSIZE;
2633 break;
2634 }
2635 }
2636
David Brazdil0f672f62019-12-10 10:32:29 +00002637 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2638 tcf_block_refcnt_put(block, true);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002639 cb->args[0] = index;
2640
2641out:
2642	/* If we made no progress, the error (EMSGSIZE) is real */
2643 if (skb->len == 0 && err)
2644 return err;
2645 return skb->len;
2646}
2647
David Brazdil0f672f62019-12-10 10:32:29 +00002648static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2649 void *tmplt_priv, u32 chain_index,
2650 struct net *net, struct sk_buff *skb,
2651 struct tcf_block *block,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002652 u32 portid, u32 seq, u16 flags, int event)
2653{
2654 unsigned char *b = skb_tail_pointer(skb);
2655 const struct tcf_proto_ops *ops;
2656 struct nlmsghdr *nlh;
2657 struct tcmsg *tcm;
2658 void *priv;
2659
David Brazdil0f672f62019-12-10 10:32:29 +00002660 ops = tmplt_ops;
2661 priv = tmplt_priv;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002662
2663 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2664 if (!nlh)
2665 goto out_nlmsg_trim;
2666 tcm = nlmsg_data(nlh);
2667 tcm->tcm_family = AF_UNSPEC;
2668 tcm->tcm__pad1 = 0;
2669 tcm->tcm__pad2 = 0;
2670 tcm->tcm_handle = 0;
2671 if (block->q) {
2672 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2673 tcm->tcm_parent = block->q->handle;
2674 } else {
2675 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2676 tcm->tcm_block_index = block->index;
2677 }
2678
David Brazdil0f672f62019-12-10 10:32:29 +00002679 if (nla_put_u32(skb, TCA_CHAIN, chain_index))
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002680 goto nla_put_failure;
2681
2682 if (ops) {
2683 if (nla_put_string(skb, TCA_KIND, ops->kind))
2684 goto nla_put_failure;
2685 if (ops->tmplt_dump(skb, net, priv) < 0)
2686 goto nla_put_failure;
2687 }
2688
2689 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2690 return skb->len;
2691
2692out_nlmsg_trim:
2693nla_put_failure:
2694 nlmsg_trim(skb, b);
2695 return -EMSGSIZE;
2696}
2697
2698static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2699 u32 seq, u16 flags, int event, bool unicast)
2700{
2701 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2702 struct tcf_block *block = chain->block;
2703 struct net *net = block->net;
2704 struct sk_buff *skb;
David Brazdil0f672f62019-12-10 10:32:29 +00002705 int err = 0;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002706
2707 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2708 if (!skb)
2709 return -ENOBUFS;
2710
David Brazdil0f672f62019-12-10 10:32:29 +00002711 if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2712 chain->index, net, skb, block, portid,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002713 seq, flags, event) <= 0) {
2714 kfree_skb(skb);
2715 return -EINVAL;
2716 }
2717
2718 if (unicast)
David Brazdil0f672f62019-12-10 10:32:29 +00002719 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2720 else
2721 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2722 flags & NLM_F_ECHO);
2723
2724 if (err > 0)
2725 err = 0;
2726 return err;
2727}
2728
2729static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2730 void *tmplt_priv, u32 chain_index,
2731 struct tcf_block *block, struct sk_buff *oskb,
2732 u32 seq, u16 flags, bool unicast)
2733{
2734 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2735 struct net *net = block->net;
2736 struct sk_buff *skb;
2737
2738 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2739 if (!skb)
2740 return -ENOBUFS;
2741
2742 if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2743 block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
2744 kfree_skb(skb);
2745 return -EINVAL;
2746 }
2747
2748 if (unicast)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002749 return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2750
2751 return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2752}
2753
2754static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2755 struct nlattr **tca,
2756 struct netlink_ext_ack *extack)
2757{
2758 const struct tcf_proto_ops *ops;
Olivier Deprez0e641232021-09-23 10:07:05 +02002759 char name[IFNAMSIZ];
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002760 void *tmplt_priv;
2761
2762	/* If kind is not set, the user did not specify a template. */
2763 if (!tca[TCA_KIND])
2764 return 0;
2765
Olivier Deprez0e641232021-09-23 10:07:05 +02002766 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2767 NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
2768 return -EINVAL;
2769 }
2770
2771 ops = tcf_proto_lookup_ops(name, true, extack);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002772 if (IS_ERR(ops))
2773 return PTR_ERR(ops);
2774 if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2775 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
2776 return -EOPNOTSUPP;
2777 }
2778
2779 tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2780 if (IS_ERR(tmplt_priv)) {
2781 module_put(ops->owner);
2782 return PTR_ERR(tmplt_priv);
2783 }
2784 chain->tmplt_ops = ops;
2785 chain->tmplt_priv = tmplt_priv;
2786 return 0;
2787}
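/* Illustrative sketch (compiled out): tc_chain_tmplt_add() above refuses a
 * template unless the classifier implements all three tmplt_* callbacks.
 * A classifier advertising template support would declare something like
 * the following; every example_* symbol is hypothetical:
 */
#if 0
static const struct tcf_proto_ops example_cls_ops = {
	.kind		= "example",
	.tmplt_create	= example_tmplt_create,
	.tmplt_destroy	= example_tmplt_destroy,
	.tmplt_dump	= example_tmplt_dump,
	/* ...plus the usual change/get/delete/walk/dump ops... */
};
#endif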
2788
David Brazdil0f672f62019-12-10 10:32:29 +00002789static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2790 void *tmplt_priv)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002791{
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002792	/* If template ops are not set, there is no template to destroy. */
David Brazdil0f672f62019-12-10 10:32:29 +00002793 if (!tmplt_ops)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002794 return;
2795
David Brazdil0f672f62019-12-10 10:32:29 +00002796 tmplt_ops->tmplt_destroy(tmplt_priv);
2797 module_put(tmplt_ops->owner);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002798}
2799
2800/* Add/delete/get a chain */
2801
2802static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2803 struct netlink_ext_ack *extack)
2804{
2805 struct net *net = sock_net(skb->sk);
2806 struct nlattr *tca[TCA_MAX + 1];
2807 struct tcmsg *t;
2808 u32 parent;
2809 u32 chain_index;
Olivier Deprez157378f2022-04-04 15:47:50 +02002810 struct Qdisc *q;
2811 struct tcf_chain *chain;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002812 struct tcf_block *block;
2813 unsigned long cl;
2814 int err;
2815
2816 if (n->nlmsg_type != RTM_GETCHAIN &&
2817 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2818 return -EPERM;
2819
2820replay:
Olivier Deprez157378f2022-04-04 15:47:50 +02002821 q = NULL;
David Brazdil0f672f62019-12-10 10:32:29 +00002822 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2823 rtm_tca_policy, extack);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002824 if (err < 0)
2825 return err;
2826
2827 t = nlmsg_data(n);
2828 parent = t->tcm_parent;
2829 cl = 0;
2830
2831 block = tcf_block_find(net, &q, &parent, &cl,
2832 t->tcm_ifindex, t->tcm_block_index, extack);
2833 if (IS_ERR(block))
2834 return PTR_ERR(block);
2835
2836 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2837 if (chain_index > TC_ACT_EXT_VAL_MASK) {
2838 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
David Brazdil0f672f62019-12-10 10:32:29 +00002839 err = -EINVAL;
2840 goto errout_block;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002841 }
David Brazdil0f672f62019-12-10 10:32:29 +00002842
2843 mutex_lock(&block->lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002844 chain = tcf_chain_lookup(block, chain_index);
2845 if (n->nlmsg_type == RTM_NEWCHAIN) {
2846 if (chain) {
2847 if (tcf_chain_held_by_acts_only(chain)) {
2848 /* The chain exists only because there is
2849 * some action referencing it.
2850 */
2851 tcf_chain_hold(chain);
2852 } else {
2853 NL_SET_ERR_MSG(extack, "Filter chain already exists");
David Brazdil0f672f62019-12-10 10:32:29 +00002854 err = -EEXIST;
2855 goto errout_block_locked;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002856 }
2857 } else {
2858 if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2859 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
David Brazdil0f672f62019-12-10 10:32:29 +00002860 err = -ENOENT;
2861 goto errout_block_locked;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002862 }
2863 chain = tcf_chain_create(block, chain_index);
2864 if (!chain) {
2865 NL_SET_ERR_MSG(extack, "Failed to create filter chain");
David Brazdil0f672f62019-12-10 10:32:29 +00002866 err = -ENOMEM;
2867 goto errout_block_locked;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002868 }
2869 }
2870 } else {
2871 if (!chain || tcf_chain_held_by_acts_only(chain)) {
2872 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
David Brazdil0f672f62019-12-10 10:32:29 +00002873 err = -EINVAL;
2874 goto errout_block_locked;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002875 }
2876 tcf_chain_hold(chain);
2877 }
2878
David Brazdil0f672f62019-12-10 10:32:29 +00002879 if (n->nlmsg_type == RTM_NEWCHAIN) {
2880		/* Modifying a chain requires holding the parent block lock. In
2881		 * case the chain was successfully added, take a reference to the
2882		 * chain. This ensures that an empty chain does not disappear at
2883		 * the end of this function.
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002884		 */
2885 tcf_chain_hold(chain);
2886 chain->explicitly_created = true;
David Brazdil0f672f62019-12-10 10:32:29 +00002887 }
2888 mutex_unlock(&block->lock);
2889
2890 switch (n->nlmsg_type) {
2891 case RTM_NEWCHAIN:
2892 err = tc_chain_tmplt_add(chain, net, tca, extack);
2893 if (err) {
2894 tcf_chain_put_explicitly_created(chain);
2895 goto errout;
2896 }
2897
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002898 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
2899 RTM_NEWCHAIN, false);
2900 break;
2901 case RTM_DELCHAIN:
2902 tfilter_notify_chain(net, skb, block, q, parent, n,
David Brazdil0f672f62019-12-10 10:32:29 +00002903 chain, RTM_DELTFILTER, true);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002904 /* Flush the chain first as the user requested chain removal. */
David Brazdil0f672f62019-12-10 10:32:29 +00002905 tcf_chain_flush(chain, true);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002906 /* In case the chain was successfully deleted, put a reference
2907 * to the chain previously taken during addition.
2908 */
2909 tcf_chain_put_explicitly_created(chain);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002910 break;
2911 case RTM_GETCHAIN:
2912 err = tc_chain_notify(chain, skb, n->nlmsg_seq,
Olivier Deprez0e641232021-09-23 10:07:05 +02002913 n->nlmsg_flags, n->nlmsg_type, true);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002914 if (err < 0)
2915 NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
2916 break;
2917 default:
2918 err = -EOPNOTSUPP;
2919 NL_SET_ERR_MSG(extack, "Unsupported message type");
2920 goto errout;
2921 }
2922
2923errout:
2924 tcf_chain_put(chain);
David Brazdil0f672f62019-12-10 10:32:29 +00002925errout_block:
2926 tcf_block_release(q, block, true);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002927 if (err == -EAGAIN)
2928 /* Replay the request. */
2929 goto replay;
2930 return err;
David Brazdil0f672f62019-12-10 10:32:29 +00002931
2932errout_block_locked:
2933 mutex_unlock(&block->lock);
2934 goto errout_block;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002935}
2936
2937/* called with RTNL */
2938static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
2939{
2940 struct net *net = sock_net(skb->sk);
2941 struct nlattr *tca[TCA_MAX + 1];
2942 struct Qdisc *q = NULL;
2943 struct tcf_block *block;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002944 struct tcmsg *tcm = nlmsg_data(cb->nlh);
David Brazdil0f672f62019-12-10 10:32:29 +00002945 struct tcf_chain *chain;
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002946 long index_start;
2947 long index;
2948 u32 parent;
2949 int err;
2950
2951 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2952 return skb->len;
2953
David Brazdil0f672f62019-12-10 10:32:29 +00002954 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2955 rtm_tca_policy, cb->extack);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002956 if (err)
2957 return err;
2958
2959 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
David Brazdil0f672f62019-12-10 10:32:29 +00002960 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002961 if (!block)
2962 goto out;
2963		/* If we work with a block index, q is NULL and the parent value
2964		 * will never be used in the following code. The check
2965		 * in tcf_fill_node prevents it. However, the compiler does not
2966		 * see that far, so set parent to zero to silence the warning
2967		 * about parent being uninitialized.
2968		 */
2969 parent = 0;
2970 } else {
2971 const struct Qdisc_class_ops *cops;
2972 struct net_device *dev;
2973 unsigned long cl = 0;
2974
2975 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2976 if (!dev)
2977 return skb->len;
2978
2979 parent = tcm->tcm_parent;
2980 if (!parent) {
Olivier Deprez157378f2022-04-04 15:47:50 +02002981 q = rtnl_dereference(dev->qdisc);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00002982 parent = q->handle;
2983 } else {
2984 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2985 }
2986 if (!q)
2987 goto out;
2988 cops = q->ops->cl_ops;
2989 if (!cops)
2990 goto out;
2991 if (!cops->tcf_block)
2992 goto out;
2993 if (TC_H_MIN(tcm->tcm_parent)) {
2994 cl = cops->find(q, tcm->tcm_parent);
2995 if (cl == 0)
2996 goto out;
2997 }
2998 block = cops->tcf_block(q, cl, NULL);
2999 if (!block)
3000 goto out;
3001 if (tcf_block_shared(block))
3002 q = NULL;
3003 }
3004
3005 index_start = cb->args[0];
3006 index = 0;
3007
David Brazdil0f672f62019-12-10 10:32:29 +00003008 mutex_lock(&block->lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003009 list_for_each_entry(chain, &block->chain_list, list) {
3010 if ((tca[TCA_CHAIN] &&
3011 nla_get_u32(tca[TCA_CHAIN]) != chain->index))
3012 continue;
3013 if (index < index_start) {
3014 index++;
3015 continue;
3016 }
3017 if (tcf_chain_held_by_acts_only(chain))
3018 continue;
David Brazdil0f672f62019-12-10 10:32:29 +00003019 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3020 chain->index, net, skb, block,
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003021 NETLINK_CB(cb->skb).portid,
3022 cb->nlh->nlmsg_seq, NLM_F_MULTI,
3023 RTM_NEWCHAIN);
3024 if (err <= 0)
3025 break;
3026 index++;
3027 }
David Brazdil0f672f62019-12-10 10:32:29 +00003028 mutex_unlock(&block->lock);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003029
David Brazdil0f672f62019-12-10 10:32:29 +00003030 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3031 tcf_block_refcnt_put(block, true);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003032 cb->args[0] = index;
3033
3034out:
3035	/* If we made no progress, the error (EMSGSIZE) is real */
3036 if (skb->len == 0 && err)
3037 return err;
3038 return skb->len;
3039}
3040
3041void tcf_exts_destroy(struct tcf_exts *exts)
3042{
3043#ifdef CONFIG_NET_CLS_ACT
David Brazdil0f672f62019-12-10 10:32:29 +00003044 if (exts->actions) {
3045 tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3046 kfree(exts->actions);
3047 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003048 exts->nr_actions = 0;
3049#endif
3050}
3051EXPORT_SYMBOL(tcf_exts_destroy);
3052
3053int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3054 struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
David Brazdil0f672f62019-12-10 10:32:29 +00003055 bool rtnl_held, struct netlink_ext_ack *extack)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003056{
3057#ifdef CONFIG_NET_CLS_ACT
3058 {
Olivier Deprez157378f2022-04-04 15:47:50 +02003059 int init_res[TCA_ACT_MAX_PRIO] = {};
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003060 struct tc_action *act;
3061 size_t attr_size = 0;
3062
3063 if (exts->police && tb[exts->police]) {
Olivier Deprez157378f2022-04-04 15:47:50 +02003064 struct tc_action_ops *a_o;
3065
3066 a_o = tc_action_load_ops("police", tb[exts->police], rtnl_held, extack);
3067 if (IS_ERR(a_o))
3068 return PTR_ERR(a_o);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003069 act = tcf_action_init_1(net, tp, tb[exts->police],
3070 rate_tlv, "police", ovr,
Olivier Deprez157378f2022-04-04 15:47:50 +02003071 TCA_ACT_BIND, a_o, init_res,
3072 rtnl_held, extack);
3073 module_put(a_o->owner);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003074 if (IS_ERR(act))
3075 return PTR_ERR(act);
3076
3077 act->type = exts->type = TCA_OLD_COMPAT;
3078 exts->actions[0] = act;
3079 exts->nr_actions = 1;
Olivier Deprez0e641232021-09-23 10:07:05 +02003080 tcf_idr_insert_many(exts->actions);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003081 } else if (exts->action && tb[exts->action]) {
3082 int err;
3083
3084 err = tcf_action_init(net, tp, tb[exts->action],
3085 rate_tlv, NULL, ovr, TCA_ACT_BIND,
Olivier Deprez157378f2022-04-04 15:47:50 +02003086 exts->actions, init_res,
3087 &attr_size, rtnl_held, extack);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003088 if (err < 0)
3089 return err;
3090 exts->nr_actions = err;
3091 }
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003092 }
3093#else
3094 if ((exts->action && tb[exts->action]) ||
3095 (exts->police && tb[exts->police])) {
3096 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3097 return -EOPNOTSUPP;
3098 }
3099#endif
3100
3101 return 0;
3102}
3103EXPORT_SYMBOL(tcf_exts_validate);
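/* Illustrative sketch (compiled out): how a classifier's ->change()
 * typically uses tcf_exts_validate() to turn its action/police attributes
 * into a populated tcf_exts. The filter type and attribute table layout
 * are assumptions.
 */
#if 0
static int example_set_parms(struct net *net, struct tcf_proto *tp,
			     struct example_filter *f, struct nlattr **tb,
			     struct nlattr *est, bool ovr,
			     struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, true, extack);
	if (err < 0)
		return err;
	/* ...parse the classifier-specific match keys here... */
	return 0;
}
#endif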
3104
3105void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3106{
3107#ifdef CONFIG_NET_CLS_ACT
3108 struct tcf_exts old = *dst;
3109
3110 *dst = *src;
3111 tcf_exts_destroy(&old);
3112#endif
3113}
3114EXPORT_SYMBOL(tcf_exts_change);
3115
3116#ifdef CONFIG_NET_CLS_ACT
3117static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3118{
3119 if (exts->nr_actions == 0)
3120 return NULL;
3121 else
3122 return exts->actions[0];
3123}
3124#endif
3125
3126int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3127{
3128#ifdef CONFIG_NET_CLS_ACT
3129 struct nlattr *nest;
3130
3131 if (exts->action && tcf_exts_has_actions(exts)) {
3132 /*
3133		 * again for backward-compatible mode - we want
3134		 * to work with both the old and new modes of entering
3135		 * tc data even if iproute2 is newer - jhs
3136 */
3137 if (exts->type != TCA_OLD_COMPAT) {
David Brazdil0f672f62019-12-10 10:32:29 +00003138 nest = nla_nest_start_noflag(skb, exts->action);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003139 if (nest == NULL)
3140 goto nla_put_failure;
3141
Olivier Deprez157378f2022-04-04 15:47:50 +02003142 if (tcf_action_dump(skb, exts->actions, 0, 0, false)
3143 < 0)
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003144 goto nla_put_failure;
3145 nla_nest_end(skb, nest);
3146 } else if (exts->police) {
3147 struct tc_action *act = tcf_exts_first_act(exts);
David Brazdil0f672f62019-12-10 10:32:29 +00003148 nest = nla_nest_start_noflag(skb, exts->police);
Andrew Scullb4b6d4a2019-01-02 15:54:55 +00003149 if (nest == NULL || !act)
3150 goto nla_put_failure;
3151 if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3152 goto nla_put_failure;
3153 nla_nest_end(skb, nest);
3154 }
3155 }
3156 return 0;
3157
3158nla_put_failure:
3159 nla_nest_cancel(skb, nest);
3160 return -1;
3161#else
3162 return 0;
3163#endif
3164}
3165EXPORT_SYMBOL(tcf_exts_dump);

int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (!exts->action || !tcf_exts_has_actions(exts))
		return 0;

	nest = nla_nest_start_noflag(skb, exts->action);
	if (!nest)
		goto nla_put_failure;

	if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_terse_dump);

int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *a = tcf_exts_first_act(exts);

	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
		return -1;
#endif
	return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);

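/* block->offloadcnt counts the filters on this block that are present in
 * hardware, while TCA_CLS_FLAGS_IN_HW mirrors that state in the individual
 * filter's flags; the two helpers below keep both in sync and must stay
 * balanced.
 */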
static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
{
	if (*flags & TCA_CLS_FLAGS_IN_HW)
		return;
	*flags |= TCA_CLS_FLAGS_IN_HW;
	atomic_inc(&block->offloadcnt);
}

static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
{
	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
		return;
	*flags &= ~TCA_CLS_FLAGS_IN_HW;
	atomic_dec(&block->offloadcnt);
}

static void tc_cls_offload_cnt_update(struct tcf_block *block,
				      struct tcf_proto *tp, u32 *cnt,
				      u32 *flags, u32 diff, bool add)
{
	lockdep_assert_held(&block->cb_lock);

	spin_lock(&tp->lock);
	if (add) {
		if (!*cnt)
			tcf_block_offload_inc(block, flags);
		*cnt += diff;
	} else {
		*cnt -= diff;
		if (!*cnt)
			tcf_block_offload_dec(block, flags);
	}
	spin_unlock(&tp->lock);
}

static void
tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
			 u32 *cnt, u32 *flags)
{
	lockdep_assert_held(&block->cb_lock);

	spin_lock(&tp->lock);
	tcf_block_offload_dec(block, flags);
	*cnt = 0;
	spin_unlock(&tp->lock);
}

static int
__tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		   void *type_data, bool err_stop)
{
	struct flow_block_cb *block_cb;
	int ok_count = 0;
	int err;

	list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err) {
			if (err_stop)
				return err;
		} else {
			ok_count++;
		}
	}
	return ok_count;
}

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);

	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count;
}
EXPORT_SYMBOL(tc_setup_cb_call);
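
/* Block callbacks are registered by drivers with the flow_setup_cb_t
 * signature invoked above. A minimal driver-side sketch (all foo_* names
 * are hypothetical):
 *
 *	static int foo_setup_tc_block_cb(enum tc_setup_type type,
 *					 void *type_data, void *cb_priv)
 *	{
 *		struct foo_priv *priv = cb_priv;
 *
 *		switch (type) {
 *		case TC_SETUP_CLSFLOWER:
 *			return foo_setup_cls_flower(priv, type_data);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */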

/* Non-destructive filter add. If filter that wasn't already in hardware is
 * successfully offloaded, increment block offloads counter. On failure,
 * previously offloaded filter is considered to be intact and offloads counter
 * is not decremented.
 */

int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop) {
		ok_count = -EOPNOTSUPP;
		goto err_unlock;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
	if (ok_count < 0)
		goto err_unlock;

	if (tp->ops->hw_add)
		tp->ops->hw_add(tp, type_data);
	if (ok_count > 0)
		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
					  ok_count, true);
err_unlock:
	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count < 0 ? ok_count : 0;
}
EXPORT_SYMBOL(tc_setup_cb_add);
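
/* Sketch of a classifier-side caller (assuming flower-like per-filter
 * flags and in_hw_count fields; error handling elided):
 *
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
 *			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
 */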

/* Destructive filter replace. If filter that wasn't already in hardware is
 * successfully offloaded, increment block offload counter. On failure,
 * previously offloaded filter is considered to be destroyed and offload counter
 * is decremented.
 */

int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop) {
		ok_count = -EOPNOTSUPP;
		goto err_unlock;
	}

	tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
	if (tp->ops->hw_del)
		tp->ops->hw_del(tp, type_data);

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
	if (ok_count < 0)
		goto err_unlock;

	if (tp->ops->hw_add)
		tp->ops->hw_add(tp, type_data);
	if (ok_count > 0)
		tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
					  new_flags, ok_count, true);
err_unlock:
	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count < 0 ? ok_count : 0;
}
EXPORT_SYMBOL(tc_setup_cb_replace);

/* Destroy filter and decrement block offload counter, if filter was previously
 * offloaded.
 */

int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);

	tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
	if (tp->ops->hw_del)
		tp->ops->hw_del(tp, type_data);

	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count < 0 ? ok_count : 0;
}
EXPORT_SYMBOL(tc_setup_cb_destroy);

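/* Replay a single filter to one callback, e.g. when a callback is being
 * registered on or unregistered from a block that already has filters
 * installed, and fold the result into the filter's in_hw accounting.
 */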
int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count)
{
	int err = cb(type, type_data, cb_priv);

	if (err) {
		if (add && tc_skip_sw(*flags))
			return err;
	} else {
		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
					  add);
	}

	return 0;
}
EXPORT_SYMBOL(tc_setup_cb_reoffload);

static int tcf_act_get_cookie(struct flow_action_entry *entry,
			      const struct tc_action *act)
{
	struct tc_cookie *cookie;
	int err = 0;

	rcu_read_lock();
	cookie = rcu_dereference(act->act_cookie);
	if (cookie) {
		entry->cookie = flow_action_cookie_create(cookie->data,
							  cookie->len,
							  GFP_ATOMIC);
		if (!entry->cookie)
			err = -ENOMEM;
	}
	rcu_read_unlock();
	return err;
}

static void tcf_act_put_cookie(struct flow_action_entry *entry)
{
	flow_action_cookie_destroy(entry->cookie);
}

void tc_cleanup_flow_action(struct flow_action *flow_action)
{
	struct flow_action_entry *entry;
	int i;

	flow_action_for_each(i, entry, flow_action) {
		tcf_act_put_cookie(entry);
		if (entry->destructor)
			entry->destructor(entry->destructor_priv);
	}
}
EXPORT_SYMBOL(tc_cleanup_flow_action);

static void tcf_mirred_get_dev(struct flow_action_entry *entry,
			       const struct tc_action *act)
{
#ifdef CONFIG_NET_CLS_ACT
	entry->dev = act->ops->get_dev(act, &entry->destructor);
	if (!entry->dev)
		return;
	entry->destructor_priv = entry->dev;
#endif
}

static void tcf_tunnel_encap_put_tunnel(void *priv)
{
	struct ip_tunnel_info *tunnel = priv;

	kfree(tunnel);
}

static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
				       const struct tc_action *act)
{
	entry->tunnel = tcf_tunnel_info_copy(act);
	if (!entry->tunnel)
		return -ENOMEM;
	entry->destructor = tcf_tunnel_encap_put_tunnel;
	entry->destructor_priv = entry->tunnel;
	return 0;
}

static void tcf_sample_get_group(struct flow_action_entry *entry,
				 const struct tc_action *act)
{
#ifdef CONFIG_NET_CLS_ACT
	entry->sample.psample_group =
		act->ops->get_psample_group(act, &entry->destructor);
	entry->destructor_priv = entry->sample.psample_group;
#endif
}

static void tcf_gate_entry_destructor(void *priv)
{
	struct action_gate_entry *oe = priv;

	kfree(oe);
}

static int tcf_gate_get_entries(struct flow_action_entry *entry,
				const struct tc_action *act)
{
	entry->gate.entries = tcf_gate_get_list(act);

	if (!entry->gate.entries)
		return -EINVAL;

	entry->destructor = tcf_gate_entry_destructor;
	entry->destructor_priv = entry->gate.entries;

	return 0;
}

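/* TCA_ACT_HW_STATS_* values map one-to-one onto FLOW_ACTION_HW_STATS_*
 * (enforced by the BUILD_BUG_ONs in tc_setup_flow_action() below), so only
 * the out-of-range and "unset" cases need translating.
 */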
static enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats)
{
	if (WARN_ON_ONCE(hw_stats > TCA_ACT_HW_STATS_ANY))
		return FLOW_ACTION_HW_STATS_DONT_CARE;
	else if (!hw_stats)
		return FLOW_ACTION_HW_STATS_DISABLED;

	return hw_stats;
}

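/* Translate the actions of a tcf_exts into a flow_action table for offload.
 * Each action is snapshotted under its tcfa_lock, which is why the cookie
 * copy above uses GFP_ATOMIC. A pedit action expands into one entry per
 * key, so callers must size the table via tcf_exts_num_actions().
 */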
int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts)
{
	struct tc_action *act;
	int i, j, k, err = 0;

	BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
	BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
	BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);

	if (!exts)
		return 0;

	j = 0;
	tcf_exts_for_each_action(i, act, exts) {
		struct flow_action_entry *entry;

		entry = &flow_action->entries[j];
		spin_lock_bh(&act->tcfa_lock);
		err = tcf_act_get_cookie(entry, act);
		if (err)
			goto err_out_locked;

		entry->hw_stats = tc_act_hw_stats(act->hw_stats);

		if (is_tcf_gact_ok(act)) {
			entry->id = FLOW_ACTION_ACCEPT;
		} else if (is_tcf_gact_shot(act)) {
			entry->id = FLOW_ACTION_DROP;
		} else if (is_tcf_gact_trap(act)) {
			entry->id = FLOW_ACTION_TRAP;
		} else if (is_tcf_gact_goto_chain(act)) {
			entry->id = FLOW_ACTION_GOTO;
			entry->chain_index = tcf_gact_goto_chain_index(act);
		} else if (is_tcf_mirred_egress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_egress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT_INGRESS;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED_INGRESS;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_vlan(act)) {
			switch (tcf_vlan_action(act)) {
			case TCA_VLAN_ACT_PUSH:
				entry->id = FLOW_ACTION_VLAN_PUSH;
				entry->vlan.vid = tcf_vlan_push_vid(act);
				entry->vlan.proto = tcf_vlan_push_proto(act);
				entry->vlan.prio = tcf_vlan_push_prio(act);
				break;
			case TCA_VLAN_ACT_POP:
				entry->id = FLOW_ACTION_VLAN_POP;
				break;
			case TCA_VLAN_ACT_MODIFY:
				entry->id = FLOW_ACTION_VLAN_MANGLE;
				entry->vlan.vid = tcf_vlan_push_vid(act);
				entry->vlan.proto = tcf_vlan_push_proto(act);
				entry->vlan.prio = tcf_vlan_push_prio(act);
				break;
			default:
				err = -EOPNOTSUPP;
				goto err_out_locked;
			}
		} else if (is_tcf_tunnel_set(act)) {
			entry->id = FLOW_ACTION_TUNNEL_ENCAP;
			err = tcf_tunnel_encap_get_tunnel(entry, act);
			if (err)
				goto err_out_locked;
		} else if (is_tcf_tunnel_release(act)) {
			entry->id = FLOW_ACTION_TUNNEL_DECAP;
		} else if (is_tcf_pedit(act)) {
			for (k = 0; k < tcf_pedit_nkeys(act); k++) {
				switch (tcf_pedit_cmd(act, k)) {
				case TCA_PEDIT_KEY_EX_CMD_SET:
					entry->id = FLOW_ACTION_MANGLE;
					break;
				case TCA_PEDIT_KEY_EX_CMD_ADD:
					entry->id = FLOW_ACTION_ADD;
					break;
				default:
					err = -EOPNOTSUPP;
					goto err_out_locked;
				}
				entry->mangle.htype = tcf_pedit_htype(act, k);
				entry->mangle.mask = tcf_pedit_mask(act, k);
				entry->mangle.val = tcf_pedit_val(act, k);
				entry->mangle.offset = tcf_pedit_offset(act, k);
				entry->hw_stats = tc_act_hw_stats(act->hw_stats);
				entry = &flow_action->entries[++j];
			}
		} else if (is_tcf_csum(act)) {
			entry->id = FLOW_ACTION_CSUM;
			entry->csum_flags = tcf_csum_update_flags(act);
		} else if (is_tcf_skbedit_mark(act)) {
			entry->id = FLOW_ACTION_MARK;
			entry->mark = tcf_skbedit_mark(act);
		} else if (is_tcf_sample(act)) {
			entry->id = FLOW_ACTION_SAMPLE;
			entry->sample.trunc_size = tcf_sample_trunc_size(act);
			entry->sample.truncate = tcf_sample_truncate(act);
			entry->sample.rate = tcf_sample_rate(act);
			tcf_sample_get_group(entry, act);
		} else if (is_tcf_police(act)) {
			entry->id = FLOW_ACTION_POLICE;
			entry->police.burst = tcf_police_burst(act);
			entry->police.rate_bytes_ps =
				tcf_police_rate_bytes_ps(act);
			entry->police.mtu = tcf_police_tcfp_mtu(act);
			entry->police.index = act->tcfa_index;
		} else if (is_tcf_ct(act)) {
			entry->id = FLOW_ACTION_CT;
			entry->ct.action = tcf_ct_action(act);
			entry->ct.zone = tcf_ct_zone(act);
			entry->ct.flow_table = tcf_ct_ft(act);
		} else if (is_tcf_mpls(act)) {
			switch (tcf_mpls_action(act)) {
			case TCA_MPLS_ACT_PUSH:
				entry->id = FLOW_ACTION_MPLS_PUSH;
				entry->mpls_push.proto = tcf_mpls_proto(act);
				entry->mpls_push.label = tcf_mpls_label(act);
				entry->mpls_push.tc = tcf_mpls_tc(act);
				entry->mpls_push.bos = tcf_mpls_bos(act);
				entry->mpls_push.ttl = tcf_mpls_ttl(act);
				break;
			case TCA_MPLS_ACT_POP:
				entry->id = FLOW_ACTION_MPLS_POP;
				entry->mpls_pop.proto = tcf_mpls_proto(act);
				break;
			case TCA_MPLS_ACT_MODIFY:
				entry->id = FLOW_ACTION_MPLS_MANGLE;
				entry->mpls_mangle.label = tcf_mpls_label(act);
				entry->mpls_mangle.tc = tcf_mpls_tc(act);
				entry->mpls_mangle.bos = tcf_mpls_bos(act);
				entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
				break;
			default:
				err = -EOPNOTSUPP;
				goto err_out_locked;
			}
		} else if (is_tcf_skbedit_ptype(act)) {
			entry->id = FLOW_ACTION_PTYPE;
			entry->ptype = tcf_skbedit_ptype(act);
		} else if (is_tcf_skbedit_priority(act)) {
			entry->id = FLOW_ACTION_PRIORITY;
			entry->priority = tcf_skbedit_priority(act);
		} else if (is_tcf_gate(act)) {
			entry->id = FLOW_ACTION_GATE;
			entry->gate.index = tcf_gate_index(act);
			entry->gate.prio = tcf_gate_prio(act);
			entry->gate.basetime = tcf_gate_basetime(act);
			entry->gate.cycletime = tcf_gate_cycletime(act);
			entry->gate.cycletimeext = tcf_gate_cycletimeext(act);
			entry->gate.num_entries = tcf_gate_num_entries(act);
			err = tcf_gate_get_entries(entry, act);
			if (err)
				goto err_out_locked;
		} else {
			err = -EOPNOTSUPP;
			goto err_out_locked;
		}
		spin_unlock_bh(&act->tcfa_lock);

		if (!is_tcf_pedit(act))
			j++;
	}

err_out:
	if (err)
		tc_cleanup_flow_action(flow_action);

	return err;
err_out_locked:
	spin_unlock_bh(&act->tcfa_lock);
	goto err_out;
}
EXPORT_SYMBOL(tc_setup_flow_action);

unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
{
	unsigned int num_acts = 0;
	struct tc_action *act;
	int i;

	tcf_exts_for_each_action(i, act, exts) {
		if (is_tcf_pedit(act))
			num_acts += tcf_pedit_nkeys(act);
		else
			num_acts++;
	}
	return num_acts;
}
EXPORT_SYMBOL(tcf_exts_num_actions);
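
/* A typical offload path sizes the table with tcf_exts_num_actions()
 * before translating, roughly (sketch, error handling elided):
 *
 *	rule = flow_rule_alloc(tcf_exts_num_actions(exts));
 *	...
 *	err = tc_setup_flow_action(&rule->action, exts);
 */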

#ifdef CONFIG_NET_CLS_ACT
static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
					u32 *p_block_index,
					struct netlink_ext_ack *extack)
{
	*p_block_index = nla_get_u32(block_index_attr);
	if (!*p_block_index) {
		NL_SET_ERR_MSG(extack, "Block number may not be zero");
		return -EINVAL;
	}

	return 0;
}

int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
		    enum flow_block_binder_type binder_type,
		    struct nlattr *block_index_attr,
		    struct netlink_ext_ack *extack)
{
	u32 block_index;
	int err;

	if (!block_index_attr)
		return 0;

	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
	if (err)
		return err;

	if (!block_index)
		return 0;

	qe->info.binder_type = binder_type;
	qe->info.chain_head_change = tcf_chain_head_change_dflt;
	qe->info.chain_head_change_priv = &qe->filter_chain;
	qe->info.block_index = block_index;

	return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
}
EXPORT_SYMBOL(tcf_qevent_init);

void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
{
	if (qe->info.block_index)
		tcf_block_put_ext(qe->block, sch, &qe->info);
}
EXPORT_SYMBOL(tcf_qevent_destroy);

int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
			       struct netlink_ext_ack *extack)
{
	u32 block_index;
	int err;

	if (!block_index_attr)
		return 0;

	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
	if (err)
		return err;

	/* Bounce newly-configured block or change in block. */
	if (block_index != qe->info.block_index) {
		NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(tcf_qevent_validate_change);

struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
				  struct sk_buff **to_free, int *ret)
{
	struct tcf_result cl_res;
	struct tcf_proto *fl;

	if (!qe->info.block_index)
		return skb;

	fl = rcu_dereference_bh(qe->filter_chain);

	switch (tcf_classify(skb, fl, &cl_res, false)) {
	case TC_ACT_SHOT:
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		*ret = __NET_XMIT_BYPASS;
		return NULL;
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		__qdisc_drop(skb, to_free);
		*ret = __NET_XMIT_STOLEN;
		return NULL;
	case TC_ACT_REDIRECT:
		skb_do_redirect(skb);
		*ret = __NET_XMIT_STOLEN;
		return NULL;
	}

	return skb;
}
EXPORT_SYMBOL(tcf_qevent_handle);
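
/* Sketch of a qdisc enqueue path consuming a qevent (qe_early_drop is a
 * hypothetical field name); a NULL return means the skb was dropped or
 * stolen and *ret carries the code to propagate:
 *
 *	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
 *	if (!skb)
 *		return NET_XMIT_CN | ret;
 */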

int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
{
	if (!qe->info.block_index)
		return 0;
	return nla_put_u32(skb, attr_name, qe->info.block_index);
}
EXPORT_SYMBOL(tcf_qevent_dump);
#endif

static __net_init int tcf_net_init(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock_init(&tn->idr_lock);
	idr_init(&tn->idr);
	return 0;
}

static void __net_exit tcf_net_exit(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_destroy(&tn->idr);
}

static struct pernet_operations tcf_net_ops = {
	.init = tcf_net_init,
	.exit = tcf_net_exit,
	.id = &tcf_net_id,
	.size = sizeof(struct tcf_net),
};
static int __init tc_filter_init(void)
{
	int err;

	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
	if (!tc_filter_wq)
		return -ENOMEM;

	err = register_pernet_subsys(&tcf_net_ops);
	if (err)
		goto err_register_pernet_subsys;

	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
		      tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
		      tc_dump_chain, 0);

	return 0;

err_register_pernet_subsys:
	destroy_workqueue(tc_filter_wq);
	return err;
}

subsys_initcall(tc_filter_init);