// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_api.c	Packet action API.
 *
 * Author:	Jamal Hadi Salim
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netlink.h>

static void tcf_action_goto_chain_exec(const struct tc_action *a,
				       struct tcf_result *res)
{
	const struct tcf_chain *chain = rcu_dereference_bh(a->goto_chain);

	res->goto_tp = rcu_dereference_bh(chain->filter_chain);
}

static void tcf_free_cookie_rcu(struct rcu_head *p)
{
	struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu);

	kfree(cookie->data);
	kfree(cookie);
}

static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
				  struct tc_cookie *new_cookie)
{
	struct tc_cookie *old;

	old = xchg((__force struct tc_cookie **)old_cookie, new_cookie);
	if (old)
		call_rcu(&old->rcu, tcf_free_cookie_rcu);
}

int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
			     struct tcf_chain **newchain,
			     struct netlink_ext_ack *extack)
{
	int opcode = TC_ACT_EXT_OPCODE(action), ret = -EINVAL;
	u32 chain_index;

	if (!opcode)
		ret = action > TC_ACT_VALUE_MAX ? -EINVAL : 0;
	else if (opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC)
		ret = 0;
	if (ret) {
		NL_SET_ERR_MSG(extack, "invalid control action");
		goto end;
	}

	if (TC_ACT_EXT_CMP(action, TC_ACT_GOTO_CHAIN)) {
		chain_index = action & TC_ACT_EXT_VAL_MASK;
		if (!tp || !newchain) {
			ret = -EINVAL;
			NL_SET_ERR_MSG(extack,
				       "can't goto NULL proto/chain");
			goto end;
		}
		*newchain = tcf_chain_get_by_act(tp->chain->block, chain_index);
		if (!*newchain) {
			ret = -ENOMEM;
			NL_SET_ERR_MSG(extack,
				       "can't allocate goto_chain");
		}
	}
end:
	return ret;
}
EXPORT_SYMBOL(tcf_action_check_ctrlact);

struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
					 struct tcf_chain *goto_chain)
{
	a->tcfa_action = action;
	goto_chain = rcu_replace_pointer(a->goto_chain, goto_chain, 1);
	return goto_chain;
}
EXPORT_SYMBOL(tcf_action_set_ctrlact);
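
/*
 * Illustrative sketch (not part of the original file): the typical way an
 * action's ->init() callback is expected to combine the two helpers above.
 * Validate the requested control action first, then publish it together with
 * the new goto_chain and release the chain it displaced. The function name
 * and the "tca_action" parameter are hypothetical placeholders.
 */
#if 0
static int example_init_ctrlact(struct tc_action *a, int tca_action,
				struct tcf_proto *tp,
				struct netlink_ext_ack *extack)
{
	struct tcf_chain *goto_ch = NULL;
	int err;

	err = tcf_action_check_ctrlact(tca_action, tp, &goto_ch, extack);
	if (err < 0)
		return err;
	/* Publish the new control action; put the chain it replaced. */
	goto_ch = tcf_action_set_ctrlact(a, tca_action, goto_ch);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	return 0;
}
#endif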

/* XXX: For standalone actions, we don't need an RCU grace period either,
 * because actions are always connected to filters and filters are already
 * destroyed in RCU callbacks, so after an RCU grace period actions are
 * already disconnected from filters. Readers later cannot find us.
 */
static void free_tcf(struct tc_action *p)
{
	struct tcf_chain *chain = rcu_dereference_protected(p->goto_chain, 1);

	free_percpu(p->cpu_bstats);
	free_percpu(p->cpu_bstats_hw);
	free_percpu(p->cpu_qstats);

	tcf_set_action_cookie(&p->act_cookie, NULL);
	if (chain)
		tcf_chain_put_by_act(chain);

	kfree(p);
}

static void tcf_action_cleanup(struct tc_action *p)
{
	if (p->ops->cleanup)
		p->ops->cleanup(p);

	gen_kill_estimator(&p->tcfa_rate_est);
	free_tcf(p);
}

static int __tcf_action_put(struct tc_action *p, bool bind)
{
	struct tcf_idrinfo *idrinfo = p->idrinfo;

	if (refcount_dec_and_mutex_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
		if (bind)
			atomic_dec(&p->tcfa_bindcnt);
		idr_remove(&idrinfo->action_idr, p->tcfa_index);
		mutex_unlock(&idrinfo->lock);

		tcf_action_cleanup(p);
		return 1;
	}

	if (bind)
		atomic_dec(&p->tcfa_bindcnt);

	return 0;
}

static int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
{
	int ret = 0;

	/* Release with strict==1 and bind==0 is only called through the act
	 * API interface (classifiers always bind). The only case when an
	 * action with a positive reference count and a zero bind count can
	 * exist is when it was also created through the act API (unbinding
	 * the last classifier destroys the action if it was created by a
	 * classifier). So the only case when the bind count can change after
	 * the initial check is when an unbound action is destroyed by the act
	 * API while a classifier concurrently binds to an action with the
	 * same id. This results either in creation of a new action (same
	 * behavior as before), or in reuse of the existing action if the
	 * concurrent process increments the reference count before the action
	 * is deleted. Both scenarios are acceptable.
	 */
	if (p) {
		if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0)
			return -EPERM;

		if (__tcf_action_put(p, bind))
			ret = ACT_P_DELETED;
	}

	return ret;
}

int tcf_idr_release(struct tc_action *a, bool bind)
{
	const struct tc_action_ops *ops = a->ops;
	int ret;

	ret = __tcf_idr_release(a, bind, false);
	if (ret == ACT_P_DELETED)
		module_put(ops->owner);
	return ret;
}
EXPORT_SYMBOL(tcf_idr_release);

static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
{
	struct tc_cookie *act_cookie;
	u32 cookie_len = 0;

	rcu_read_lock();
	act_cookie = rcu_dereference(act->act_cookie);

	if (act_cookie)
		cookie_len = nla_total_size(act_cookie->len);
	rcu_read_unlock();

	return  nla_total_size(0) /* action number nested */
		+ nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
		+ cookie_len /* TCA_ACT_COOKIE */
		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_HW_STATS */
		+ nla_total_size(0) /* TCA_ACT_STATS nested */
		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_FLAGS */
		/* TCA_STATS_BASIC */
		+ nla_total_size_64bit(sizeof(struct gnet_stats_basic))
		/* TCA_STATS_PKT64 */
		+ nla_total_size_64bit(sizeof(u64))
		/* TCA_STATS_QUEUE */
		+ nla_total_size_64bit(sizeof(struct gnet_stats_queue))
		+ nla_total_size(0) /* TCA_OPTIONS nested */
		+ nla_total_size(sizeof(struct tcf_t)); /* TCA_GACT_TM */
}

static size_t tcf_action_full_attrs_size(size_t sz)
{
	return NLMSG_HDRLEN			/* struct nlmsghdr */
		+ sizeof(struct tcamsg)
		+ nla_total_size(0)		/* TCA_ACT_TAB nested */
		+ sz;
}

static size_t tcf_action_fill_size(const struct tc_action *act)
{
	size_t sz = tcf_action_shared_attrs_size(act);

	if (act->ops->get_fill_size)
		return act->ops->get_fill_size(act) + sz;
	return sz;
}

static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			   struct netlink_callback *cb)
{
	int err = 0, index = -1, s_i = 0, n_i = 0;
	u32 act_flags = cb->args[2];
	unsigned long jiffy_since = cb->args[3];
	struct nlattr *nest;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;
	unsigned long tmp;

	mutex_lock(&idrinfo->lock);

	s_i = cb->args[0];

	idr_for_each_entry_ul(idr, p, tmp, id) {
		index++;
		if (index < s_i)
			continue;
		if (IS_ERR(p))
			continue;

		if (jiffy_since &&
		    time_after(jiffy_since,
			       (unsigned long)p->tcfa_tm.lastuse))
			continue;

		nest = nla_nest_start_noflag(skb, n_i);
		if (!nest) {
			index--;
			goto nla_put_failure;
		}
		err = tcf_action_dump_1(skb, p, 0, 0);
		if (err < 0) {
			index--;
			nlmsg_trim(skb, nest);
			goto done;
		}
		nla_nest_end(skb, nest);
		n_i++;
		if (!(act_flags & TCA_FLAG_LARGE_DUMP_ON) &&
		    n_i >= TCA_ACT_MAX_PRIO)
			goto done;
	}
done:
	if (index >= 0)
		cb->args[0] = index + 1;

	mutex_unlock(&idrinfo->lock);
	if (n_i) {
		if (act_flags & TCA_FLAG_LARGE_DUMP_ON)
			cb->args[1] = n_i;
	}
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}

static int tcf_idr_release_unsafe(struct tc_action *p)
{
	if (atomic_read(&p->tcfa_bindcnt) > 0)
		return -EPERM;

	if (refcount_dec_and_test(&p->tcfa_refcnt)) {
		idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
		tcf_action_cleanup(p);
		return ACT_P_DELETED;
	}

	return 0;
}

static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			  const struct tc_action_ops *ops)
{
	struct nlattr *nest;
	int n_i = 0;
	int ret = -EINVAL;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;
	unsigned long tmp;

	nest = nla_nest_start_noflag(skb, 0);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_string(skb, TCA_KIND, ops->kind))
		goto nla_put_failure;

	mutex_lock(&idrinfo->lock);
	idr_for_each_entry_ul(idr, p, tmp, id) {
		if (IS_ERR(p))
			continue;
		ret = tcf_idr_release_unsafe(p);
		if (ret == ACT_P_DELETED) {
			module_put(ops->owner);
			n_i++;
		} else if (ret < 0) {
			mutex_unlock(&idrinfo->lock);
			goto nla_put_failure;
		}
	}
	mutex_unlock(&idrinfo->lock);

	ret = nla_put_u32(skb, TCA_FCNT, n_i);
	if (ret)
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return n_i;
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return ret;
}

int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
		       struct netlink_callback *cb, int type,
		       const struct tc_action_ops *ops,
		       struct netlink_ext_ack *extack)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	if (type == RTM_DELACTION) {
		return tcf_del_walker(idrinfo, skb, ops);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(idrinfo, skb, cb);
	} else {
		WARN(1, "tcf_generic_walker: unknown command %d\n", type);
		NL_SET_ERR_MSG(extack, "tcf_generic_walker: unknown command");
		return -EINVAL;
	}
}
EXPORT_SYMBOL(tcf_generic_walker);

int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p;

	mutex_lock(&idrinfo->lock);
	p = idr_find(&idrinfo->action_idr, index);
	if (IS_ERR(p))
		p = NULL;
	else if (p)
		refcount_inc(&p->tcfa_refcnt);
	mutex_unlock(&idrinfo->lock);

	if (p) {
		*a = p;
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcf_idr_search);

static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
{
	struct tc_action *p;
	int ret = 0;

	mutex_lock(&idrinfo->lock);
	p = idr_find(&idrinfo->action_idr, index);
	if (!p) {
		mutex_unlock(&idrinfo->lock);
		return -ENOENT;
	}

	if (!atomic_read(&p->tcfa_bindcnt)) {
		if (refcount_dec_and_test(&p->tcfa_refcnt)) {
			struct module *owner = p->ops->owner;

			WARN_ON(p != idr_remove(&idrinfo->action_idr,
						p->tcfa_index));
			mutex_unlock(&idrinfo->lock);

			tcf_action_cleanup(p);
			module_put(owner);
			return 0;
		}
		ret = 0;
	} else {
		ret = -EPERM;
	}

	mutex_unlock(&idrinfo->lock);
	return ret;
}

int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
		   struct tc_action **a, const struct tc_action_ops *ops,
		   int bind, bool cpustats, u32 flags)
{
	struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	int err = -ENOMEM;

	if (unlikely(!p))
		return -ENOMEM;
	refcount_set(&p->tcfa_refcnt, 1);
	if (bind)
		atomic_set(&p->tcfa_bindcnt, 1);

	if (cpustats) {
		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!p->cpu_bstats)
			goto err1;
		p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!p->cpu_bstats_hw)
			goto err2;
		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!p->cpu_qstats)
			goto err3;
	}
	spin_lock_init(&p->tcfa_lock);
	p->tcfa_index = index;
	p->tcfa_tm.install = jiffies;
	p->tcfa_tm.lastuse = jiffies;
	p->tcfa_tm.firstuse = 0;
	p->tcfa_flags = flags;
	if (est) {
		err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
					&p->tcfa_rate_est,
					&p->tcfa_lock, NULL, est);
		if (err)
			goto err4;
	}

	p->idrinfo = idrinfo;
	__module_get(ops->owner);
	p->ops = ops;
	*a = p;
	return 0;
err4:
	free_percpu(p->cpu_qstats);
err3:
	free_percpu(p->cpu_bstats_hw);
err2:
	free_percpu(p->cpu_bstats);
err1:
	kfree(p);
	return err;
}
EXPORT_SYMBOL(tcf_idr_create);

int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
			      struct nlattr *est, struct tc_action **a,
			      const struct tc_action_ops *ops, int bind,
			      u32 flags)
{
	/* Set cpustats according to the action's flags. */
	return tcf_idr_create(tn, index, est, a, ops, bind,
			      !(flags & TCA_ACT_FLAGS_NO_PERCPU_STATS), flags);
}
EXPORT_SYMBOL(tcf_idr_create_from_flags);

/* Clean up an idr index that was allocated but not initialized. */

void tcf_idr_cleanup(struct tc_action_net *tn, u32 index)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	mutex_lock(&idrinfo->lock);
	/* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
	WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index)));
	mutex_unlock(&idrinfo->lock);
}
EXPORT_SYMBOL(tcf_idr_cleanup);

/* Check if an action with the specified index exists. If the action is found,
 * increment its reference and bind counters and return 1. Otherwise insert a
 * temporary error pointer (to prevent concurrent users from inserting actions
 * with the same index) and return 0.
 */

int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
			struct tc_action **a, int bind)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p;
	int ret;

again:
	mutex_lock(&idrinfo->lock);
	if (*index) {
		p = idr_find(&idrinfo->action_idr, *index);
		if (IS_ERR(p)) {
			/* This means that another process allocated the
			 * index but did not assign the pointer yet.
			 */
			mutex_unlock(&idrinfo->lock);
			goto again;
		}

		if (p) {
			refcount_inc(&p->tcfa_refcnt);
			if (bind)
				atomic_inc(&p->tcfa_bindcnt);
			*a = p;
			ret = 1;
		} else {
			*a = NULL;
			ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
					    *index, GFP_KERNEL);
			if (!ret)
				idr_replace(&idrinfo->action_idr,
					    ERR_PTR(-EBUSY), *index);
		}
	} else {
		*index = 1;
		*a = NULL;
		ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
				    UINT_MAX, GFP_KERNEL);
		if (!ret)
			idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY),
				    *index);
	}
	mutex_unlock(&idrinfo->lock);
	return ret;
}
EXPORT_SYMBOL(tcf_idr_check_alloc);
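
/*
 * Illustrative sketch (not part of the original file): the lookup-or-create
 * dance an action's ->init() typically performs with the helpers above.
 * tcf_idr_check_alloc() either hands back an existing action (return 1) or
 * reserves the index with an ERR_PTR(-EBUSY) placeholder; on the reservation
 * path the caller must follow up with tcf_idr_create() or undo the
 * reservation with tcf_idr_cleanup(). The function name is a hypothetical
 * placeholder; "tn", "index" and "ops" are assumed to come from the caller.
 */
#if 0
static int example_lookup_or_create(struct tc_action_net *tn, u32 index,
				    struct nlattr *est, struct tc_action **a,
				    const struct tc_action_ops *ops, int bind)
{
	int err;

	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	if (err) /* found an existing action; counters already bumped */
		return 0;

	err = tcf_idr_create(tn, index, est, a, ops, bind, false, 0);
	if (err)
		tcf_idr_cleanup(tn, index); /* drop the ERR_PTR placeholder */
	return err;
}
#endif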

void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
			 struct tcf_idrinfo *idrinfo)
{
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	int ret;
	unsigned long id = 1;
	unsigned long tmp;

	idr_for_each_entry_ul(idr, p, tmp, id) {
		ret = __tcf_idr_release(p, false, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return;
	}
	idr_destroy(&idrinfo->action_idr);
}
EXPORT_SYMBOL(tcf_idrinfo_destroy);

static LIST_HEAD(act_base);
static DEFINE_RWLOCK(act_mod_lock);

int tcf_register_action(struct tc_action_ops *act,
			struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int ret;

	if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)
		return -EINVAL;

	/* We have to register pernet ops before making the action ops visible,
	 * otherwise tcf_action_init_1() could get a partially initialized
	 * netns.
	 */
	ret = register_pernet_subsys(ops);
	if (ret)
		return ret;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (act->id == a->id || (strcmp(act->kind, a->kind) == 0)) {
			write_unlock(&act_mod_lock);
			unregister_pernet_subsys(ops);
			return -EEXIST;
		}
	}
	list_add_tail(&act->head, &act_base);
	write_unlock(&act_mod_lock);

	return 0;
}
EXPORT_SYMBOL(tcf_register_action);

int tcf_unregister_action(struct tc_action_ops *act,
			  struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int err = -ENOENT;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (a == act) {
			list_del(&act->head);
			err = 0;
			break;
		}
	}
	write_unlock(&act_mod_lock);
	if (!err)
		unregister_pernet_subsys(ops);
	return err;
}
EXPORT_SYMBOL(tcf_unregister_action);
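
/*
 * Illustrative sketch (not part of the original file): how an action module
 * pairs tcf_register_action()/tcf_unregister_action() with its
 * pernet_operations in module init/exit. "example_act_ops" and
 * "example_net_ops" are hypothetical placeholders, not real kernel symbols;
 * a real module fills in .kind, .id, .act, .dump, .init, .walk and .lookup.
 */
#if 0
static struct tc_action_ops example_act_ops;	/* callbacks filled in */
static struct pernet_operations example_net_ops;

static int __init example_act_module_init(void)
{
	return tcf_register_action(&example_act_ops, &example_net_ops);
}

static void __exit example_act_module_exit(void)
{
	tcf_unregister_action(&example_act_ops, &example_net_ops);
}

module_init(example_act_module_init);
module_exit(example_act_module_exit);
#endif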

/* lookup by name */
static struct tc_action_ops *tc_lookup_action_n(char *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}

/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (nla_strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}

/* TCA_ACT_MAX_PRIO is 32, so a jump operand can count up to 32 actions */
#define TCA_ACT_MAX_PRIO_MASK 0x1FF
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
		    int nr_actions, struct tcf_result *res)
{
	u32 jmp_prgcnt = 0;
	u32 jmp_ttl = TCA_ACT_MAX_PRIO; /* matches actions per filter */
	int i;
	int ret = TC_ACT_OK;

	if (skb_skip_tc_classify(skb))
		return TC_ACT_OK;

restart_act_graph:
	for (i = 0; i < nr_actions; i++) {
		const struct tc_action *a = actions[i];
		int repeat_ttl;

		if (jmp_prgcnt > 0) {
			jmp_prgcnt -= 1;
			continue;
		}

		repeat_ttl = 32;
repeat:
		ret = a->ops->act(skb, a, res);

		if (unlikely(ret == TC_ACT_REPEAT)) {
			if (--repeat_ttl != 0)
				goto repeat;
			/* suspicious opcode, stop pipeline */
			net_warn_ratelimited("TC_ACT_REPEAT abuse ?\n");
			return TC_ACT_OK;
		}

		if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
			jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
			if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
				/* faulty opcode, stop pipeline */
				return TC_ACT_OK;
			} else {
				jmp_ttl -= 1;
				if (jmp_ttl > 0)
					goto restart_act_graph;
				else /* faulty graph, stop pipeline */
					return TC_ACT_OK;
			}
		} else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
			if (unlikely(!rcu_access_pointer(a->goto_chain))) {
				net_warn_ratelimited("can't go to NULL chain!\n");
				return TC_ACT_SHOT;
			}
			tcf_action_goto_chain_exec(a, res);
		}

		if (ret != TC_ACT_PIPE)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(tcf_action_exec);
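
/*
 * Illustrative sketch (not part of the original file): how a control verdict
 * with an embedded operand is encoded and decoded around tcf_action_exec().
 * An action returning TC_ACT_JUMP | 2 makes the loop above restart the graph
 * and skip the first two actions in the list; the operand is recovered with
 * TCA_ACT_MAX_PRIO_MASK exactly as done in the loop.
 */
#if 0
static int example_jump_verdict(void)
{
	int verdict = TC_ACT_JUMP | 2;	/* resume at the third action */

	if (TC_ACT_EXT_CMP(verdict, TC_ACT_JUMP))
		return verdict & TCA_ACT_MAX_PRIO_MASK;	/* -> 2 */
	return 0;
}
#endif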

int tcf_action_destroy(struct tc_action *actions[], int bind)
{
	const struct tc_action_ops *ops;
	struct tc_action *a;
	int ret = 0, i;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		a = actions[i];
		actions[i] = NULL;
		ops = a->ops;
		ret = __tcf_idr_release(a, bind, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return ret;
	}
	return ret;
}

static int tcf_action_put(struct tc_action *p)
{
	return __tcf_action_put(p, false);
}

/* Put all actions in this array, skipping NULL entries. */
static void tcf_action_put_many(struct tc_action *actions[])
{
	int i;

	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
		struct tc_action *a = actions[i];
		const struct tc_action_ops *ops;

		if (!a)
			continue;
		ops = a->ops;
		if (tcf_action_put(a))
			module_put(ops->owner);
	}
}

int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	return a->ops->dump(skb, a, bind, ref);
}

static int
tcf_action_dump_terse(struct sk_buff *skb, struct tc_action *a)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cookie *cookie;

	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
		goto nla_put_failure;
	if (tcf_action_copy_stats(skb, a, 0))
		goto nla_put_failure;

	rcu_read_lock();
	cookie = rcu_dereference(a->act_cookie);
	if (cookie) {
		if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
			rcu_read_unlock();
			goto nla_put_failure;
		}
	}
	rcu_read_unlock();

	return 0;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	if (tcf_action_dump_terse(skb, a))
		goto nla_put_failure;

	if (a->hw_stats != TCA_ACT_HW_STATS_ANY &&
	    nla_put_bitfield32(skb, TCA_ACT_HW_STATS,
			       a->hw_stats, TCA_ACT_HW_STATS_ANY))
		goto nla_put_failure;

	if (a->used_hw_stats_valid &&
	    nla_put_bitfield32(skb, TCA_ACT_USED_HW_STATS,
			       a->used_hw_stats, TCA_ACT_HW_STATS_ANY))
		goto nla_put_failure;

	if (a->tcfa_flags &&
	    nla_put_bitfield32(skb, TCA_ACT_FLAGS,
			       a->tcfa_flags, a->tcfa_flags))
		goto nla_put_failure;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	err = tcf_action_dump_old(skb, a, bind, ref);
	if (err > 0) {
		nla_nest_end(skb, nest);
		return err;
	}

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
EXPORT_SYMBOL(tcf_action_dump_1);

int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
		    int bind, int ref, bool terse)
{
	struct tc_action *a;
	int err = -EINVAL, i;
	struct nlattr *nest;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		a = actions[i];
		nest = nla_nest_start_noflag(skb, i + 1);
		if (nest == NULL)
			goto nla_put_failure;
		err = terse ? tcf_action_dump_terse(skb, a) :
			tcf_action_dump_1(skb, a, bind, ref);
		if (err < 0)
			goto errout;
		nla_nest_end(skb, nest);
	}

	return 0;

nla_put_failure:
	err = -EINVAL;
errout:
	nla_nest_cancel(skb, nest);
	return err;
}

static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
{
	struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;

	c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
	if (!c->data) {
		kfree(c);
		return NULL;
	}
	c->len = nla_len(tb[TCA_ACT_COOKIE]);

	return c;
}

static u8 tcf_action_hw_stats_get(struct nlattr *hw_stats_attr)
{
	struct nla_bitfield32 hw_stats_bf;

	/* If the user did not pass the attr, that means they do not care
	 * about the type. Return "any" in that case, which enables all
	 * supported types.
	 */
	if (!hw_stats_attr)
		return TCA_ACT_HW_STATS_ANY;
	hw_stats_bf = nla_get_bitfield32(hw_stats_attr);
	return hw_stats_bf.value;
}

static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
	[TCA_ACT_KIND]		= { .type = NLA_STRING },
	[TCA_ACT_INDEX]		= { .type = NLA_U32 },
	[TCA_ACT_COOKIE]	= { .type = NLA_BINARY,
				    .len = TC_COOKIE_MAX_SIZE },
	[TCA_ACT_OPTIONS]	= { .type = NLA_NESTED },
	[TCA_ACT_FLAGS]		= NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS),
	[TCA_ACT_HW_STATS]	= NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY),
};

void tcf_idr_insert_many(struct tc_action *actions[])
{
	int i;

	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
		struct tc_action *a = actions[i];
		struct tcf_idrinfo *idrinfo;

		if (!a)
			continue;
		idrinfo = a->idrinfo;
		mutex_lock(&idrinfo->lock);
		/* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc if
		 * the action was just created; otherwise this is a no-op.
		 */
		idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
		mutex_unlock(&idrinfo->lock);
	}
}

struct tc_action_ops *tc_action_load_ops(char *name, struct nlattr *nla,
					 bool rtnl_held,
					 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct tc_action_ops *a_o;
	char act_name[IFNAMSIZ];
	struct nlattr *kind;
	int err;

	if (name == NULL) {
		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
						  tcf_action_policy, extack);
		if (err < 0)
			return ERR_PTR(err);
		err = -EINVAL;
		kind = tb[TCA_ACT_KIND];
		if (!kind) {
			NL_SET_ERR_MSG(extack, "TC action kind must be specified");
			return ERR_PTR(err);
		}
		if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ) {
			NL_SET_ERR_MSG(extack, "TC action name too long");
			return ERR_PTR(err);
		}
	} else {
		if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ) {
			NL_SET_ERR_MSG(extack, "TC action name too long");
			return ERR_PTR(-EINVAL);
		}
	}

	a_o = tc_lookup_action_n(act_name);
	if (a_o == NULL) {
#ifdef CONFIG_MODULES
		if (rtnl_held)
			rtnl_unlock();
		request_module("act_%s", act_name);
		if (rtnl_held)
			rtnl_lock();

		a_o = tc_lookup_action_n(act_name);

		/* We dropped the RTNL semaphore in order to
		 * perform the module load. So, even if we
		 * succeeded in loading the module we have to
		 * tell the caller to replay the request. We
		 * indicate this using -EAGAIN.
		 */
		if (a_o != NULL) {
			module_put(a_o->owner);
			return ERR_PTR(-EAGAIN);
		}
#endif
		NL_SET_ERR_MSG(extack, "Failed to load TC action module");
		return ERR_PTR(-ENOENT);
	}

	return a_o;
}

struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
				    struct nlattr *nla, struct nlattr *est,
				    char *name, int ovr, int bind,
				    struct tc_action_ops *a_o, int *init_res,
				    bool rtnl_held,
				    struct netlink_ext_ack *extack)
{
	struct nla_bitfield32 flags = { 0, 0 };
	u8 hw_stats = TCA_ACT_HW_STATS_ANY;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct tc_cookie *cookie = NULL;
	struct tc_action *a;
	int err;

	/* backward compatibility for policer */
	if (name == NULL) {
		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
						  tcf_action_policy, extack);
		if (err < 0)
			return ERR_PTR(err);
		if (tb[TCA_ACT_COOKIE]) {
			cookie = nla_memdup_cookie(tb);
			if (!cookie) {
				NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
				err = -ENOMEM;
				goto err_out;
			}
		}
		hw_stats = tcf_action_hw_stats_get(tb[TCA_ACT_HW_STATS]);
		if (tb[TCA_ACT_FLAGS])
			flags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);

		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind,
				rtnl_held, tp, flags.value, extack);
	} else {
		err = a_o->init(net, nla, est, &a, ovr, bind, rtnl_held,
				tp, flags.value, extack);
	}
	if (err < 0)
		goto err_out;
	*init_res = err;

	if (!name && tb[TCA_ACT_COOKIE])
		tcf_set_action_cookie(&a->act_cookie, cookie);

	if (!name)
		a->hw_stats = hw_stats;

	return a;

err_out:
	if (cookie) {
		kfree(cookie->data);
		kfree(cookie);
	}
	return ERR_PTR(err);
}

/* Returns the number of initialized actions or a negative error. */

int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
		    struct nlattr *est, char *name, int ovr, int bind,
		    struct tc_action *actions[], int init_res[], size_t *attr_size,
		    bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tc_action_ops *ops[TCA_ACT_MAX_PRIO] = {};
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	size_t sz = 0;
	int err;
	int i;

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
					  extack);
	if (err < 0)
		return err;

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		struct tc_action_ops *a_o;

		a_o = tc_action_load_ops(name, tb[i], rtnl_held, extack);
		if (IS_ERR(a_o)) {
			err = PTR_ERR(a_o);
			goto err_mod;
		}
		ops[i - 1] = a_o;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind,
					ops[i - 1], &init_res[i - 1], rtnl_held,
					extack);
		if (IS_ERR(act)) {
			err = PTR_ERR(act);
			goto err;
		}
		sz += tcf_action_fill_size(act);
		/* Start from index 0 */
		actions[i - 1] = act;
	}

	/* We have to commit them all together, because if any error happened
	 * in between, we could not handle the failure gracefully.
	 */
	tcf_idr_insert_many(actions);

	*attr_size = tcf_action_full_attrs_size(sz);
	err = i - 1;
	goto err_mod;

err:
	tcf_action_destroy(actions, bind);
err_mod:
	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
		if (ops[i])
			module_put(ops[i]->owner);
	}
	return err;
}

void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, bool hw)
{
	if (a->cpu_bstats) {
		_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);

		this_cpu_ptr(a->cpu_qstats)->drops += drops;

		if (hw)
			_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
					   bytes, packets);
		return;
	}

	_bstats_update(&a->tcfa_bstats, bytes, packets);
	a->tcfa_qstats.drops += drops;
	if (hw)
		_bstats_update(&a->tcfa_bstats_hw, bytes, packets);
}
EXPORT_SYMBOL(tcf_action_update_stats);
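
/*
 * Illustrative sketch (not part of the original file): how an action's
 * ->act() callback typically feeds tcf_action_update_stats() before
 * returning its verdict. "example_act" is a hypothetical placeholder;
 * real actions (e.g. act_gact) follow the same shape.
 */
#if 0
static int example_act(struct sk_buff *skb, const struct tc_action *a,
		       struct tcf_result *res)
{
	int action = READ_ONCE(a->tcfa_action);

	/* one packet seen, no drops, stats not gathered in hardware */
	tcf_action_update_stats((struct tc_action *)a, qdisc_pkt_len(skb),
				1, 0, false);
	return action;
}
#endif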

int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
			  int compat_mode)
{
	int err = 0;
	struct gnet_dump d;

	if (p == NULL)
		goto errout;

	/* compat_mode being true specifies a call that is supposed
	 * to add additional backward compatibility statistic TLVs.
	 */
	if (compat_mode) {
		if (p->type == TCA_OLD_COMPAT)
			err = gnet_stats_start_copy_compat(skb, 0,
							   TCA_STATS,
							   TCA_XSTATS,
							   &p->tcfa_lock, &d,
							   TCA_PAD);
		else
			return 0;
	} else
		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
					    &p->tcfa_lock, &d, TCA_ACT_PAD);

	if (err < 0)
		goto errout;

	if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfa_bstats) < 0 ||
	    gnet_stats_copy_basic_hw(NULL, &d, p->cpu_bstats_hw,
				     &p->tcfa_bstats_hw) < 0 ||
	    gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, p->cpu_qstats,
				  &p->tcfa_qstats,
				  p->tcfa_qstats.qlen) < 0)
		goto errout;

	if (gnet_stats_finish_copy(&d) < 0)
		goto errout;

	return 0;

errout:
	return -1;
}

static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
			u32 portid, u32 seq, u16 flags, int event, int bind,
			int ref)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (!nest)
		goto out_nlmsg_trim;

	if (tcf_action_dump(skb, actions, bind, ref, false) < 0)
		goto out_nlmsg_trim;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
	nlmsg_trim(skb, b);
	return -1;
}

static int
tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
	       struct tc_action *actions[], int event,
	       struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;
	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
			 0, 1) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnl_unicast(skb, net, portid);
}

static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
					  struct nlmsghdr *n, u32 portid,
					  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct tc_action *a;
	int index;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
					  tcf_action_policy, extack);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	if (tb[TCA_ACT_INDEX] == NULL ||
	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) {
		NL_SET_ERR_MSG(extack, "Invalid TC action index value");
		goto err_out;
	}
	index = nla_get_u32(tb[TCA_ACT_INDEX]);

	err = -EINVAL;
	ops = tc_lookup_action(tb[TCA_ACT_KIND]);
	if (!ops) { /* could happen in a batch of actions */
		NL_SET_ERR_MSG(extack, "Specified TC action kind not found");
		goto err_out;
	}
	err = -ENOENT;
	if (ops->lookup(net, &a, index) == 0) {
		NL_SET_ERR_MSG(extack, "TC action with specified index not found");
		goto err_mod;
	}

	module_put(ops->owner);
	return a;

err_mod:
	module_put(ops->owner);
err_out:
	return ERR_PTR(err);
}

static int tca_action_flush(struct net *net, struct nlattr *nla,
			    struct nlmsghdr *n, u32 portid,
			    struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	unsigned char *b;
	struct nlmsghdr *nlh;
	struct tcamsg *t;
	struct netlink_callback dcb;
	struct nlattr *nest;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct nlattr *kind;
	int err = -ENOMEM;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return err;

	b = skb_tail_pointer(skb);

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
					  tcf_action_policy, extack);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	kind = tb[TCA_ACT_KIND];
	ops = tc_lookup_action(kind);
	if (!ops) { /* someone is trying to flush an unknown action */
		NL_SET_ERR_MSG(extack, "Cannot flush unknown TC action");
		goto err_out;
	}

	nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
			sizeof(*t), 0);
	if (!nlh) {
		NL_SET_ERR_MSG(extack, "Failed to create TC action flush notification");
		goto out_module_put;
	}
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (!nest) {
		NL_SET_ERR_MSG(extack, "Failed to add new netlink message");
		goto out_module_put;
	}

	err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops, extack);
	if (err <= 0) {
		nla_nest_cancel(skb, nest);
		goto out_module_put;
	}

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	nlh->nlmsg_flags |= NLM_F_ROOT;
	module_put(ops->owner);
	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		return 0;
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send TC action flush notification");

	return err;

out_module_put:
	module_put(ops->owner);
err_out:
	kfree_skb(skb);
	return err;
}

static int tcf_action_delete(struct net *net, struct tc_action *actions[])
{
	int i;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		struct tc_action *a = actions[i];
		const struct tc_action_ops *ops = a->ops;
		/* Actions can be deleted concurrently, so we must save their
		 * type and id to search again after the reference is released.
		 */
		struct tcf_idrinfo *idrinfo = a->idrinfo;
		u32 act_index = a->tcfa_index;

		actions[i] = NULL;
		if (tcf_action_put(a)) {
			/* last reference, action was deleted concurrently */
			module_put(ops->owner);
		} else {
			int ret;

			/* now do the delete */
			ret = tcf_idr_delete_index(idrinfo, act_index);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}

static int
tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
	int ret;
	struct sk_buff *skb;

	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
			GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
			 0, 2) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes");
		kfree_skb(skb);
		return -EINVAL;
	}

	/* now do the delete */
	ret = tcf_action_delete(net, actions);
	if (ret < 0) {
		NL_SET_ERR_MSG(extack, "Failed to delete TC action");
		kfree_skb(skb);
		return ret;
	}

	ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (ret > 0)
		return 0;
	return ret;
}

static int
tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
	      u32 portid, int event, struct netlink_ext_ack *extack)
{
	int i, ret;
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	size_t attr_size = 0;
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};

	ret = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
					  extack);
	if (ret < 0)
		return ret;

	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
		if (tb[1])
			return tca_action_flush(net, tb[1], n, portid, extack);

		NL_SET_ERR_MSG(extack, "Invalid netlink attributes while flushing TC action");
		return -EINVAL;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_get_1(net, tb[i], n, portid, extack);
		if (IS_ERR(act)) {
			ret = PTR_ERR(act);
			goto err;
		}
		attr_size += tcf_action_fill_size(act);
		actions[i - 1] = act;
	}

	attr_size = tcf_action_full_attrs_size(attr_size);

	if (event == RTM_GETACTION)
		ret = tcf_get_notify(net, portid, n, actions, event, extack);
	else { /* delete */
		ret = tcf_del_notify(net, n, actions, portid, attr_size, extack);
		if (ret)
			goto err;
		return 0;
	}
err:
	tcf_action_put_many(actions);
	return ret;
}

static int
tcf_add_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	int err = 0;

	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
			GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
			 RTM_NEWACTION, 0, 0) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		err = 0;
	return err;
}

static int tcf_action_add(struct net *net, struct nlattr *nla,
			  struct nlmsghdr *n, u32 portid, int ovr,
			  struct netlink_ext_ack *extack)
{
	size_t attr_size = 0;
	int loop, ret, i;
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
	int init_res[TCA_ACT_MAX_PRIO] = {};

	for (loop = 0; loop < 10; loop++) {
		ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0,
				      actions, init_res, &attr_size, true, extack);
		if (ret != -EAGAIN)
			break;
	}

	if (ret < 0)
		return ret;
	ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);

	/* only put existing actions */
	for (i = 0; i < TCA_ACT_MAX_PRIO; i++)
		if (init_res[i] == ACT_P_CREATED)
			actions[i] = NULL;
	tcf_action_put_many(actions);

	return ret;
}

static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
	[TCA_ROOT_FLAGS]	= NLA_POLICY_BITFIELD32(TCA_FLAG_LARGE_DUMP_ON),
	[TCA_ROOT_TIME_DELTA]	= { .type = NLA_U32 },
};

static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_ROOT_MAX + 1];
	u32 portid = NETLINK_CB(skb).portid;
	int ret = 0, ovr = 0;

	if ((n->nlmsg_type != RTM_GETACTION) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nlmsg_parse_deprecated(n, sizeof(struct tcamsg), tca,
				     TCA_ROOT_MAX, NULL, extack);
	if (ret < 0)
		return ret;

	if (tca[TCA_ACT_TAB] == NULL) {
		NL_SET_ERR_MSG(extack, "Netlink action attributes missing");
		return -EINVAL;
	}

	/* n->nlmsg_flags & NLM_F_CREATE */
	switch (n->nlmsg_type) {
	case RTM_NEWACTION:
		/* We are going to assume that all other flags imply create
		 * only if the action doesn't already exist. Note that
		 * CREATE | EXCL implies that, but since we want to avoid
		 * ambiguity (e.g. when flags is zero) we just set this here.
		 */
		if (n->nlmsg_flags & NLM_F_REPLACE)
			ovr = 1;
		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr,
				     extack);
		break;
	case RTM_DELACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_DELACTION, extack);
		break;
	case RTM_GETACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_GETACTION, extack);
		break;
	default:
		BUG();
	}

	return ret;
}

static struct nlattr *find_dump_kind(struct nlattr **nla)
{
	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct nlattr *kind;

	tb1 = nla[TCA_ACT_TAB];
	if (tb1 == NULL)
		return NULL;

	if (nla_parse_deprecated(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
				 NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)
		return NULL;

	if (tb[1] == NULL)
		return NULL;
	if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1],
					tcf_action_policy, NULL) < 0)
		return NULL;
	kind = tb2[TCA_ACT_KIND];

	return kind;
}

static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_action_ops *a_o;
	int ret = 0;
	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
	struct nlattr *tb[TCA_ROOT_MAX + 1];
	struct nlattr *count_attr = NULL;
	unsigned long jiffy_since = 0;
	struct nlattr *kind = NULL;
	struct nla_bitfield32 bf;
	u32 msecs_since = 0;
	u32 act_count = 0;

	ret = nlmsg_parse_deprecated(cb->nlh, sizeof(struct tcamsg), tb,
				     TCA_ROOT_MAX, tcaa_policy, cb->extack);
	if (ret < 0)
		return ret;

	kind = find_dump_kind(tb);
	if (kind == NULL) {
		pr_info("tc_dump_action: action bad kind\n");
		return 0;
	}

	a_o = tc_lookup_action(kind);
	if (a_o == NULL)
		return 0;

	cb->args[2] = 0;
	if (tb[TCA_ROOT_FLAGS]) {
		bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
		cb->args[2] = bf.value;
	}

	if (tb[TCA_ROOT_TIME_DELTA]) {
		msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);
	}

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*t), 0);
	if (!nlh)
		goto out_module_put;

	if (msecs_since)
		jiffy_since = jiffies - msecs_to_jiffies(msecs_since);

	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;
	cb->args[3] = jiffy_since;
	count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
	if (!count_attr)
		goto out_module_put;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_module_put;

	ret = a_o->walk(net, skb, cb, RTM_GETACTION, a_o, NULL);
	if (ret < 0)
		goto out_module_put;

	if (ret > 0) {
		nla_nest_end(skb, nest);
		ret = skb->len;
		act_count = cb->args[1];
		memcpy(nla_data(count_attr), &act_count, sizeof(u32));
		cb->args[1] = 0;
	} else
		nlmsg_trim(skb, b);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	if (NETLINK_CB(cb->skb).portid && ret)
		nlh->nlmsg_flags |= NLM_F_MULTI;
	module_put(a_o->owner);
	return skb->len;

out_module_put:
	module_put(a_o->owner);
	nlmsg_trim(skb, b);
	return skb->len;
}

static int __init tc_action_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
		      0);

	return 0;
}

subsys_initcall(tc_action_init);