/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/net_namespace.h>

/* TC action not accessible from user space */
#define TC_ACT_CONSUMED		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	bool	nonempty;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};
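
/* Illustrative sketch (not part of this header): a classifier's ->walk()
 * typically feeds each filter node to the caller-supplied callback while
 * honouring the @skip/@count bookkeeping.  "my_filter" and "my_head" are
 * hypothetical classifier-private types/helpers.
 *
 *	static void my_walk(struct tcf_proto *tp, struct tcf_walker *arg,
 *			    bool rtnl_held)
 *	{
 *		struct my_filter *f;
 *
 *		list_for_each_entry(f, &my_head(tp)->filters, link) {
 *			if (arg->count < arg->skip) {
 *				arg->count++;
 *				continue;
 *			}
 *			if (arg->fn(tp, f, arg) < 0) {
 *				arg->stop = 1;
 *				break;
 *			}
 *			arg->count++;
 *		}
 *	}
 */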

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

struct tcf_block_ext_info {
	enum flow_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
				     struct tcf_proto *tp, bool rtnl_held);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);
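
/* Illustrative sketch: a classful qdisc usually pairs tcf_block_get() in
 * its ->init() with tcf_block_put() in its ->destroy(); "q" stands for the
 * qdisc's hypothetical private data holding the block and chain pointers.
 *
 *	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	if (err)
 *		return err;
 *
 * and later, from ->destroy():
 *
 *	tcf_block_put(q->block);
 */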

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return block && block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);
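
/* Illustrative, simplified sketch of how an enqueue path consumes the
 * verdict; "q->filter_list" and "my_class" are hypothetical, and most
 * error handling is elided.
 *
 *	struct tcf_result res;
 *
 *	switch (tcf_classify(skb, rcu_dereference_bh(q->filter_list),
 *			     &res, false)) {
 *	case TC_ACT_SHOT:
 *		return qdisc_drop(skb, sch, to_free);
 *	default:
 *		cl = (struct my_class *)res.class;
 *	}
 */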

#else
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return false;
}

static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long old_cl;

	sch_tree_lock(q);
	old_cl = __cls_set_class(clp, cl);
	sch_tree_unlock(q);
	return old_cl;
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = cls_set_class(q, &r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	if (!q)
		return;
	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}
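
/* Illustrative sketch: a classifier binds the parsed classid in its change
 * path and drops the reference when the filter goes away.  TCA_FOO_CLASSID
 * is a placeholder for the classifier's own attribute.
 *
 *	if (tb[TCA_FOO_CLASSID]) {
 *		f->res.classid = nla_get_u32(tb[TCA_FOO_CLASSID]);
 *		tcf_bind_filter(tp, &f->res, base);
 *	}
 *
 * and on deletion/destruction:
 *
 *	tcf_unbind_filter(tp, &f->res);
 */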

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
	int	nr_actions;
	struct tc_action **actions;
	struct net	*net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
				int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = net;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
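
/* Illustrative sketch of the usual tcf_exts lifecycle in a classifier's
 * ->change() path; TCA_FOO_ACT/TCA_FOO_POLICE are placeholders for the
 * classifier's own attribute ids.
 *
 *	err = tcf_exts_init(&f->exts, net, TCA_FOO_ACT, TCA_FOO_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, true,
 *				extack);
 *	if (err < 0)
 *		tcf_exts_destroy(&f->exts);
 */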

/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise may race with
 * tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}
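
/* Illustrative sketch of the common deferred-free pattern: a filter's
 * extensions are torn down from a workqueue unless the netns is already
 * being destroyed, in which case cleanup must run synchronously.
 * "my_filter_work" and "my_filter_destroy" are hypothetical.
 *
 *	if (tcf_exts_get_net(&f->exts))
 *		tcf_queue_work(&f->rwork, my_filter_work);
 *	else
 *		my_filter_destroy(f);
 *
 * where the work function calls tcf_exts_put_net(&f->exts) when done.
 */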

#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif
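
/* Illustrative sketch: walking the attached actions.
 *
 *	struct tc_action *act;
 *	int i;
 *
 *	tcf_exts_for_each_action(i, act, exts)
 *		pr_debug("action %d: %s\n", i, act->ops->kind);
 */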

static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse, true);
	}

	preempt_enable();
#endif
}

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}
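
/* Illustrative sketch: a classifier's ->classify() typically finishes with
 * tcf_exts_exec() once a filter has matched; negative results mean "keep
 * looking at the next filter".
 *
 *	*res = f->res;
 *	ret = tcf_exts_exec(skb, &f->exts, res);
 *	if (ret < 0)
 *		continue;
 *	return ret;
 */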

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr, bool rtnl_held,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 *
 * @ptr: start of the packet data
 * @nexthdr: offset of the next header
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: the network namespace the ematch belongs to
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}
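
/* Simplified sketch of the core matching loop that relies on
 * tcf_em_early_end(); tcf_em_get_match() and tcf_em_match() stand in for
 * internal helpers of the ematch code.
 *
 *	for (i = 0; i < tree->hdr.nmatches; i++) {
 *		em = tcf_em_get_match(tree, i);
 *		res = tcf_em_match(skb, em, info);
 *		if (tcf_em_early_end(em, res))
 *			break;
 *	}
 */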

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches, respecting their logical relations, and returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, no ematches are
 * configured, or ematch is not enabled in the kernel; otherwise 0 is
 * returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}
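
/* Illustrative sketch of typical ematch usage in a classifier;
 * TCA_FOO_EMATCHES is a placeholder attribute.
 *
 *	err = tcf_em_tree_validate(tp, tb[TCA_FOO_EMATCHES], &f->ematches);
 *
 * then, in ->classify(), skipping non-matching filters:
 *
 *	if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 *		continue;
 *
 * and on destruction:
 *
 *	tcf_em_tree_destroy(&f->ematches);
 */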

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char *tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
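
/* Illustrative sketch: an ematch ->match() validating its access window
 * before touching packet bytes; "off", "len" and "pattern" are
 * hypothetical per-match configuration.
 *
 *	unsigned char *ptr = tcf_get_base_ptr(skb, TCF_LAYER_NETWORK);
 *
 *	if (!tcf_valid_offset(skb, ptr + off, len))
 *		return 0;
 *	return !memcmp(ptr + off, pattern, len);
 */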

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
		NL_SET_ERR_MSG(extack, "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}

int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts, bool rtnl_held);
void tc_cleanup_flow_action(struct flow_action *flow_action);

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held);
int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held);
int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct flow_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct flow_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}
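
/* Illustrative sketch: a driver's classifier offload handler commonly
 * starts with this gate; "my_setup_cls" is hypothetical.
 *
 *	static int my_setup_cls(struct net_device *dev,
 *				struct flow_cls_offload *f)
 *	{
 *		if (!tc_cls_can_offload_and_chain0(dev, &f->common))
 *			return -EOPNOTSUPP;
 *		...
 *	}
 */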

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio >> 16;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}
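
/* Illustrative sketch: a classifier preparing and issuing a hardware
 * offload request; error handling is abbreviated and "n" is a
 * hypothetical filter node.
 *
 *	struct tc_cls_u32_offload cls_u32 = {};
 *
 *	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
 *	cls_u32.command = TC_CLSU32_REPLACE_KNODE;
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSU32, &cls_u32,
 *			      tc_skip_sw(n->flags), &n->flags,
 *			      &n->in_hw_count, true);
 */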

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
	TC_CLSMATCHALL_STATS,
};

struct tc_cls_matchall_offload {
	struct flow_cls_common_offload common;
	enum tc_matchall_command command;
	struct flow_rule *rule;
	struct flow_stats stats;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct flow_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds the cookie that is passed from user space
 * to the kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
	TC_MQ_GRAFT,
};

struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		u32 child_handle;
	};
};
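
/* Illustrative sketch: a driver demultiplexing a RED offload request in
 * its TC_SETUP_QDISC_RED handler; the "my_red_*" helpers are hypothetical.
 *
 *	struct tc_red_qopt_offload *opt = type_data;
 *
 *	switch (opt->command) {
 *	case TC_RED_REPLACE:
 *		return my_red_replace(dev, opt->handle, &opt->set);
 *	case TC_RED_DESTROY:
 *		return my_red_destroy(dev, opt->handle);
 *	default:
 *		return -EOPNOTSUPP;
 *	}
 */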

enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_packed bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* If an offloaded prio qdisc is changed to a non-offloadable
	 * config, it needs to update the backlog & qlen values to negate
	 * the HW backlog & qlen values (and only them).
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};

#endif