/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/net_namespace.h>

/* TC action not accessible from user space */
#define TC_ACT_CONSUMED		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	bool	nonempty;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

struct tcf_block_ext_info {
	enum flow_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_qevent {
	struct tcf_block	*block;
	struct tcf_block_ext_info info;
	struct tcf_proto __rcu *filter_chain;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
				     struct tcf_proto *tp, bool rtnl_held);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return block && block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);
int tcf_classify_ingress(struct sk_buff *skb,
			 const struct tcf_block *ingress_block,
			 const struct tcf_proto *tp, struct tcf_result *res,
			 bool compat_mode);

#else
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return false;
}

static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}

static inline int tcf_classify_ingress(struct sk_buff *skb,
				       const struct tcf_block *ingress_block,
				       const struct tcf_proto *tp,
				       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}

#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline void
__tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
{
	unsigned long cl;

	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = __cls_set_class(&r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	sch_tree_lock(q);
	__tcf_bind_filter(q, r, base);
	sch_tree_unlock(q);
}

static inline void
__tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
{
	unsigned long cl;

	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;

	if (!q)
		return;
	__tcf_unbind_filter(q, r);
}

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net *net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
				int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = net;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
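
/* Example (illustrative sketch; the TCA_MYCLS_* attributes and the filter
 * struct f are hypothetical): a classifier's ->change() path typically
 * pairs tcf_exts_init() with tcf_exts_validate(), tearing the extensions
 * down again on the error path:
 *
 *	err = tcf_exts_init(&f->exts, net, TCA_MYCLS_ACT, TCA_MYCLS_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, true, extack);
 *	if (err < 0) {
 *		tcf_exts_destroy(&f->exts);
 *		return err;
 *	}
 */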

/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise they may race
 * with tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}
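
/* Example (illustrative sketch; the my_* identifiers are hypothetical):
 * filter teardown commonly pairs tcf_exts_get_net() with tcf_queue_work(),
 * falling back to synchronous cleanup when the netns is already being
 * destroyed:
 *
 *	static void __my_destroy_filter(struct my_filter *f)
 *	{
 *		tcf_exts_destroy(&f->exts);
 *		tcf_exts_put_net(&f->exts);
 *		kfree(f);
 *	}
 *
 *	static void my_destroy_filter_work(struct work_struct *work)
 *	{
 *		struct my_filter *f = container_of(to_rcu_work(work),
 *						   struct my_filter, rwork);
 *
 *		rtnl_lock();
 *		__my_destroy_filter(f);
 *		rtnl_unlock();
 *	}
 *
 * Deferred when the netns is still alive, synchronous otherwise:
 *
 *	if (tcf_exts_get_net(&f->exts))
 *		tcf_queue_work(&f->rwork, my_destroy_filter_work);
 *	else
 *		__my_destroy_filter(f);
 */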

#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif

static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 drops, u64 lastuse,
		      u8 used_hw_stats, bool used_hw_stats_valid)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, drops,
					lastuse, true);
		a->used_hw_stats = used_hw_stats;
		a->used_hw_stats_valid = used_hw_stats_valid;
	}

	preempt_enable();
#endif
}

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched, or a positive
 * action code (TC_ACT_*) that must be returned to the underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}
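
/* Example (illustrative sketch, modeled on simple classifiers; the my_*
 * identifiers are hypothetical): a ->classify() implementation forwards a
 * non-negative return value of tcf_exts_exec() and keeps walking its filter
 * list on a negative one:
 *
 *	list_for_each_entry_rcu(f, &head->flist, link) {
 *		if (!my_filter_match(skb, f))
 *			continue;
 *		*res = f->res;
 *		err = tcf_exts_exec(skb, &f->exts, res);
 *		if (err < 0)
 *			continue;
 *		return err;
 *	}
 *	return -1;
 */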

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr, bool rtnl_held,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 */
struct tcf_pkt_info {
	unsigned char *ptr;
	int nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 */
struct tcf_ematch {
	struct tcf_ematch_ops *ops;
	unsigned long data;
	unsigned int datalen;
	u16 matchid;
	u16 flags;
	struct net *net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};
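
/* Example (illustrative sketch; the em_my_* identifiers are hypothetical
 * and the TCF_EM_CMP kind is reused here purely for illustration - a real
 * module uses its own kind id from the uapi headers): an ematch module
 * fills in the ops table and registers it from module init:
 *
 *	static int em_my_match(struct sk_buff *skb, struct tcf_ematch *em,
 *			       struct tcf_pkt_info *info)
 *	{
 *		return 0;
 *	}
 *
 *	static struct tcf_ematch_ops em_my_ops = {
 *		.kind	= TCF_EM_CMP,
 *		.match	= em_my_match,
 *		.owner	= THIS_MODULE,
 *		.link	= LIST_HEAD_INIT(em_my_ops.link),
 *	};
 *
 *	static int __init em_my_init(void)
 *	{
 *		return tcf_em_register(&em_my_ops);
 *	}
 */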

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches respecting their logic relations, returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, if no ematches are
 * configured, or if ematch support is not enabled in the kernel;
 * otherwise 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}
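
/* Example (illustrative sketch, following how classifiers such as cls_basic
 * consume ematch trees; f is a hypothetical filter): the match loop simply
 * skips any filter whose ematch tree does not match the packet:
 *
 *	if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 *		continue;
 */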

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	/* The last comparison guards against overflow of ptr + len. */
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
				    "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev) {
		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
				    "Network device not found");
		return -ENODEV;
	}
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}

int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts);
void tc_cleanup_flow_action(struct flow_action *flow_action);

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held);
int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held);
int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);

#ifdef CONFIG_NET_CLS_ACT
int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
		    enum flow_block_binder_type binder_type,
		    struct nlattr *block_index_attr,
		    struct netlink_ext_ack *extack);
void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch);
int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
			       struct netlink_ext_ack *extack);
struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
				  struct sk_buff **to_free, int *ret);
int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe);
#else
static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
				  enum flow_block_binder_type binder_type,
				  struct nlattr *block_index_attr,
				  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
{
}

static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
					     struct netlink_ext_ack *extack)
{
	return 0;
}

static inline struct sk_buff *
tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
		  struct sk_buff **to_free, int *ret)
{
	return skb;
}

static inline int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
{
	return 0;
}
#endif
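
/* Example (illustrative sketch; q, qe_mark and TCA_MY_MARK_BLOCK are
 * hypothetical, while the binder type is the one sch_red uses for its
 * "mark" qevent): a qdisc binds the qevent block at init time and passes
 * packets through it at the matching point of its datapath:
 *
 *	err = tcf_qevent_init(&q->qe_mark, sch,
 *			      FLOW_BLOCK_BINDER_TYPE_RED_MARK,
 *			      tb[TCA_MY_MARK_BLOCK], extack);
 *	if (err)
 *		return err;
 *
 *	skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
 *	if (!skb)
 *		return NET_XMIT_CN | ret;
 */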

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct flow_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct flow_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}
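
/* Example (illustrative sketch; my_setup_tc_cls_u32 is hypothetical):
 * drivers gate their classifier offload callbacks on this helper so that
 * both the NETIF_F_HW_TC check and the chain-0 restriction report a
 * meaningful extack message:
 *
 *	static int my_setup_tc_cls_u32(struct net_device *dev,
 *				       struct tc_cls_u32_offload *cls_u32)
 *	{
 *		if (!tc_cls_can_offload_and_chain0(dev, &cls_u32->common))
 *			return -EOPNOTSUPP;
 *		...
 *	}
 */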

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio >> 16;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb)
{
	struct tc_skb_ext *tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);

	if (tc_skb_ext)
		memset(tc_skb_ext, 0, sizeof(*tc_skb_ext));
	return tc_skb_ext;
}
#endif

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
	TC_CLSMATCHALL_STATS,
};

struct tc_cls_matchall_offload {
	struct flow_cls_common_offload common;
	enum tc_matchall_command command;
	struct flow_rule *rule;
	struct flow_stats stats;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct flow_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};
/* This structure holds the cookie that is passed from user space to the
 * kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
	TC_MQ_GRAFT,
};

struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	bool is_nodrop;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		u32 child_handle;
	};
};

enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_packed bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* At the point of un-offloading the Qdisc, the reported backlog and
	 * qlen need to be reduced by the portion that is in HW.
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};

enum tc_ets_command {
	TC_ETS_REPLACE,
	TC_ETS_DESTROY,
	TC_ETS_STATS,
	TC_ETS_GRAFT,
};

struct tc_ets_qopt_offload_replace_params {
	unsigned int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	unsigned int quanta[TCQ_ETS_MAX_BANDS];	/* 0 for strict bands. */
	unsigned int weights[TCQ_ETS_MAX_BANDS];
	struct gnet_stats_queue *qstats;
};

struct tc_ets_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_ets_qopt_offload {
	enum tc_ets_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_ets_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_ets_qopt_offload_graft_params graft_params;
	};
};

enum tc_tbf_command {
	TC_TBF_REPLACE,
	TC_TBF_DESTROY,
	TC_TBF_STATS,
};

struct tc_tbf_qopt_offload_replace_params {
	struct psched_ratecfg rate;
	u32 max_size;
	struct gnet_stats_queue *qstats;
};

struct tc_tbf_qopt_offload {
	enum tc_tbf_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_tbf_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
	};
};

enum tc_fifo_command {
	TC_FIFO_REPLACE,
	TC_FIFO_DESTROY,
	TC_FIFO_STATS,
};

struct tc_fifo_qopt_offload {
	enum tc_fifo_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_qopt_offload_stats stats;
	};
};

#endif