// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_police.c	Input police filter
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		J Hadi Salim (action changes)
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_police.h>

/* Each policer is serialized by its individual spinlock */

static unsigned int police_net_id;
static struct tc_action_ops act_police_ops;

static int tcf_police_walker(struct net *net, struct sk_buff *skb,
			     struct netlink_callback *cb, int type,
			     const struct tc_action_ops *ops,
			     struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, police_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
	[TCA_POLICE_RATE]	= { .len = TC_RTAB_SIZE },
	[TCA_POLICE_PEAKRATE]	= { .len = TC_RTAB_SIZE },
	[TCA_POLICE_AVRATE]	= { .type = NLA_U32 },
	[TCA_POLICE_RESULT]	= { .type = NLA_U32 },
	[TCA_POLICE_RATE64]	= { .type = NLA_U64 },
	[TCA_POLICE_PEAKRATE64]	= { .type = NLA_U64 },
};

static int tcf_police_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   int ovr, int bind, bool rtnl_held,
			   struct tcf_proto *tp,
			   struct netlink_ext_ack *extack)
{
	int ret = 0, tcfp_result = TC_ACT_OK, err, size;
	struct nlattr *tb[TCA_POLICE_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_police *parm;
	struct tcf_police *police;
	struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
	struct tc_action_net *tn = net_generic(net, police_net_id);
	struct tcf_police_params *new;
	bool exists = false;
	u32 index;
	u64 rate64, prate64;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_POLICE_MAX, nla,
					  police_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_POLICE_TBF] == NULL)
		return -EINVAL;
	size = nla_len(tb[TCA_POLICE_TBF]);
	if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
		return -EINVAL;

	parm = nla_data(tb[TCA_POLICE_TBF]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	if (!exists) {
		ret = tcf_idr_create(tn, index, NULL, a,
				     &act_police_ops, bind, true);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
		spin_lock_init(&(to_police(*a)->tcfp_lock));
	} else if (!ovr) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	police = to_police(*a);
	if (parm->rate.rate) {
		err = -ENOMEM;
		R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE], NULL);
		if (R_tab == NULL)
			goto failure;

		if (parm->peakrate.rate) {
			P_tab = qdisc_get_rtab(&parm->peakrate,
					       tb[TCA_POLICE_PEAKRATE], NULL);
			if (P_tab == NULL)
				goto failure;
		}
	}

	if (est) {
		err = gen_replace_estimator(&police->tcf_bstats,
					    police->common.cpu_bstats,
					    &police->tcf_rate_est,
					    &police->tcf_lock,
					    NULL, est);
		if (err)
			goto failure;
	} else if (tb[TCA_POLICE_AVRATE] &&
		   (ret == ACT_P_CREATED ||
		    !gen_estimator_active(&police->tcf_rate_est))) {
		err = -EINVAL;
		goto failure;
	}

	if (tb[TCA_POLICE_RESULT]) {
		tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]);
		if (TC_ACT_EXT_CMP(tcfp_result, TC_ACT_GOTO_CHAIN)) {
			NL_SET_ERR_MSG(extack,
				       "goto chain not allowed on fallback");
			err = -EINVAL;
			goto failure;
		}
	}

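	/* Build a fresh parameter block; it is swapped in under tcf_lock
	 * below and the previous block, if any, is freed via kfree_rcu().
	 */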
	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (unlikely(!new)) {
		err = -ENOMEM;
		goto failure;
	}

	/* No failure allowed after this point */
	new->tcfp_result = tcfp_result;
	new->tcfp_mtu = parm->mtu;
	if (!new->tcfp_mtu) {
		new->tcfp_mtu = ~0;
		if (R_tab)
			new->tcfp_mtu = 255 << R_tab->rate.cell_log;
	}
	if (R_tab) {
		new->rate_present = true;
		rate64 = tb[TCA_POLICE_RATE64] ?
			 nla_get_u64(tb[TCA_POLICE_RATE64]) : 0;
		psched_ratecfg_precompute(&new->rate, &R_tab->rate, rate64);
		qdisc_put_rtab(R_tab);
	} else {
		new->rate_present = false;
	}
	if (P_tab) {
		new->peak_present = true;
		prate64 = tb[TCA_POLICE_PEAKRATE64] ?
			 nla_get_u64(tb[TCA_POLICE_PEAKRATE64]) : 0;
		psched_ratecfg_precompute(&new->peak, &P_tab->rate, prate64);
		qdisc_put_rtab(P_tab);
	} else {
		new->peak_present = false;
	}

	new->tcfp_burst = PSCHED_TICKS2NS(parm->burst);
	if (new->peak_present)
		new->tcfp_mtu_ptoks = (s64)psched_l2t_ns(&new->peak,
							 new->tcfp_mtu);

	if (tb[TCA_POLICE_AVRATE])
		new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);

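	/* Publish the new parameters: reset the token bucket state under
	 * tcfp_lock, then swap the RCU-managed params pointer while holding
	 * tcf_lock so concurrent dumps see a consistent action.
	 */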
	spin_lock_bh(&police->tcf_lock);
	spin_lock_bh(&police->tcfp_lock);
	police->tcfp_t_c = ktime_get_ns();
	police->tcfp_toks = new->tcfp_burst;
	if (new->peak_present)
		police->tcfp_ptoks = new->tcfp_mtu_ptoks;
	spin_unlock_bh(&police->tcfp_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	rcu_swap_protected(police->params,
			   new,
			   lockdep_is_held(&police->tcf_lock));
	spin_unlock_bh(&police->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (new)
		kfree_rcu(new, rcu);

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);
	return ret;

failure:
	qdisc_put_rtab(P_tab);
	qdisc_put_rtab(R_tab);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

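/* Datapath: check each packet against the configured rate (and peak rate,
 * if present) using a nanosecond token bucket.  Conforming packets return
 * the configured result (TC_ACT_OK by default); non-conforming packets
 * fall back to the exceed verdict stored in tcf_action.
 */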
static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_police *police = to_police(a);
	struct tcf_police_params *p;
	s64 now, toks, ptoks = 0;
	int ret;

	tcf_lastuse_update(&police->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(police->common.cpu_bstats), skb);

	ret = READ_ONCE(police->tcf_action);
	p = rcu_dereference_bh(police->params);

	if (p->tcfp_ewma_rate) {
		struct gnet_stats_rate_est64 sample;

		if (!gen_estimator_read(&police->tcf_rate_est, &sample) ||
		    sample.bps >= p->tcfp_ewma_rate)
			goto inc_overlimits;
	}

	if (qdisc_pkt_len(skb) <= p->tcfp_mtu) {
		if (!p->rate_present) {
			ret = p->tcfp_result;
			goto end;
		}

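		/* Tokens are kept as nanoseconds of transmission time at the
		 * configured rate: time elapsed since the last conforming
		 * packet refills the bucket, capped at tcfp_burst, and this
		 * packet's cost is subtracted before the conformance check.
		 */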
		now = ktime_get_ns();
		spin_lock_bh(&police->tcfp_lock);
		toks = min_t(s64, now - police->tcfp_t_c, p->tcfp_burst);
		if (p->peak_present) {
			ptoks = toks + police->tcfp_ptoks;
			if (ptoks > p->tcfp_mtu_ptoks)
				ptoks = p->tcfp_mtu_ptoks;
			ptoks -= (s64)psched_l2t_ns(&p->peak,
						    qdisc_pkt_len(skb));
		}
		toks += police->tcfp_toks;
		if (toks > p->tcfp_burst)
			toks = p->tcfp_burst;
		toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb));
		if ((toks|ptoks) >= 0) {
			police->tcfp_t_c = now;
			police->tcfp_toks = toks;
			police->tcfp_ptoks = ptoks;
			spin_unlock_bh(&police->tcfp_lock);
			ret = p->tcfp_result;
			goto inc_drops;
		}
		spin_unlock_bh(&police->tcfp_lock);
	}

inc_overlimits:
	qstats_overlimit_inc(this_cpu_ptr(police->common.cpu_qstats));
inc_drops:
	if (ret == TC_ACT_SHOT)
		qstats_drop_inc(this_cpu_ptr(police->common.cpu_qstats));
end:
	return ret;
}

static void tcf_police_cleanup(struct tc_action *a)
{
	struct tcf_police *police = to_police(a);
	struct tcf_police_params *p;

	p = rcu_dereference_protected(police->params, 1);
	if (p)
		kfree_rcu(p, rcu);
}

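/* Counters reported via the offload path are added to the software stats
 * and, when the hw flag is set, to the hardware stats as well.
 */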
static void tcf_police_stats_update(struct tc_action *a,
				    u64 bytes, u32 packets,
				    u64 lastuse, bool hw)
{
	struct tcf_police *police = to_police(a);
	struct tcf_t *tm = &police->tcf_tm;

	_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
	if (hw)
		_bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
				   bytes, packets);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
			   int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_police *police = to_police(a);
	struct tcf_police_params *p;
	struct tc_police opt = {
		.index = police->tcf_index,
		.refcnt = refcount_read(&police->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&police->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&police->tcf_lock);
	opt.action = police->tcf_action;
	p = rcu_dereference_protected(police->params,
				      lockdep_is_held(&police->tcf_lock));
	opt.mtu = p->tcfp_mtu;
	opt.burst = PSCHED_NS2TICKS(p->tcfp_burst);
	if (p->rate_present) {
		psched_ratecfg_getrate(&opt.rate, &p->rate);
		if ((police->params->rate.rate_bytes_ps >= (1ULL << 32)) &&
		    nla_put_u64_64bit(skb, TCA_POLICE_RATE64,
				      police->params->rate.rate_bytes_ps,
				      TCA_POLICE_PAD))
			goto nla_put_failure;
	}
	if (p->peak_present) {
		psched_ratecfg_getrate(&opt.peakrate, &p->peak);
		if ((police->params->peak.rate_bytes_ps >= (1ULL << 32)) &&
		    nla_put_u64_64bit(skb, TCA_POLICE_PEAKRATE64,
				      police->params->peak.rate_bytes_ps,
				      TCA_POLICE_PAD))
			goto nla_put_failure;
	}
	if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
		goto nla_put_failure;
	if (p->tcfp_result &&
	    nla_put_u32(skb, TCA_POLICE_RESULT, p->tcfp_result))
		goto nla_put_failure;
	if (p->tcfp_ewma_rate &&
	    nla_put_u32(skb, TCA_POLICE_AVRATE, p->tcfp_ewma_rate))
		goto nla_put_failure;

	t.install = jiffies_to_clock_t(jiffies - police->tcf_tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - police->tcf_tm.lastuse);
	t.firstuse = jiffies_to_clock_t(jiffies - police->tcf_tm.firstuse);
	t.expires = jiffies_to_clock_t(police->tcf_tm.expires);
	if (nla_put_64bit(skb, TCA_POLICE_TM, sizeof(t), &t, TCA_POLICE_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&police->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&police->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_police_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, police_net_id);

	return tcf_idr_search(tn, a, index);
}

MODULE_AUTHOR("Alexey Kuznetsov");
MODULE_DESCRIPTION("Policing actions");
MODULE_LICENSE("GPL");

static struct tc_action_ops act_police_ops = {
	.kind		= "police",
	.id		= TCA_ID_POLICE,
	.owner		= THIS_MODULE,
	.stats_update	= tcf_police_stats_update,
	.act		= tcf_police_act,
	.dump		= tcf_police_dump,
	.init		= tcf_police_init,
	.walk		= tcf_police_walker,
	.lookup		= tcf_police_search,
	.cleanup	= tcf_police_cleanup,
	.size		= sizeof(struct tcf_police),
};

static __net_init int police_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, police_net_id);

	return tc_action_net_init(net, tn, &act_police_ops);
}

static void __net_exit police_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, police_net_id);
}

static struct pernet_operations police_net_ops = {
	.init = police_init_net,
	.exit_batch = police_exit_net,
	.id   = &police_net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init police_init_module(void)
{
	return tcf_register_action(&act_police_ops, &police_net_ops);
}

static void __exit police_cleanup_module(void)
{
	tcf_unregister_action(&act_police_ops, &police_net_ops);
}

module_init(police_init_module);
module_exit(police_cleanup_module);