// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2013 Cisco Systems, Inc, 2013.
 *
 * Author: Vijay Subramanian <vijaynsu@cisco.com>
 * Author: Mythili Prabhu <mysuryan@cisco.com>
 *
 * ECN support is added by Naeem Khademi <naeemk@ifi.uio.no>
 * University of Oslo, Norway.
 *
 * References:
 * RFC 8033: https://tools.ietf.org/html/rfc8033
 */
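
/* Illustrative configuration via tc(8) from iproute2, matching the defaults
 * below (parameter spellings as documented in tc-pie(8); shown only as an
 * example, not part of this file's interface):
 *
 *   tc qdisc add dev eth0 root pie limit 1000 target 15ms tupdate 15ms ecn
 */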

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define QUEUE_THRESHOLD 16384
#define DQCOUNT_INVALID -1
#define MAX_PROB 0xffffffffffffffff
#define PIE_SCALE 8
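
/* MAX_PROB is 2^64 - 1, i.e. probability 1.0 in the 64-bit fixed-point
 * representation used for the drop probability below. PIE_SCALE is the
 * left-shift applied to byte counts before divisions so that the computed
 * rates and delays keep fractional precision in integer arithmetic.
 */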

/* parameters used */
struct pie_params {
        psched_time_t target;   /* user specified target delay in pschedtime */
        u32 tupdate;            /* timer frequency (in jiffies) */
        u32 limit;              /* number of packets that can be enqueued */
        u32 alpha;              /* alpha and beta are between 0 and 32 */
        u32 beta;               /* and are used for shift relative to 1 */
        bool ecn;               /* true if ecn is enabled */
        bool bytemode;          /* to scale drop early prob based on pkt size */
};

/* variables used */
struct pie_vars {
        u64 prob;               /* probability, scaled to the full u64 range */
        psched_time_t burst_time;
        psched_time_t qdelay;
        psched_time_t qdelay_old;
        u64 dq_count;           /* measured in bytes */
        psched_time_t dq_tstamp;        /* start of drain-rate measurement */
        u64 accu_prob;          /* accumulated drop probability */
        u32 avg_dq_rate;        /* bytes per pschedtime tick, scaled */
        u32 qlen_old;           /* in bytes */
        u8 accu_prob_overflows; /* overflows of accu_prob */
};

/* statistics gathering */
struct pie_stats {
        u32 packets_in;         /* total number of packets enqueued */
        u32 dropped;            /* packets dropped due to pie_action */
        u32 overlimit;          /* dropped due to lack of space in queue */
        u32 maxq;               /* maximum queue size */
        u32 ecn_mark;           /* packets marked with ECN */
};

/* private data for the Qdisc */
struct pie_sched_data {
        struct pie_params params;
        struct pie_vars vars;
        struct pie_stats stats;
        struct timer_list adapt_timer;
        struct Qdisc *sch;
};

static void pie_params_init(struct pie_params *params)
{
        params->alpha = 2;
        params->beta = 20;
        params->tupdate = usecs_to_jiffies(15 * USEC_PER_MSEC);  /* 15 ms */
        params->limit = 1000;   /* default of 1000 packets */
        params->target = PSCHED_NS2TICKS(15 * NSEC_PER_MSEC);    /* 15 ms */
        params->ecn = false;
        params->bytemode = false;
}

static void pie_vars_init(struct pie_vars *vars)
{
        vars->dq_count = DQCOUNT_INVALID;
        vars->accu_prob = 0;
        vars->avg_dq_rate = 0;
        /* default of 150 ms in pschedtime */
        vars->burst_time = PSCHED_NS2TICKS(150 * NSEC_PER_MSEC);
        vars->accu_prob_overflows = 0;
}

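/* Per-packet early-drop decision. Rather than an independent random trial
 * for every packet, the decision is derandomized (see RFC 8033): the drop
 * probability is accumulated in accu_prob, drops are suppressed while the
 * accumulated value is still small, forced once it has grown very large,
 * and decided by a random 64-bit comparison in between.
 */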
static bool drop_early(struct Qdisc *sch, u32 packet_size)
{
        struct pie_sched_data *q = qdisc_priv(sch);
        u64 rnd;
        u64 local_prob = q->vars.prob;
        u32 mtu = psched_mtu(qdisc_dev(sch));

        /* If there is still burst allowance left, skip random early drop */
        if (q->vars.burst_time > 0)
                return false;

        /* If current delay is less than half of target, and
         * if drop prob is low already, disable early_drop
         */
        if ((q->vars.qdelay < q->params.target / 2) &&
            (q->vars.prob < MAX_PROB / 5))
                return false;

        /* If we have fewer than 2 mtu-sized packets, disable drop_early,
         * similar to min_th in RED
         */
        if (sch->qstats.backlog < 2 * mtu)
                return false;

        /* If bytemode is turned on, use packet size to compute new
         * probability. Smaller packets will have lower drop prob in this case
         */
        if (q->params.bytemode && packet_size <= mtu)
                local_prob = (u64)packet_size * div_u64(local_prob, mtu);
        else
                local_prob = q->vars.prob;

        if (local_prob == 0) {
                q->vars.accu_prob = 0;
                q->vars.accu_prob_overflows = 0;
        }

        if (local_prob > MAX_PROB - q->vars.accu_prob)
                q->vars.accu_prob_overflows++;

        q->vars.accu_prob += local_prob;

        if (q->vars.accu_prob_overflows == 0 &&
            q->vars.accu_prob < (MAX_PROB / 100) * 85)
                return false;
        if (q->vars.accu_prob_overflows == 8 &&
            q->vars.accu_prob >= MAX_PROB / 2)
                return true;

        prandom_bytes(&rnd, 8);
        if (rnd < local_prob) {
                q->vars.accu_prob = 0;
                q->vars.accu_prob_overflows = 0;
                return true;
        }

        return false;
}

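/* Enqueue hook: packets that pass drop_early() are queued normally. A
 * packet selected for early drop is instead ECN-marked when ECN is enabled,
 * the drop probability is at most 10% and the packet is ECN capable;
 * otherwise it is dropped and the accumulated probability is reset.
 */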
static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                             struct sk_buff **to_free)
{
        struct pie_sched_data *q = qdisc_priv(sch);
        bool enqueue = false;

        if (unlikely(qdisc_qlen(sch) >= sch->limit)) {
                q->stats.overlimit++;
                goto out;
        }

        if (!drop_early(sch, skb->len)) {
                enqueue = true;
        } else if (q->params.ecn && (q->vars.prob <= MAX_PROB / 10) &&
                   INET_ECN_set_ce(skb)) {
                /* If packet is ecn capable, mark it if drop probability
                 * is lower than 10%, else drop it.
                 */
                q->stats.ecn_mark++;
                enqueue = true;
        }

        /* we can enqueue the packet */
        if (enqueue) {
                q->stats.packets_in++;
                if (qdisc_qlen(sch) > q->stats.maxq)
                        q->stats.maxq = qdisc_qlen(sch);

                return qdisc_enqueue_tail(skb, sch);
        }

out:
        q->stats.dropped++;
        q->vars.accu_prob = 0;
        q->vars.accu_prob_overflows = 0;
        return qdisc_drop(skb, sch, to_free);
}

static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
        [TCA_PIE_TARGET] = {.type = NLA_U32},
        [TCA_PIE_LIMIT] = {.type = NLA_U32},
        [TCA_PIE_TUPDATE] = {.type = NLA_U32},
        [TCA_PIE_ALPHA] = {.type = NLA_U32},
        [TCA_PIE_BETA] = {.type = NLA_U32},
        [TCA_PIE_ECN] = {.type = NLA_U32},
        [TCA_PIE_BYTEMODE] = {.type = NLA_U32},
};

static int pie_change(struct Qdisc *sch, struct nlattr *opt,
                      struct netlink_ext_ack *extack)
{
        struct pie_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_PIE_MAX + 1];
        unsigned int qlen, dropped = 0;
        int err;

        if (!opt)
                return -EINVAL;

        err = nla_parse_nested_deprecated(tb, TCA_PIE_MAX, opt, pie_policy,
                                          NULL);
        if (err < 0)
                return err;

        sch_tree_lock(sch);

        /* convert from microseconds to pschedtime */
        if (tb[TCA_PIE_TARGET]) {
                /* target is in us */
                u32 target = nla_get_u32(tb[TCA_PIE_TARGET]);

                /* convert to pschedtime */
                q->params.target = PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC);
        }

        /* tupdate is passed in us; convert to jiffies */
        if (tb[TCA_PIE_TUPDATE])
                q->params.tupdate =
                        usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE]));

        if (tb[TCA_PIE_LIMIT]) {
                u32 limit = nla_get_u32(tb[TCA_PIE_LIMIT]);

                q->params.limit = limit;
                sch->limit = limit;
        }

        if (tb[TCA_PIE_ALPHA])
                q->params.alpha = nla_get_u32(tb[TCA_PIE_ALPHA]);

        if (tb[TCA_PIE_BETA])
                q->params.beta = nla_get_u32(tb[TCA_PIE_BETA]);

        if (tb[TCA_PIE_ECN])
                q->params.ecn = nla_get_u32(tb[TCA_PIE_ECN]);

        if (tb[TCA_PIE_BYTEMODE])
                q->params.bytemode = nla_get_u32(tb[TCA_PIE_BYTEMODE]);

        /* Drop excess packets if new limit is lower */
        qlen = sch->q.qlen;
        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

                dropped += qdisc_pkt_len(skb);
                qdisc_qstats_backlog_dec(sch, skb);
                rtnl_qdisc_drop(skb, sch);
        }
        qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

        sch_tree_unlock(sch);
        return 0;
}

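/* Dequeue-side accounting: once at least QUEUE_THRESHOLD bytes are
 * backlogged, measure how long draining the next QUEUE_THRESHOLD bytes
 * takes and fold the result into avg_dq_rate with an EWMA
 * (7/8 old + 1/8 new). The burst allowance is also charged here with the
 * measured interval.
 */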
static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb)
{
        struct pie_sched_data *q = qdisc_priv(sch);
        int qlen = sch->qstats.backlog; /* current queue size in bytes */

        /* If the current queue holds QUEUE_THRESHOLD bytes or more (roughly
         * 10 MTU-sized packets) and dq_count is unset, we have enough packets
         * to calculate the drain rate. Save current time as dq_tstamp and
         * start the measurement cycle.
         */
        if (qlen >= QUEUE_THRESHOLD && q->vars.dq_count == DQCOUNT_INVALID) {
                q->vars.dq_tstamp = psched_get_time();
                q->vars.dq_count = 0;
        }

        /* Calculate the average drain rate from this value. If queue length
         * has receded to a small value, viz. below QUEUE_THRESHOLD bytes,
         * reset dq_count to -1 as we don't have enough packets to calculate
         * the drain rate anymore. The following if block is entered only when
         * we have a substantial queue built up (QUEUE_THRESHOLD bytes or
         * more) and we calculate the drain rate for the threshold here.
         * dq_count is in bytes, time difference in psched_time, hence rate is
         * in bytes/psched_time.
         */
        if (q->vars.dq_count != DQCOUNT_INVALID) {
                q->vars.dq_count += skb->len;

                if (q->vars.dq_count >= QUEUE_THRESHOLD) {
                        psched_time_t now = psched_get_time();
                        u32 dtime = now - q->vars.dq_tstamp;
                        u32 count = q->vars.dq_count << PIE_SCALE;

                        if (dtime == 0)
                                return;

                        count = count / dtime;

                        if (q->vars.avg_dq_rate == 0)
                                q->vars.avg_dq_rate = count;
                        else
                                q->vars.avg_dq_rate =
                                    (q->vars.avg_dq_rate -
                                     (q->vars.avg_dq_rate >> 3)) + (count >> 3);

                        /* If the queue has receded below the threshold, we
                         * hold on to the last drain rate calculated, else we
                         * reset dq_count to 0 to re-enter the if block when
                         * the next packet is dequeued
                         */
                        if (qlen < QUEUE_THRESHOLD) {
                                q->vars.dq_count = DQCOUNT_INVALID;
                        } else {
                                q->vars.dq_count = 0;
                                q->vars.dq_tstamp = psched_get_time();
                        }

                        if (q->vars.burst_time > 0) {
                                if (q->vars.burst_time > dtime)
                                        q->vars.burst_time -= dtime;
                                else
                                        q->vars.burst_time = 0;
                        }
                }
        }
}

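/* Periodic controller update. Queuing delay is estimated via Little's law
 * (qdelay = backlog / avg_dq_rate) and the drop probability follows the PI
 * update of RFC 8033:
 *
 *   prob += alpha * (qdelay - target) + beta * (qdelay - qdelay_old)
 *
 * where alpha weights the deviation from the target delay and beta the
 * recent trend of the delay.
 */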
static void calculate_probability(struct Qdisc *sch)
{
        struct pie_sched_data *q = qdisc_priv(sch);
        u32 qlen = sch->qstats.backlog; /* queue size in bytes */
        psched_time_t qdelay = 0;       /* in pschedtime */
        psched_time_t qdelay_old = q->vars.qdelay;      /* in pschedtime */
        s64 delta = 0;          /* determines the change in probability */
        u64 oldprob;
        u64 alpha, beta;
        u32 power;
        bool update_prob = true;

        q->vars.qdelay_old = q->vars.qdelay;

        if (q->vars.avg_dq_rate > 0)
                qdelay = (qlen << PIE_SCALE) / q->vars.avg_dq_rate;
        else
                qdelay = 0;

        /* If qdelay is zero and qlen is not, it means qlen is very small,
         * less than dequeue_rate, so we do not update probability in this
         * round.
         */
        if (qdelay == 0 && qlen != 0)
                update_prob = false;

        /* In the algorithm, alpha and beta are between 0 and 2 with typical
         * value for alpha as 0.125. In this implementation, we use values 0-32
         * passed from user space to represent this. Also, alpha and beta have
         * unit of HZ and need to be scaled before they can be used to update
         * probability. alpha/beta are updated locally below by scaling down
         * by 16 to come to the 0-2 range; e.g. the default alpha of 2 maps to
         * 2/16 = 0.125.
         */
        alpha = ((u64)q->params.alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
        beta = ((u64)q->params.beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;

        /* We scale alpha and beta differently depending on how heavy the
         * congestion is. Please see RFC 8033 for details.
         */
        if (q->vars.prob < MAX_PROB / 10) {
                alpha >>= 1;
                beta >>= 1;

                power = 100;
                while (q->vars.prob < div_u64(MAX_PROB, power) &&
                       power <= 1000000) {
                        alpha >>= 2;
                        beta >>= 2;
                        power *= 10;
                }
        }

        /* alpha and beta should be between 0 and 32, in multiples of 1/16 */
        delta += alpha * (u64)(qdelay - q->params.target);
        delta += beta * (u64)(qdelay - qdelay_old);

        oldprob = q->vars.prob;

        /* to ensure we increase probability in steps of no more than 2% */
        if (delta > (s64)(MAX_PROB / (100 / 2)) &&
            q->vars.prob >= MAX_PROB / 10)
                delta = (MAX_PROB / 100) * 2;

        /* Non-linear drop:
         * Tune drop probability to increase quickly for high delays (>= 250ms)
         * 250ms is derived through experiments and provides error protection
         */

        if (qdelay > (PSCHED_NS2TICKS(250 * NSEC_PER_MSEC)))
                delta += MAX_PROB / (100 / 2);

        q->vars.prob += delta;

        if (delta > 0) {
                /* prevent overflow */
                if (q->vars.prob < oldprob) {
                        q->vars.prob = MAX_PROB;
                        /* Prevent normalization error. If probability is at
                         * maximum value already, we normalize it here, and
                         * skip the check to do a non-linear drop in the next
                         * section.
                         */
                        update_prob = false;
                }
        } else {
                /* prevent underflow */
                if (q->vars.prob > oldprob)
                        q->vars.prob = 0;
        }

        /* Non-linear drop in probability: Reduce drop probability quickly if
         * delay is 0 for 2 consecutive Tupdate periods.
         */

        if (qdelay == 0 && qdelay_old == 0 && update_prob)
                /* Reduce drop probability to 98.4% of its value */
                q->vars.prob -= q->vars.prob / 64u;

        q->vars.qdelay = qdelay;
        q->vars.qlen_old = qlen;

        /* We restart the measurement cycle if the following conditions are met
         * 1. If the delay has been low for 2 consecutive Tupdate periods
         * 2. Calculated drop probability is zero
         * 3. We have at least one estimate for avg_dq_rate, i.e., it is a
         *    non-zero value
         */
        if ((q->vars.qdelay < q->params.target / 2) &&
            (q->vars.qdelay_old < q->params.target / 2) &&
            q->vars.prob == 0 &&
            q->vars.avg_dq_rate > 0)
                pie_vars_init(&q->vars);
}

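/* Adaptation timer callback: runs calculate_probability() under the root
 * qdisc lock every tupdate interval and re-arms itself.
 */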
static void pie_timer(struct timer_list *t)
{
        struct pie_sched_data *q = from_timer(q, t, adapt_timer);
        struct Qdisc *sch = q->sch;
        spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

        spin_lock(root_lock);
        calculate_probability(sch);

        /* reset the timer to fire after 'tupdate'. tupdate is in jiffies. */
        if (q->params.tupdate)
                mod_timer(&q->adapt_timer, jiffies + q->params.tupdate);
        spin_unlock(root_lock);
}

static int pie_init(struct Qdisc *sch, struct nlattr *opt,
                    struct netlink_ext_ack *extack)
{
        struct pie_sched_data *q = qdisc_priv(sch);

        pie_params_init(&q->params);
        pie_vars_init(&q->vars);
        sch->limit = q->params.limit;

        q->sch = sch;
        timer_setup(&q->adapt_timer, pie_timer, 0);

        if (opt) {
                int err = pie_change(sch, opt, extack);

                if (err)
                        return err;
        }

        mod_timer(&q->adapt_timer, jiffies + HZ / 2);
        return 0;
}

static int pie_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct pie_sched_data *q = qdisc_priv(sch);
        struct nlattr *opts;

        opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (!opts)
                goto nla_put_failure;

        /* convert target from pschedtime to us */
        if (nla_put_u32(skb, TCA_PIE_TARGET,
                        ((u32)PSCHED_TICKS2NS(q->params.target)) /
                        NSEC_PER_USEC) ||
            nla_put_u32(skb, TCA_PIE_LIMIT, sch->limit) ||
            nla_put_u32(skb, TCA_PIE_TUPDATE,
                        jiffies_to_usecs(q->params.tupdate)) ||
            nla_put_u32(skb, TCA_PIE_ALPHA, q->params.alpha) ||
            nla_put_u32(skb, TCA_PIE_BETA, q->params.beta) ||
            nla_put_u32(skb, TCA_PIE_ECN, q->params.ecn) ||
            nla_put_u32(skb, TCA_PIE_BYTEMODE, q->params.bytemode))
                goto nla_put_failure;

        return nla_nest_end(skb, opts);

nla_put_failure:
        nla_nest_cancel(skb, opts);
        return -1;
}

static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct pie_sched_data *q = qdisc_priv(sch);
        struct tc_pie_xstats st = {
                .prob = q->vars.prob,
                .delay = ((u32)PSCHED_TICKS2NS(q->vars.qdelay)) /
                           NSEC_PER_USEC,
                /* unscale and return dq_rate in bytes per sec */
                .avg_dq_rate = q->vars.avg_dq_rate *
                               (PSCHED_TICKS_PER_SEC) >> PIE_SCALE,
                .packets_in = q->stats.packets_in,
                .overlimit = q->stats.overlimit,
                .maxq = q->stats.maxq,
                .dropped = q->stats.dropped,
                .ecn_mark = q->stats.ecn_mark,
        };

        return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch)
{
        struct sk_buff *skb = qdisc_dequeue_head(sch);

        if (!skb)
                return NULL;

        pie_process_dequeue(sch, skb);
        return skb;
}

static void pie_reset(struct Qdisc *sch)
{
        struct pie_sched_data *q = qdisc_priv(sch);

        qdisc_reset_queue(sch);
        pie_vars_init(&q->vars);
}

static void pie_destroy(struct Qdisc *sch)
{
        struct pie_sched_data *q = qdisc_priv(sch);

        q->params.tupdate = 0;
        del_timer_sync(&q->adapt_timer);
}

static struct Qdisc_ops pie_qdisc_ops __read_mostly = {
        .id = "pie",
        .priv_size = sizeof(struct pie_sched_data),
        .enqueue = pie_qdisc_enqueue,
        .dequeue = pie_qdisc_dequeue,
        .peek = qdisc_peek_dequeued,
        .init = pie_init,
        .destroy = pie_destroy,
        .reset = pie_reset,
        .change = pie_change,
        .dump = pie_dump,
        .dump_stats = pie_dump_stats,
        .owner = THIS_MODULE,
};

static int __init pie_module_init(void)
{
        return register_qdisc(&pie_qdisc_ops);
}

static void __exit pie_module_exit(void)
{
        unregister_qdisc(&pie_qdisc_ops);
}

module_init(pie_module_init);
module_exit(pie_module_exit);

MODULE_DESCRIPTION("Proportional Integral controller Enhanced (PIE) scheduler");
MODULE_AUTHOR("Vijay Subramanian");
MODULE_AUTHOR("Mythili Prabhu");
MODULE_LICENSE("GPL");