// SPDX-License-Identifier: GPL-2.0-only
/*
 * Pluggable TCP congestion control support and NewReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/jhash.h>
#include <net/tcp.h>

static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);
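/*
 * Writers serialize list updates with tcp_cong_list_lock; readers walk
 * the list under rcu_read_lock(), hence the _rcu list helpers below.
 */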

/* Simple linear search, don't expect many entries! */
struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}

	return NULL;
}

/* Must be called with the RCU read lock held. */
static struct tcp_congestion_ops *tcp_ca_find_autoload(struct net *net,
						       const char *name)
{
	struct tcp_congestion_ops *ca = tcp_ca_find(name);

#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_NET_ADMIN)) {
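		/*
		 * request_module() may sleep, so drop the RCU read lock
		 * around it and retry the lookup afterwards.
		 */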
		rcu_read_unlock();
		request_module("tcp_%s", name);
		rcu_read_lock();
		ca = tcp_ca_find(name);
	}
#endif
	return ca;
}

/* Simple linear search, not much in here. */
struct tcp_congestion_ops *tcp_ca_find_key(u32 key)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (e->key == key)
			return e;
	}

	return NULL;
}

/*
 * Attach a new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret = 0;

	/* all algorithms must implement these */
	if (!ca->ssthresh || !ca->undo_cwnd ||
	    !(ca->cong_avoid || ca->cong_control)) {
		pr_err("%s does not implement required ops\n", ca->name);
		return -EINVAL;
	}

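	/*
	 * Derive a lookup key by hashing the full fixed-size name buffer
	 * (sizeof(ca->name) bytes), seeding jhash with the string length.
	 */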
	ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));

	spin_lock(&tcp_cong_list_lock);
	if (ca->key == TCP_CA_UNSPEC || tcp_ca_find_key(ca->key)) {
		pr_notice("%s already registered or non-unique key\n",
			  ca->name);
		ret = -EEXIST;
	} else {
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		pr_debug("%s registered\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);

/*
 * Remove a congestion control algorithm, called from
 * the module's remove function. Module ref counts are used
 * to ensure that this can't be done until all sockets using
 * that method are closed.
 */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
	spin_lock(&tcp_cong_list_lock);
	list_del_rcu(&ca->list);
	spin_unlock(&tcp_cong_list_lock);

	/* Wait for outstanding readers to complete before the
	 * module gets removed entirely.
	 *
	 * A try_module_get() should fail by now, as our module is
	 * in the "going" state: no refs are held anymore and the
	 * module_exit() handler is being called.
	 */
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);

u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca)
{
	const struct tcp_congestion_ops *ca;
	u32 key = TCP_CA_UNSPEC;

	might_sleep();

	rcu_read_lock();
	ca = tcp_ca_find_autoload(net, name);
	if (ca) {
		key = ca->key;
		*ecn_ca = ca->flags & TCP_CONG_NEEDS_ECN;
	}
	rcu_read_unlock();

	return key;
}
EXPORT_SYMBOL_GPL(tcp_ca_get_key_by_name);

char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	const struct tcp_congestion_ops *ca;
	char *ret = NULL;

	rcu_read_lock();
	ca = tcp_ca_find_key(key);
	if (ca)
		ret = strncpy(buffer, ca->name,
			      TCP_CA_NAME_MAX);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_ca_get_name_by_key);

/* Assign choice of congestion control. */
void tcp_assign_congestion_control(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_congestion_ops *ca;

	rcu_read_lock();
	ca = rcu_dereference(net->ipv4.tcp_congestion_control);
	if (unlikely(!bpf_try_module_get(ca, ca->owner)))
		ca = &tcp_reno;
	icsk->icsk_ca_ops = ca;
	rcu_read_unlock();

	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
	if (ca->flags & TCP_CONG_NEEDS_ECN)
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);
}

void tcp_init_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_sk(sk)->prior_ssthresh = 0;
	if (icsk->icsk_ca_ops->init)
		icsk->icsk_ca_ops->init(sk);
	if (tcp_ca_needs_ecn(sk))
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);
	icsk->icsk_ca_initialized = 1;
}

static void tcp_reinit_congestion_control(struct sock *sk,
					  const struct tcp_congestion_ops *ca)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_cleanup_congestion_control(sk);
	icsk->icsk_ca_ops = ca;
	icsk->icsk_ca_setsockopt = 1;
	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));

	if (ca->flags & TCP_CONG_NEEDS_ECN)
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);

	if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
		tcp_init_congestion_control(sk);
}

/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->release)
		icsk->icsk_ca_ops->release(sk);
	bpf_module_put(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner);
}

/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(struct net *net, const char *name)
{
	struct tcp_congestion_ops *ca;
	const struct tcp_congestion_ops *prev;
	int ret;

	rcu_read_lock();
	ca = tcp_ca_find_autoload(net, name);
	if (!ca) {
		ret = -ENOENT;
	} else if (!bpf_try_module_get(ca, ca->owner)) {
		ret = -EBUSY;
	} else if (!net_eq(net, &init_net) &&
		   !(ca->flags & TCP_CONG_NON_RESTRICTED)) {
		/* Only init netns can set default to a restricted algorithm */
		ret = -EPERM;
	} else {
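		/* Publish the new default and drop the ref on the old one. */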
		prev = xchg(&net->ipv4.tcp_congestion_control, ca);
		if (prev)
			bpf_module_put(prev, prev->owner);

		ca->flags |= TCP_CONG_NON_RESTRICTED;
		ret = 0;
	}
	rcu_read_unlock();

	return ret;
}

/* Set default value from kernel configuration at bootup */
static int __init tcp_congestion_default(void)
{
	return tcp_set_default_congestion_control(&init_net,
						  CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);

/* Build string with list of available congestion control values */
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);

		if (WARN_ON_ONCE(offs >= maxlen))
			break;
	}
	rcu_read_unlock();
}

/* Get current default congestion control */
void tcp_get_default_congestion_control(struct net *net, char *name)
{
	const struct tcp_congestion_ops *ca;

	rcu_read_lock();
	ca = rcu_dereference(net->ipv4.tcp_congestion_control);
	strncpy(name, ca->name, TCP_CA_NAME_MAX);
	rcu_read_unlock();
}

/* Build a list of non-restricted congestion control values */
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	*buf = '\0';
	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
			continue;
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);

		if (WARN_ON_ONCE(offs >= maxlen))
			break;
	}
	rcu_read_unlock();
}

/* Change the list of non-restricted congestion controls */
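/* e.g. val == "reno cubic" leaves only reno and cubic unrestricted. */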
int tcp_set_allowed_congestion_control(char *val)
{
	struct tcp_congestion_ops *ca;
	char *saved_clone, *clone, *name;
	int ret = 0;

	saved_clone = clone = kstrdup(val, GFP_USER);
	if (!clone)
		return -ENOMEM;

	spin_lock(&tcp_cong_list_lock);
	/* pass 1 check for bad entries */
	while ((name = strsep(&clone, " ")) && *name) {
		ca = tcp_ca_find(name);
		if (!ca) {
			ret = -ENOENT;
			goto out;
		}
	}

	/* pass 2 clear old values */
	list_for_each_entry_rcu(ca, &tcp_cong_list, list)
		ca->flags &= ~TCP_CONG_NON_RESTRICTED;

	/* pass 3 mark as allowed */
	while ((name = strsep(&val, " ")) && *name) {
		ca = tcp_ca_find(name);
		WARN_ON(!ca);
		if (ca)
			ca->flags |= TCP_CONG_NON_RESTRICTED;
	}
out:
	spin_unlock(&tcp_cong_list_lock);
	kfree(saved_clone);

	return ret;
}

/* Change congestion control for socket. If load is false, then it is the
 * responsibility of the caller to call tcp_init_congestion_control or
 * tcp_reinit_congestion_control (if the current congestion control was
 * already initialized).
 */
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
			       bool cap_net_admin)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_congestion_ops *ca;
	int err = 0;

	if (icsk->icsk_ca_dst_locked)
		return -EPERM;

	rcu_read_lock();
	if (!load)
		ca = tcp_ca_find(name);
	else
		ca = tcp_ca_find_autoload(sock_net(sk), name);

	/* No change asking for existing value */
	if (ca == icsk->icsk_ca_ops) {
		icsk->icsk_ca_setsockopt = 1;
		goto out;
	}

	if (!ca)
		err = -ENOENT;
	else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || cap_net_admin))
		err = -EPERM;
	else if (!bpf_try_module_get(ca, ca->owner))
		err = -EBUSY;
	else
		tcp_reinit_congestion_control(sk, ca);
out:
	rcu_read_unlock();
	return err;
}

/* Slow start is used when the congestion window is no greater than the slow
 * start threshold. We base this on RFC2581 and also handle stretch ACKs
 * properly. We do not implement RFC3465 Appropriate Byte Counting (ABC) per
 * se, but something better ;) a packet is only considered (s)acked in its
 * entirety to defend against the ACK attacks described in the RFC. Slow start
 * processes a stretch ACK of degree N as if N acks of degree 1 are received
 * back to back, except that ABC caps N to 2. Slow start exits when cwnd grows
 * over ssthresh and returns the leftover acks to adjust cwnd in congestion
 * avoidance mode.
 */
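/*
 * Example: with snd_cwnd == 8, snd_ssthresh == 10 and acked == 5, cwnd
 * is capped at ssthresh (10) and the 3 leftover acks are returned for
 * congestion avoidance.
 */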
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
	u32 cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh);

	acked -= cwnd - tp->snd_cwnd;
	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);

	return acked;
}
EXPORT_SYMBOL_GPL(tcp_slow_start);

/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w),
 * for every packet that was ACKed.
 */
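/*
 * Example: with w == 10, snd_cwnd_cnt == 7 and acked == 25, the counter
 * reaches 32, so snd_cwnd grows by 3 and 2 credits are carried over.
 */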
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
{
	/* If credits accumulated at a higher w, apply them gently now. */
	if (tp->snd_cwnd_cnt >= w) {
		tp->snd_cwnd_cnt = 0;
		tp->snd_cwnd++;
	}

	tp->snd_cwnd_cnt += acked;
	if (tp->snd_cwnd_cnt >= w) {
		u32 delta = tp->snd_cwnd_cnt / w;

		tp->snd_cwnd_cnt -= delta * w;
		tp->snd_cwnd += delta;
	}
	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp);
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);

/*
 * TCP Reno congestion control
 * This is a special case used for fallback as well.
 */
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	/* In "safe" area, increase. */
	if (tcp_in_slow_start(tp)) {
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	/* In dangerous area, increase slowly. */
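	/* w == snd_cwnd gives the classic additive increase of about one
	 * segment per RTT.
	 */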
	tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);

/* Slow start threshold is half the congestion window (min 2) */
u32 tcp_reno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tp->snd_cwnd >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);

u32 tcp_reno_undo_cwnd(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tp->snd_cwnd, tp->prior_cwnd);
}
EXPORT_SYMBOL_GPL(tcp_reno_undo_cwnd);

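/* Reno supplies the mandatory ops enforced by
 * tcp_register_congestion_control(): ssthresh, undo_cwnd and cong_avoid.
 */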
struct tcp_congestion_ops tcp_reno = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "reno",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.undo_cwnd	= tcp_reno_undo_cwnd,
};