linux/net/ipv4/tcp_cong.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Pluggable TCP congestion control support and newReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/jhash.h>
#include <net/tcp.h>

static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);

/* Simple linear search, don't expect many entries! */
struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}

	return NULL;
}

/* Must be called with rcu lock held */
static struct tcp_congestion_ops *tcp_ca_find_autoload(struct net *net,
						       const char *name)
{
	struct tcp_congestion_ops *ca = tcp_ca_find(name);

#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_NET_ADMIN)) {
		rcu_read_unlock();
		request_module("tcp_%s", name);
		rcu_read_lock();
		ca = tcp_ca_find(name);
	}
#endif
	return ca;
}

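/* Note (illustrative, not in the original source): the "tcp_%s" alias
 * above means that asking for e.g. "cubic" attempts to load a module
 * named "tcp_cubic" on demand, and only a CAP_NET_ADMIN caller may
 * trigger that load.  The rcu lock is dropped around request_module()
 * because module loading may sleep.
 */
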
/* Simple linear search, not much in here. */
struct tcp_congestion_ops *tcp_ca_find_key(u32 key)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (e->key == key)
			return e;
	}

	return NULL;
}

/*
 * Attach new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret = 0;

	/* all algorithms must implement these */
	if (!ca->ssthresh || !ca->undo_cwnd ||
	    !(ca->cong_avoid || ca->cong_control)) {
		pr_err("%s does not implement required ops\n", ca->name);
		return -EINVAL;
	}

	ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));

	spin_lock(&tcp_cong_list_lock);
	if (ca->key == TCP_CA_UNSPEC || tcp_ca_find_key(ca->key)) {
		pr_notice("%s already registered or non-unique key\n",
			  ca->name);
		ret = -EEXIST;
	} else {
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		pr_debug("%s registered\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);

/*
 * Remove congestion control algorithm, called from
 * the module's remove function.  Module ref counts are used
 * to ensure that this can't be done till all sockets using
 * that method are closed.
 */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
	spin_lock(&tcp_cong_list_lock);
	list_del_rcu(&ca->list);
	spin_unlock(&tcp_cong_list_lock);

	/* Wait for outstanding readers to complete before the
	 * module gets removed entirely.
	 *
	 * A try_module_get() should fail by now, as our module is
	 * in "going" state: no refs are held anymore and the
	 * module_exit() handler is being called.
	 */
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);

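/* Example (illustrative sketch, not part of this file): a minimal
 * congestion control module built from the Reno helpers exported
 * further below.  The "example" name and the init/exit function names
 * are hypothetical; ssthresh, undo_cwnd and cong_avoid (or
 * cong_control) are the mandatory ops checked by
 * tcp_register_congestion_control().
 */
#if 0
static struct tcp_congestion_ops tcp_example __read_mostly = {
	.name		= "example",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.undo_cwnd	= tcp_reno_undo_cwnd,
};

static int __init tcp_example_init(void)
{
	return tcp_register_congestion_control(&tcp_example);
}

static void __exit tcp_example_exit(void)
{
	tcp_unregister_congestion_control(&tcp_example);
}

module_init(tcp_example_init);
module_exit(tcp_example_exit);
#endif
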
u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca)
{
	const struct tcp_congestion_ops *ca;
	u32 key = TCP_CA_UNSPEC;

	might_sleep();

	rcu_read_lock();
	ca = tcp_ca_find_autoload(net, name);
	if (ca) {
		key = ca->key;
		*ecn_ca = ca->flags & TCP_CONG_NEEDS_ECN;
	}
	rcu_read_unlock();

	return key;
}

char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	const struct tcp_congestion_ops *ca;
	char *ret = NULL;

	rcu_read_lock();
	ca = tcp_ca_find_key(key);
	if (ca)
		ret = strncpy(buffer, ca->name,
			      TCP_CA_NAME_MAX);
	rcu_read_unlock();

	return ret;
}

/* Assign choice of congestion control. */
void tcp_assign_congestion_control(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_congestion_ops *ca;

	rcu_read_lock();
	ca = rcu_dereference(net->ipv4.tcp_congestion_control);
	if (unlikely(!bpf_try_module_get(ca, ca->owner)))
		ca = &tcp_reno;
	icsk->icsk_ca_ops = ca;
	rcu_read_unlock();

	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
	if (ca->flags & TCP_CONG_NEEDS_ECN)
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);
}

void tcp_init_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_sk(sk)->prior_ssthresh = 0;
	if (icsk->icsk_ca_ops->init)
		icsk->icsk_ca_ops->init(sk);
	if (tcp_ca_needs_ecn(sk))
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);
	icsk->icsk_ca_initialized = 1;
}

static void tcp_reinit_congestion_control(struct sock *sk,
					  const struct tcp_congestion_ops *ca)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_cleanup_congestion_control(sk);
	icsk->icsk_ca_ops = ca;
	icsk->icsk_ca_setsockopt = 1;
	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));

	if (ca->flags & TCP_CONG_NEEDS_ECN)
		INET_ECN_xmit(sk);
	else
		INET_ECN_dontxmit(sk);

	if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
		tcp_init_congestion_control(sk);
}

/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->release)
		icsk->icsk_ca_ops->release(sk);
	bpf_module_put(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner);
}

/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(struct net *net, const char *name)
{
	struct tcp_congestion_ops *ca;
	const struct tcp_congestion_ops *prev;
	int ret;

	rcu_read_lock();
	ca = tcp_ca_find_autoload(net, name);
	if (!ca) {
		ret = -ENOENT;
	} else if (!bpf_try_module_get(ca, ca->owner)) {
		ret = -EBUSY;
	} else if (!net_eq(net, &init_net) &&
			!(ca->flags & TCP_CONG_NON_RESTRICTED)) {
		/* Only init netns can set default to a restricted algorithm */
		ret = -EPERM;
	} else {
		prev = xchg(&net->ipv4.tcp_congestion_control, ca);
		if (prev)
			bpf_module_put(prev, prev->owner);

		ca->flags |= TCP_CONG_NON_RESTRICTED;
		ret = 0;
	}
	rcu_read_unlock();

	return ret;
}

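/* Usage note (illustrative, not in the original source): this is the
 * handler behind the net.ipv4.tcp_congestion_control sysctl, e.g.
 *
 *	sysctl -w net.ipv4.tcp_congestion_control=reno
 *
 * Picking an algorithm as the default also marks it non-restricted,
 * so unprivileged sockets may select it explicitly later.
 */
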
/* Set default value from kernel configuration at bootup */
static int __init tcp_congestion_default(void)
{
	return tcp_set_default_congestion_control(&init_net,
						  CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);

/* Build string with list of available congestion control values */
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);

		if (WARN_ON_ONCE(offs >= maxlen))
			break;
	}
	rcu_read_unlock();
}

/* Get current default congestion control */
void tcp_get_default_congestion_control(struct net *net, char *name)
{
	const struct tcp_congestion_ops *ca;

	rcu_read_lock();
	ca = rcu_dereference(net->ipv4.tcp_congestion_control);
	strncpy(name, ca->name, TCP_CA_NAME_MAX);
	rcu_read_unlock();
}

/* Build list of non-restricted congestion control values */
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	*buf = '\0';
	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
			continue;
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);

		if (WARN_ON_ONCE(offs >= maxlen))
			break;
	}
	rcu_read_unlock();
}

/* Change list of non-restricted congestion control */
int tcp_set_allowed_congestion_control(char *val)
{
	struct tcp_congestion_ops *ca;
	char *saved_clone, *clone, *name;
	int ret = 0;

	saved_clone = clone = kstrdup(val, GFP_USER);
	if (!clone)
		return -ENOMEM;

	spin_lock(&tcp_cong_list_lock);
	/* pass 1 check for bad entries */
	while ((name = strsep(&clone, " ")) && *name) {
		ca = tcp_ca_find(name);
		if (!ca) {
			ret = -ENOENT;
			goto out;
		}
	}

	/* pass 2 clear old values */
	list_for_each_entry_rcu(ca, &tcp_cong_list, list)
		ca->flags &= ~TCP_CONG_NON_RESTRICTED;

	/* pass 3 mark as allowed */
	while ((name = strsep(&val, " ")) && *name) {
		ca = tcp_ca_find(name);
		WARN_ON(!ca);
		if (ca)
			ca->flags |= TCP_CONG_NON_RESTRICTED;
	}
out:
	spin_unlock(&tcp_cong_list_lock);
	kfree(saved_clone);

	return ret;
}

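/* Usage note (illustrative, not in the original source): this backs the
 * net.ipv4.tcp_allowed_congestion_control sysctl.  The three passes
 * make the update all-or-nothing: a space-separated list such as
 *
 *	sysctl -w net.ipv4.tcp_allowed_congestion_control="reno cubic"
 *
 * is first validated against the registered algorithms, and only then
 * are the old NON_RESTRICTED flags cleared and the new ones set.
 */
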
/* Change congestion control for socket. If load is false, then it is the
 * responsibility of the caller to call tcp_init_congestion_control or
 * tcp_reinit_congestion_control (if the current congestion control was
 * already initialized).
 */
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
			       bool cap_net_admin)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_congestion_ops *ca;
	int err = 0;

	if (icsk->icsk_ca_dst_locked)
		return -EPERM;

	rcu_read_lock();
	if (!load)
		ca = tcp_ca_find(name);
	else
		ca = tcp_ca_find_autoload(sock_net(sk), name);

	/* No change if asking for the value already in use */
	if (ca == icsk->icsk_ca_ops) {
		icsk->icsk_ca_setsockopt = 1;
		goto out;
	}

	if (!ca)
		err = -ENOENT;
	else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || cap_net_admin))
		err = -EPERM;
	else if (!bpf_try_module_get(ca, ca->owner))
		err = -EBUSY;
	else
		tcp_reinit_congestion_control(sk, ca);
 out:
	rcu_read_unlock();
	return err;
}

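/* Usage note (illustrative, not in the original source): userspace
 * reaches this function through the TCP_CONGESTION socket option, e.g.
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "cubic", 5);
 *
 * Unprivileged callers may only pick algorithms flagged
 * TCP_CONG_NON_RESTRICTED; CAP_NET_ADMIN may pick any registered one.
 */
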
/* Slow start is used when congestion window is no greater than the slow start
 * threshold. We base on RFC2581 and also handle stretch ACKs properly.
 * We do not implement RFC3465 Appropriate Byte Counting (ABC) per se but
 * something better;) a packet is only considered (s)acked in its entirety to
 * defend against the ACK attacks described in the RFC. Slow start processes a
 * stretch ACK of degree N as if N acks of degree 1 are received back to back
 * except ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and
 * returns the leftover acks to adjust cwnd in congestion avoidance mode.
 */
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
	u32 cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh);

	acked -= cwnd - tp->snd_cwnd;
	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);

	return acked;
}
EXPORT_SYMBOL_GPL(tcp_slow_start);

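/* Worked example (illustrative, not in the original source): with
 * snd_cwnd = 8, snd_ssthresh = 10 and a stretch ACK covering
 * acked = 5 packets, cwnd is capped at min(8 + 5, 10) = 10 and
 * 5 - (10 - 8) = 3 leftover acks are returned so the caller can feed
 * them into congestion avoidance.
 */
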
/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w),
 * for every packet that was ACKed.
 */
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
{
	/* If credits accumulated at a higher w, apply them gently now. */
	if (tp->snd_cwnd_cnt >= w) {
		tp->snd_cwnd_cnt = 0;
		tp->snd_cwnd++;
	}

	tp->snd_cwnd_cnt += acked;
	if (tp->snd_cwnd_cnt >= w) {
		u32 delta = tp->snd_cwnd_cnt / w;

		tp->snd_cwnd_cnt -= delta * w;
		tp->snd_cwnd += delta;
	}
	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp);
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);

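/* Worked example (illustrative, not in the original source): with
 * w = snd_cwnd = 10, ten acked packets must accumulate in snd_cwnd_cnt
 * before delta = 10 / 10 = 1 is added to snd_cwnd, i.e. cwnd grows by
 * about one segment per window of data acked, the classic additive
 * increase of one MSS per RTT.
 */
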
/*
 * TCP Reno congestion control
 * This is a special case used for fallback as well.
 */
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	/* In "safe" area, increase. */
	if (tcp_in_slow_start(tp)) {
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	/* In dangerous area, increase slowly. */
	tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);

/* Slow start threshold is half the congestion window (min 2) */
u32 tcp_reno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tp->snd_cwnd >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);

u32 tcp_reno_undo_cwnd(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tp->snd_cwnd, tp->prior_cwnd);
}
EXPORT_SYMBOL_GPL(tcp_reno_undo_cwnd);

struct tcp_congestion_ops tcp_reno = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "reno",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.undo_cwnd	= tcp_reno_undo_cwnd,
};