linux/net/ipv4/tcp_cong.c
/*
 * Pluggable TCP congestion control support and newReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/jhash.h>
#include <net/tcp.h>

static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);

/* Simple linear search, don't expect many entries! */
static struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
        struct tcp_congestion_ops *e;

        list_for_each_entry_rcu(e, &tcp_cong_list, list) {
                if (strcmp(e->name, name) == 0)
                        return e;
        }

        return NULL;
}

/* Must be called with rcu lock held */
static const struct tcp_congestion_ops *__tcp_ca_find_autoload(const char *name)
{
        const struct tcp_congestion_ops *ca = tcp_ca_find(name);
#ifdef CONFIG_MODULES
        if (!ca && capable(CAP_NET_ADMIN)) {
                rcu_read_unlock();
                request_module("tcp_%s", name);
                rcu_read_lock();
                ca = tcp_ca_find(name);
        }
#endif
        return ca;
}

/* Simple linear search, not much in here. */
struct tcp_congestion_ops *tcp_ca_find_key(u32 key)
{
        struct tcp_congestion_ops *e;

        list_for_each_entry_rcu(e, &tcp_cong_list, list) {
                if (e->key == key)
                        return e;
        }

        return NULL;
}

/*
 * Attach new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
        int ret = 0;

        /* all algorithms must implement ssthresh and cong_avoid ops */
        if (!ca->ssthresh || !ca->cong_avoid) {
                pr_err("%s does not implement required ops\n", ca->name);
                return -EINVAL;
        }

        ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));

        spin_lock(&tcp_cong_list_lock);
        if (ca->key == TCP_CA_UNSPEC || tcp_ca_find_key(ca->key)) {
                pr_notice("%s already registered or non-unique key\n",
                          ca->name);
                ret = -EEXIST;
        } else {
                list_add_tail_rcu(&ca->list, &tcp_cong_list);
                pr_info("%s registered\n", ca->name);
        }
        spin_unlock(&tcp_cong_list_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);
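
/* Illustrative sketch only (not part of tcp_cong.c): a minimal out-of-tree
 * congestion-control module that plugs into the registration API above.
 * The algorithm name "example" and the module names are assumptions made up
 * for this sketch; real modules (e.g. CUBIC) fill in many more hooks.  Only
 * ssthresh and cong_avoid are mandatory, as checked in the function above,
 * and both Reno helpers are exported from this file.
 */
#if 0   /* usage sketch, not compiled as part of this file */
#include <linux/module.h>
#include <net/tcp.h>

static struct tcp_congestion_ops tcp_example __read_mostly = {
        .name           = "example",            /* hypothetical name */
        .owner          = THIS_MODULE,
        .ssthresh       = tcp_reno_ssthresh,    /* reuse Reno's halving */
        .cong_avoid     = tcp_reno_cong_avoid,  /* reuse Reno's AIMD */
};

static int __init tcp_example_register(void)
{
        /* Makes "example" selectable via sysctl and TCP_CONGESTION. */
        return tcp_register_congestion_control(&tcp_example);
}

static void __exit tcp_example_unregister(void)
{
        /* Safe only once no socket still holds a module reference. */
        tcp_unregister_congestion_control(&tcp_example);
}

module_init(tcp_example_register);
module_exit(tcp_example_unregister);
MODULE_LICENSE("GPL");
#endif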

/*
 * Remove congestion control algorithm, called from
 * the module's remove function.  Module ref counts are used
 * to ensure that this can't be done until all sockets using
 * that method are closed.
 */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
        spin_lock(&tcp_cong_list_lock);
        list_del_rcu(&ca->list);
        spin_unlock(&tcp_cong_list_lock);

        /* Wait for outstanding readers to complete before the
         * module gets removed entirely.
         *
         * A try_module_get() should fail by now as our module is
         * in "going" state, since no refs are held anymore and the
         * module_exit() handler is being called.
         */
        synchronize_rcu();
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);

u32 tcp_ca_get_key_by_name(const char *name)
{
        const struct tcp_congestion_ops *ca;
        u32 key;

        might_sleep();

        rcu_read_lock();
        ca = __tcp_ca_find_autoload(name);
        key = ca ? ca->key : TCP_CA_UNSPEC;
        rcu_read_unlock();

        return key;
}
EXPORT_SYMBOL_GPL(tcp_ca_get_key_by_name);

char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
        const struct tcp_congestion_ops *ca;
        char *ret = NULL;

        rcu_read_lock();
        ca = tcp_ca_find_key(key);
        if (ca)
                ret = strncpy(buffer, ca->name,
                              TCP_CA_NAME_MAX);
        rcu_read_unlock();

        return ret;
}
EXPORT_SYMBOL_GPL(tcp_ca_get_name_by_key);

/* Assign choice of congestion control. */
void tcp_assign_congestion_control(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_congestion_ops *ca;

        rcu_read_lock();
        list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
                if (likely(try_module_get(ca->owner))) {
                        icsk->icsk_ca_ops = ca;
                        goto out;
                }
                /* Fall back to the next available one.  The last
                 * guaranteed fallback in this list is Reno.
                 */
        }
out:
        rcu_read_unlock();

        /* Clear out private data before diag gets it, since
         * the ca has not been initialized yet.
         */
        if (ca->get_info)
                memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
}

void tcp_init_congestion_control(struct sock *sk)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_ca_ops->init)
                icsk->icsk_ca_ops->init(sk);
}

static void tcp_reinit_congestion_control(struct sock *sk,
                                          const struct tcp_congestion_ops *ca)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        tcp_cleanup_congestion_control(sk);
        icsk->icsk_ca_ops = ca;

        if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
                icsk->icsk_ca_ops->init(sk);
}

/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_ca_ops->release)
                icsk->icsk_ca_ops->release(sk);
        module_put(icsk->icsk_ca_ops->owner);
}

/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(const char *name)
{
        struct tcp_congestion_ops *ca;
        int ret = -ENOENT;

        spin_lock(&tcp_cong_list_lock);
        ca = tcp_ca_find(name);
#ifdef CONFIG_MODULES
        if (!ca && capable(CAP_NET_ADMIN)) {
                spin_unlock(&tcp_cong_list_lock);

                request_module("tcp_%s", name);
                spin_lock(&tcp_cong_list_lock);
                ca = tcp_ca_find(name);
        }
#endif

        if (ca) {
                ca->flags |= TCP_CONG_NON_RESTRICTED;   /* default is always allowed */
                list_move(&ca->list, &tcp_cong_list);
                ret = 0;
        }
        spin_unlock(&tcp_cong_list_lock);

        return ret;
}
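
/* Illustrative note (assumption about the call path, not code in this file):
 * this function is reached from the net.ipv4.tcp_congestion_control sysctl
 * handler, so the boot-time default below can later be changed from user
 * space with, for example:
 *
 *      sysctl -w net.ipv4.tcp_congestion_control=reno
 */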

/* Set default value from kernel configuration at bootup */
static int __init tcp_congestion_default(void)
{
        return tcp_set_default_congestion_control(CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);

/* Build string with list of available congestion control values */
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
        struct tcp_congestion_ops *ca;
        size_t offs = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
                offs += snprintf(buf + offs, maxlen - offs,
                                 "%s%s",
                                 offs == 0 ? "" : " ", ca->name);
        }
        rcu_read_unlock();
}

/* Get current default congestion control */
void tcp_get_default_congestion_control(char *name)
{
        struct tcp_congestion_ops *ca;
        /* We will always have reno... */
        BUG_ON(list_empty(&tcp_cong_list));

        rcu_read_lock();
        ca = list_entry(tcp_cong_list.next, struct tcp_congestion_ops, list);
        strncpy(name, ca->name, TCP_CA_NAME_MAX);
        rcu_read_unlock();
}
/* Build list of non-restricted congestion control values */
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
        struct tcp_congestion_ops *ca;
        size_t offs = 0;

        *buf = '\0';
        rcu_read_lock();
        list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
                if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
                        continue;
                offs += snprintf(buf + offs, maxlen - offs,
                                 "%s%s",
                                 offs == 0 ? "" : " ", ca->name);
        }
        rcu_read_unlock();
}

/* Change list of non-restricted congestion control */
int tcp_set_allowed_congestion_control(char *val)
{
        struct tcp_congestion_ops *ca;
        char *saved_clone, *clone, *name;
        int ret = 0;

        saved_clone = clone = kstrdup(val, GFP_USER);
        if (!clone)
                return -ENOMEM;

        spin_lock(&tcp_cong_list_lock);
        /* pass 1 check for bad entries */
        while ((name = strsep(&clone, " ")) && *name) {
                ca = tcp_ca_find(name);
                if (!ca) {
                        ret = -ENOENT;
                        goto out;
                }
        }

        /* pass 2 clear old values */
        list_for_each_entry_rcu(ca, &tcp_cong_list, list)
                ca->flags &= ~TCP_CONG_NON_RESTRICTED;

        /* pass 3 mark as allowed */
        while ((name = strsep(&val, " ")) && *name) {
                ca = tcp_ca_find(name);
                WARN_ON(!ca);
                if (ca)
                        ca->flags |= TCP_CONG_NON_RESTRICTED;
        }
out:
        spin_unlock(&tcp_cong_list_lock);
        kfree(saved_clone);

        return ret;
}
/* Change congestion control for socket */
int tcp_set_congestion_control(struct sock *sk, const char *name)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        const struct tcp_congestion_ops *ca;
        int err = 0;

        if (icsk->icsk_ca_dst_locked)
                return -EPERM;

        rcu_read_lock();
        ca = __tcp_ca_find_autoload(name);
        /* No change when asking for the value already in use */
        if (ca == icsk->icsk_ca_ops)
                goto out;
        if (!ca)
                err = -ENOENT;
        else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
                   ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)))
                err = -EPERM;
        else if (!try_module_get(ca->owner))
                err = -EBUSY;
        else
                tcp_reinit_congestion_control(sk, ca);
 out:
        rcu_read_unlock();
        return err;
}
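
/* For illustration (an assumption about the setsockopt() path, not code in
 * this file): user space reaches tcp_set_congestion_control() through the
 * TCP_CONGESTION socket option, roughly as sketched below.  The choice of
 * "cubic" is hypothetical.
 */
#if 0   /* user-space usage sketch, not kernel code */
#include <string.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int use_cubic(int fd)
{
        const char name[] = "cubic";

        /* Fails with ENOENT if the algorithm is neither built in nor
         * loadable, and EPERM if it is restricted and the caller lacks
         * CAP_NET_ADMIN, matching the error codes set above.
         */
        return setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, strlen(name));
}
#endif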

/* Slow start is used when congestion window is no greater than the slow start
 * threshold.  We follow RFC 2581 and also handle stretch ACKs properly.
 * We do not implement RFC3465 Appropriate Byte Counting (ABC) per se but
 * something better;) a packet is only considered (s)acked in its entirety to
 * defend against the ACK attacks described in the RFC.  Slow start processes
 * a stretch ACK of degree N as if N acks of degree 1 are received back to
 * back except ABC caps N to 2.  Slow start exits when cwnd grows over
 * ssthresh and returns the leftover acks to adjust cwnd in congestion
 * avoidance mode.
 */
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
        u32 cwnd = tp->snd_cwnd + acked;

        if (cwnd > tp->snd_ssthresh)
                cwnd = tp->snd_ssthresh + 1;
        acked -= cwnd - tp->snd_cwnd;
        tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);

        return acked;
}
EXPORT_SYMBOL_GPL(tcp_slow_start);
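
/* Worked example (illustrative numbers, not from the source): with
 * snd_cwnd = 8, snd_ssthresh = 10 and acked = 5, cwnd = 8 + 5 = 13 is
 * clamped to ssthresh + 1 = 11, snd_cwnd becomes 11 (subject to
 * snd_cwnd_clamp), and 5 - (11 - 8) = 2 leftover acks are returned for
 * the caller to spend in congestion avoidance.
 */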

/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w),
 * for every packet that was ACKed.
 */
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
{
        /* If credits accumulated at a higher w, apply them gently now. */
        if (tp->snd_cwnd_cnt >= w) {
                tp->snd_cwnd_cnt = 0;
                tp->snd_cwnd++;
        }

        tp->snd_cwnd_cnt += acked;
        if (tp->snd_cwnd_cnt >= w) {
                u32 delta = tp->snd_cwnd_cnt / w;

                tp->snd_cwnd_cnt -= delta * w;
                tp->snd_cwnd += delta;
        }
        tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp);
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
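
/* Worked example (illustrative numbers): with w = 10, snd_cwnd_cnt = 7 and
 * acked = 2, the counter only grows to 9 and snd_cwnd is unchanged.  If the
 * next call delivers acked = 3, the counter reaches 12 >= w, so
 * delta = 12 / 10 = 1: snd_cwnd grows by 1 (capped at snd_cwnd_clamp) and
 * 2 credits are carried over in snd_cwnd_cnt.
 */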

/*
 * TCP Reno congestion control
 * This is a special case used for fallback as well.
 */
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (!tcp_is_cwnd_limited(sk))
                return;

        /* In "safe" area, increase. */
        if (tp->snd_cwnd <= tp->snd_ssthresh) {
                acked = tcp_slow_start(tp, acked);
                if (!acked)
                        return;
        }
        /* In dangerous area, increase slowly. */
        tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
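
/* Worked example (illustrative numbers): with snd_cwnd = 9, snd_ssthresh = 10
 * and a stretch ACK covering acked = 4 packets, tcp_slow_start() raises
 * snd_cwnd to ssthresh + 1 = 11 and returns 2 leftover acks; those are then
 * fed to tcp_cong_avoid_ai(tp, 11, 2), so the transition out of slow start
 * does not lose any credit.
 */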

/* Slow start threshold is half the congestion window (min 2) */
u32 tcp_reno_ssthresh(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);

        return max(tp->snd_cwnd >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);

struct tcp_congestion_ops tcp_reno = {
        .flags          = TCP_CONG_NON_RESTRICTED,
        .name           = "reno",
        .owner          = THIS_MODULE,
        .ssthresh       = tcp_reno_ssthresh,
        .cong_avoid     = tcp_reno_cong_avoid,
};