linux/net/ipv4/tcp_dctcp.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* DataCenter TCP (DCTCP) congestion control.
 *
 * http://simula.stanford.edu/~alizade/Site/DCTCP.html
 *
 * This is an implementation of DCTCP over Reno, an enhancement to the
 * TCP congestion control algorithm designed for data centers. DCTCP
 * leverages Explicit Congestion Notification (ECN) in the network to
 * provide multi-bit feedback to the end hosts. DCTCP's goal is to meet
 * the following three data center transport requirements:
 *
 *  - High burst tolerance (incast due to partition/aggregate)
 *  - Low latency (short flows, queries)
 *  - High throughput (continuous data updates, large file transfers)
 *    with commodity shallow buffered switches
 *
 * The algorithm is described in detail in the following two papers:
 *
 * 1) Mohammad Alizadeh, Albert Greenberg, David A. Maltz, Jitendra Padhye,
 *    Parveen Patel, Balaji Prabhakar, Sudipta Sengupta, and Murari Sridharan:
 *      "Data Center TCP (DCTCP)", Data Center Networks session
 *      Proc. ACM SIGCOMM, New Delhi, 2010.
 *   http://simula.stanford.edu/~alizade/Site/DCTCP_files/dctcp-final.pdf
 *
 * 2) Mohammad Alizadeh, Adel Javanmard, and Balaji Prabhakar:
 *      "Analysis of DCTCP: Stability, Convergence, and Fairness"
 *      Proc. ACM SIGMETRICS, San Jose, 2011.
 *   http://simula.stanford.edu/~alizade/Site/DCTCP_files/dctcp_analysis-full.pdf
 *
 * Initial prototype from Abdul Kabbani, Masato Yasuda and Mohammad Alizadeh.
 *
 * Authors:
 *
 *      Daniel Borkmann <dborkman@redhat.com>
 *      Florian Westphal <fw@strlen.de>
 *      Glenn Judd <glenn.judd@morganstanley.com>
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <net/tcp.h>
#include <linux/inet_diag.h>
#include "tcp_dctcp.h"

#define DCTCP_MAX_ALPHA 1024U
struct dctcp {
        u32 old_delivered;      /* tp->delivered at start of current window */
        u32 old_delivered_ce;   /* tp->delivered_ce at start of current window */
        u32 prior_rcv_nxt;      /* rcv_nxt at the last CE state change
                                 * (delayed-ACK handling)
                                 */
        u32 dctcp_alpha;        /* EWMA of CE-marked fraction, scaled by 1024 */
        u32 next_seq;           /* snd_nxt marking the end of the window */
        u32 ce_state;           /* CE state of most recent incoming segment */
        u32 loss_cwnd;          /* cwnd right before the last loss, for undo */
};

static unsigned int dctcp_shift_g __read_mostly = 4; /* g = 1/2^4 */
module_param(dctcp_shift_g, uint, 0644);
MODULE_PARM_DESC(dctcp_shift_g, "parameter g for updating dctcp_alpha");

static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA;
module_param(dctcp_alpha_on_init, uint, 0644);
MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value");
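
/* Both knobs are runtime-tunable (mode 0644). For example, assuming the
 * algorithm is built as the tcp_dctcp module, g can be set to 1/2^10 at
 * load time with:
 *
 *   modprobe tcp_dctcp dctcp_shift_g=10
 *
 * or at runtime via sysfs:
 *
 *   echo 10 > /sys/module/tcp_dctcp/parameters/dctcp_shift_g
 *
 * Larger shifts make alpha adapt more slowly but more smoothly; values
 * above 10 are not meaningful given the shift by (10 - dctcp_shift_g)
 * in dctcp_update_alpha() below.
 */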

static struct tcp_congestion_ops dctcp_reno;

static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
{
        ca->next_seq = tp->snd_nxt;

        ca->old_delivered = tp->delivered;
        ca->old_delivered_ce = tp->delivered_ce;
}

static void dctcp_init(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);

        if ((tp->ecn_flags & TCP_ECN_OK) ||
            (sk->sk_state == TCP_LISTEN ||
             sk->sk_state == TCP_CLOSE)) {
                struct dctcp *ca = inet_csk_ca(sk);

                ca->prior_rcv_nxt = tp->rcv_nxt;

                ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);

                ca->loss_cwnd = 0;
                ca->ce_state = 0;

                dctcp_reset(tp, ca);
                return;
        }

        /* No ECN support? Fall back to Reno. Also need to clear
         * ECT from sk since it is set during 3WHS for DCTCP.
         */
        inet_csk(sk)->icsk_ca_ops = &dctcp_reno;
        INET_ECN_dontxmit(sk);
}

/* On entering recovery, reduce cwnd proportionally to alpha, i.e.
 * cwnd = cwnd * (1 - alpha/2). alpha is scaled by 1024, so the scaled
 * product must be divided by 2 * 1024 = 2048, hence the >> 11.
 */
static u32 dctcp_ssthresh(struct sock *sk)
{
        struct dctcp *ca = inet_csk_ca(sk);
        struct tcp_sock *tp = tcp_sk(sk);

        ca->loss_cwnd = tp->snd_cwnd;
        return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);
}
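
/* Worked example (hypothetical numbers, not from the paper): with
 * snd_cwnd = 100 and dctcp_alpha = 512 (i.e. half of the window's
 * packets were CE-marked, F = 0.5):
 *
 *   reduction = (100 * 512) >> 11 = 51200 / 2048 = 25
 *   new cwnd  = max(100 - 25, 2)  = 75
 *
 * which matches cwnd * (1 - alpha/2) = 100 * (1 - 0.25) = 75. With
 * alpha = 1024 (every packet marked) the window halves, as in Reno.
 */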

static void dctcp_update_alpha(struct sock *sk, u32 flags)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct dctcp *ca = inet_csk_ca(sk);

        /* One observation window (roughly one RTT) has passed. */
        if (!before(tp->snd_una, ca->next_seq)) {
                u32 delivered_ce = tp->delivered_ce - ca->old_delivered_ce;
                u32 alpha = ca->dctcp_alpha;

                /* alpha = (1 - g) * alpha + g * F */

                alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g);
                if (delivered_ce) {
                        u32 delivered = tp->delivered - ca->old_delivered;

                        /* If dctcp_shift_g == 1, a 32-bit value would overflow
                         * after 8 M packets.
                         */
                        delivered_ce <<= (10 - dctcp_shift_g);
                        delivered_ce /= max(1U, delivered);

                        alpha = min(alpha + delivered_ce, DCTCP_MAX_ALPHA);
                }
                /* dctcp_alpha can be read from dctcp_get_info() without
                 * synchronization, so we ask the compiler not to use
                 * dctcp_alpha as a temporary variable in prior operations.
                 */
                WRITE_ONCE(ca->dctcp_alpha, alpha);
                dctcp_reset(tp, ca);
        }
}
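
/* Worked example (hypothetical numbers): with the default
 * dctcp_shift_g = 4 (g = 1/16), alpha = 1024, and 10 packets
 * delivered in the window of which 1 was CE-marked (F = 0.1):
 *
 *   alpha -= 1024 >> 4                  -> alpha = 960
 *   delivered_ce = (1 << (10 - 4)) / 10 -> 6   (g * F in 1024-scale)
 *   alpha = min(960 + 6, 1024)          -> 966
 *
 * i.e. alpha ~ (1 - 1/16) * 1024 + (1/16) * 0.1 * 1024 = 966.4,
 * matching the EWMA alpha = (1 - g) * alpha + g * F in 1024-scale.
 */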

static void dctcp_react_to_loss(struct sock *sk)
{
        struct dctcp *ca = inet_csk_ca(sk);
        struct tcp_sock *tp = tcp_sk(sk);

        ca->loss_cwnd = tp->snd_cwnd;
        tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
}

static void dctcp_state(struct sock *sk, u8 new_state)
{
        if (new_state == TCP_CA_Recovery &&
            new_state != inet_csk(sk)->icsk_ca_state)
                dctcp_react_to_loss(sk);
        /* We handle RTO in dctcp_cwnd_event to ensure that we perform only
         * one loss-adjustment per RTT.
         */
}

static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
{
        struct dctcp *ca = inet_csk_ca(sk);

        switch (ev) {
        case CA_EVENT_ECN_IS_CE:
        case CA_EVENT_ECN_NO_CE:
                dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state);
                break;
        case CA_EVENT_LOSS:
                dctcp_react_to_loss(sk);
                break;
        default:
                /* Don't care for the rest. */
                break;
        }
}
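
/* Background on the CE events (see tcp_dctcp.h and the SIGCOMM paper):
 * a DCTCP receiver must convey the exact sequence of CE marks even when
 * delayed ACKs are in use. dctcp_ece_ack_update() therefore emits an
 * immediate ACK for the old data (prior_rcv_nxt) whenever the CE state
 * of incoming segments flips, so that every ACK covers a run of
 * segments sharing a single CE value.
 */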

static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
                             union tcp_cc_info *info)
{
        const struct dctcp *ca = inet_csk_ca(sk);
        const struct tcp_sock *tp = tcp_sk(sk);

        /* Fill it also in case of VEGASINFO due to req struct limits.
         * We can still correctly retrieve it later.
         */
        if (ext & (1 << (INET_DIAG_DCTCPINFO - 1)) ||
            ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
                memset(&info->dctcp, 0, sizeof(info->dctcp));
                if (inet_csk(sk)->icsk_ca_ops != &dctcp_reno) {
                        info->dctcp.dctcp_enabled = 1;
                        info->dctcp.dctcp_ce_state = (u16) ca->ce_state;
                        info->dctcp.dctcp_alpha = ca->dctcp_alpha;
                        info->dctcp.dctcp_ab_ecn = tp->mss_cache *
                                                   (tp->delivered_ce - ca->old_delivered_ce);
                        info->dctcp.dctcp_ab_tot = tp->mss_cache *
                                                   (tp->delivered - ca->old_delivered);
                }

                *attr = INET_DIAG_DCTCPINFO;
                return sizeof(info->dctcp);
        }
        return 0;
}

/* If a loss is later found to be spurious, restore the cwnd recorded
 * when the loss reaction took place.
 */
static u32 dctcp_cwnd_undo(struct sock *sk)
{
        const struct dctcp *ca = inet_csk_ca(sk);

        return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
}

static struct tcp_congestion_ops dctcp __read_mostly = {
        .init           = dctcp_init,
        .in_ack_event   = dctcp_update_alpha,
        .cwnd_event     = dctcp_cwnd_event,
        .ssthresh       = dctcp_ssthresh,
        .cong_avoid     = tcp_reno_cong_avoid,
        .undo_cwnd      = dctcp_cwnd_undo,
        .set_state      = dctcp_state,
        .get_info       = dctcp_get_info,
        .flags          = TCP_CONG_NEEDS_ECN,
        .owner          = THIS_MODULE,
        .name           = "dctcp",
};
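
/* Once registered, the algorithm can be selected system-wide via
 * sysctl (net.ipv4.tcp_congestion_control = dctcp) or per socket with
 * setsockopt(TCP_CONGESTION). A minimal userspace sketch (illustrative
 * only, error handling omitted):
 *
 *   #include <netinet/in.h>
 *   #include <netinet/tcp.h>
 *   #include <string.h>
 *   #include <sys/socket.h>
 *
 *   int fd = socket(AF_INET, SOCK_STREAM, 0);
 *   setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "dctcp",
 *              strlen("dctcp"));
 *
 * Since DCTCP relies on ECN marking at the switches, it is intended
 * for data center deployments rather than general Internet use.
 */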

static struct tcp_congestion_ops dctcp_reno __read_mostly = {
        .ssthresh       = tcp_reno_ssthresh,
        .cong_avoid     = tcp_reno_cong_avoid,
        .undo_cwnd      = tcp_reno_undo_cwnd,
        .get_info       = dctcp_get_info,
        .owner          = THIS_MODULE,
        .name           = "dctcp-reno",
};

static int __init dctcp_register(void)
{
        BUILD_BUG_ON(sizeof(struct dctcp) > ICSK_CA_PRIV_SIZE);
        return tcp_register_congestion_control(&dctcp);
}

static void __exit dctcp_unregister(void)
{
        tcp_unregister_congestion_control(&dctcp);
}

module_init(dctcp_register);
module_exit(dctcp_unregister);

MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
MODULE_AUTHOR("Glenn Judd <glenn.judd@morganstanley.com>");

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DataCenter TCP (DCTCP)");