/* linux/net/ipv4/tcp_recovery.c */
#include <linux/tcp.h>
#include <net/tcp.h>

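/* The net.ipv4.tcp_recovery sysctl: TCP_RACK_LOSS_DETECTION (bit 0)
 * enables RACK loss detection and is on by default.
 */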
int sysctl_tcp_recovery __read_mostly = TCP_RACK_LOSS_DETECTION;

static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        tcp_skb_mark_lost_uncond_verify(tp, skb);
        if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
                /* Account for retransmits that are lost again */
                TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
                tp->retrans_out -= tcp_skb_pcount(skb);
                NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
                              tcp_skb_pcount(skb));
        }
}

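/* Returns true if the skb stamped t1/seq1 was sent after the one stamped
 * t2/seq2. skb timestamps have microsecond granularity, so two packets
 * sent within the same microsecond tie on time; the sequence numbers
 * then break the tie.
 */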
static bool tcp_rack_sent_after(const struct skb_mstamp *t1,
                                const struct skb_mstamp *t2,
                                u32 seq1, u32 seq2)
{
        return skb_mstamp_after(t1, t2) ||
               (t1->v64 == t2->v64 && after(seq1, seq2));
}

/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 *
 * Marks a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK,
 * but each looks at a different metric:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * The advantage of RACK is that it applies to both original and
 * retransmitted packets and is therefore robust against tail losses.
 * Another advantage is being more resilient to reordering by simply
 * allowing some "settling delay", instead of tweaking the dupthresh.
 *
 * When tcp_rack_detect_loss() detects some packets are lost and we
 * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
 * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
 * make us enter the CA_Recovery state.
 */
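/* Worked example (hypothetical numbers): P1 and P2 are sent 10us apart
 * and only P2 is (s)acked, so tp->rack holds P2's stamp and
 * RACK.rtt_us = 50000. If min_rtt is also 50000 (and the min_rtt/4
 * window below applies), reo_wnd = 12500, so P1 is marked lost once
 * more than 50000 + 12500 = 62500us have elapsed since P1 was sent;
 * until then the remaining wait is reported back via *reo_timeout.
 */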
static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
        u32 reo_wnd;

        *reo_timeout = 0;
        /* To be more reordering resilient, allow min_rtt/4 settling delay
         * (lower-bounded to 1000us). We use min_rtt instead of the smoothed
         * RTT because reordering is often a path property and less related
         * to queuing or delayed ACKs.
         */
        reo_wnd = 1000;
        if ((tp->rack.reord || !tp->lost_out) && tcp_min_rtt(tp) != ~0U)
                reo_wnd = max(tcp_min_rtt(tp) >> 2, reo_wnd);
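        /* E.g. (hypothetical numbers): min_rtt = 40000us yields
         * reo_wnd = 10000us; min_rtt = 2000us keeps the 1000us floor.
         */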

        tcp_for_write_queue(skb, sk) {
                struct tcp_skb_cb *scb = TCP_SKB_CB(skb);

                if (skb == tcp_send_head(sk))
                        break;

                /* Skip ones already (s)acked */
                if (!after(scb->end_seq, tp->snd_una) ||
                    scb->sacked & TCPCB_SACKED_ACKED)
                        continue;

                if (tcp_rack_sent_after(&tp->rack.mstamp, &skb->skb_mstamp,
                                        tp->rack.end_seq, scb->end_seq)) {
                        /* Step 3 in draft-cheng-tcpm-rack-00.txt:
                         * A packet is lost if its elapsed time is beyond
                         * the recent RTT plus the reordering window.
                         */
                        u32 elapsed = skb_mstamp_us_delta(&tp->tcp_mstamp,
                                                          &skb->skb_mstamp);
                        s32 remaining = tp->rack.rtt_us + reo_wnd - elapsed;

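                        /* E.g. (hypothetical numbers): rtt_us = 50000 and
                         * reo_wnd = 12500 with elapsed = 40000 leaves
                         * remaining = 22500us: not lost yet, but eligible
                         * to arm the reordering timer below.
                         */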
                        if (remaining < 0) {
                                tcp_rack_mark_skb_lost(sk, skb);
                                continue;
                        }

                        /* Skip ones marked lost but not yet retransmitted */
                        if ((scb->sacked & TCPCB_LOST) &&
                            !(scb->sacked & TCPCB_SACKED_RETRANS))
                                continue;

                        /* Record maximum wait time (+1 to avoid 0) */
                        *reo_timeout = max_t(u32, *reo_timeout, 1 + remaining);

                } else if (!(scb->sacked & TCPCB_RETRANS)) {
                        /* Original data are sent sequentially, so stop early
                         * because the rest were all sent after rack_sent
                         */
                        break;
                }
        }
}

void tcp_rack_mark_lost(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        u32 timeout;

        if (!tp->rack.advanced)
                return;

        /* Reset the advanced flag to avoid unnecessary queue scanning */
        tp->rack.advanced = 0;
        tcp_rack_detect_loss(sk, &timeout);
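        /* Some packets need more time to settle their loss status: arm the
         * reordering timer, padded by TCP_REO_TIMEOUT_MIN and capped at the
         * RTO by inet_csk_reset_xmit_timer().
         */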
        if (timeout) {
                timeout = usecs_to_jiffies(timeout + TCP_REO_TIMEOUT_MIN);
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
                                          timeout, inet_csk(sk)->icsk_rto);
        }
}

/* Record the most recently (re)sent time among the (s)acked packets.
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt.
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
                      const struct skb_mstamp *xmit_time)
{
        u32 rtt_us;

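        /* Ignore (s)acked skbs that do not advance the most recent
         * (re)sent time and sequence already recorded in tp->rack.
         */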
        if (tp->rack.mstamp.v64 &&
            !tcp_rack_sent_after(xmit_time, &tp->rack.mstamp,
                                 end_seq, tp->rack.end_seq))
                return;

        rtt_us = skb_mstamp_us_delta(&tp->tcp_mstamp, xmit_time);
        if (sacked & TCPCB_RETRANS) {
                /* If the sacked packet was retransmitted, it's ambiguous
                 * whether the retransmission or the original (or the prior
                 * retransmission) was sacked.
                 *
                 * If the original is lost, there is no ambiguity. Otherwise
                 * we assume the original can be delayed up to aRTT + min_rtt.
                 * The aRTT term is bounded by the fast recovery or timeout,
                 * so it's at least one RTT (i.e., the retransmission is at
                 * least an RTT later).
                 */
                if (rtt_us < tcp_min_rtt(tp))
                        return;
        }
        tp->rack.rtt_us = rtt_us;
        tp->rack.mstamp = *xmit_time;
        tp->rack.end_seq = end_seq;
        tp->rack.advanced = 1;
}

/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        u32 timeout, prior_inflight;

        prior_inflight = tcp_packets_in_flight(tp);
        skb_mstamp_get(&tp->tcp_mstamp);
        tcp_rack_detect_loss(sk, &timeout);
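        /* Packets newly marked lost above drop out of
         * tcp_packets_in_flight(), so a change here means there is
         * something to retransmit.
         */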
        if (prior_inflight != tcp_packets_in_flight(tp)) {
                if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
                        tcp_enter_recovery(sk, false);
                        if (!inet_csk(sk)->icsk_ca_ops->cong_control)
                                tcp_cwnd_reduction(sk, 1, 0);
                }
                tcp_xmit_retransmit_queue(sk);
        }
        if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
                tcp_rearm_rto(sk);
}