linux/net/ipv4/tcp_recovery.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/tcp.h>
#include <net/tcp.h>

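/* Returns true if the packet stamped (t1, seq1) was sent after the packet
 * stamped (t2, seq2): a later transmit timestamp wins, with the sequence
 * number breaking ties between packets sent in the same timestamp tick.
 */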
static bool tcp_rack_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
        return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}

static u32 tcp_rack_reo_wnd(const struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (!tp->reord_seen) {
                /* If reordering has not been observed, be aggressive during
                 * recovery, or when starting recovery via the DUPACK
                 * threshold.
                 */
                if (inet_csk(sk)->icsk_ca_state >= TCP_CA_Recovery)
                        return 0;

                if (tp->sacked_out >= tp->reordering &&
                    !(sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_NO_DUPTHRESH))
                        return 0;
        }

        /* To be more reordering resilient, allow min_rtt/4 settling delay.
         * Use min_rtt instead of the smoothed RTT because reordering is
         * often a path property and less related to queuing or delayed ACKs.
         * Upon receiving DSACKs, linearly increase the window up to the
         * smoothed RTT.
         */
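        /* Worked example with hypothetical numbers: if min_rtt is 40 ms,
         * reo_wnd_steps is 2 (after one DSACK-driven bump) and srtt is
         * 100 ms, the reordering window is min(10 ms * 2, 100 ms) = 20 ms.
         */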
        return min((tcp_min_rtt(tp) >> 2) * tp->rack.reo_wnd_steps,
                   tp->srtt_us >> 3);
}

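/* Remaining time, in usecs, before @skb may be declared lost: the RACK RTT
 * plus the reordering window, minus the time elapsed since @skb was last
 * (re)transmitted. A result <= 0 means the skb's wait has expired.
 */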
s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd)
{
        return tp->rack.rtt_us + reo_wnd -
               tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb));
}

/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 *
 * Marks a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK,
 * but the three look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * The advantage of RACK is that it applies to both original and
 * retransmitted packets and is therefore robust against tail losses.
 * Another advantage is that it is more resilient to reordering, by simply
 * allowing some "settling delay" instead of tweaking the dupthresh.
 *
 * When tcp_rack_detect_loss() detects some packets are lost and we
 * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
 * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
 * make us enter the CA_Recovery state.
 */
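/* Illustrative (hypothetical) timeline: P1, P2 and P3 are sent at t = 0, 1
 * and 2 ms; P3 is SACKed at t = 10 ms, so rack.mstamp/rack.end_seq now point
 * at P3. P1 and P2 were sent before P3 yet remain un(s)acked, so each is
 * marked lost once "now - its (re)send time" exceeds rack.rtt_us + reo_wnd;
 * otherwise its remaining wait is reported back through *reo_timeout so the
 * reordering timer can fire when it expires.
 */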
static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb, *n;
        u32 reo_wnd;

        *reo_timeout = 0;
        reo_wnd = tcp_rack_reo_wnd(sk);
        list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
                                 tcp_tsorted_anchor) {
                struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
                s32 remaining;

                /* Skip ones marked lost but not yet retransmitted */
                if ((scb->sacked & TCPCB_LOST) &&
                    !(scb->sacked & TCPCB_SACKED_RETRANS))
                        continue;

                if (!tcp_rack_sent_after(tp->rack.mstamp,
                                         tcp_skb_timestamp_us(skb),
                                         tp->rack.end_seq, scb->end_seq))
                        break;

                /* A packet is lost if it has not been s/acked beyond
                 * the recent RTT plus the reordering window.
                 */
                remaining = tcp_rack_skb_timeout(tp, skb, reo_wnd);
                if (remaining <= 0) {
                        tcp_mark_skb_lost(sk, skb);
                        list_del_init(&skb->tcp_tsorted_anchor);
                } else {
                        /* Record maximum wait time */
                        *reo_timeout = max_t(u32, *reo_timeout, remaining);
                }
        }
}

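/* Run RACK loss detection if the RACK state has advanced since the last scan.
 * If some packets are not yet old enough to be marked lost, (re)arm the
 * reordering timer so detection runs again once the longest wait expires.
 */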
void tcp_rack_mark_lost(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        u32 timeout;

        if (!tp->rack.advanced)
                return;

        /* Reset the advanced flag to avoid unnecessary queue scanning */
        tp->rack.advanced = 0;
        tcp_rack_detect_loss(sk, &timeout);
        if (timeout) {
                timeout = usecs_to_jiffies(timeout) + TCP_TIMEOUT_MIN;
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
                                          timeout, inet_csk(sk)->icsk_rto);
        }
}

/* Record the most recently (re)sent time among the (s)acked packets.
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt.
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
                      u64 xmit_time)
{
        u32 rtt_us;

        rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
        if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS)) {
                /* If the sacked packet was retransmitted, it's ambiguous
                 * whether the retransmission or the original (or the prior
                 * retransmission) was sacked.
                 *
                 * If the original is lost, there is no ambiguity. Otherwise
                 * we assume the original can be delayed up to aRTT + min_rtt.
                 * The aRTT term is bounded by the fast recovery or timeout,
                 * so it's at least one RTT (i.e., the retransmission is sent
                 * at least an RTT after the original).
                 */
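                /* Hypothetical illustration: the original goes out at t = 0,
                 * is retransmitted at t = 100 ms, and a (s)ack for that
                 * sequence arrives at t = 105 ms while min_rtt is 40 ms.
                 * Measured from the retransmit time the sample is 5 ms,
                 * below min_rtt, so the (s)ack almost certainly covers the
                 * original transmission and the sample is discarded here.
                 */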
                return;
        }
        tp->rack.advanced = 1;
        tp->rack.rtt_us = rtt_us;
        if (tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
                                end_seq, tp->rack.end_seq)) {
                tp->rack.mstamp = xmit_time;
                tp->rack.end_seq = end_seq;
        }
}

/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        u32 timeout, prior_inflight;

        prior_inflight = tcp_packets_in_flight(tp);
        tcp_rack_detect_loss(sk, &timeout);
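        /* If the scan above marked new packets lost, packets_in_flight drops;
         * enter recovery (unless already in it) and retransmit what was lost.
         */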
        if (prior_inflight != tcp_packets_in_flight(tp)) {
                if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
                        tcp_enter_recovery(sk, false);
                        if (!inet_csk(sk)->icsk_ca_ops->cong_control)
                                tcp_cwnd_reduction(sk, 1, 0);
                }
                tcp_xmit_retransmit_queue(sk);
        }
        if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
                tcp_rearm_rto(sk);
}

/* Update RACK's reo_wnd based on DSACKs and the number of recoveries.
 *
 * If a DSACK is received, increment reo_wnd by min_rtt/4 (upper bounded
 * by srtt), since the spurious retransmission may have been caused by a
 * reordering delay longer than reo_wnd.
 *
 * Persist the current reo_wnd value for TCP_RACK_RECOVERY_THRESH (16)
 * successful recoveries (which accounts for a full DSACK-based loss
 * recovery undo). After that, reset it to the default (min_rtt/4).
 *
 * reo_wnd is incremented at most once per RTT, so that the DSACK we are
 * reacting to was (approximately) caused by a spurious retransmission
 * sent after reo_wnd was last updated.
 *
 * reo_wnd is tracked in steps of min_rtt/4 rather than as an absolute
 * value, to account for changes in the RTT.
 */
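/* Hypothetical walk-through: reo_wnd_steps starts at 1 (one min_rtt/4 step).
 * A DSACK arrives and the ACK processing path sets rack.dsack_seen; on the
 * next call here, provided roughly an RTT has passed since the previous bump
 * (tracked via rack.last_delivered), reo_wnd_steps grows to 2 (i.e. min_rtt/2)
 * and reo_wnd_persist is reset to 16. Once the persist count (decremented
 * elsewhere as recoveries complete) reaches zero with no further DSACKs,
 * reo_wnd_steps falls back to 1.
 */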
void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_STATIC_REO_WND ||
            !rs->prior_delivered)
                return;

        /* Disregard DSACK if an RTT has not passed since we adjusted reo_wnd */
        if (before(rs->prior_delivered, tp->rack.last_delivered))
                tp->rack.dsack_seen = 0;

        /* Adjust the reo_wnd if update is pending */
        if (tp->rack.dsack_seen) {
                tp->rack.reo_wnd_steps = min_t(u32, 0xFF,
                                               tp->rack.reo_wnd_steps + 1);
                tp->rack.dsack_seen = 0;
                tp->rack.last_delivered = tp->delivered;
                tp->rack.reo_wnd_persist = TCP_RACK_RECOVERY_THRESH;
        } else if (!tp->rack.reo_wnd_persist) {
                tp->rack.reo_wnd_steps = 1;
        }
}

/* RFC6582 NewReno recovery for non-SACK connections. It simply marks the
 * next unacked packet lost (to be retransmitted by the caller) upon receiving
 * a) three or more DUPACKs, to start fast recovery, or
 * b) an ACK acknowledging new data during fast recovery.
 */
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced)
{
        const u8 state = inet_csk(sk)->icsk_ca_state;
        struct tcp_sock *tp = tcp_sk(sk);

        if ((state < TCP_CA_Recovery && tp->sacked_out >= tp->reordering) ||
            (state == TCP_CA_Recovery && snd_una_advanced)) {
                struct sk_buff *skb = tcp_rtx_queue_head(sk);
                u32 mss;

                if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
                        return;

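                /* If the head skb carries more than one MSS, split off the
                 * first MSS so that only one MSS worth of data is marked
                 * lost and retransmitted per incoming (DUP)ACK.
                 */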
                mss = tcp_skb_mss(skb);
                if (tcp_skb_pcount(skb) > 1 && skb->len > mss)
                        tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
                                     mss, mss, GFP_ATOMIC);

                tcp_mark_skb_lost(sk, skb);
        }
}