linux/net/ipv4/tcp_timer.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 *              Florian La Roche, <flla@stud.uni-sb.de>
 *              Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *              Linus Torvalds, <torvalds@cs.helsinki.fi>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Matthew Dillon, <dillon@apollo.west.oic.com>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>

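/* Clamp the RTO for the next retransmission so that it never fires later
 * than the TCP_USER_TIMEOUT deadline, measured from retrans_stamp.
 * Returns a value in jiffies (1 if the user timeout has already expired).
 */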
static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        u32 elapsed, start_ts;
        s32 remaining;

        start_ts = tcp_sk(sk)->retrans_stamp;
        if (!icsk->icsk_user_timeout)
                return icsk->icsk_rto;
        elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts;
        remaining = icsk->icsk_user_timeout - elapsed;
        if (remaining <= 0)
                return 1; /* user timeout has passed; fire ASAP */

        return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
}

/**
 *  tcp_write_err() - close socket and save error info
 *  @sk:  The socket the error has appeared on.
 *
 *  Returns: Nothing (void)
 */

static void tcp_write_err(struct sock *sk)
{
        sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
        sk->sk_error_report(sk);

        tcp_write_queue_purge(sk);
        tcp_done(sk);
        __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/**
 *  tcp_out_of_resources() - Close socket if out of resources
 *  @sk:        pointer to current socket
 *  @do_reset:  send a last packet with reset flag
 *
 *  Do not allow orphaned sockets to eat all our resources.
 *  This is a direct violation of the TCP specs, but it is required
 *  to prevent DoS attacks. It is called when a retransmission timeout
 *  or zero probe timeout occurs on an orphaned socket.
 *
 *  Also close if our net namespace is exiting; in that case there is no
 *  hope of ever communicating again since all netns interfaces are already
 *  down (or about to be down), and we need to release our dst references,
 *  which have been moved to the netns loopback interface, so the namespace
 *  can finish exiting.  This condition is only possible if we are a kernel
 *  socket, as those do not hold references to the namespace.
 *
 *  The criteria are still not confirmed experimentally and may change.
 *  We kill the socket if:
 *  1. the number of orphaned sockets exceeds an administratively configured
 *     limit,
 *  2. we are under strong memory pressure, or
 *  3. our net namespace is exiting.
 */
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int shift = 0;

        /* If peer does not open window for a long time, or did not transmit
         * anything for a long time, penalize it. */
        if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
                shift++;

        /* If some dubious ICMP arrived, penalize even more. */
        if (sk->sk_err_soft)
                shift++;

        if (tcp_check_oom(sk, shift)) {
                /* Catch exceptional cases, when connection requires reset.
                 *      1. Last segment was sent recently. */
                if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
                    /*  2. Window is closed. */
                    (!tp->snd_wnd && !tp->packets_out))
                        do_reset = true;
                if (do_reset)
                        tcp_send_active_reset(sk, GFP_ATOMIC);
                tcp_done(sk);
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
                return 1;
        }

        if (!check_net(sock_net(sk))) {
                /* Not possible to send reset; just close */
                tcp_done(sk);
                return 1;
        }

        return 0;
}

/**
 *  tcp_orphan_retries() - Returns maximal number of retries on an orphaned socket
 *  @sk:    Pointer to the current socket.
 *  @alive: bool, socket alive state
 */
static int tcp_orphan_retries(struct sock *sk, bool alive)
{
        int retries = sock_net(sk)->ipv4.sysctl_tcp_orphan_retries; /* May be zero. */

        /* We know from an ICMP that something is wrong. */
        if (sk->sk_err_soft && !alive)
                retries = 0;

        /* However, if socket sent something recently, select some safe
         * number of retries. 8 corresponds to >100 seconds with minimal
         * RTO of 200msec. */
        if (retries == 0 && alive)
                retries = 8;
        return retries;
}

static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
        const struct net *net = sock_net(sk);
        int mss;

        /* Black hole detection */
        if (!net->ipv4.sysctl_tcp_mtu_probing)
                return;

        if (!icsk->icsk_mtup.enabled) {
                icsk->icsk_mtup.enabled = 1;
                icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
        } else {
                mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
                mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
                mss = max(mss, 68 - tcp_sk(sk)->tcp_header_len);
                mss = max(mss, net->ipv4.sysctl_tcp_min_snd_mss);
                icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
        }
        tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}

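/* Model the total time consumed by @boundary exponentially backed-off
 * retransmissions starting from @rto_base, with each individual RTO capped
 * at TCP_RTO_MAX.  The result is returned in milliseconds.
 */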
static unsigned int tcp_model_timeout(struct sock *sk,
                                      unsigned int boundary,
                                      unsigned int rto_base)
{
        unsigned int linear_backoff_thresh, timeout;

        linear_backoff_thresh = ilog2(TCP_RTO_MAX / rto_base);
        if (boundary <= linear_backoff_thresh)
                timeout = ((2 << boundary) - 1) * rto_base;
        else
                timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
                        (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
        return jiffies_to_msecs(timeout);
}
/**
 *  retransmits_timed_out() - returns true if this connection has timed out
 *  @sk:       The current socket
 *  @boundary: max number of retransmissions
 *  @timeout:  A custom timeout value.
 *             If set to 0, the default timeout is calculated from
 *             TCP_RTO_MIN and the number of unsuccessful retransmits.
 *
 * The default "timeout" value this function can calculate and use
 * is equivalent to the timeout of a TCP connection
 * after "boundary" unsuccessful, exponentially backed-off
 * retransmissions with an initial RTO of TCP_RTO_MIN.
 */
static bool retransmits_timed_out(struct sock *sk,
                                  unsigned int boundary,
                                  unsigned int timeout)
{
        unsigned int start_ts;

        if (!inet_csk(sk)->icsk_retransmits)
                return false;

        start_ts = tcp_sk(sk)->retrans_stamp;
        if (likely(timeout == 0))
                timeout = tcp_model_timeout(sk, boundary, TCP_RTO_MIN);

        return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0;
}

/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct net *net = sock_net(sk);
        bool expired, do_reset;
        int retry_until;

        if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
                if (icsk->icsk_retransmits) {
                        dst_negative_advice(sk);
                } else {
                        sk_rethink_txhash(sk);
                }
                retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
                expired = icsk->icsk_retransmits >= retry_until;
        } else {
                if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0)) {
                        /* Black hole detection */
                        tcp_mtu_probing(icsk, sk);

                        dst_negative_advice(sk);
                } else {
                        sk_rethink_txhash(sk);
                }

                retry_until = net->ipv4.sysctl_tcp_retries2;
                if (sock_flag(sk, SOCK_DEAD)) {
                        const bool alive = icsk->icsk_rto < TCP_RTO_MAX;

                        retry_until = tcp_orphan_retries(sk, alive);
                        do_reset = alive ||
                                !retransmits_timed_out(sk, retry_until, 0);

                        if (tcp_out_of_resources(sk, do_reset))
                                return 1;
                }
                expired = retransmits_timed_out(sk, retry_until,
                                                icsk->icsk_user_timeout);
        }
        tcp_fastopen_active_detect_blackhole(sk, expired);

        if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
                tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
                                  icsk->icsk_retransmits,
                                  icsk->icsk_rto, (int)expired);

        if (expired) {
                /* Has it gone just too far? */
                tcp_write_err(sk);
                return 1;
        }

        return 0;
}

/* Called with BH disabled */
void tcp_delack_timer_handler(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        sk_mem_reclaim_partial(sk);

        if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
            !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
                goto out;

        if (time_after(icsk->icsk_ack.timeout, jiffies)) {
                sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
                goto out;
        }
        icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

        if (inet_csk_ack_scheduled(sk)) {
                if (!inet_csk_in_pingpong_mode(sk)) {
                        /* Delayed ACK missed: inflate ATO. */
                        icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
                } else {
                        /* Delayed ACK missed: leave pingpong mode and
                         * deflate ATO.
                         */
                        inet_csk_exit_pingpong_mode(sk);
                        icsk->icsk_ack.ato      = TCP_ATO_MIN;
                }
                tcp_mstamp_refresh(tcp_sk(sk));
                tcp_send_ack(sk);
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
        }

out:
        if (tcp_under_memory_pressure(sk))
                sk_mem_reclaim(sk);
}


/**
 *  tcp_delack_timer() - The TCP delayed ACK timeout handler
 *  @t:  Pointer to the timer. (The socket is recovered via from_timer().)
 *
 *  This function gets (indirectly) called when the kernel timer for a TCP packet
 *  of this socket expires. Calls tcp_delack_timer_handler() to do the actual work.
 *
 *  Returns: Nothing (void)
 */
static void tcp_delack_timer(struct timer_list *t)
{
        struct inet_connection_sock *icsk =
                        from_timer(icsk, t, icsk_delack_timer);
        struct sock *sk = &icsk->icsk_inet.sk;

        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk)) {
                tcp_delack_timer_handler(sk);
        } else {
                icsk->icsk_ack.blocked = 1;
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
                /* delegate our work to tcp_release_cb() */
                if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
                        sock_hold(sk);
        }
        bh_unlock_sock(sk);
        sock_put(sk);
}

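/* Zero-window probe (persist) timer: fires when the peer has closed its
 * receive window and we still have unsent data queued.  Either sends
 * another window probe or, once the probe/time limits are exceeded,
 * aborts the connection.
 */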
static void tcp_probe_timer(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct sk_buff *skb = tcp_send_head(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        int max_probes;

        if (tp->packets_out || !skb) {
                icsk->icsk_probes_out = 0;
                return;
        }

        /* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
         * long as the receiver continues to respond to probes. We support this
         * by default and reset icsk_probes_out with incoming ACKs. But if the
         * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
         * kill the socket when the retry count and the elapsed time exceed the
         * corresponding system limit. We also implement a similar policy when
         * we use RTO to probe the window in tcp_retransmit_timer().
         */
        if (icsk->icsk_user_timeout) {
                u32 elapsed = tcp_model_timeout(sk, icsk->icsk_probes_out,
                                                tcp_probe0_base(sk));

                if (elapsed >= icsk->icsk_user_timeout)
                        goto abort;
        }

        max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
        if (sock_flag(sk, SOCK_DEAD)) {
                const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;

                max_probes = tcp_orphan_retries(sk, alive);
                if (!alive && icsk->icsk_backoff >= max_probes)
                        goto abort;
                if (tcp_out_of_resources(sk, true))
                        return;
        }

        if (icsk->icsk_probes_out >= max_probes) {
abort:          tcp_write_err(sk);
        } else {
                /* Only send another probe if we didn't close things up. */
                tcp_send_probe0(sk);
        }
}

/*
 *      Timer for Fast Open socket to retransmit SYNACK. Note that the
 *      sk here is the child socket, not the parent (listener) socket.
 */
static void tcp_fastopen_synack_timer(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        int max_retries = icsk->icsk_syn_retries ? :
            sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
        struct tcp_sock *tp = tcp_sk(sk);
        struct request_sock *req;

        req = tcp_sk(sk)->fastopen_rsk;
        req->rsk_ops->syn_ack_timeout(req);

        if (req->num_timeout >= max_retries) {
                tcp_write_err(sk);
                return;
        }
        /* Lower cwnd after certain SYNACK timeout like tcp_init_transfer() */
        if (icsk->icsk_retransmits == 1)
                tcp_enter_loss(sk);
        /* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
         * returned from rtx_syn_ack() to make it more persistent like
         * regular retransmit because if the child socket has been accepted
         * it's not good to give up too easily.
         */
        inet_rtx_syn_ack(sk, req);
        req->num_timeout++;
        icsk->icsk_retransmits++;
        if (!tp->retrans_stamp)
                tp->retrans_stamp = tcp_time_stamp(tp);
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                          TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
}


/**
 *  tcp_retransmit_timer() - The TCP retransmit timeout handler
 *  @sk:  Pointer to the current socket.
 *
 *  This function gets called when the kernel timer for a TCP packet
 *  of this socket expires.
 *
 *  It handles retransmission, timer adjustment and other necessary measures.
 *
 *  Returns: Nothing (void)
 */
void tcp_retransmit_timer(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct net *net = sock_net(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);

        if (tp->fastopen_rsk) {
                WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
                             sk->sk_state != TCP_FIN_WAIT1);
                tcp_fastopen_synack_timer(sk);
                /* Before we receive ACK to our SYN-ACK don't retransmit
                 * anything else (e.g., data or FIN segments).
                 */
                return;
        }
        if (!tp->packets_out || WARN_ON_ONCE(tcp_rtx_queue_empty(sk)))
                return;

        tp->tlp_high_seq = 0;

        if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
            !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
                /* Receiver dastardly shrinks window. Our retransmits
                 * become zero probes, but we should not timeout this
                 * connection. If the socket is an orphan, time it out,
                 * we cannot allow such beasts to hang infinitely.
                 */
                struct inet_sock *inet = inet_sk(sk);
                if (sk->sk_family == AF_INET) {
                        net_dbg_ratelimited("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
                                            &inet->inet_daddr,
                                            ntohs(inet->inet_dport),
                                            inet->inet_num,
                                            tp->snd_una, tp->snd_nxt);
                }
#if IS_ENABLED(CONFIG_IPV6)
                else if (sk->sk_family == AF_INET6) {
                        net_dbg_ratelimited("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
                                            &sk->sk_v6_daddr,
                                            ntohs(inet->inet_dport),
                                            inet->inet_num,
                                            tp->snd_una, tp->snd_nxt);
                }
#endif
                if (tcp_jiffies32 - tp->rcv_tstamp > TCP_RTO_MAX) {
                        tcp_write_err(sk);
                        goto out;
                }
                tcp_enter_loss(sk);
                tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1);
                __sk_dst_reset(sk);
                goto out_reset_timer;
        }

        __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
        if (tcp_write_timeout(sk))
                goto out;

        if (icsk->icsk_retransmits == 0) {
                int mib_idx = 0;

                if (icsk->icsk_ca_state == TCP_CA_Recovery) {
                        if (tcp_is_sack(tp))
                                mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
                        else
                                mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
                } else if (icsk->icsk_ca_state == TCP_CA_Loss) {
                        mib_idx = LINUX_MIB_TCPLOSSFAILURES;
                } else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
                           tp->sacked_out) {
                        if (tcp_is_sack(tp))
                                mib_idx = LINUX_MIB_TCPSACKFAILURES;
                        else
                                mib_idx = LINUX_MIB_TCPRENOFAILURES;
                }
                if (mib_idx)
                        __NET_INC_STATS(sock_net(sk), mib_idx);
        }

        tcp_enter_loss(sk);

        icsk->icsk_retransmits++;
        if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) {
                /* Retransmission failed because of local congestion,
                 * Let senders fight for local resources conservatively.
                 */
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                          TCP_RESOURCE_PROBE_INTERVAL,
                                          TCP_RTO_MAX);
                goto out;
        }

        /* Increase the timeout each time we retransmit.  Note that
         * we do not increase the rtt estimate.  rto is initialized
         * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
         * that doubling rto each time is the least we can get away with.
         * In KA9Q, Karn uses this for the first few times, and then
         * goes to quadratic.  netBSD doubles, but only goes up to *64,
         * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
         * defined in the protocol as the maximum possible RTT.  I guess
         * we'll have to use something other than TCP to talk to the
         * University of Mars.
         *
         * PAWS allows us longer timeouts and large windows, so once
         * implemented ftp to mars will work nicely. We will have to fix
         * the 120 second clamps though!
         */
        icsk->icsk_backoff++;

out_reset_timer:
        /* If the stream is thin, use linear timeouts. Since 'icsk_backoff' is
         * used to reset the timer, set it to 0. Recalculate 'icsk_rto' as this
         * might be increased if the stream oscillates between thin and thick,
         * thus the old value might already be too high compared to the value
         * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
         * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
         * exponential backoff behaviour, so that we do not keep hammering
         * linear-timeout retransmissions into a black hole.
         */
        if (sk->sk_state == TCP_ESTABLISHED &&
            (tp->thin_lto || net->ipv4.sysctl_tcp_thin_linear_timeouts) &&
            tcp_stream_is_thin(tp) &&
            icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
                icsk->icsk_backoff = 0;
                icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
        } else {
                /* Use normal (exponential) backoff */
                icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
        }
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                  tcp_clamp_rto_to_user_timeout(sk), TCP_RTO_MAX);
        if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0))
                __sk_dst_reset(sk);

out:;
}

/* Called with bottom-half processing disabled.
   Called by tcp_write_timer() */
void tcp_write_timer_handler(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        int event;

        if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
            !icsk->icsk_pending)
                goto out;

        if (time_after(icsk->icsk_timeout, jiffies)) {
                sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
                goto out;
        }

        tcp_mstamp_refresh(tcp_sk(sk));
        event = icsk->icsk_pending;

        switch (event) {
        case ICSK_TIME_REO_TIMEOUT:
                tcp_rack_reo_timeout(sk);
                break;
        case ICSK_TIME_LOSS_PROBE:
                tcp_send_loss_probe(sk);
                break;
        case ICSK_TIME_RETRANS:
                icsk->icsk_pending = 0;
                tcp_retransmit_timer(sk);
                break;
        case ICSK_TIME_PROBE0:
                icsk->icsk_pending = 0;
                tcp_probe_timer(sk);
                break;
        }

out:
        sk_mem_reclaim(sk);
}

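/* Timer callback for the retransmit/probe0/loss-probe timer.  Runs the
 * handler directly when the socket is not owned by user context, otherwise
 * defers the work to tcp_release_cb() via TCP_WRITE_TIMER_DEFERRED.
 */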
static void tcp_write_timer(struct timer_list *t)
{
        struct inet_connection_sock *icsk =
                        from_timer(icsk, t, icsk_retransmit_timer);
        struct sock *sk = &icsk->icsk_inet.sk;

        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk)) {
                tcp_write_timer_handler(sk);
        } else {
                /* delegate our work to tcp_release_cb() */
                if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
                        sock_hold(sk);
        }
        bh_unlock_sock(sk);
        sock_put(sk);
}

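/* SYN-ACK timeout hook for request sockets: only accounts the timeout in
 * the TCPTIMEOUTS MIB counter; the retransmission itself is handled by the
 * caller.
 */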
void tcp_syn_ack_timeout(const struct request_sock *req)
{
        struct net *net = read_pnet(&inet_rsk(req)->ireq_net);

        __NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

void tcp_set_keepalive(struct sock *sk, int val)
{
        if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
                return;

        if (val && !sock_flag(sk, SOCK_KEEPOPEN))
                inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
        else if (!val)
                inet_csk_delete_keepalive_timer(sk);
}
EXPORT_SYMBOL_GPL(tcp_set_keepalive);


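/* Keepalive timer: also implements the FIN_WAIT2 timeout for orphaned
 * sockets.  After keepalive_time_when() of idle time it sends keepalive
 * probes every keepalive_intvl_when() and aborts the connection once
 * keepalive_probes() probes (or TCP_USER_TIMEOUT) go unanswered.
 */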
static void tcp_keepalive_timer (struct timer_list *t)
{
        struct sock *sk = from_timer(sk, t, sk_timer);
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        u32 elapsed;

        /* Only process if socket is not in use. */
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk)) {
                /* Try again later. */
                inet_csk_reset_keepalive_timer (sk, HZ/20);
                goto out;
        }

        if (sk->sk_state == TCP_LISTEN) {
                pr_err("Hmm... keepalive on a LISTEN ???\n");
                goto out;
        }

        tcp_mstamp_refresh(tp);
        if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
                if (tp->linger2 >= 0) {
                        const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

                        if (tmo > 0) {
                                tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
                                goto out;
                        }
                }
                tcp_send_active_reset(sk, GFP_ATOMIC);
                goto death;
        }

        if (!sock_flag(sk, SOCK_KEEPOPEN) ||
            ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
                goto out;

        elapsed = keepalive_time_when(tp);

        /* It is alive without keepalive 8) */
        if (tp->packets_out || !tcp_write_queue_empty(sk))
                goto resched;

        elapsed = keepalive_time_elapsed(tp);

        if (elapsed >= keepalive_time_when(tp)) {
                /* If the TCP_USER_TIMEOUT option is enabled, use that
                 * to determine when to timeout instead.
                 */
                if ((icsk->icsk_user_timeout != 0 &&
                    elapsed >= msecs_to_jiffies(icsk->icsk_user_timeout) &&
                    icsk->icsk_probes_out > 0) ||
                    (icsk->icsk_user_timeout == 0 &&
                    icsk->icsk_probes_out >= keepalive_probes(tp))) {
                        tcp_send_active_reset(sk, GFP_ATOMIC);
                        tcp_write_err(sk);
                        goto out;
                }
                if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
                        icsk->icsk_probes_out++;
                        elapsed = keepalive_intvl_when(tp);
                } else {
                        /* If keepalive was lost due to local congestion,
                         * try harder.
                         */
                        elapsed = TCP_RESOURCE_PROBE_INTERVAL;
                }
        } else {
                /* It is tp->rcv_tstamp + keepalive_time_when(tp) */
                elapsed = keepalive_time_when(tp) - elapsed;
        }

        sk_mem_reclaim(sk);

resched:
        inet_csk_reset_keepalive_timer (sk, elapsed);
        goto out;

death:
        tcp_done(sk);

out:
        bh_unlock_sock(sk);
        sock_put(sk);
}

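/* hrtimer callback used for ACK compression: flushes the pending coalesced
 * ACK (if more than TCP_FASTRETRANS_THRESH ACKs were compressed), or defers
 * the work to tcp_release_cb() when the socket is owned by user context.
 */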
static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
{
        struct tcp_sock *tp = container_of(timer, struct tcp_sock, compressed_ack_timer);
        struct sock *sk = (struct sock *)tp;

        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk)) {
                if (tp->compressed_ack > TCP_FASTRETRANS_THRESH)
                        tcp_send_ack(sk);
        } else {
                if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
                                      &sk->sk_tsq_flags))
                        sock_hold(sk);
        }
        bh_unlock_sock(sk);

        sock_put(sk);

        return HRTIMER_NORESTART;
}

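/* Set up all transmit-side timers for a newly created socket: the
 * retransmit, delayed-ACK and keepalive timers plus the pacing and
 * compressed-ACK hrtimers.
 */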
void tcp_init_xmit_timers(struct sock *sk)
{
        inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
                                  &tcp_keepalive_timer);
        hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_ABS_PINNED_SOFT);
        tcp_sk(sk)->pacing_timer.function = tcp_pace_kick;

        hrtimer_init(&tcp_sk(sk)->compressed_ack_timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_REL_PINNED_SOFT);
        tcp_sk(sk)->compressed_ack_timer.function = tcp_compressed_ack_kick;
}