linux/net/ipv4/tcp_timer.c
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 *              Florian La Roche, <flla@stud.uni-sb.de>
 *              Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *              Linus Torvalds, <torvalds@cs.helsinki.fi>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Matthew Dillon, <dillon@apollo.west.oic.com>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>

/**
 *  tcp_write_err() - close socket and save error info
 *  @sk:  The socket the error has appeared on.
 *
 *  Returns: Nothing (void)
 */

static void tcp_write_err(struct sock *sk)
{
        sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
        sk->sk_error_report(sk);

        tcp_write_queue_purge(sk);
        tcp_done(sk);
        __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/**
 *  tcp_out_of_resources() - Close socket if out of resources
 *  @sk:        pointer to current socket
 *  @do_reset:  send a last packet with reset flag
 *
 *  Do not allow orphaned sockets to eat all our resources.
 *  This is a direct violation of the TCP specs, but it is required
 *  to prevent DoS attacks. It is called when a retransmission timeout
 *  or zero probe timeout occurs on an orphaned socket.
 *
 *  Also close if our net namespace is exiting; in that case there is no
 *  hope of ever communicating again since all netns interfaces are already
 *  down (or about to be down), and we need to release our dst references,
 *  which have been moved to the netns loopback interface, so the namespace
 *  can finish exiting.  This condition is only possible if we are a kernel
 *  socket, as those do not hold references to the namespace.
 *
 *  The criteria are still not confirmed experimentally and may change.
 *  We kill the socket if:
 *  1. The number of orphaned sockets exceeds an administratively configured
 *     limit.
 *  2. We are under strong memory pressure.
 *  3. Our net namespace is exiting.
 */
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int shift = 0;

        /* If the peer has not opened its window or transmitted anything
         * for a long time, penalize it. */
        if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
                shift++;

        /* If some dubious ICMP arrived, penalize even more. */
        if (sk->sk_err_soft)
                shift++;

        if (tcp_check_oom(sk, shift)) {
                /* Catch exceptional cases, when the connection requires a reset.
                 *      1. Last segment was sent recently. */
                if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
                    /*  2. Window is closed. */
                    (!tp->snd_wnd && !tp->packets_out))
                        do_reset = true;
                if (do_reset)
                        tcp_send_active_reset(sk, GFP_ATOMIC);
                tcp_done(sk);
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
                return 1;
        }

        if (!check_net(sock_net(sk))) {
                /* Not possible to send reset; just close */
                tcp_done(sk);
                return 1;
        }

        return 0;
}
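
/*
 * Editor's note (illustrative, not part of this file): the "shift" computed
 * above tightens the out-of-memory check for suspicious orphans. In kernels
 * of roughly this vintage, tcp_check_oom() relies on a comparison of the
 * form
 *
 *         orphans << shift > sysctl_tcp_max_orphans
 *
 * so shift = 1 effectively halves the orphan budget and shift = 2 quarters
 * it, in addition to the socket-memory test. Helper names and exact
 * behaviour differ between kernel versions; treat this as a sketch.
 */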

/**
 *  tcp_orphan_retries() - Returns the maximum number of retries on an orphaned socket
 *  @sk:    Pointer to the current socket.
 *  @alive: bool, socket alive state
 */
static int tcp_orphan_retries(struct sock *sk, bool alive)
{
        int retries = sock_net(sk)->ipv4.sysctl_tcp_orphan_retries; /* May be zero. */

        /* We know from an ICMP that something is wrong. */
        if (sk->sk_err_soft && !alive)
                retries = 0;

        /* However, if the socket sent something recently, select a safe
         * number of retries. 8 corresponds to >100 seconds with a minimal
         * RTO of 200 msec. */
        if (retries == 0 && alive)
                retries = 8;
        return retries;
}
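
/*
 * Worked example (illustrative, not in the original file): with the formula
 * used by retransmits_timed_out() below and the usual TCP_RTO_MIN of 200 ms,
 * a boundary of 8 retries (8 <= linear_backoff_thresh = 9) corresponds to
 *
 *         ((2 << 8) - 1) * 200 ms = 511 * 200 ms = 102.2 s
 *
 * which is the ">100 seconds" the comment above refers to.
 */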

static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
        const struct net *net = sock_net(sk);
        int mss;

        /* Black hole detection */
        if (!net->ipv4.sysctl_tcp_mtu_probing)
                return;

        if (!icsk->icsk_mtup.enabled) {
                icsk->icsk_mtup.enabled = 1;
                icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
        } else {
                mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
                mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
                mss = max(mss, 68 - tcp_sk(sk)->tcp_header_len);
                icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
        }
        tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}
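
/*
 * Illustrative sketch (example figures, not taken from this file): assuming
 * net.ipv4.tcp_base_mss is at its common default of 1024 and the current
 * search_low corresponds to an MSS of 1400, a persistent timeout lowers the
 * probe target as follows:
 *
 *         mss = 1400 >> 1 = 700;
 *         mss = min(1024, 700) = 700;
 *         mss = max(700, 68 - tcp_header_len) = 700;
 *
 * so search_low drops to the MTU carrying a 700-byte MSS, and tcp_sync_mss()
 * re-clamps the MSS in use accordingly.
 */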


/**
 *  retransmits_timed_out() - returns true if this connection has timed out
 *  @sk:       The current socket
 *  @boundary: max number of retransmissions
 *  @timeout:  A custom timeout value.
 *             If set to 0, the default timeout is calculated and used,
 *             based on TCP_RTO_MIN and the number of unsuccessful retransmits.
 *
 * The default "timeout" value this function can calculate and use
 * is equivalent to the timeout of a TCP connection
 * after "boundary" unsuccessful, exponentially backed-off
 * retransmissions with an initial RTO of TCP_RTO_MIN.
 */
static bool retransmits_timed_out(struct sock *sk,
                                  unsigned int boundary,
                                  unsigned int timeout)
{
        const unsigned int rto_base = TCP_RTO_MIN;
        unsigned int linear_backoff_thresh, start_ts;

        if (!inet_csk(sk)->icsk_retransmits)
                return false;

        start_ts = tcp_sk(sk)->retrans_stamp;
        if (unlikely(!start_ts)) {
                struct sk_buff *head = tcp_rtx_queue_head(sk);

                if (!head)
                        return false;
                start_ts = tcp_skb_timestamp(head);
        }

        if (likely(timeout == 0)) {
                linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);

                if (boundary <= linear_backoff_thresh)
                        timeout = ((2 << boundary) - 1) * rto_base;
                else
                        timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
                                (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
        }
        return (tcp_time_stamp(tcp_sk(sk)) - start_ts) >= jiffies_to_msecs(timeout);
}
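
/*
 * Worked example (illustrative; assumes the usual TCP_RTO_MIN = 200 ms,
 * TCP_RTO_MAX = 120 s and the stock sysctl defaults, none of which are
 * defined in this file):
 *
 *         linear_backoff_thresh = ilog2(120000 / 200) = ilog2(600) = 9
 *
 * For boundary = net.ipv4.tcp_retries1 = 3 (<= 9):
 *         timeout = ((2 << 3) - 1) * 200 ms = 15 * 200 ms = 3 s
 *
 * For boundary = net.ipv4.tcp_retries2 = 15 (> 9):
 *         timeout = ((2 << 9) - 1) * 200 ms + (15 - 9) * 120 s
 *                 = 204.6 s + 720 s ~= 924.6 s (about 15.4 minutes)
 */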

/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct net *net = sock_net(sk);
        bool expired, do_reset;
        int retry_until;

        if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
                if (icsk->icsk_retransmits) {
                        dst_negative_advice(sk);
                } else if (!tp->syn_data && !tp->syn_fastopen) {
                        sk_rethink_txhash(sk);
                }
                retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
                expired = icsk->icsk_retransmits >= retry_until;
        } else {
                if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0)) {
                        /* Black hole detection */
                        tcp_mtu_probing(icsk, sk);

                        dst_negative_advice(sk);
                } else {
                        sk_rethink_txhash(sk);
                }

                retry_until = net->ipv4.sysctl_tcp_retries2;
                if (sock_flag(sk, SOCK_DEAD)) {
                        const bool alive = icsk->icsk_rto < TCP_RTO_MAX;

                        retry_until = tcp_orphan_retries(sk, alive);
                        do_reset = alive ||
                                !retransmits_timed_out(sk, retry_until, 0);

                        if (tcp_out_of_resources(sk, do_reset))
                                return 1;
                }
                expired = retransmits_timed_out(sk, retry_until,
                                                icsk->icsk_user_timeout);
        }
        tcp_fastopen_active_detect_blackhole(sk, expired);

        if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
                tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
                                  icsk->icsk_retransmits,
                                  icsk->icsk_rto, (int)expired);

        if (expired) {
                /* Has it gone just too far? */
                tcp_write_err(sk);
                return 1;
        }

        return 0;
}
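
/*
 * Usage sketch (illustrative user-space code shown inside this comment; it
 * is not part of the kernel file): icsk_user_timeout above comes from the
 * TCP_USER_TIMEOUT socket option, which caps how long transmitted data may
 * remain unacknowledged before the connection is aborted, overriding the
 * retries2-based default. Error handling is omitted for brevity.
 *
 *         #include <netinet/in.h>
 *         #include <netinet/tcp.h>
 *         #include <sys/socket.h>
 *
 *         int fd = socket(AF_INET, SOCK_STREAM, 0);
 *         unsigned int tmo_ms = 30000;   // abort after ~30 s without ACKs
 *         setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
 *                    &tmo_ms, sizeof(tmo_ms));
 */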

/* Called with BH disabled */
void tcp_delack_timer_handler(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        sk_mem_reclaim_partial(sk);

        if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
            !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
                goto out;

        if (time_after(icsk->icsk_ack.timeout, jiffies)) {
                sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
                goto out;
        }
        icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

        if (inet_csk_ack_scheduled(sk)) {
                if (!icsk->icsk_ack.pingpong) {
                        /* Delayed ACK missed: inflate ATO. */
                        icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
                } else {
                        /* Delayed ACK missed: leave pingpong mode and
                         * deflate ATO.
                         */
                        icsk->icsk_ack.pingpong = 0;
                        icsk->icsk_ack.ato      = TCP_ATO_MIN;
                }
                tcp_mstamp_refresh(tcp_sk(sk));
                tcp_send_ack(sk);
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
        }

out:
        if (tcp_under_memory_pressure(sk))
                sk_mem_reclaim(sk);
}


/**
 *  tcp_delack_timer() - The TCP delayed ACK timeout handler
 *  @t:  Pointer to the expired timer; the owning socket is recovered from it
 *       via from_timer().
 *
 *  This function gets (indirectly) called when the delayed ACK timer of this
 *  socket expires. Calls tcp_delack_timer_handler() to do the actual work.
 *
 *  Returns: Nothing (void)
 */
static void tcp_delack_timer(struct timer_list *t)
{
        struct inet_connection_sock *icsk =
                        from_timer(icsk, t, icsk_delack_timer);
        struct sock *sk = &icsk->icsk_inet.sk;

        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk)) {
                tcp_delack_timer_handler(sk);
        } else {
                icsk->icsk_ack.blocked = 1;
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
                /* delegate our work to tcp_release_cb() */
                if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
                        sock_hold(sk);
        }
        bh_unlock_sock(sk);
        sock_put(sk);
}

static void tcp_probe_timer(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct sk_buff *skb = tcp_send_head(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        int max_probes;
        u32 start_ts;

        if (tp->packets_out || !skb) {
                icsk->icsk_probes_out = 0;
                return;
        }

        /* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
         * long as the receiver continues to respond to probes. We support this
         * by default and reset icsk_probes_out with incoming ACKs. But if the
         * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
         * kill the socket when the retry count and the time exceed the
         * corresponding system limit. We also implement a similar policy when
         * we use RTO to probe the window in tcp_retransmit_timer().
         */
        start_ts = tcp_skb_timestamp(skb);
        if (!start_ts)
                skb->skb_mstamp = tp->tcp_mstamp;
        else if (icsk->icsk_user_timeout &&
                 (s32)(tcp_time_stamp(tp) - start_ts) >
                 jiffies_to_msecs(icsk->icsk_user_timeout))
                goto abort;

        max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
        if (sock_flag(sk, SOCK_DEAD)) {
                const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;

                max_probes = tcp_orphan_retries(sk, alive);
                if (!alive && icsk->icsk_backoff >= max_probes)
                        goto abort;
                if (tcp_out_of_resources(sk, true))
                        return;
        }

        if (icsk->icsk_probes_out > max_probes) {
abort:          tcp_write_err(sk);
        } else {
                /* Only send another probe if we didn't close things up. */
                tcp_send_probe0(sk);
        }
}

/*
 *      Timer for Fast Open socket to retransmit SYNACK. Note that the
 *      sk here is the child socket, not the parent (listener) socket.
 */
static void tcp_fastopen_synack_timer(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        int max_retries = icsk->icsk_syn_retries ? :
            sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
        struct request_sock *req;

        req = tcp_sk(sk)->fastopen_rsk;
        req->rsk_ops->syn_ack_timeout(req);

        if (req->num_timeout >= max_retries) {
                tcp_write_err(sk);
                return;
        }
        /* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
         * returned from rtx_syn_ack() to make it more persistent like
         * regular retransmit because if the child socket has been accepted
         * it's not good to give up too easily.
         */
        inet_rtx_syn_ack(sk, req);
        req->num_timeout++;
        icsk->icsk_retransmits++;
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                          TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
}
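
/*
 * Illustrative schedule (assumed constants, not from this file): with the
 * usual TCP_TIMEOUT_INIT of 1 s and the default net.ipv4.tcp_synack_retries
 * of 5, a Fast Open child gets max_retries = 6, and the rearm above backs
 * off as
 *
 *         TCP_TIMEOUT_INIT << num_timeout: 2 s, 4 s, 8 s, 16 s, 32 s, 64 s
 *
 * (capped at TCP_RTO_MAX) until req->num_timeout reaches max_retries and
 * tcp_write_err() gives up on the child socket.
 */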


/**
 *  tcp_retransmit_timer() - The TCP retransmit timeout handler
 *  @sk:  Pointer to the current socket.
 *
 *  This function gets called when the kernel timer for a TCP packet
 *  of this socket expires.
 *
 *  It handles retransmission, timer adjustment and other necessary measures.
 *
 *  Returns: Nothing (void)
 */
void tcp_retransmit_timer(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct net *net = sock_net(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);

        if (tp->fastopen_rsk) {
                WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
                             sk->sk_state != TCP_FIN_WAIT1);
                tcp_fastopen_synack_timer(sk);
                /* Before we receive an ACK to our SYN-ACK, don't retransmit
                 * anything else (e.g., data or FIN segments).
                 */
                return;
        }
        if (!tp->packets_out)
                goto out;

        WARN_ON(tcp_rtx_queue_empty(sk));

        tp->tlp_high_seq = 0;

        if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
            !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
                /* The receiver has dastardly shrunk the window. Our retransmits
                 * become zero probes, but we should not time out this
                 * connection. If the socket is an orphan, time it out;
                 * we cannot allow such beasts to hang infinitely.
                 */
                struct inet_sock *inet = inet_sk(sk);
                if (sk->sk_family == AF_INET) {
                        net_dbg_ratelimited("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
                                            &inet->inet_daddr,
                                            ntohs(inet->inet_dport),
                                            inet->inet_num,
                                            tp->snd_una, tp->snd_nxt);
                }
#if IS_ENABLED(CONFIG_IPV6)
                else if (sk->sk_family == AF_INET6) {
                        net_dbg_ratelimited("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
                                            &sk->sk_v6_daddr,
                                            ntohs(inet->inet_dport),
                                            inet->inet_num,
                                            tp->snd_una, tp->snd_nxt);
                }
#endif
                if (tcp_jiffies32 - tp->rcv_tstamp > TCP_RTO_MAX) {
                        tcp_write_err(sk);
                        goto out;
                }
                tcp_enter_loss(sk);
                tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1);
                __sk_dst_reset(sk);
                goto out_reset_timer;
        }

        if (tcp_write_timeout(sk))
                goto out;

        if (icsk->icsk_retransmits == 0) {
                int mib_idx;

                if (icsk->icsk_ca_state == TCP_CA_Recovery) {
                        if (tcp_is_sack(tp))
                                mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
                        else
                                mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
                } else if (icsk->icsk_ca_state == TCP_CA_Loss) {
                        mib_idx = LINUX_MIB_TCPLOSSFAILURES;
                } else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
                           tp->sacked_out) {
                        if (tcp_is_sack(tp))
                                mib_idx = LINUX_MIB_TCPSACKFAILURES;
                        else
                                mib_idx = LINUX_MIB_TCPRENOFAILURES;
                } else {
                        mib_idx = LINUX_MIB_TCPTIMEOUTS;
                }
                __NET_INC_STATS(sock_net(sk), mib_idx);
        }

        tcp_enter_loss(sk);

        if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) {
                /* Retransmission failed because of local congestion,
                 * do not back off.
                 */
                if (!icsk->icsk_retransmits)
                        icsk->icsk_retransmits = 1;
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                          min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
                                          TCP_RTO_MAX);
                goto out;
        }

        /* Increase the timeout each time we retransmit.  Note that
         * we do not increase the rtt estimate.  rto is initialized
         * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
         * that doubling rto each time is the least we can get away with.
         * In KA9Q, Karn uses this for the first few times, and then
         * goes to quadratic.  netBSD doubles, but only goes up to *64,
         * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
         * defined in the protocol as the maximum possible RTT.  I guess
         * we'll have to use something other than TCP to talk to the
         * University of Mars.
         *
         * PAWS allows us longer timeouts and large windows, so once
         * implemented ftp to mars will work nicely. We will have to fix
         * the 120 second clamps though!
         */
        icsk->icsk_backoff++;
        icsk->icsk_retransmits++;

out_reset_timer:
        /* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
         * used to reset the timer, set it to 0. Recalculate 'icsk_rto' as this
         * might be increased if the stream oscillates between thin and thick;
         * thus the old value might already be too high compared to the value
         * set by 'tcp_set_rto' in tcp_input.c, which resets the rto without
         * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
         * exponential backoff behaviour, to avoid continuing to hammer
         * linear-timeout retransmissions into a black hole.
         */
        if (sk->sk_state == TCP_ESTABLISHED &&
            (tp->thin_lto || net->ipv4.sysctl_tcp_thin_linear_timeouts) &&
            tcp_stream_is_thin(tp) &&
            icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
                icsk->icsk_backoff = 0;
                icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
        } else {
                /* Use normal (exponential) backoff */
                icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
        }
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
        if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0))
                __sk_dst_reset(sk);

out:;
}
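
/*
 * Illustrative example (assumed starting value, not from this file): with a
 * current icsk_rto of 3 s, the normal path doubles the RTO on every expiry:
 *
 *         3 s -> 6 s -> 12 s -> 24 s -> 48 s -> 96 s -> 120 s (TCP_RTO_MAX)
 *
 * A thin stream (few packets in flight) with tcp_thin_linear_timeouts
 * enabled instead keeps recomputing the RTO from the measured RTT via
 * __tcp_set_rto() for the first TCP_THIN_LINEAR_RETRIES retransmissions,
 * giving roughly linear rather than exponential spacing.
 */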

/* Called with bottom-half processing disabled.
   Called by tcp_write_timer() */
void tcp_write_timer_handler(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        int event;

        if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
            !icsk->icsk_pending)
                goto out;

        if (time_after(icsk->icsk_timeout, jiffies)) {
                sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
                goto out;
        }

        tcp_mstamp_refresh(tcp_sk(sk));
        event = icsk->icsk_pending;

        switch (event) {
        case ICSK_TIME_REO_TIMEOUT:
                tcp_rack_reo_timeout(sk);
                break;
        case ICSK_TIME_LOSS_PROBE:
                tcp_send_loss_probe(sk);
                break;
        case ICSK_TIME_RETRANS:
                icsk->icsk_pending = 0;
                tcp_retransmit_timer(sk);
                break;
        case ICSK_TIME_PROBE0:
                icsk->icsk_pending = 0;
                tcp_probe_timer(sk);
                break;
        }

out:
        sk_mem_reclaim(sk);
}

static void tcp_write_timer(struct timer_list *t)
{
        struct inet_connection_sock *icsk =
                        from_timer(icsk, t, icsk_retransmit_timer);
        struct sock *sk = &icsk->icsk_inet.sk;

        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk)) {
                tcp_write_timer_handler(sk);
        } else {
                /* delegate our work to tcp_release_cb() */
                if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
                        sock_hold(sk);
        }
        bh_unlock_sock(sk);
        sock_put(sk);
}

void tcp_syn_ack_timeout(const struct request_sock *req)
{
        struct net *net = read_pnet(&inet_rsk(req)->ireq_net);

        __NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

void tcp_set_keepalive(struct sock *sk, int val)
{
        if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
                return;

        if (val && !sock_flag(sk, SOCK_KEEPOPEN))
                inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
        else if (!val)
                inet_csk_delete_keepalive_timer(sk);
}
EXPORT_SYMBOL_GPL(tcp_set_keepalive);


static void tcp_keepalive_timer(struct timer_list *t)
{
        struct sock *sk = from_timer(sk, t, sk_timer);
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        u32 elapsed;

        /* Only process if socket is not in use. */
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk)) {
                /* Try again later. */
                inet_csk_reset_keepalive_timer(sk, HZ/20);
                goto out;
        }

        if (sk->sk_state == TCP_LISTEN) {
                pr_err("Hmm... keepalive on a LISTEN ???\n");
                goto out;
        }

        tcp_mstamp_refresh(tp);
        if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
                if (tp->linger2 >= 0) {
                        const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

                        if (tmo > 0) {
                                tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
                                goto out;
                        }
                }
                tcp_send_active_reset(sk, GFP_ATOMIC);
                goto death;
        }

        if (!sock_flag(sk, SOCK_KEEPOPEN) ||
            ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
                goto out;

        elapsed = keepalive_time_when(tp);

        /* It is alive without keepalive 8) */
        if (tp->packets_out || !tcp_write_queue_empty(sk))
                goto resched;

        elapsed = keepalive_time_elapsed(tp);

        if (elapsed >= keepalive_time_when(tp)) {
                /* If the TCP_USER_TIMEOUT option is enabled, use that
                 * to determine when to time out instead.
                 */
                if ((icsk->icsk_user_timeout != 0 &&
                    elapsed >= icsk->icsk_user_timeout &&
                    icsk->icsk_probes_out > 0) ||
                    (icsk->icsk_user_timeout == 0 &&
                    icsk->icsk_probes_out >= keepalive_probes(tp))) {
                        tcp_send_active_reset(sk, GFP_ATOMIC);
                        tcp_write_err(sk);
                        goto out;
                }
                if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
                        icsk->icsk_probes_out++;
                        elapsed = keepalive_intvl_when(tp);
                } else {
                        /* If keepalive was lost due to local congestion,
                         * try harder.
                         */
                        elapsed = TCP_RESOURCE_PROBE_INTERVAL;
                }
        } else {
                /* The timer will fire at tp->rcv_tstamp + keepalive_time_when(tp). */
                elapsed = keepalive_time_when(tp) - elapsed;
        }

        sk_mem_reclaim(sk);

resched:
        inet_csk_reset_keepalive_timer(sk, elapsed);
        goto out;

death:
        tcp_done(sk);

out:
        bh_unlock_sock(sk);
        sock_put(sk);
}
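
/*
 * Usage sketch (illustrative user-space code shown inside this comment; it
 * is not part of the kernel file): the idle time, probe interval and probe
 * count consulted above via keepalive_time_when(), keepalive_intvl_when()
 * and keepalive_probes() can be set per socket before enabling SO_KEEPALIVE.
 * Error handling is omitted for brevity.
 *
 *         #include <netinet/in.h>
 *         #include <netinet/tcp.h>
 *         #include <sys/socket.h>
 *
 *         int fd = socket(AF_INET, SOCK_STREAM, 0);
 *         int idle = 60, intvl = 10, cnt = 5, on = 1;
 *
 *         setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE,  &idle,  sizeof(idle));
 *         setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
 *         setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT,   &cnt,   sizeof(cnt));
 *         setsockopt(fd, SOL_SOCKET,  SO_KEEPALIVE,  &on,    sizeof(on));
 */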

static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
{
        struct tcp_sock *tp = container_of(timer, struct tcp_sock, compressed_ack_timer);
        struct sock *sk = (struct sock *)tp;

        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk)) {
                if (tp->compressed_ack)
                        tcp_send_ack(sk);
        } else {
                if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
                                      &sk->sk_tsq_flags))
                        sock_hold(sk);
        }
        bh_unlock_sock(sk);

        sock_put(sk);

        return HRTIMER_NORESTART;
}

void tcp_init_xmit_timers(struct sock *sk)
{
        inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
                                  &tcp_keepalive_timer);
        hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_ABS_PINNED_SOFT);
        tcp_sk(sk)->pacing_timer.function = tcp_pace_kick;

        hrtimer_init(&tcp_sk(sk)->compressed_ack_timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_REL_PINNED_SOFT);
        tcp_sk(sk)->compressed_ack_timer.function = tcp_compressed_ack_kick;
}