linux/net/ipv4/tcp_fastopen.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <net/inetpeer.h>
#include <net/tcp.h>

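/* Lazily install a randomly generated primary key for this netns if no
 * Fast Open key has been configured yet, so that server-side cookie
 * generation always has a key to work with.
 */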
void tcp_fastopen_init_key_once(struct net *net)
{
        u8 key[TCP_FASTOPEN_KEY_LENGTH];
        struct tcp_fastopen_context *ctxt;

        rcu_read_lock();
        ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
        if (ctxt) {
                rcu_read_unlock();
                return;
        }
        rcu_read_unlock();

        /* tcp_fastopen_reset_cipher publishes the new context
         * atomically, so we allow this race to happen here.
         *
         * All call sites of tcp_fastopen_cookie_gen also check
         * for a valid cookie, so this is an acceptable risk.
         */
        get_random_bytes(key, sizeof(key));
        tcp_fastopen_reset_cipher(net, NULL, key, NULL);
}

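/* RCU callback that frees a retired key context.  kfree_sensitive()
 * zeroes the memory first so the old keys do not linger after the free.
 */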
static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
        struct tcp_fastopen_context *ctx =
            container_of(head, struct tcp_fastopen_context, rcu);

        kfree_sensitive(ctx);
}

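/* Release the per-listener key context, if one was installed (e.g. via the
 * TCP_FASTOPEN_KEY socket option).  Freeing is deferred by an RCU grace
 * period so concurrent readers of fastopenq.ctx stay safe.
 */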
void tcp_fastopen_destroy_cipher(struct sock *sk)
{
        struct tcp_fastopen_context *ctx;

        ctx = rcu_dereference_protected(
                        inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1);
        if (ctx)
                call_rcu(&ctx->rcu, tcp_fastopen_ctx_free);
}

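/* Drop the per-netns key context: clear the pointer under
 * tcp_fastopen_ctx_lock and free the old context after an RCU grace
 * period.
 */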
void tcp_fastopen_ctx_destroy(struct net *net)
{
        struct tcp_fastopen_context *ctxt;

        spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);

        ctxt = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
                                lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
        rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, NULL);
        spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);

        if (ctxt)
                call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
}

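/* Install a new primary key and optional backup key, either for a single
 * listener (sk != NULL) or for the whole netns.  The previous context, if
 * any, is freed after an RCU grace period.
 */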
int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
                              void *primary_key, void *backup_key)
{
        struct tcp_fastopen_context *ctx, *octx;
        struct fastopen_queue *q;
        int err = 0;

        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx) {
                err = -ENOMEM;
                goto out;
        }

        ctx->key[0].key[0] = get_unaligned_le64(primary_key);
        ctx->key[0].key[1] = get_unaligned_le64(primary_key + 8);
        if (backup_key) {
                ctx->key[1].key[0] = get_unaligned_le64(backup_key);
                ctx->key[1].key[1] = get_unaligned_le64(backup_key + 8);
                ctx->num = 2;
        } else {
                ctx->num = 1;
        }

        spin_lock(&net->ipv4.tcp_fastopen_ctx_lock);
        if (sk) {
                q = &inet_csk(sk)->icsk_accept_queue.fastopenq;
                octx = rcu_dereference_protected(q->ctx,
                        lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
                rcu_assign_pointer(q->ctx, ctx);
        } else {
                octx = rcu_dereference_protected(net->ipv4.tcp_fastopen_ctx,
                        lockdep_is_held(&net->ipv4.tcp_fastopen_ctx_lock));
                rcu_assign_pointer(net->ipv4.tcp_fastopen_ctx, ctx);
        }
        spin_unlock(&net->ipv4.tcp_fastopen_ctx_lock);

        if (octx)
                call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
out:
        return err;
}

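/* Copy the currently installed key material into @key as little-endian
 * u64 words (two per key) and return the number of keys, 0 if none are
 * installed.
 */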
int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
                            u64 *key)
{
        struct tcp_fastopen_context *ctx;
        int n_keys = 0, i;

        rcu_read_lock();
        if (icsk)
                ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
        else
                ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
        if (ctx) {
                n_keys = tcp_fastopen_context_len(ctx);
                for (i = 0; i < n_keys; i++) {
                        put_unaligned_le64(ctx->key[i].key[0], key + (i * 2));
                        put_unaligned_le64(ctx->key[i].key[1], key + (i * 2) + 1);
                }
        }
        rcu_read_unlock();

        return n_keys;
}

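/* Compute the cookie for this SYN by running SipHash, keyed by @key, over
 * the source and destination addresses taken from the packet headers.
 * Returns false for address families it does not handle.
 */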
static bool __tcp_fastopen_cookie_gen_cipher(struct request_sock *req,
                                             struct sk_buff *syn,
                                             const siphash_key_t *key,
                                             struct tcp_fastopen_cookie *foc)
{
        BUILD_BUG_ON(TCP_FASTOPEN_COOKIE_SIZE != sizeof(u64));

        if (req->rsk_ops->family == AF_INET) {
                const struct iphdr *iph = ip_hdr(syn);

                foc->val[0] = cpu_to_le64(siphash(&iph->saddr,
                                          sizeof(iph->saddr) +
                                          sizeof(iph->daddr),
                                          key));
                foc->len = TCP_FASTOPEN_COOKIE_SIZE;
                return true;
        }
#if IS_ENABLED(CONFIG_IPV6)
        if (req->rsk_ops->family == AF_INET6) {
                const struct ipv6hdr *ip6h = ipv6_hdr(syn);

                foc->val[0] = cpu_to_le64(siphash(&ip6h->saddr,
                                          sizeof(ip6h->saddr) +
                                          sizeof(ip6h->daddr),
                                          key));
                foc->len = TCP_FASTOPEN_COOKIE_SIZE;
                return true;
        }
#endif
        return false;
}

/* Generate the fastopen cookie by applying SipHash to both the source and
 * destination addresses.
 */
static void tcp_fastopen_cookie_gen(struct sock *sk,
                                    struct request_sock *req,
                                    struct sk_buff *syn,
                                    struct tcp_fastopen_cookie *foc)
{
        struct tcp_fastopen_context *ctx;

        rcu_read_lock();
        ctx = tcp_fastopen_get_ctx(sk);
        if (ctx)
                __tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[0], foc);
        rcu_read_unlock();
}

/* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
 * queue this additional data / FIN.
 */
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
                return;

        skb = skb_clone(skb, GFP_ATOMIC);
        if (!skb)
                return;

        skb_dst_drop(skb);
        /* segs_in has been initialized to 1 in tcp_create_openreq_child().
         * Hence, reset segs_in to 0 before calling tcp_segs_in()
         * to avoid double counting.  Also, tcp_segs_in() expects
         * skb->len to include the tcp_hdrlen.  Hence, it should
         * be called before __skb_pull().
         */
        tp->segs_in = 0;
        tcp_segs_in(tp, skb);
        __skb_pull(skb, tcp_hdrlen(skb));
        sk_forced_mem_schedule(sk, skb->truesize);
        skb_set_owner_r(skb, sk);

        TCP_SKB_CB(skb)->seq++;
        TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;

        tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
        __skb_queue_tail(&sk->sk_receive_queue, skb);
        tp->syn_data_acked = 1;

        /* u64_stats_update_begin(&tp->syncp) not needed here,
         * as we certainly are not changing upper 32bit value (0)
         */
        tp->bytes_received = skb->len;

        if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
                tcp_fin(sk);
}

/* returns 0 - no key match, 1 for primary, 2 for backup */
static int tcp_fastopen_cookie_gen_check(struct sock *sk,
                                         struct request_sock *req,
                                         struct sk_buff *syn,
                                         struct tcp_fastopen_cookie *orig,
                                         struct tcp_fastopen_cookie *valid_foc)
{
        struct tcp_fastopen_cookie search_foc = { .len = -1 };
        struct tcp_fastopen_cookie *foc = valid_foc;
        struct tcp_fastopen_context *ctx;
        int i, ret = 0;

        rcu_read_lock();
        ctx = tcp_fastopen_get_ctx(sk);
        if (!ctx)
                goto out;
        for (i = 0; i < tcp_fastopen_context_len(ctx); i++) {
                __tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[i], foc);
                if (tcp_fastopen_cookie_match(foc, orig)) {
                        ret = i + 1;
                        goto out;
                }
                foc = &search_foc;
        }
out:
        rcu_read_unlock();
        return ret;
}

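/* Create a full child socket directly from the SYN so that any payload
 * carried in the SYN can be queued and acknowledged before the 3WHS
 * completes.  The caller sends the SYN-ACK and adds the child to the
 * listener's accept queue.
 */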
static struct sock *tcp_fastopen_create_child(struct sock *sk,
                                              struct sk_buff *skb,
                                              struct request_sock *req)
{
        struct tcp_sock *tp;
        struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
        struct sock *child;
        bool own_req;

        child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
                                                         NULL, &own_req);
        if (!child)
                return NULL;

        spin_lock(&queue->fastopenq.lock);
        queue->fastopenq.qlen++;
        spin_unlock(&queue->fastopenq.lock);

        /* Initialize the child socket. Have to fix some values to take
         * into account the child is a Fast Open socket and is created
         * only out of the bits carried in the SYN packet.
         */
        tp = tcp_sk(child);

        rcu_assign_pointer(tp->fastopen_rsk, req);
        tcp_rsk(req)->tfo_listener = true;

        /* RFC1323: The window in SYN & SYN/ACK segments is never
         * scaled. So correct it appropriately.
         */
        tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
        tp->max_window = tp->snd_wnd;

        /* Activate the retrans timer so that SYNACK can be retransmitted.
         * The request socket is not added to the ehash
         * because it's been added to the accept queue directly.
         */
        inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
                                  TCP_TIMEOUT_INIT, TCP_RTO_MAX);

        refcount_set(&req->rsk_refcnt, 2);

        /* Now finish processing the fastopen child socket. */
        tcp_init_transfer(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, skb);

        tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;

        tcp_fastopen_add_skb(child, skb);

        tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
        tp->rcv_wup = tp->rcv_nxt;
        /* tcp_conn_request() is sending the SYNACK,
         * and queues the child into listener accept queue.
         */
        return child;
}

static bool tcp_fastopen_queue_check(struct sock *sk)
{
        struct fastopen_queue *fastopenq;

        /* Make sure the listener has enabled fastopen, and we don't
         * exceed the max # of pending TFO requests allowed before trying
         * to validate the cookie, in order to avoid burning CPU cycles
         * unnecessarily.
         *
         * XXX (TFO) - The implication of checking the max_qlen before
         * processing a cookie request is that clients can't differentiate
         * between qlen overflow causing Fast Open to be disabled
         * temporarily vs a server not supporting Fast Open at all.
         */
        fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
        if (fastopenq->max_qlen == 0)
                return false;

        if (fastopenq->qlen >= fastopenq->max_qlen) {
                struct request_sock *req1;

                spin_lock(&fastopenq->lock);
                req1 = fastopenq->rskq_rst_head;
                if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
                        __NET_INC_STATS(sock_net(sk),
                                        LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
                        spin_unlock(&fastopenq->lock);
                        return false;
                }
                fastopenq->rskq_rst_head = req1->dl_next;
                fastopenq->qlen--;
                spin_unlock(&fastopenq->lock);
                reqsk_put(req1);
        }
        return true;
}

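/* True if this connection may skip the cookie exchange entirely: allowed
 * by the matching bit (@flag) in the tcp_fastopen sysctl, by the socket's
 * fastopen_no_cookie option, or by the route's FASTOPEN_NO_COOKIE metric.
 */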
static bool tcp_fastopen_no_cookie(const struct sock *sk,
                                   const struct dst_entry *dst,
                                   int flag)
{
        return (sock_net(sk)->ipv4.sysctl_tcp_fastopen & flag) ||
               tcp_sk(sk)->fastopen_no_cookie ||
               (dst && dst_metric(dst, RTAX_FASTOPEN_NO_COOKIE));
}

/* Returns the child socket if we should perform Fast Open on the SYN,
 * otherwise NULL. The cookie (foc) may be updated and is then returned to
 * the client in the SYN-ACK later, e.g. for a Fast Open cookie request
 * (foc->len == 0).
 */
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
                              struct request_sock *req,
                              struct tcp_fastopen_cookie *foc,
                              const struct dst_entry *dst)
{
        bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
        int tcp_fastopen = sock_net(sk)->ipv4.sysctl_tcp_fastopen;
        struct tcp_fastopen_cookie valid_foc = { .len = -1 };
        struct sock *child;
        int ret = 0;

        if (foc->len == 0) /* Client requests a cookie */
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);

        if (!((tcp_fastopen & TFO_SERVER_ENABLE) &&
              (syn_data || foc->len >= 0) &&
              tcp_fastopen_queue_check(sk))) {
                foc->len = -1;
                return NULL;
        }

        if (syn_data &&
            tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
                goto fastopen;

        if (foc->len == 0) {
                /* Client requests a cookie. */
                tcp_fastopen_cookie_gen(sk, req, skb, &valid_foc);
        } else if (foc->len > 0) {
                ret = tcp_fastopen_cookie_gen_check(sk, req, skb, foc,
                                                    &valid_foc);
                if (!ret) {
                        NET_INC_STATS(sock_net(sk),
                                      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
                } else {
                        /* Cookie is valid. Create a (full) child socket to
                         * accept the data in SYN before returning a SYN-ACK to
                         * ack the data. If we fail to create the socket, fall
                         * back and ack only the ISN, but include the same
                         * cookie.
                         *
                         * Note: Data-less SYN with valid cookie is allowed to
                         * send data in SYN_RECV state.
                         */
fastopen:
                        child = tcp_fastopen_create_child(sk, skb, req);
                        if (child) {
                                if (ret == 2) {
                                        valid_foc.exp = foc->exp;
                                        *foc = valid_foc;
                                        NET_INC_STATS(sock_net(sk),
                                                      LINUX_MIB_TCPFASTOPENPASSIVEALTKEY);
                                } else {
                                        foc->len = -1;
                                }
                                NET_INC_STATS(sock_net(sk),
                                              LINUX_MIB_TCPFASTOPENPASSIVE);
                                return child;
                        }
                        NET_INC_STATS(sock_net(sk),
                                      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
                }
        }
        valid_foc.exp = foc->exp;
        *foc = valid_foc;
        return NULL;
}

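/* Client side: fetch the cached cookie (and MSS) for this destination.
 * Returns true if the connect path may send data in the SYN, i.e. we
 * either hold a valid cookie or are allowed to proceed without one.
 */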
bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
                               struct tcp_fastopen_cookie *cookie)
{
        const struct dst_entry *dst;

        tcp_fastopen_cache_get(sk, mss, cookie);

        /* Firewall blackhole issue check */
        if (tcp_fastopen_active_should_disable(sk)) {
                cookie->len = -1;
                return false;
        }

        dst = __sk_dst_get(sk);

        if (tcp_fastopen_no_cookie(sk, dst, TFO_CLIENT_NO_COOKIE)) {
                cookie->len = -1;
                return true;
        }
        if (cookie->len > 0)
                return true;
        tcp_sk(sk)->fastopen_client_fail = TFO_COOKIE_UNAVAILABLE;
        return false;
}

/* This function checks if we want to defer sending SYN until the first
 * write().  We defer under the following conditions:
 * 1. fastopen_connect sockopt is set
 * 2. we have a valid cookie
 * Return value: return true if we want to defer until application writes data
 *               return false if we want to send out SYN immediately
 */
bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
{
        struct tcp_fastopen_cookie cookie = { .len = 0 };
        struct tcp_sock *tp = tcp_sk(sk);
        u16 mss;

        if (tp->fastopen_connect && !tp->fastopen_req) {
                if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
                        inet_sk(sk)->defer_connect = 1;
                        return true;
                }

                /* Alloc fastopen_req in order for FO option to be included
                 * in SYN
                 */
                tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req),
                                           sk->sk_allocation);
                if (tp->fastopen_req)
                        tp->fastopen_req->cookie = cookie;
                else
                        *err = -ENOBUFS;
        }
        return false;
}
EXPORT_SYMBOL(tcp_fastopen_defer_connect);

/*
 * The following code block deals with middlebox issues with TFO:
 * middlebox firewalls can cause the server's data to be blackholed
 * after a successful 3WHS using TFO.
 * The proposed solution is to disable active TFO globally under the
 * following circumstances:
 *   1. client side TFO socket receives out of order FIN
 *   2. client side TFO socket receives out of order RST
 *   3. client side TFO socket has timed out three times consecutively during
 *      or after handshake
 * We disable active side TFO globally for 1hr at first. Then if it
 * happens again, we disable it for 2h, then 4h, 8h, ...
 * And we reset the timeout back to 1hr when we see a successful active
 * TFO connection with data exchanges.
 */

/* Disable active TFO and record current jiffies and
 * tfo_active_disable_times
 */
void tcp_fastopen_active_disable(struct sock *sk)
{
        struct net *net = sock_net(sk);

        if (!sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout)
                return;

        /* Paired with READ_ONCE() in tcp_fastopen_active_should_disable() */
        WRITE_ONCE(net->ipv4.tfo_active_disable_stamp, jiffies);

        /* Paired with smp_rmb() in tcp_fastopen_active_should_disable().
         * We want net->ipv4.tfo_active_disable_stamp to be updated first.
         */
        smp_mb__before_atomic();
        atomic_inc(&net->ipv4.tfo_active_disable_times);

        NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE);
}

/* Calculate timeout for tfo active disable
 * Return true if we are still in the active TFO disable period
 * Return false if timeout already expired and we should use active TFO
 */
bool tcp_fastopen_active_should_disable(struct sock *sk)
{
        unsigned int tfo_bh_timeout = sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout;
        unsigned long timeout;
        int tfo_da_times;
        int multiplier;

        if (!tfo_bh_timeout)
                return false;

        tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
        if (!tfo_da_times)
                return false;

        /* Paired with smp_mb__before_atomic() in tcp_fastopen_active_disable() */
        smp_rmb();

        /* Limit timeout to max: 2^6 * initial timeout */
        multiplier = 1 << min(tfo_da_times - 1, 6);

        /* Paired with the WRITE_ONCE() in tcp_fastopen_active_disable(). */
        timeout = READ_ONCE(sock_net(sk)->ipv4.tfo_active_disable_stamp) +
                  multiplier * tfo_bh_timeout * HZ;
        if (time_before(jiffies, timeout))
                return true;

        /* Mark check bit so we can check for successful active TFO
         * condition and reset tfo_active_disable_times
         */
        tcp_sk(sk)->syn_fastopen_ch = 1;
        return false;
}

/* Disable active TFO if FIN is the only packet in the ofo queue
 * and no data is received.
 * Also check if we can reset tfo_active_disable_times if data is
 * received successfully on a marked active TFO socket opened on
 * a non-loopback interface.
 */
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct dst_entry *dst;
        struct sk_buff *skb;

        if (!tp->syn_fastopen)
                return;

        if (!tp->data_segs_in) {
                skb = skb_rb_first(&tp->out_of_order_queue);
                if (skb && !skb_rb_next(skb)) {
                        if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
                                tcp_fastopen_active_disable(sk);
                                return;
                        }
                }
        } else if (tp->syn_fastopen_ch &&
                   atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times)) {
                dst = sk_dst_get(sk);
                if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK)))
                        atomic_set(&sock_net(sk)->ipv4.tfo_active_disable_times, 0);
                dst_release(dst);
        }
}

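/* Called on retransmission timeouts: if a connection that used or
 * requested Fast Open keeps timing out, treat it as a likely blackhole
 * and pause active TFO for the whole netns.
 */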
void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired)
{
        u32 timeouts = inet_csk(sk)->icsk_retransmits;
        struct tcp_sock *tp = tcp_sk(sk);

        /* Broken middle-boxes may black-hole Fast Open connection during or
         * even after the handshake. Be extremely conservative and pause
         * Fast Open globally after hitting the third consecutive timeout or
         * exceeding the configured timeout limit.
         */
        if ((tp->syn_fastopen || tp->syn_data || tp->syn_data_acked) &&
            (timeouts == 2 || (timeouts < 2 && expired))) {
                tcp_fastopen_active_disable(sk);
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
        }
}