linux/net/ipv4/tcp_minisocks.c
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 *              Florian La Roche, <flla@stud.uni-sb.de>
 *              Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *              Linus Torvalds, <torvalds@cs.helsinki.fi>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Matthew Dillon, <dillon@apollo.west.oic.com>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>

int sysctl_tcp_syncookies __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_syncookies);

int sysctl_tcp_abort_on_overflow __read_mostly;

struct inet_timewait_death_row tcp_death_row = {
        .sysctl_max_tw_buckets = NR_FILE * 2,
        .period         = TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
        .death_lock     = __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock),
        .hashinfo       = &tcp_hashinfo,
        .tw_timer       = TIMER_INITIALIZER(inet_twdr_hangman, 0,
                                            (unsigned long)&tcp_death_row),
        .twkill_work    = __WORK_INITIALIZER(tcp_death_row.twkill_work,
                                             inet_twdr_twkill_work),
        /* Short-time timewait calendar */
        .twcal_hand     = -1,
        .twcal_timer    = TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
                                            (unsigned long)&tcp_death_row),
};
EXPORT_SYMBOL_GPL(tcp_death_row);

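/*
 * Does the segment [seq, end_seq) overlap the receive window
 * [s_win, e_win)?  Zero-length segments sitting exactly on a window
 * edge are also accepted, matching RFC 793's acceptability tests for
 * SEG.LEN == 0.
 */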
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
        if (seq == s_win)
                return true;
        if (after(end_seq, s_win) && before(seq, e_win))
                return true;
        return seq == e_win && seq == end_seq;
}

/*
 * * The main purpose of the TIME-WAIT state is to close the connection
 *   gracefully when one of the ends sits in LAST-ACK or CLOSING,
 *   retransmitting its FIN (and, probably, a tail of data) while one or
 *   more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal
 *   packet lifetime in the internet, which leads to the wrong conclusion
 *   that it is set to catch "old duplicate segments" wandering off their
 *   path. That is not quite correct. The timeout is calculated so that it
 *   exceeds the maximal retransmission timeout by enough to allow for the
 *   loss of one (or more) segments sent by the peer and of our ACKs. This
 *   time may be calculated from the RTO.
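 *   (As a point of reference, tcp_time_wait() below uses 3.5 * RTO as the
 *   shortened timeout when timestamp-based recycling is enabled.)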
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   has finally closed, and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT with
 *   these semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some cleverer way to catch duplicates (e.g. based on
 *   PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket is _not_
 * stateless. Strictly speaking, that means we must spinlock it. I do not
 * want to! Well, the probability of misbehaviour is ridiculously low, and
 * it seems we could use some mb() tricks to avoid misreading sequence
 * numbers, states, etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results.
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
                           const struct tcphdr *th)
{
        struct tcp_options_received tmp_opt;
        const u8 *hash_location;
        struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
        bool paws_reject = false;

        tmp_opt.saw_tstamp = 0;
        if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
                tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);

                if (tmp_opt.saw_tstamp) {
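                        /* tw_ts_offset was added to every tsval we sent on
                         * this connection, and the peer echoes it back in
                         * tsecr; subtract it to get back to our local
                         * timestamp clock before the checks below.
                         */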
                        tmp_opt.rcv_tsecr       -= tcptw->tw_ts_offset;
                        tmp_opt.ts_recent       = tcptw->tw_ts_recent;
                        tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
                        paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
                }
        }

        if (tw->tw_substate == TCP_FIN_WAIT2) {
                /* Just repeat all the checks of tcp_rcv_state_process() */

                /* Out of window, send ACK */
                if (paws_reject ||
                    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
                                   tcptw->tw_rcv_nxt,
                                   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
                        return TCP_TW_ACK;

                if (th->rst)
                        goto kill;

                if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
                        goto kill_with_rst;

                /* Dup ACK? */
                if (!th->ack ||
                    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
                    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
                        inet_twsk_put(tw);
                        return TCP_TW_SUCCESS;
                }

                /* New data or FIN. If new data arrive after half-duplex close,
                 * reset.
                 */
                if (!th->fin ||
                    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
kill_with_rst:
                        inet_twsk_deschedule(tw, &tcp_death_row);
                        inet_twsk_put(tw);
                        return TCP_TW_RST;
                }

                /* FIN arrived, enter true time-wait state. */
                tw->tw_substate   = TCP_TIME_WAIT;
                tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
                if (tmp_opt.saw_tstamp) {
                        tcptw->tw_ts_recent_stamp = get_seconds();
                        tcptw->tw_ts_recent       = tmp_opt.rcv_tsval;
                }

                if (tcp_death_row.sysctl_tw_recycle &&
                    tcptw->tw_ts_recent_stamp &&
                    tcp_tw_remember_stamp(tw))
                        inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout,
                                           TCP_TIMEWAIT_LEN);
                else
                        inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
                                           TCP_TIMEWAIT_LEN);
                return TCP_TW_ACK;
        }

        /*
         *      Now real TIME-WAIT state.
         *
         *      RFC 1122:
         *      "When a connection is [...] on TIME-WAIT state [...]
         *      [a TCP] MAY accept a new SYN from the remote TCP to
         *      reopen the connection directly, if it:
         *
         *      (1)  assigns its initial sequence number for the new
         *      connection to be larger than the largest sequence
         *      number it used on the previous connection incarnation,
         *      and
         *
         *      (2)  returns to TIME-WAIT state if the SYN turns out
         *      to be an old duplicate".
         */

        if (!paws_reject &&
            (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
             (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
                /* In-window segment; it may only be a reset or a bare ACK. */

                if (th->rst) {
                        /* This is TIME_WAIT assassination, in two flavors.
                         * Oh well... nobody has a sufficient solution to this
                         * protocol bug yet.
                         */
                        if (sysctl_tcp_rfc1337 == 0) {
kill:
                                inet_twsk_deschedule(tw, &tcp_death_row);
                                inet_twsk_put(tw);
                                return TCP_TW_SUCCESS;
                        }
                }
                inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
                                   TCP_TIMEWAIT_LEN);

                if (tmp_opt.saw_tstamp) {
                        tcptw->tw_ts_recent       = tmp_opt.rcv_tsval;
                        tcptw->tw_ts_recent_stamp = get_seconds();
                }

                inet_twsk_put(tw);
                return TCP_TW_SUCCESS;
        }

        /* Out of window segment.

           All such segments are ACKed immediately.

           The only exception is a new SYN. We accept it only if it is
           not an old duplicate and we are in no danger of being killed
           by delayed old duplicates. The RFC check - that the SYN
           carries a newer sequence number - works only at rates below
           ~40 Mbit/sec. However, if PAWS works, it is reliable and,
           even more, we may relax the silly seq-space cutoff.

           RED-PEN: we violate the main RFC requirement here: if this SYN
           turns out to be an old duplicate (i.e. we receive an RST in
           reply to our SYN-ACK), we must return the socket to time-wait
           state. Not good, but not fatal yet.
         */

        if (th->syn && !th->rst && !th->ack && !paws_reject &&
            (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
             (tmp_opt.saw_tstamp &&
              (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
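                /* A fresh SYN may reopen the connection straight out of
                 * TIME-WAIT (RFC 1122, quoted above). Choose an ISN beyond
                 * anything the old incarnation can have sent: snd_nxt plus
                 * a full 65535-byte window, plus 2. The conn_request path
                 * (e.g. tcp_v4_conn_request()) reads this back from
                 * TCP_SKB_CB(skb)->when, where 0 means "pick a fresh ISN",
                 * so the value 0 itself is skipped.
                 */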
                u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
                if (isn == 0)
                        isn++;
                TCP_SKB_CB(skb)->when = isn;
                return TCP_TW_SYN;
        }

        if (paws_reject)
                NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

        if (!th->rst) {
                /* In this case we must reset the TIMEWAIT timer.
                 *
                 * If it is an ACK-less SYN, it may be both an old duplicate
                 * and a new good SYN with a random sequence number < rcv_nxt.
                 * Do not reschedule in the latter case.
                 */
                if (paws_reject || th->ack)
                        inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
                                           TCP_TIMEWAIT_LEN);

                /* Send ACK. Note, we do not put the bucket;
                 * it will be released by the caller.
                 */
                return TCP_TW_ACK;
        }
        inet_twsk_put(tw);
        return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
        struct inet_timewait_sock *tw = NULL;
        const struct inet_connection_sock *icsk = inet_csk(sk);
        const struct tcp_sock *tp = tcp_sk(sk);
        bool recycle_ok = false;

        if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
                recycle_ok = tcp_remember_stamp(sk);

        if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
                tw = inet_twsk_alloc(sk, state);

        if (tw != NULL) {
                struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
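                /* 3.5 * RTO: long enough for the peer to retransmit its FIN
                 * and for us to re-ACK it; used below both as a floor for
                 * timeo and as the shortened timeout when recycling.
                 */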
                const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
                struct inet_sock *inet = inet_sk(sk);

                tw->tw_transparent      = inet->transparent;
                tw->tw_rcv_wscale       = tp->rx_opt.rcv_wscale;
                tcptw->tw_rcv_nxt       = tp->rcv_nxt;
                tcptw->tw_snd_nxt       = tp->snd_nxt;
                tcptw->tw_rcv_wnd       = tcp_receive_window(tp);
                tcptw->tw_ts_recent     = tp->rx_opt.ts_recent;
                tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
                tcptw->tw_ts_offset     = tp->tsoffset;

#if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == PF_INET6) {
                        struct ipv6_pinfo *np = inet6_sk(sk);
                        struct inet6_timewait_sock *tw6;

                        tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
                        tw6 = inet6_twsk((struct sock *)tw);
                        tw6->tw_v6_daddr = np->daddr;
                        tw6->tw_v6_rcv_saddr = np->rcv_saddr;
                        tw->tw_tclass = np->tclass;
                        tw->tw_ipv6only = np->ipv6only;
                }
#endif

#ifdef CONFIG_TCP_MD5SIG
                /*
                 * The timewait bucket does not have the key DB from the
                 * sock structure. We just make a quick copy of the
                 * md5 key being used (if indeed we are using one)
                 * so the timewait ack generating code has the key.
                 */
                do {
                        struct tcp_md5sig_key *key;
                        tcptw->tw_md5_key = NULL;
                        key = tp->af_specific->md5_lookup(sk, sk);
                        if (key != NULL) {
                                tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
                                if (tcptw->tw_md5_key && tcp_alloc_md5sig_pool(sk) == NULL)
                                        BUG();
                        }
                } while (0);
#endif

                /* Linkage updates. */
                __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);

                /* Get the TIME_WAIT timeout firing. */
                if (timeo < rto)
                        timeo = rto;

                if (recycle_ok) {
                        tw->tw_timeout = rto;
                } else {
                        tw->tw_timeout = TCP_TIMEWAIT_LEN;
                        if (state == TCP_TIME_WAIT)
                                timeo = TCP_TIMEWAIT_LEN;
                }

                inet_twsk_schedule(tw, &tcp_death_row, timeo,
                                   TCP_TIMEWAIT_LEN);
                inet_twsk_put(tw);
        } else {
                /* Sorry, if we're out of memory, just CLOSE this
                 * socket up.  We've got bigger problems than
                 * non-graceful socket closings.
                 */
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
        }

        tcp_update_metrics(sk);
        tcp_done(sk);
}

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
        struct tcp_timewait_sock *twsk = tcp_twsk(sk);

        if (twsk->tw_md5_key) {
                tcp_free_md5sig_pool();
                kfree_rcu(twsk->tw_md5_key, rcu);
        }
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

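/* Carry the ECN negotiation result over from the request socket into the
 * newly created child socket.
 */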
static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
                                         struct request_sock *req)
{
        tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here: the tp of the
 * listening socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
{
        struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

        if (newsk != NULL) {
                const struct inet_request_sock *ireq = inet_rsk(req);
                struct tcp_request_sock *treq = tcp_rsk(req);
                struct inet_connection_sock *newicsk = inet_csk(newsk);
                struct tcp_sock *newtp = tcp_sk(newsk);
                struct tcp_sock *oldtp = tcp_sk(sk);
                struct tcp_cookie_values *oldcvp = oldtp->cookie_values;

                /* TCP Cookie Transactions require space for the cookie pair,
                 * as it differs for each connection.  There is no need to
                 * copy any s_data_payload stored at the original socket.
                 * Failure will prevent resuming the connection.
                 *
                 * Presumed copied, in order of appearance:
                 *      cookie_in_always, cookie_out_never
                 */
                if (oldcvp != NULL) {
                        struct tcp_cookie_values *newcvp =
                                kzalloc(sizeof(*newtp->cookie_values),
                                        GFP_ATOMIC);

                        if (newcvp != NULL) {
                                kref_init(&newcvp->kref);
                                newcvp->cookie_desired =
                                                oldcvp->cookie_desired;
                                newtp->cookie_values = newcvp;
                        } else {
                                /* Not Yet Implemented */
                                newtp->cookie_values = NULL;
                        }
                }

                /* Now setup tcp_sock */
                newtp->pred_flags = 0;

                newtp->rcv_wup = newtp->copied_seq =
                newtp->rcv_nxt = treq->rcv_isn + 1;

                newtp->snd_sml = newtp->snd_una =
                newtp->snd_nxt = newtp->snd_up =
                        treq->snt_isn + 1 + tcp_s_data_size(oldtp);

                tcp_prequeue_init(newtp);
                INIT_LIST_HEAD(&newtp->tsq_node);

                tcp_init_wl(newtp, treq->rcv_isn);

                newtp->srtt = 0;
                newtp->mdev = TCP_TIMEOUT_INIT;
                newicsk->icsk_rto = TCP_TIMEOUT_INIT;

                newtp->packets_out = 0;
                newtp->retrans_out = 0;
                newtp->sacked_out = 0;
                newtp->fackets_out = 0;
                newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
                tcp_enable_early_retrans(newtp);

                /* So many TCP implementations out there (incorrectly) count the
                 * initial SYN frame in their delayed-ACK and congestion control
                 * algorithms that we must have the following bandaid to talk
                 * efficiently to them.  -DaveM
                 */
                newtp->snd_cwnd = TCP_INIT_CWND;
                newtp->snd_cwnd_cnt = 0;

                newtp->frto_counter = 0;
                newtp->frto_highmark = 0;

                if (newicsk->icsk_ca_ops != &tcp_init_congestion_ops &&
                    !try_module_get(newicsk->icsk_ca_ops->owner))
                        newicsk->icsk_ca_ops = &tcp_init_congestion_ops;

                tcp_set_ca_state(newsk, TCP_CA_Open);
                tcp_init_xmit_timers(newsk);
                skb_queue_head_init(&newtp->out_of_order_queue);
                newtp->write_seq = newtp->pushed_seq =
                        treq->snt_isn + 1 + tcp_s_data_size(oldtp);

                newtp->rx_opt.saw_tstamp = 0;

                newtp->rx_opt.dsack = 0;
                newtp->rx_opt.num_sacks = 0;

                newtp->urg_data = 0;

                if (sock_flag(newsk, SOCK_KEEPOPEN))
                        inet_csk_reset_keepalive_timer(newsk,
                                                       keepalive_time_when(newtp));

                newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
                if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
                        if (sysctl_tcp_fack)
                                tcp_enable_fack(newtp);
                }
                newtp->window_clamp = req->window_clamp;
                newtp->rcv_ssthresh = req->rcv_wnd;
                newtp->rcv_wnd = req->rcv_wnd;
                newtp->rx_opt.wscale_ok = ireq->wscale_ok;
                if (newtp->rx_opt.wscale_ok) {
                        newtp->rx_opt.snd_wscale = ireq->snd_wscale;
                        newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
                } else {
                        newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
                        newtp->window_clamp = min(newtp->window_clamp, 65535U);
                }
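                /* snd_wscale is applied here because, for a normal handshake,
                 * this skb is the final ACK of the 3WHS; per RFC 1323 only
                 * the window field in the SYN segment itself is unscaled.
                 */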
                newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
                                  newtp->rx_opt.snd_wscale);
                newtp->max_window = newtp->snd_wnd;

                if (newtp->rx_opt.tstamp_ok) {
                        newtp->rx_opt.ts_recent = req->ts_recent;
                        newtp->rx_opt.ts_recent_stamp = get_seconds();
                        newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
                } else {
                        newtp->rx_opt.ts_recent_stamp = 0;
                        newtp->tcp_header_len = sizeof(struct tcphdr);
                }
                newtp->tsoffset = 0;
#ifdef CONFIG_TCP_MD5SIG
                newtp->md5sig_info = NULL;      /*XXX*/
                if (newtp->af_specific->md5_lookup(sk, newsk))
                        newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
                if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
                        newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
                newtp->rx_opt.mss_clamp = req->mss;
                TCP_ECN_openreq_child(newtp, req);
                newtp->fastopen_rsk = NULL;
                newtp->syn_data_acked = 0;

                TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
        }
        return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current implementation contains a special check for
 * ack validation both here and inside tcp_v4_reqsk_send_ack(). Can we
 * do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                           struct request_sock *req,
                           struct request_sock **prev,
                           bool fastopen)
{
        struct tcp_options_received tmp_opt;
        const u8 *hash_location;
        struct sock *child;
        const struct tcphdr *th = tcp_hdr(skb);
        __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
        bool paws_reject = false;

        BUG_ON(fastopen == (sk->sk_state == TCP_LISTEN));

        tmp_opt.saw_tstamp = 0;
        if (th->doff > (sizeof(struct tcphdr)>>2)) {
                tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);

                if (tmp_opt.saw_tstamp) {
                        tmp_opt.ts_recent = req->ts_recent;
                        /* We do not store the true timestamp, but it is not
                         * required; it can be estimated (approximately) from
                         * other data.
                         */
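                        /* TCP_TIMEOUT_INIT is 1*HZ here, so with exponential
                         * backoff roughly (1 << num_timeout) seconds have
                         * passed since the original SYNACK; that is a good
                         * enough age estimate for the PAWS check below.
                         */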
                        tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
                        paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
                }
        }

        /* Check for pure retransmitted SYN. */
        if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
            flg == TCP_FLAG_SYN &&
            !paws_reject) {
                /*
                 * RFC793 draws this case (incorrectly! it was fixed in
                 * RFC1122) in figures 6 and 8, but the formal protocol
                 * description says NOTHING. To be more exact, it says that
                 * we should send an ACK, because this segment (at least, if
                 * it has no data) is out of window.
                 *
                 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT describe
                 *  the SYN-RECV state. All of its description is wrong; we
                 *  cannot believe it and should rely only on common sense
                 *  and implementation experience.
                 *
                 * Enforce "SYN-ACK" according to figure 8, figure 6 of
                 * RFC793, fixed by RFC1122.
                 *
                 * Note that even if there is new data in the SYN packet
                 * it will be thrown away too.
                 */
                inet_rtx_syn_ack(sk, req);
                return NULL;
        }

        /* What follows reproduces the section "SEGMENT ARRIVES"
           for the SYN-RECEIVED state of RFC793. It is broken,
           however: the only case in which it does not work is
           when SYNs are crossed.

           You would think that SYN crossing is impossible here, since
           we should have a SYN_SENT socket (from connect()) on our end,
           but this is not true if the crossed SYNs were sent to both
           ends by a malicious third party.  We must defend against this,
           and to do that we first verify the ACK (as per RFC793, page
           36) and reset if it is invalid.  Is this a true full defense?
           To convince ourselves, let us consider a way in which the ACK
           test can still pass in this 'malicious crossed SYNs' case.
           The malicious sender sends identical SYNs (and thus identical
           sequence numbers) to both A and B:

                A: gets SYN, seq=7
                B: gets SYN, seq=7

           By our good fortune, both A and B select the same initial
           send sequence number of seven :-)

                A: sends SYN|ACK, seq=7, ack_seq=8
                B: sends SYN|ACK, seq=7, ack_seq=8

           So we are now A eating this SYN|ACK, and the ACK test passes.
           So does the sequence test; the SYN is truncated, and thus we
           consider it a bare ACK.

           If icsk->icsk_accept_queue.rskq_defer_accept is set, we silently
           drop this bare ACK.  Otherwise, we create an established
           connection.  Both ends (listening sockets) accept the new
           incoming connection and try to talk to each other. 8-)

           Note: This case is both harmless and rare.  The probability is
           about the same as our discovering intelligent life on another
           planet tomorrow.

           But generally, we should (the RFC lies!) accept an ACK from a
           SYNACK both here and in tcp_rcv_state_process().
           tcp_rcv_state_process() does not, hence, we do not either.

           Note that this case is absolutely generic:
           we cannot optimize anything here without
           violating the protocol. All the checks must be made
           before any attempt to create a socket.
         */

        /* RFC793 page 36: "If the connection is in any non-synchronized state ...
         *                  and the incoming segment acknowledges something not yet
         *                  sent (the segment carries an unacceptable ACK) ...
         *                  a reset is sent."
         *
         * Invalid ACK: reset will be sent by listening socket.
         * Note that the ACK validity check for a Fast Open socket is done
         * elsewhere and is checked directly against the child socket rather
         * than req because user data may have been sent out.
         */
        if ((flg & TCP_FLAG_ACK) && !fastopen &&
            (TCP_SKB_CB(skb)->ack_seq !=
             tcp_rsk(req)->snt_isn + 1 + tcp_s_data_size(tcp_sk(sk))))
                return sk;

        /* Also, it would not be a bad idea to check rcv_tsecr, which
         * is essentially an ACK extension; too-early or too-late values
         * should cause a reset in unsynchronized states.
         */

        /* RFC793: "first check sequence number". */

        if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
                                          tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rcv_wnd)) {
                /* Out of window: send ACK and drop. */
                if (!(flg & TCP_FLAG_RST))
                        req->rsk_ops->send_ack(sk, skb, req);
                if (paws_reject)
                        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
                return NULL;
        }

        /* In sequence, PAWS is OK. */

        if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
                req->ts_recent = tmp_opt.rcv_tsval;

        if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
                /* Truncate SYN, it is out of window starting
                   at tcp_rsk(req)->rcv_isn + 1. */
                flg &= ~TCP_FLAG_SYN;
        }

        /* RFC793: "second check the RST bit" and
         *         "fourth, check the SYN bit"
         */
        if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
                TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
                goto embryonic_reset;
        }

        /* ACK sequence verified above, just make sure ACK is
         * set.  If ACK not set, just silently drop the packet.
         *
         * XXX (TFO) - if we ever allow "data after SYN", the
         * following check needs to be removed.
         */
        if (!(flg & TCP_FLAG_ACK))
                return NULL;

        /* Got ACK for our SYNACK, so update baseline for SYNACK RTT sample. */
        if (tmp_opt.saw_tstamp && tmp_opt.rcv_tsecr)
                tcp_rsk(req)->snt_synack = tmp_opt.rcv_tsecr;
        else if (req->num_retrans) /* don't take RTT sample if retrans && ~TS */
                tcp_rsk(req)->snt_synack = 0;

        /* For Fast Open no more processing is needed (sk is the
         * child socket).
         */
        if (fastopen)
                return sk;

        /* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
        if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
            TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
                inet_rsk(req)->acked = 1;
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
                return NULL;
        }

        /* OK, the ACK is valid, so create the big socket and
         * feed this segment to it. It will repeat all
         * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
         * ESTABLISHED STATE. If it is dropped after the
         * socket is created, expect trouble.
         */
        child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
        if (child == NULL)
                goto listen_overflow;

        inet_csk_reqsk_queue_unlink(sk, req, prev);
        inet_csk_reqsk_queue_removed(sk, req);

        inet_csk_reqsk_queue_add(sk, req, child);
        return child;

listen_overflow:
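        /* The accept queue is full. Unless tcp_abort_on_overflow is set we
         * silently drop the ACK; the request stays queued, and a later
         * retransmission from the peer may find room. With the sysctl set
         * we fall through to embryonic_reset and answer with a reset.
         */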
        if (!sysctl_tcp_abort_on_overflow) {
                inet_rsk(req)->acked = 1;
                return NULL;
        }

embryonic_reset:
        if (!(flg & TCP_FLAG_RST)) {
                /* Received a bad SYN pkt - for TFO we try not to reset
                 * the local connection unless it's really necessary, to
                 * avoid becoming vulnerable to outside attacks aimed at
                 * resetting legit local connections.
                 */
                req->rsk_ops->send_reset(sk, skb);
        } else if (fastopen) { /* received a valid RST pkt */
                reqsk_fastopen_remove(sk, req, true);
                tcp_reset(sk);
        }
        if (!fastopen) {
                inet_csk_reqsk_queue_drop(sk, req, prev);
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
        }
        return NULL;
}
EXPORT_SYMBOL(tcp_check_req);

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just short-circuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where, after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
                      struct sk_buff *skb)
{
        int ret = 0;
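        /* Snapshot the state before processing so the SYN_RECV ->
         * ESTABLISHED transition can be detected afterwards in order
         * to wake up the listener.
         */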
        int state = child->sk_state;

        if (!sock_owned_by_user(child)) {
                ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb),
                                            skb->len);
                /* Wakeup parent, send SIGIO */
                if (state == TCP_SYN_RECV && child->sk_state != state)
                        parent->sk_data_ready(parent, 0);
        } else {
                /* Alas, it is possible again, because we do the lookup
                 * in the main socket hash table and the lock on the
                 * listening socket no longer protects us.
                 */
                __sk_add_backlog(child, skb);
        }

        bh_unlock_sock(child);
        sock_put(child);
        return ret;
}
EXPORT_SYMBOL(tcp_child_process);
