/* linux/net/ipv6/tcp_ipv6.c */
   1/*
   2 *      TCP over IPv6
   3 *      Linux INET6 implementation
   4 *
   5 *      Authors:
   6 *      Pedro Roque             <roque@di.fc.ul.pt>
   7 *
   8 *      Based on:
   9 *      linux/net/ipv4/tcp.c
  10 *      linux/net/ipv4/tcp_input.c
  11 *      linux/net/ipv4/tcp_output.c
  12 *
  13 *      Fixes:
  14 *      Hideaki YOSHIFUJI       :       sin6_scope_id support
  15 *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
  16 *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
  17 *                                      a single port at the same time.
  18 *      YOSHIFUJI Hideaki @USAGI:       convert /proc/net/tcp6 to seq_file.
  19 *
  20 *      This program is free software; you can redistribute it and/or
  21 *      modify it under the terms of the GNU General Public License
  22 *      as published by the Free Software Foundation; either version
  23 *      2 of the License, or (at your option) any later version.
  24 */
  25
  26#include <linux/bottom_half.h>
  27#include <linux/module.h>
  28#include <linux/errno.h>
  29#include <linux/types.h>
  30#include <linux/socket.h>
  31#include <linux/sockios.h>
  32#include <linux/net.h>
  33#include <linux/jiffies.h>
  34#include <linux/in.h>
  35#include <linux/in6.h>
  36#include <linux/netdevice.h>
  37#include <linux/init.h>
  38#include <linux/jhash.h>
  39#include <linux/ipsec.h>
  40#include <linux/times.h>
  41#include <linux/slab.h>
  42#include <linux/uaccess.h>
  43#include <linux/ipv6.h>
  44#include <linux/icmpv6.h>
  45#include <linux/random.h>
  46
  47#include <net/tcp.h>
  48#include <net/ndisc.h>
  49#include <net/inet6_hashtables.h>
  50#include <net/inet6_connection_sock.h>
  51#include <net/ipv6.h>
  52#include <net/transp_v6.h>
  53#include <net/addrconf.h>
  54#include <net/ip6_route.h>
  55#include <net/ip6_checksum.h>
  56#include <net/inet_ecn.h>
  57#include <net/protocol.h>
  58#include <net/xfrm.h>
  59#include <net/snmp.h>
  60#include <net/dsfield.h>
  61#include <net/timewait_sock.h>
  62#include <net/inet_common.h>
  63#include <net/secure_seq.h>
  64#include <net/tcp_memcontrol.h>
  65#include <net/busy_poll.h>
  66
  67#include <linux/proc_fs.h>
  68#include <linux/seq_file.h>
  69
  70#include <linux/crypto.h>
  71#include <linux/scatterlist.h>
  72
  73static void     tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
  74static void     tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
  75                                      struct request_sock *req);
  76
  77static int      tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
  78
  79static const struct inet_connection_sock_af_ops ipv6_mapped;
  80static const struct inet_connection_sock_af_ops ipv6_specific;
  81#ifdef CONFIG_TCP_MD5SIG
  82static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
  83static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
  84#else
/* Stub used when CONFIG_TCP_MD5SIG is disabled: no MD5 key can ever be
 * configured, so lookups always miss.
 */
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
  90#endif
  91
  92static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
  93{
  94        struct dst_entry *dst = skb_dst(skb);
  95
  96        if (dst && dst_hold_safe(dst)) {
  97                const struct rt6_info *rt = (const struct rt6_info *)dst;
  98
  99                sk->sk_rx_dst = dst;
 100                inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
 101                inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
 102        }
 103}
 104
 105static void tcp_v6_hash(struct sock *sk)
 106{
 107        if (sk->sk_state != TCP_CLOSE) {
 108                if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
 109                        tcp_prot.hash(sk);
 110                        return;
 111                }
 112                local_bh_disable();
 113                __inet6_hash(sk, NULL);
 114                local_bh_enable();
 115        }
 116}
 117
 118static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
 119{
 120        return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
 121                                            ipv6_hdr(skb)->saddr.s6_addr32,
 122                                            tcp_hdr(skb)->dest,
 123                                            tcp_hdr(skb)->source);
 124}
 125
/* tcp_v6_connect() - active open (connect(2)) for an AF_INET6 TCP socket.
 * @sk: socket being connected
 * @uaddr: peer address, expected to be a struct sockaddr_in6
 * @addr_len: size of @uaddr; must be at least SIN6_LEN_RFC2133
 *
 * Validates the destination (flow label, link-local scope, multicast),
 * falls back to tcp_v4_connect() for IPv4-mapped peers, resolves a
 * route, binds source address/port, and starts the handshake via
 * tcp_connect().  Returns 0 or a negative errno.
 */
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	/* Caller supplied a flow label: validate it against the labels
	 * leased to this socket before using it. */
	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	/* Reconnecting to a different peer: forget cached timestamps so
	 * PAWS state from the previous peer cannot leak into the new
	 * connection. */
	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		/* Switch the socket to IPv4 operation for the duration
		 * of the v4 connect; undone below on failure. */
		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &sk->sk_v6_rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	/* np->opt is RCU-protected; holding the socket lock is enough. */
	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	/* NOTE(review): rt is assigned but not read afterwards in this
	 * version — looks like a leftover from an older tw_recycle path. */
	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	/* Pick an ISN unless one was preserved (repair mode) or carried
	 * over from a previous connect to the same peer. */
	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
 320
 321static void tcp_v6_mtu_reduced(struct sock *sk)
 322{
 323        struct dst_entry *dst;
 324
 325        if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
 326                return;
 327
 328        dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
 329        if (!dst)
 330                return;
 331
 332        if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
 333                tcp_sync_mss(sk, dst_mtu(dst));
 334                tcp_simple_retransmit(sk);
 335        }
 336}
 337
/* ICMPv6 error handler for TCP: called when an ICMPv6 error embedding a
 * TCP segment arrives.  Looks up the owning socket and reacts per error
 * type: route redirects, PMTU updates, or hard errors delivered to the
 * socket (or to a pending request sock for listeners).
 * @skb: the ICMPv6 packet; @offset: offset of the embedded TCP header
 * within skb->data; @info: ICMP info word (new MTU for PKT_TOOBIG).
 */
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	/* TIME_WAIT sockets ignore ICMP errors entirely. */
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	/* PKT_TOOBIG is still processed under a user-owned socket (the
	 * MTU update is deferred below); other ICMPs are dropped. */
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	/* Drop errors from hops closer than the configured minimum hop
	 * count (IP_MINTTL-style protection). */
	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	/* The quoted sequence must fall inside the send window, or the
	 * ICMP is stale/spoofed. */
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs send out by Linux are always <576bytes so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		/* Apply immediately if we own the socket, otherwise
		 * defer to tcp_release_cb() via tsq_flags (holding a
		 * socket reference until then). */
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for an request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && fastopen->sk == NULL)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	/* Established states: report as hard error only when the app
	 * opted in via IPV6_RECVERR, otherwise as a soft error. */
	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
 476
 477
/* Send a SYN-ACK for pending request @req.  Resolves a route if the
 * caller did not pass one, builds the SYN-ACK, checksums it for the
 * request's address pair and transmits it via ip6_xmit() using the
 * listener's tclass and (RCU-protected) tx options.
 * Returns 0 on success (congestion-notify codes count as success via
 * net_xmit_eval()), or a negative errno.
 */
static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      u16 queue_mapping,
			      struct tcp_fastopen_cookie *foc)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		skb_set_queue_mapping(skb, queue_mapping);
		rcu_read_lock();
		err = ip6_xmit(sk, skb, fl6, sk->sk_mark,
			       rcu_dereference(np->opt), np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}
 512
 513
 514static void tcp_v6_reqsk_destructor(struct request_sock *req)
 515{
 516        kfree_skb(inet_rsk(req)->pktopts);
 517}
 518
 519#ifdef CONFIG_TCP_MD5SIG
 520static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
 521                                                   const struct in6_addr *addr)
 522{
 523        return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
 524}
 525
 526static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
 527                                                struct sock *addr_sk)
 528{
 529        return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
 530}
 531
 532static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
 533                                                      struct request_sock *req)
 534{
 535        return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);
 536}
 537
/* TCP_MD5SIG setsockopt handler: install or delete an MD5 key for a
 * peer address.  A zero tcpm_keylen deletes the key.  IPv4-mapped peers
 * are stored under AF_INET using the embedded IPv4 address so lookups
 * from the v4 path find them.
 * Returns 0 or a negative errno (-EINVAL for malformed input, -EFAULT
 * for a bad user pointer).
 */
static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		/* Empty key means "delete the key for this peer". */
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
 571
/* Feed the IPv6 TCP pseudo-header (saddr, daddr, protocol, length; cf.
 * RFC 2460) into the per-CPU MD5 hash state held in @hp.  @nbytes is
 * the TCP length being authenticated.  Returns the crypto_hash_update()
 * result (0 on success).
 */
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
 589
/* Compute the TCP-MD5 digest over pseudo-header + TCP header only (no
 * payload); used when signing locally generated control segments
 * (RST/ACK).  Writes 16 bytes into @md5_hash.
 * Returns 0 on success, 1 on any crypto failure (digest zeroed).
 */
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	/* th->doff << 2 == TCP header length in bytes. */
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
 622
/* Compute the TCP-MD5 digest over pseudo-header, TCP header and the
 * payload of @skb.  The address pair is taken from @sk if given, else
 * from @req, else from the packet's own IPv6 header (inbound
 * verification).  Writes 16 bytes into @md5_hash.
 * Returns 0 on success, 1 on any crypto failure (digest zeroed).
 */
static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	/* Pick the address pair: full socket first, then request sock,
	 * then fall back to the packet itself. */
	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &sk->sk_v6_daddr;
	} else if (req) {
		saddr = &inet_rsk(req)->ir_v6_loc_addr;
		daddr = &inet_rsk(req)->ir_v6_rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	/* Payload starts doff*4 bytes into the segment. */
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
 673
/* Verify the inbound segment's TCP-MD5 option against the key (if any)
 * configured for its source address.  Per RFC 2385 semantics, both
 * "key configured but no option present" and "option present but no
 * key configured" are failures.
 * Returns 0 to accept the segment, 1 to drop it.
 */
static int __tcp_v6_inbound_md5_hash(struct sock *sk,
				     const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	/* genhash != 0 means the crypto op itself failed; otherwise
	 * compare the computed digest with the one in the segment. */
	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return 1;
	}
	return 0;
}
 715
 716static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 717{
 718        int ret;
 719
 720        rcu_read_lock();
 721        ret = __tcp_v6_inbound_md5_hash(sk, skb);
 722        rcu_read_unlock();
 723
 724        return ret;
 725}
 726
 727#endif
 728
/* Initialize the IPv6-specific fields of a freshly allocated request
 * sock from the incoming SYN: address pair, interface index (link-local
 * peers need a concrete ifindex), and, when the listener requested IPv6
 * ancillary data, a pinned reference to the SYN skb (released later by
 * tcp_v6_reqsk_destructor()).
 */
static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	ireq->ir_iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = inet6_iif(skb);

	/* Keep the SYN around (refcounted) if the listener wants any
	 * IPv6 packet-info ancillary data delivered on accept. */
	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk, skb) || np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}
 753
 754static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
 755                                          const struct request_sock *req,
 756                                          bool *strict)
 757{
 758        if (strict)
 759                *strict = true;
 760        return inet6_csk_route_req(sk, &fl->u.ip6, req);
 761}
 762
/* Generic request-sock operations for IPv6 TCP: allocation size and the
 * callbacks used while a connection request sits in the SYN queue.
 */
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
 772
/* TCP-specific request-sock callbacks for IPv6: MSS clamp for the
 * minimum IPv6 MTU, MD5/syncookie hooks, and the routing/ISN/SYN-ACK
 * helpers used by the shared tcp_conn_request() path.
 */
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
	.queue_hash_add =	inet6_csk_reqsk_queue_hash_add,
};
 789
 790static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
 791                                 u32 ack, u32 win, u32 tsval, u32 tsecr,
 792                                 int oif, struct tcp_md5sig_key *key, int rst,
 793                                 u8 tclass, u32 label)
 794{
 795        const struct tcphdr *th = tcp_hdr(skb);
 796        struct tcphdr *t1;
 797        struct sk_buff *buff;
 798        struct flowi6 fl6;
 799        struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
 800        struct sock *ctl_sk = net->ipv6.tcp_sk;
 801        unsigned int tot_len = sizeof(struct tcphdr);
 802        struct dst_entry *dst;
 803        __be32 *topt;
 804
 805        if (tsecr)
 806                tot_len += TCPOLEN_TSTAMP_ALIGNED;
 807#ifdef CONFIG_TCP_MD5SIG
 808        if (key)
 809                tot_len += TCPOLEN_MD5SIG_ALIGNED;
 810#endif
 811
 812        buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
 813                         GFP_ATOMIC);
 814        if (buff == NULL)
 815                return;
 816
 817        skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
 818
 819        t1 = skb_push(buff, tot_len);
 820        skb_reset_transport_header(buff);
 821
 822        /* Swap the send and the receive. */
 823        memset(t1, 0, sizeof(*t1));
 824        t1->dest = th->source;
 825        t1->source = th->dest;
 826        t1->doff = tot_len / 4;
 827        t1->seq = htonl(seq);
 828        t1->ack_seq = htonl(ack);
 829        t1->ack = !rst || !th->ack;
 830        t1->rst = rst;
 831        t1->window = htons(win);
 832
 833        topt = (__be32 *)(t1 + 1);
 834
 835        if (tsecr) {
 836                *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 837                                (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
 838                *topt++ = htonl(tsval);
 839                *topt++ = htonl(tsecr);
 840        }
 841
 842#ifdef CONFIG_TCP_MD5SIG
 843        if (key) {
 844                *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 845                                (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
 846                tcp_v6_md5_hash_hdr((__u8 *)topt, key,
 847                                    &ipv6_hdr(skb)->saddr,
 848                                    &ipv6_hdr(skb)->daddr, t1);
 849        }
 850#endif
 851
 852        memset(&fl6, 0, sizeof(fl6));
 853        fl6.daddr = ipv6_hdr(skb)->saddr;
 854        fl6.saddr = ipv6_hdr(skb)->daddr;
 855        fl6.flowlabel = label;
 856
 857        buff->ip_summed = CHECKSUM_PARTIAL;
 858        buff->csum = 0;
 859
 860        __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
 861
 862        fl6.flowi6_proto = IPPROTO_TCP;
 863        if (rt6_need_strict(&fl6.daddr) && !oif)
 864                fl6.flowi6_oif = inet6_iif(skb);
 865        else
 866                fl6.flowi6_oif = oif;
 867        fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
 868        fl6.fl6_dport = t1->dest;
 869        fl6.fl6_sport = t1->source;
 870        security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 871
 872        /* Pass a socket to ip6_dst_lookup either it is for RST
 873         * Underlying function will use this to retrieve the network
 874         * namespace
 875         */
 876        dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
 877        if (!IS_ERR(dst)) {
 878                skb_dst_set(buff, dst);
 879                ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
 880                TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
 881                if (rst)
 882                        TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
 883                return;
 884        }
 885
 886        kfree_skb(buff);
 887}
 888
/* Send a RST in reply to @skb.  @sk may be NULL (no matching socket was
 * found); in that case the reply is built purely from the offending packet.
 */
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	/* Never answer a RST with another RST. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		/* Full socket: look the key up directly on it. */
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not loose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					   &tcp_hashinfo, &ipv6h->saddr,
					   th->source, &ipv6h->daddr,
					   ntohs(th->source), inet6_iif(skb));
		if (!sk1)
			return;

		/* Hold RCU while the key is in use; released at release_sk1. */
		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		/* Verify the packet's own MD5 signature before replying. */
		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	}
#endif

	/* RFC 793 RST sequence selection: echo the peer's ACK as our SEQ,
	 * otherwise ACK everything the segment occupied.
	 */
	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	/* rst == 1, no timestamps, zero window. */
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	/* Falls through here on success as well; sk1 is only set on the
	 * listener-lookup path.
	 */
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
 959
 960static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq,
 961                            u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
 962                            struct tcp_md5sig_key *key, u8 tclass,
 963                            u32 label)
 964{
 965        tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
 966                             tclass, label);
 967}
 968
/* ACK a segment that hit a TIME_WAIT socket, replaying the connection's
 * remembered sequence state, timestamps, tclass and flow label.
 * Consumes the time-wait reference taken by the lookup.
 */
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	/* NOTE(review): tw_flowlabel << 12 positions the stored label for
	 * fl6.flowlabel in tcp_v6_send_response() — confirm the stored
	 * representation matches this shift.
	 */
	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, (tw->tw_flowlabel << 12));

	inet_twsk_put(tw);
}
 982
/* ACK on behalf of a pending request socket (SYN_RECV / Fast Open). */
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}
 996
 997
/* Map an incoming segment on a listener to the socket that should handle
 * it: a pending request socket, an already-established child, a
 * syncookie-validated connection, or the listener itself.
 * Returns NULL when the segment should be discarded.
 */
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	/* No request socket: maybe a child already completed the handshake. */
	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			/* Caller expects the returned child locked. */
			bh_lock_sock(nsk);
			return nsk;
		}
		/* Drop the reference the established lookup took. */
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	/* Non-SYN segment with no match: may be the ACK of a syncookie. */
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
1030
1031static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1032{
1033        if (skb->protocol == htons(ETH_P_IP))
1034                return tcp_v4_conn_request(sk, skb);
1035
1036        if (!ipv6_unicast_destination(skb))
1037                goto drop;
1038
1039        return tcp_conn_request(&tcp6_request_sock_ops,
1040                                &tcp_request_sock_ipv6_ops, sk, skb);
1041drop:
1042        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1043        return 0; /* don't send reset */
1044}
1045
/* Create the child socket once the 3-way handshake completes.
 * Two paths: a v4-mapped child cloned via the IPv4 code, or a native
 * IPv6 child built from the request socket.  Returns NULL on failure
 * (accept queue full, no route, allocation failure, port inheritance).
 */
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		/* Let IPv4 build the child, then retrofit IPv6 state. */
		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		/* Inherit the listener's ipv6_pinfo wholesale, then fix up. */
		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		/* Record addresses in ::ffff:a.b.c.d form. */
		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newsk->sk_v6_rcv_saddr = newnp->saddr;

		/* The child speaks IPv4 internally from here on. */
		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		/* Clear list/option pointers copied by the memcpy above so the
		 * child does not share them with the listener.
		 */
		newnp->ipv6_mc_list = NULL;
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	/* dst ownership moves to the child here. */
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	/* Addresses come from the request socket, not the packet. */
	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	sk_set_txhash(newsk);

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_mc_list = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(ireq->pktopts,
					      sk_gfp_atomic(sk, GFP_ATOMIC));
		consume_skb(ireq->pktopts);
		ireq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more one thing there: reattach optmem
	   to newsk.
	 */
	opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	/* Cap the advertised MSS at the user-requested value, if any. */
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	/* Dummy v4 addresses so the shared inet code sees something sane. */
	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_atomic(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		/* Child cannot be exposed; tear it down completely. */
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
1246
1247/* The socket must have it's spinlock held when we get
1248 * here.
1249 *
1250 * We have a potential double-lock case here, so even when
1251 * doing backlog processing we use the BH locking scheme.
1252 * This is because we cannot sleep with the original spinlock
1253 * held.
1254 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	/* Socket filter may drop (and may have trimmed) the skb. */
	if (tcp_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		/* Invalidate the cached RX route if the input interface or
		 * routing cookie no longer matches.
		 */
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			/* Latch the clone; opt_skb now holds the old one. */
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	/* Free whichever skb ended up unreferenced after the exchange. */
	kfree_skb(opt_skb);
	return 0;
}
1386
/* Main IPv6 TCP input routine, called from the IPv6 protocol handler in
 * softirq context.  Validates the segment, finds the owning socket and
 * either processes it directly, prequeues it, or backlogs it.
 */
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	/* doff is in 32-bit words; reject headers shorter than the minimum. */
	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	/* Re-read headers: pskb_may_pull may have reallocated the head. */
	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	/* IP_MINTTL-style protection: drop if hop limit is too low. */
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;
#endif

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	/* The filter may have trimmed the skb; refresh header pointers. */
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

	sk_mark_napi_id(sk, skb);
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		/* Socket not owned by a process: process now unless the
		 * prequeue took the skb.
		 */
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		/* Valid segment with no owner: answer with a RST. */
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2)) {
		inet_twsk_put(inet_twsk(sk));
		goto bad_packet;
	}
	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		/* A new SYN may legitimately reuse the time-wait tuple if a
		 * listener exists; kill the time-wait socket and restart.
		 */
		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}
1540
1541static void tcp_v6_early_demux(struct sk_buff *skb)
1542{
1543        const struct ipv6hdr *hdr;
1544        const struct tcphdr *th;
1545        struct sock *sk;
1546
1547        if (skb->pkt_type != PACKET_HOST)
1548                return;
1549
1550        if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1551                return;
1552
1553        hdr = ipv6_hdr(skb);
1554        th = tcp_hdr(skb);
1555
1556        if (th->doff < sizeof(struct tcphdr) / 4)
1557                return;
1558
1559        sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1560                                        &hdr->saddr, th->source,
1561                                        &hdr->daddr, ntohs(th->dest),
1562                                        inet6_iif(skb));
1563        if (sk) {
1564                skb->sk = sk;
1565                skb->destructor = sock_edemux;
1566                if (sk->sk_state != TCP_TIME_WAIT) {
1567                        struct dst_entry *dst = sk->sk_rx_dst;
1568
1569                        if (dst)
1570                                dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1571                        if (dst &&
1572                            inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1573                                skb_dst_set_noref(skb, dst);
1574                }
1575        }
1576}
1577
/* Time-wait socket hooks for IPv6 TCP (size, uniqueness, destructor). */
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
1583
/* Address-family operations for native IPv6 TCP sockets. */
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};
1604
1605#ifdef CONFIG_TCP_MD5SIG
/* MD5-signature operations for native IPv6 TCP sockets. */
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
1611#endif
1612
1613/*
1614 *      TCP over IPv4 via INET6 API
1615 */
/* Address-family operations for v4-mapped sockets: IPv4 transmit path
 * behind the IPv6 socket API (note ip_queue_xmit / tcp_v4_send_check).
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
1635
1636#ifdef CONFIG_TCP_MD5SIG
/* MD5-signature operations for v4-mapped sockets (IPv4 hashing). */
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
1642#endif
1643
1644/* NOTE: A lot of things set to zero explicitly by call to
1645 *       sk_alloc() so need not be done here.
1646 */
1647static int tcp_v6_init_sock(struct sock *sk)
1648{
1649        struct inet_connection_sock *icsk = inet_csk(sk);
1650
1651        tcp_init_sock(sk);
1652
1653        icsk->icsk_af_ops = &ipv6_specific;
1654
1655#ifdef CONFIG_TCP_MD5SIG
1656        tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1657#endif
1658
1659        return 0;
1660}
1661
/* Tear down an IPv6 TCP socket: shared TCP state first, then IPv6 state. */
static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
1667
1668#ifdef CONFIG_PROC_FS
1669/* Proc filesystem TCPv6 sock list dumping. */
/* Dump one open request (SYN_RECV) socket as a /proc/net/tcp6 row.
 * @i is the row index, @uid the owning listener's uid.
 */
static void get_openreq6(struct seq_file *seq,
			 const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
{
	/* Remaining SYN-ACK retransmit time; clamp negative to zero. */
	int ttd = req->expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq), uid),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
1700
1701static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1702{
1703        const struct in6_addr *dest, *src;
1704        __u16 destp, srcp;
1705        int timer_active;
1706        unsigned long timer_expires;
1707        const struct inet_sock *inet = inet_sk(sp);
1708        const struct tcp_sock *tp = tcp_sk(sp);
1709        const struct inet_connection_sock *icsk = inet_csk(sp);
1710        struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
1711
1712        dest  = &sp->sk_v6_daddr;
1713        src   = &sp->sk_v6_rcv_saddr;
1714        destp = ntohs(inet->inet_dport);
1715        srcp  = ntohs(inet->inet_sport);
1716
1717        if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
1718                timer_active    = 1;
1719                timer_expires   = icsk->icsk_timeout;
1720        } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1721                timer_active    = 4;
1722                timer_expires   = icsk->icsk_timeout;
1723        } else if (timer_pending(&sp->sk_timer)) {
1724                timer_active    = 2;
1725                timer_expires   = sp->sk_timer.expires;
1726        } else {
1727                timer_active    = 0;
1728                timer_expires = jiffies;
1729        }
1730
1731        seq_printf(seq,
1732                   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1733                   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
1734                   i,
1735                   src->s6_addr32[0], src->s6_addr32[1],
1736                   src->s6_addr32[2], src->s6_addr32[3], srcp,
1737                   dest->s6_addr32[0], dest->s6_addr32[1],
1738                   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1739                   sp->sk_state,
1740                   tp->write_seq-tp->snd_una,
1741                   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
1742                   timer_active,
1743                   jiffies_delta_to_clock_t(timer_expires - jiffies),
1744                   icsk->icsk_retransmits,
1745                   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
1746                   icsk->icsk_probes_out,
1747                   sock_i_ino(sp),
1748                   atomic_read(&sp->sk_refcnt), sp,
1749                   jiffies_to_clock_t(icsk->icsk_rto),
1750                   jiffies_to_clock_t(icsk->icsk_ack.ato),
1751                   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
1752                   tp->snd_cwnd,
1753                   sp->sk_state == TCP_LISTEN ?
1754                        (fastopenq ? fastopenq->max_qlen : 0) :
1755                        (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
1756                   );
1757}
1758
/*
 * Print one timewait socket as a /proc/net/tcp6 row.  Most columns are
 * fixed zeros since a timewait sock keeps no queues or counters; the
 * timer column ("3") carries the remaining timewait lifetime.
 */
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	/* Ticks until the timewait timer fires (may be negative if stale). */
	s32 delta = tw->tw_ttd - inet_tw_time_stamp();

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
1783
/*
 * seq_file ->show() callback for /proc/net/tcp6.  @v is either the
 * start token (print the header line), a request_sock from a listener's
 * SYN queue, or a struct sock that may actually be a timewait sock —
 * dispatch on the iterator state and sk_state accordingly.
 */
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		/* Timewait socks share the ehash chain with full socks. */
		if (sk->sk_state == TCP_TIME_WAIT)
			get_timewait6_sock(seq, v, st->num);
		else
			get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		/* v is a request_sock; syn_wait_sk is the listener. */
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	}
out:
	return 0;
}
1815
/* file_operations for /proc/net/tcp6; tcp_seq_open wires up the
 * common TCP iterator for this address family.
 */
static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};
1823
/* AF_INET6 descriptor handed to the shared TCP proc machinery;
 * only ->show differs per family, the iterator ops are filled in
 * by tcp_proc_register().
 */
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};
1832
/* Create /proc/net/tcp6 in the given network namespace. */
int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}
1837
/* Remove /proc/net/tcp6 from the given network namespace. */
void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
1842#endif
1843
/*
 * Zero a tcp6 socket of @size bytes while preserving the pinet6 pointer.
 * The slab uses SLAB_DESTROY_BY_RCU (see tcpv6_prot below), so a
 * concurrent RCU lookup may still dereference a recycled socket; pinet6
 * must therefore stay valid across reuse.  Clear everything before the
 * field, skip it, then clear everything after it.
 */
static void tcp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}
1854
/*
 * struct proto for IPv6 TCP sockets.  Most operations are the generic
 * TCP ones shared with IPv4; the IPv6-specific entry points are init,
 * destroy, backlog_rcv, hash and clear_sk.  Socket memory accounting
 * state (sockets_allocated, memory_*, sysctl_*) is shared with IPv4.
 */
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	/* RCU-safe slab reuse; pairs with tcp_v6_clear_sk above. */
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
	.clear_sk		= tcp_v6_clear_sk,
};
1900
/* L4 protocol handler registered with the IPv6 stack for IPPROTO_TCP:
 * packet input, ICMPv6 error handling, and early demux.
 */
static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
1907
/* socket() switch entry mapping SOCK_STREAM/IPPROTO_TCP on AF_INET6
 * to tcpv6_prot; PERMANENT means it can never be unregistered by
 * another module.
 */
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
1916
/* Per-namespace setup: create the kernel-internal control socket used
 * to send RSTs/ACKs on behalf of the stack.
 */
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}
1922
/* Per-namespace teardown: release the control socket. */
static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}
1927
/* Batched namespace teardown: flush any remaining IPv6 timewait socks
 * for all namespaces being destroyed in this batch.
 */
static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}
1932
/* Per-network-namespace lifecycle hooks for IPv6 TCP. */
static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
1938
/*
 * Register IPv6 TCP with the stack: the L4 protocol handler, the
 * socket-switch entry, and the pernet hooks.  On failure, unwind the
 * registrations already made (standard goto cleanup) and return the
 * error; 0 on success.
 */
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}
1964
/* Unregister everything tcpv6_init() registered, in reverse order. */
void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}
1971