linux/net/ipv6/tcp_ipv6.c
<<
>>
Prefs
   1/*
   2 *      TCP over IPv6
   3 *      Linux INET6 implementation
   4 *
   5 *      Authors:
   6 *      Pedro Roque             <roque@di.fc.ul.pt>
   7 *
   8 *      Based on:
   9 *      linux/net/ipv4/tcp.c
  10 *      linux/net/ipv4/tcp_input.c
  11 *      linux/net/ipv4/tcp_output.c
  12 *
  13 *      Fixes:
  14 *      Hideaki YOSHIFUJI       :       sin6_scope_id support
  15 *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
  16 *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
  17 *                                      a single port at the same time.
  18 *      YOSHIFUJI Hideaki @USAGI:       convert /proc/net/tcp6 to seq_file.
  19 *
  20 *      This program is free software; you can redistribute it and/or
  21 *      modify it under the terms of the GNU General Public License
  22 *      as published by the Free Software Foundation; either version
  23 *      2 of the License, or (at your option) any later version.
  24 */
  25
  26#include <linux/bottom_half.h>
  27#include <linux/module.h>
  28#include <linux/errno.h>
  29#include <linux/types.h>
  30#include <linux/socket.h>
  31#include <linux/sockios.h>
  32#include <linux/net.h>
  33#include <linux/jiffies.h>
  34#include <linux/in.h>
  35#include <linux/in6.h>
  36#include <linux/netdevice.h>
  37#include <linux/init.h>
  38#include <linux/jhash.h>
  39#include <linux/ipsec.h>
  40#include <linux/times.h>
  41#include <linux/slab.h>
  42#include <linux/uaccess.h>
  43#include <linux/ipv6.h>
  44#include <linux/icmpv6.h>
  45#include <linux/random.h>
  46
  47#include <net/tcp.h>
  48#include <net/ndisc.h>
  49#include <net/inet6_hashtables.h>
  50#include <net/inet6_connection_sock.h>
  51#include <net/ipv6.h>
  52#include <net/transp_v6.h>
  53#include <net/addrconf.h>
  54#include <net/ip6_route.h>
  55#include <net/ip6_checksum.h>
  56#include <net/inet_ecn.h>
  57#include <net/protocol.h>
  58#include <net/xfrm.h>
  59#include <net/snmp.h>
  60#include <net/dsfield.h>
  61#include <net/timewait_sock.h>
  62#include <net/inet_common.h>
  63#include <net/secure_seq.h>
  64#include <net/tcp_memcontrol.h>
  65#include <net/busy_poll.h>
  66
  67#include <linux/proc_fs.h>
  68#include <linux/seq_file.h>
  69
  70#include <linux/crypto.h>
  71#include <linux/scatterlist.h>
  72
  73static void     tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
  74static void     tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
  75                                      struct request_sock *req);
  76
  77static int      tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
  78
  79static const struct inet_connection_sock_af_ops ipv6_mapped;
  80static const struct inet_connection_sock_af_ops ipv6_specific;
   81#ifdef CONFIG_TCP_MD5SIG
   82static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
   83static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
   84#else
     /* Stub used when TCP-MD5 support is compiled out: no key ever matches,
      * so callers treat every segment as unsigned.
      */
   85static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
   86                                                   const struct in6_addr *addr)
   87{
   88        return NULL;
   89}
   90#endif
  91
  92static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
  93{
  94        struct dst_entry *dst = skb_dst(skb);
  95
  96        if (dst && dst_hold_safe(dst)) {
  97                const struct rt6_info *rt = (const struct rt6_info *)dst;
  98
  99                sk->sk_rx_dst = dst;
 100                inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
 101                inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
 102        }
 103}
 104
 105static void tcp_v6_hash(struct sock *sk)
 106{
 107        if (sk->sk_state != TCP_CLOSE) {
 108                if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
 109                        tcp_prot.hash(sk);
 110                        return;
 111                }
 112                local_bh_disable();
 113                __inet6_hash(sk, NULL);
 114                local_bh_enable();
 115        }
 116}
 117
 118static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
 119{
 120        return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
 121                                            ipv6_hdr(skb)->saddr.s6_addr32,
 122                                            tcp_hdr(skb)->dest,
 123                                            tcp_hdr(skb)->source);
 124}
 125
/* tcp_v6_connect - begin an active TCP open on an AF_INET6 socket.
 * @sk: socket to connect; @uaddr/@addr_len: destination (sockaddr_in6).
 * Validates flow label and scope id, then either hands a v4-mapped
 * destination to the IPv4 connect path (swapping af_ops) or routes,
 * hashes and sends the SYN itself.  Returns 0 or a negative errno.
 */
  126static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
  127                          int addr_len)
  128{
  129        struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
  130        struct inet_sock *inet = inet_sk(sk);
  131        struct inet_connection_sock *icsk = inet_csk(sk);
  132        struct ipv6_pinfo *np = inet6_sk(sk);
  133        struct tcp_sock *tp = tcp_sk(sk);
  134        struct in6_addr *saddr = NULL, *final_p, final;
  135        struct ipv6_txoptions *opt;
  136        struct rt6_info *rt;
  137        struct flowi6 fl6;
  138        struct dst_entry *dst;
  139        int addr_type;
  140        int err;
  141
  142        if (addr_len < SIN6_LEN_RFC2133)
  143                return -EINVAL;
  144
  145        if (usin->sin6_family != AF_INET6)
  146                return -EAFNOSUPPORT;
  147
  148        memset(&fl6, 0, sizeof(fl6));
  149
             /* A caller-requested flow label must already be attached to
              * this socket (fl6_sock_lookup); otherwise reject.
              */
  150        if (np->sndflow) {
  151                fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
  152                IP6_ECN_flow_init(fl6.flowlabel);
  153                if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
  154                        struct ip6_flowlabel *flowlabel;
  155                        flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
  156                        if (flowlabel == NULL)
  157                                return -EINVAL;
  158                        fl6_sock_release(flowlabel);
  159                }
  160        }
  161
  162        /*
  163         *      connect() to INADDR_ANY means loopback (BSD'ism).
  164         */
  165
  166        if (ipv6_addr_any(&usin->sin6_addr))
  167                usin->sin6_addr.s6_addr[15] = 0x1;
  168
  169        addr_type = ipv6_addr_type(&usin->sin6_addr);
  170
  171        if (addr_type & IPV6_ADDR_MULTICAST)
  172                return -ENETUNREACH;
  173
  174        if (addr_type&IPV6_ADDR_LINKLOCAL) {
  175                if (addr_len >= sizeof(struct sockaddr_in6) &&
  176                    usin->sin6_scope_id) {
  177                        /* If interface is set while binding, indices
  178                         * must coincide.
  179                         */
  180                        if (sk->sk_bound_dev_if &&
  181                            sk->sk_bound_dev_if != usin->sin6_scope_id)
  182                                return -EINVAL;
  183
  184                        sk->sk_bound_dev_if = usin->sin6_scope_id;
  185                }
  186
  187                /* Connect to link-local address requires an interface */
  188                if (!sk->sk_bound_dev_if)
  189                        return -EINVAL;
  190        }
  191
             /* Destination changed: cached per-destination timestamp and
              * sequence state is stale, so reset it.
              */
  192        if (tp->rx_opt.ts_recent_stamp &&
  193            !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
  194                tp->rx_opt.ts_recent = 0;
  195                tp->rx_opt.ts_recent_stamp = 0;
  196                tp->write_seq = 0;
  197        }
  198
  199        sk->sk_v6_daddr = usin->sin6_addr;
  200        np->flow_label = fl6.flowlabel;
  201
  202        /*
  203         *      TCP over IPv4
  204         */
  205
  206        if (addr_type == IPV6_ADDR_MAPPED) {
                     /* Saved so the failure path below can restore it. */
  207                u32 exthdrlen = icsk->icsk_ext_hdr_len;
  208                struct sockaddr_in sin;
  209
  210                SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
  211
  212                if (__ipv6_only_sock(sk))
  213                        return -ENETUNREACH;
  214
  215                sin.sin_family = AF_INET;
  216                sin.sin_port = usin->sin6_port;
  217                sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
  218
  219                icsk->icsk_af_ops = &ipv6_mapped;
  220                sk->sk_backlog_rcv = tcp_v4_do_rcv;
  221#ifdef CONFIG_TCP_MD5SIG
  222                tp->af_specific = &tcp_sock_ipv6_mapped_specific;
  223#endif
  224
  225                err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
  226
  227                if (err) {
                             /* IPv4 connect failed: roll back to the IPv6
                              * operations installed above.
                              */
  228                        icsk->icsk_ext_hdr_len = exthdrlen;
  229                        icsk->icsk_af_ops = &ipv6_specific;
  230                        sk->sk_backlog_rcv = tcp_v6_do_rcv;
  231#ifdef CONFIG_TCP_MD5SIG
  232                        tp->af_specific = &tcp_sock_ipv6_specific;
  233#endif
  234                        goto failure;
  235                } else {
  236                        ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
  237                        ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
  238                                               &sk->sk_v6_rcv_saddr);
  239                }
  240
  241                return err;
  242        }
  243
  244        if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
  245                saddr = &sk->sk_v6_rcv_saddr;
  246
  247        fl6.flowi6_proto = IPPROTO_TCP;
  248        fl6.daddr = sk->sk_v6_daddr;
  249        fl6.saddr = saddr ? *saddr : np->saddr;
  250        fl6.flowi6_oif = sk->sk_bound_dev_if;
  251        fl6.flowi6_mark = sk->sk_mark;
  252        fl6.fl6_dport = usin->sin6_port;
  253        fl6.fl6_sport = inet->inet_sport;
  254
  255        opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
  256        final_p = fl6_update_dst(&fl6, opt, &final);
  257
  258        security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
  259
  260        dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
  261        if (IS_ERR(dst)) {
  262                err = PTR_ERR(dst);
  263                goto failure;
  264        }
  265
             /* No bound source address: adopt the one route selection chose. */
  266        if (saddr == NULL) {
  267                saddr = &fl6.saddr;
  268                sk->sk_v6_rcv_saddr = *saddr;
  269        }
  270
  271        /* set the source address */
  272        np->saddr = *saddr;
  273        inet->inet_rcv_saddr = LOOPBACK4_IPV6;
  274
  275        sk->sk_gso_type = SKB_GSO_TCPV6;
  276        ip6_dst_store(sk, dst, NULL, NULL);
  277
  278        rt = (struct rt6_info *) dst;
  279        if (tcp_death_row.sysctl_tw_recycle &&
  280            !tp->rx_opt.ts_recent_stamp &&
  281            ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
  282                tcp_fetch_timewait_stamp(sk, dst);
  283
  284        icsk->icsk_ext_hdr_len = 0;
  285        if (opt)
  286                icsk->icsk_ext_hdr_len = opt->opt_flen +
  287                                         opt->opt_nflen;
  288
             /* Clamp MSS so a full segment fits in the IPv6 minimum MTU. */
  289        tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
  290
  291        inet->inet_dport = usin->sin6_port;
  292
  293        tcp_set_state(sk, TCP_SYN_SENT);
  294        err = inet6_hash_connect(&tcp_death_row, sk);
  295        if (err)
  296                goto late_failure;
  297
  298        sk_set_txhash(sk);
  299
             /* Pick the initial sequence number unless repair mode preset it. */
  300        if (!tp->write_seq && likely(!tp->repair))
  301                tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
  302                                                             sk->sk_v6_daddr.s6_addr32,
  303                                                             inet->inet_sport,
  304                                                             inet->inet_dport);
  305
  306        err = tcp_connect(sk);
  307        if (err)
  308                goto late_failure;
  309
  310        return 0;
  311
  312late_failure:
  313        tcp_set_state(sk, TCP_CLOSE);
  314        __sk_dst_reset(sk);
  315failure:
  316        inet->inet_dport = 0;
  317        sk->sk_route_caps = 0;
  318        return err;
  319}
 320
 321static void tcp_v6_mtu_reduced(struct sock *sk)
 322{
 323        struct dst_entry *dst;
 324
 325        if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
 326                return;
 327
 328        dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
 329        if (!dst)
 330                return;
 331
 332        if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
 333                tcp_sync_mss(sk, dst_mtu(dst));
 334                tcp_simple_retransmit(sk);
 335        }
 336}
 337
/* tcp_v6_err - ICMPv6 error handler for TCP sockets.
 * @skb: the ICMPv6 packet; @type/@code: ICMPv6 error class; @offset:
 * offset of the embedded TCP header inside @skb; @info: extra data
 * (e.g. the new MTU for PKT_TOOBIG).  Locates the affected socket and
 * handles redirects, PMTU updates, and errors on pending requests or
 * half-open connections.  Uses BH socket locking (bh_lock_sock).
 */
  338static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
  339                u8 type, u8 code, int offset, __be32 info)
  340{
  341        const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
  342        const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
  343        struct ipv6_pinfo *np;
  344        struct sock *sk;
  345        int err;
  346        struct tcp_sock *tp;
  347        struct request_sock *fastopen;
  348        __u32 seq, snd_una;
  349        struct net *net = dev_net(skb->dev);
  350
  351        sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
  352                        th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
  353
  354        if (sk == NULL) {
  355                ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
  356                                   ICMP6_MIB_INERRORS);
  357                return;
  358        }
  359
  360        if (sk->sk_state == TCP_TIME_WAIT) {
  361                inet_twsk_put(inet_twsk(sk));
  362                return;
  363        }
  364
  365        bh_lock_sock(sk);
             /* Account ICMPs arriving while the socket is user-locked;
              * PKT_TOOBIG is excluded since it is deferred below.
              */
  366        if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
  367                NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
  368
  369        if (sk->sk_state == TCP_CLOSE)
  370                goto out;
  371
  372        if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
  373                NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
  374                goto out;
  375        }
  376
  377        tp = tcp_sk(sk);
  378        seq = ntohl(th->seq);
  379        /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
  380        fastopen = tp->fastopen_rsk;
  381        snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
             /* Ignore errors whose quoted sequence lies outside our send window. */
  382        if (sk->sk_state != TCP_LISTEN &&
  383            !between(seq, snd_una, tp->snd_nxt)) {
  384                NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
  385                goto out;
  386        }
  387
  388        np = inet6_sk(sk);
  389
  390        if (type == NDISC_REDIRECT) {
  391                if (!sock_owned_by_user(sk)) {
  392                        struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
  393
  394                        if (dst)
  395                                dst->ops->redirect(dst, sk, skb);
  396                }
  397                goto out;
  398        }
  399
  400        if (type == ICMPV6_PKT_TOOBIG) {
  401                /* We are not interested in TCP_LISTEN and open_requests
  402                 * (SYN-ACKs send out by Linux are always <576bytes so
  403                 * they should go through unfragmented).
  404                 */
  405                if (sk->sk_state == TCP_LISTEN)
  406                        goto out;
  407
  408                if (!ip6_sk_accept_pmtu(sk))
  409                        goto out;
  410
  411                tp->mtu_info = ntohl(info);
  412                if (!sock_owned_by_user(sk))
  413                        tcp_v6_mtu_reduced(sk);
                     /* Socket user-locked: defer the MTU update via tsq_flags
                      * and hold a reference for the deferred work.
                      * NOTE(review): deferred processing presumably happens
                      * when the lock is released — confirm in tcp_release_cb().
                      */
  414                else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
  415                                           &tp->tsq_flags))
  416                        sock_hold(sk);
  417                goto out;
  418        }
  419
  420        icmpv6_err_convert(type, code, &err);
  421
  422        /* Might be for an request_sock */
  423        switch (sk->sk_state) {
  424                struct request_sock *req, **prev;
  425        case TCP_LISTEN:
  426                if (sock_owned_by_user(sk))
  427                        goto out;
  428
  429                req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
  430                                           &hdr->saddr, inet6_iif(skb));
  431                if (!req)
  432                        goto out;
  433
  434                /* ICMPs are not backlogged, hence we cannot get
  435                 * an established socket here.
  436                 */
  437                WARN_ON(req->sk != NULL);
  438
  439                if (seq != tcp_rsk(req)->snt_isn) {
  440                        NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
  441                        goto out;
  442                }
  443
  444                inet_csk_reqsk_queue_drop(sk, req, prev);
  445                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
  446                goto out;
  447
  448        case TCP_SYN_SENT:
  449        case TCP_SYN_RECV:
  450                /* Only in fast or simultaneous open. If a fast open socket
  451                 * is already accepted it is treated as a connected one below.
  452                 */
  453                if (fastopen && fastopen->sk == NULL)
  454                        break;
  455
  456                if (!sock_owned_by_user(sk)) {
  457                        sk->sk_err = err;
  458                        sk->sk_error_report(sk);                /* Wake people up to see the error (see connect in sock.c) */
  459
  460                        tcp_done(sk);
  461                } else
  462                        sk->sk_err_soft = err;
  463                goto out;
  464        }
  465
  466        if (!sock_owned_by_user(sk) && np->recverr) {
  467                sk->sk_err = err;
  468                sk->sk_error_report(sk);
  469        } else
  470                sk->sk_err_soft = err;
  471
  472out:
  473        bh_unlock_sock(sk);
  474        sock_put(sk);
  475}
 476
 477
 478static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
 479                              struct flowi *fl,
 480                              struct request_sock *req,
 481                              u16 queue_mapping,
 482                              struct tcp_fastopen_cookie *foc)
 483{
 484        struct inet_request_sock *ireq = inet_rsk(req);
 485        struct ipv6_pinfo *np = inet6_sk(sk);
 486        struct flowi6 *fl6 = &fl->u.ip6;
 487        struct sk_buff *skb;
 488        int err = -ENOMEM;
 489
 490        /* First, grab a route. */
 491        if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
 492                goto done;
 493
 494        skb = tcp_make_synack(sk, dst, req, foc);
 495
 496        if (skb) {
 497                __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
 498                                    &ireq->ir_v6_rmt_addr);
 499
 500                fl6->daddr = ireq->ir_v6_rmt_addr;
 501                skb_set_queue_mapping(skb, queue_mapping);
 502                rcu_read_lock();
 503                err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
 504                               np->tclass);
 505                rcu_read_unlock();
 506                err = net_xmit_eval(err);
 507        }
 508
 509done:
 510        return err;
 511}
 512
 513
 514static void tcp_v6_reqsk_destructor(struct request_sock *req)
 515{
 516        kfree_skb(inet_rsk(req)->pktopts);
 517}
 518
 519#ifdef CONFIG_TCP_MD5SIG
 520static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
 521                                                   const struct in6_addr *addr)
 522{
 523        return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
 524}
 525
 526static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
 527                                                struct sock *addr_sk)
 528{
 529        return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
 530}
 531
 532static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
 533                                                      struct request_sock *req)
 534{
 535        return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);
 536}
 537
 538static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
 539                                 int optlen)
 540{
 541        struct tcp_md5sig cmd;
 542        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
 543
 544        if (optlen < sizeof(cmd))
 545                return -EINVAL;
 546
 547        if (copy_from_user(&cmd, optval, sizeof(cmd)))
 548                return -EFAULT;
 549
 550        if (sin6->sin6_family != AF_INET6)
 551                return -EINVAL;
 552
 553        if (!cmd.tcpm_keylen) {
 554                if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 555                        return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
 556                                              AF_INET);
 557                return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
 558                                      AF_INET6);
 559        }
 560
 561        if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
 562                return -EINVAL;
 563
 564        if (ipv6_addr_v4mapped(&sin6->sin6_addr))
 565                return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
 566                                      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
 567
 568        return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
 569                              AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
 570}
 571
/* Feed the IPv6 TCP pseudo-header (saddr, daddr, protocol, length) into
 * the MD5 hash state in @hp.  @nbytes is the TCP length field of the
 * pseudo-header.  Returns the crypto_hash_update() result (0 on success).
 */
  572static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
  573                                        const struct in6_addr *daddr,
  574                                        const struct in6_addr *saddr, int nbytes)
  575{
  576        struct tcp6_pseudohdr *bp;
  577        struct scatterlist sg;
  578
  579        bp = &hp->md5_blk.ip6;
  580        /* 1. TCP pseudo-header (RFC2460) */
  581        bp->saddr = *saddr;
  582        bp->daddr = *daddr;
  583        bp->protocol = cpu_to_be32(IPPROTO_TCP);
  584        bp->len = cpu_to_be32(nbytes);
  585
  586        sg_init_one(&sg, bp, sizeof(*bp));
  587        return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
  588}
 589
/* Compute the TCP-MD5 signature over pseudo-header + TCP header only
 * (no payload), writing 16 bytes into @md5_hash.  Returns 0 on success;
 * on any crypto failure the output is zeroed and 1 is returned.
 */
  590static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
  591                               const struct in6_addr *daddr, struct in6_addr *saddr,
  592                               const struct tcphdr *th)
  593{
  594        struct tcp_md5sig_pool *hp;
  595        struct hash_desc *desc;
  596
  597        hp = tcp_get_md5sig_pool();
  598        if (!hp)
  599                goto clear_hash_noput;
  600        desc = &hp->md5_desc;
  601
  602        if (crypto_hash_init(desc))
  603                goto clear_hash;
  604        if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
  605                goto clear_hash;
  606        if (tcp_md5_hash_header(hp, th))
  607                goto clear_hash;
  608        if (tcp_md5_hash_key(hp, key))
  609                goto clear_hash;
  610        if (crypto_hash_final(desc, md5_hash))
  611                goto clear_hash;
  612
  613        tcp_put_md5sig_pool();
  614        return 0;
  615
  616clear_hash:
  617        tcp_put_md5sig_pool();
  618clear_hash_noput:
  619        memset(md5_hash, 0, 16);
  620        return 1;
  621}
 622
/* Compute the TCP-MD5 signature over pseudo-header, TCP header and
 * payload of @skb, writing 16 bytes into @md5_hash.  Addresses are
 * taken from @sk, else @req, else the skb's own IPv6 header (used when
 * validating inbound segments).  Returns 0 on success; on failure the
 * output is zeroed and 1 is returned.
 */
  623static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
  624                               const struct sock *sk,
  625                               const struct request_sock *req,
  626                               const struct sk_buff *skb)
  627{
  628        const struct in6_addr *saddr, *daddr;
  629        struct tcp_md5sig_pool *hp;
  630        struct hash_desc *desc;
  631        const struct tcphdr *th = tcp_hdr(skb);
  632
             /* Address selection: socket > request > packet headers. */
  633        if (sk) {
  634                saddr = &inet6_sk(sk)->saddr;
  635                daddr = &sk->sk_v6_daddr;
  636        } else if (req) {
  637                saddr = &inet_rsk(req)->ir_v6_loc_addr;
  638                daddr = &inet_rsk(req)->ir_v6_rmt_addr;
  639        } else {
  640                const struct ipv6hdr *ip6h = ipv6_hdr(skb);
  641                saddr = &ip6h->saddr;
  642                daddr = &ip6h->daddr;
  643        }
  644
  645        hp = tcp_get_md5sig_pool();
  646        if (!hp)
  647                goto clear_hash_noput;
  648        desc = &hp->md5_desc;
  649
  650        if (crypto_hash_init(desc))
  651                goto clear_hash;
  652
  653        if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
  654                goto clear_hash;
  655        if (tcp_md5_hash_header(hp, th))
  656                goto clear_hash;
  657        if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
  658                goto clear_hash;
  659        if (tcp_md5_hash_key(hp, key))
  660                goto clear_hash;
  661        if (crypto_hash_final(desc, md5_hash))
  662                goto clear_hash;
  663
  664        tcp_put_md5sig_pool();
  665        return 0;
  666
  667clear_hash:
  668        tcp_put_md5sig_pool();
  669clear_hash_noput:
  670        memset(md5_hash, 0, 16);
  671        return 1;
  672}
 673
 674static int __tcp_v6_inbound_md5_hash(struct sock *sk,
 675                                     const struct sk_buff *skb)
 676{
 677        const __u8 *hash_location = NULL;
 678        struct tcp_md5sig_key *hash_expected;
 679        const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 680        const struct tcphdr *th = tcp_hdr(skb);
 681        int genhash;
 682        u8 newhash[16];
 683
 684        hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
 685        hash_location = tcp_parse_md5sig_option(th);
 686
 687        /* We've parsed the options - do we have a hash? */
 688        if (!hash_expected && !hash_location)
 689                return 0;
 690
 691        if (hash_expected && !hash_location) {
 692                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
 693                return 1;
 694        }
 695
 696        if (!hash_expected && hash_location) {
 697                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
 698                return 1;
 699        }
 700
 701        /* check the signature */
 702        genhash = tcp_v6_md5_hash_skb(newhash,
 703                                      hash_expected,
 704                                      NULL, NULL, skb);
 705
 706        if (genhash || memcmp(hash_location, newhash, 16) != 0) {
 707                net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
 708                                     genhash ? "failed" : "mismatch",
 709                                     &ip6h->saddr, ntohs(th->source),
 710                                     &ip6h->daddr, ntohs(th->dest));
 711                return 1;
 712        }
 713        return 0;
 714}
 715
 716static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 717{
 718        int ret;
 719
 720        rcu_read_lock();
 721        ret = __tcp_v6_inbound_md5_hash(sk, skb);
 722        rcu_read_unlock();
 723
 724        return ret;
 725}
 726
 727#endif
 728
 729static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
 730                            struct sk_buff *skb)
 731{
 732        struct inet_request_sock *ireq = inet_rsk(req);
 733        struct ipv6_pinfo *np = inet6_sk(sk);
 734
 735        ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
 736        ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
 737
 738        ireq->ir_iif = sk->sk_bound_dev_if;
 739
 740        /* So that link locals have meaning */
 741        if (!sk->sk_bound_dev_if &&
 742            ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
 743                ireq->ir_iif = inet6_iif(skb);
 744
 745        if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
 746            (ipv6_opt_accepted(sk, skb) || np->rxopt.bits.rxinfo ||
 747             np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
 748             np->rxopt.bits.rxohlim)) {
 749                atomic_inc(&skb->users);
 750                ireq->pktopts = skb;
 751        }
 752}
 753
 754static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
 755                                          const struct request_sock *req,
 756                                          bool *strict)
 757{
 758        if (strict)
 759                *strict = true;
 760        return inet6_csk_route_req(sk, &fl->u.ip6, req);
 761}
 762
/* Request-socket (SYN queue) operations for IPv6 TCP. */
  763struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
  764        .family         =       AF_INET6,
  765        .obj_size       =       sizeof(struct tcp6_request_sock),
  766        .rtx_syn_ack    =       tcp_rtx_synack,
  767        .send_ack       =       tcp_v6_reqsk_send_ack,
  768        .destructor     =       tcp_v6_reqsk_destructor,
  769        .send_reset     =       tcp_v6_send_reset,
  770        .syn_ack_timeout =      tcp_syn_ack_timeout,
  771};
 772
/* Address-family-specific hooks used while an IPv6 connection request
 * is pending (routing, ISN generation, SYN-ACK transmission, MD5).
 */
  773static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
             /* Worst-case MSS: minimum IPv6 MTU minus fixed headers. */
  774        .mss_clamp      =       IPV6_MIN_MTU - sizeof(struct tcphdr) -
  775                                sizeof(struct ipv6hdr),
  776#ifdef CONFIG_TCP_MD5SIG
  777        .md5_lookup     =       tcp_v6_reqsk_md5_lookup,
  778        .calc_md5_hash  =       tcp_v6_md5_hash_skb,
  779#endif
  780        .init_req       =       tcp_v6_init_req,
  781#ifdef CONFIG_SYN_COOKIES
  782        .cookie_init_seq =      cookie_v6_init_sequence,
  783#endif
  784        .route_req      =       tcp_v6_route_req,
  785        .init_seq       =       tcp_v6_init_sequence,
  786        .send_synack    =       tcp_v6_send_synack,
  787        .queue_hash_add =       inet6_csk_reqsk_queue_hash_add,
  788};
 789
/* Build and send an unattached TCP reply segment (RST or pure ACK) in
 * response to @skb, with addresses and ports swapped.  Optional
 * timestamp and MD5 options are appended when @tsecr/@key are set.
 * The reply goes out via the per-netns control socket, not @sk (which
 * may be NULL).
 */
  790static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq,
  791                                 u32 ack, u32 win, u32 tsval, u32 tsecr,
  792                                 int oif, struct tcp_md5sig_key *key, int rst,
  793                                 u8 tclass, u32 label)
  794{
  795        const struct tcphdr *th = tcp_hdr(skb);
  796        struct tcphdr *t1;
  797        struct sk_buff *buff;
  798        struct flowi6 fl6;
  799        struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
  800        struct sock *ctl_sk = net->ipv6.tcp_sk;
  801        unsigned int tot_len = sizeof(struct tcphdr);
  802        struct dst_entry *dst;
  803        __be32 *topt;
  804
             /* Size the header for the options we are going to emit. */
  805        if (tsecr)
  806                tot_len += TCPOLEN_TSTAMP_ALIGNED;
  807#ifdef CONFIG_TCP_MD5SIG
  808        if (key)
  809                tot_len += TCPOLEN_MD5SIG_ALIGNED;
  810#endif
  811
  812        buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
  813                         GFP_ATOMIC);
  814        if (buff == NULL)
  815                return;
  816
  817        skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
  818
  819        t1 = (struct tcphdr *) skb_push(buff, tot_len);
  820        skb_reset_transport_header(buff);
  821
  822        /* Swap the send and the receive. */
  823        memset(t1, 0, sizeof(*t1));
  824        t1->dest = th->source;
  825        t1->source = th->dest;
  826        t1->doff = tot_len / 4;
  827        t1->seq = htonl(seq);
  828        t1->ack_seq = htonl(ack);
             /* A RST replying to a bare (un-ACKed) segment carries ACK too. */
  829        t1->ack = !rst || !th->ack;
  830        t1->rst = rst;
  831        t1->window = htons(win);
  832
  833        topt = (__be32 *)(t1 + 1);
  834
  835        if (tsecr) {
  836                *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
  837                                (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
  838                *topt++ = htonl(tsval);
  839                *topt++ = htonl(tsecr);
  840        }
  841
  842#ifdef CONFIG_TCP_MD5SIG
  843        if (key) {
  844                *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
  845                                (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
                     /* Sign with swapped addresses: our saddr is the skb's daddr. */
  846                tcp_v6_md5_hash_hdr((__u8 *)topt, key,
  847                                    &ipv6_hdr(skb)->saddr,
  848                                    &ipv6_hdr(skb)->daddr, t1);
  849        }
  850#endif
  851
  852        memset(&fl6, 0, sizeof(fl6));
  853        fl6.daddr = ipv6_hdr(skb)->saddr;
  854        fl6.saddr = ipv6_hdr(skb)->daddr;
  855        fl6.flowlabel = label;
  856
  857        buff->ip_summed = CHECKSUM_PARTIAL;
  858        buff->csum = 0;
  859
  860        __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
  861
  862        fl6.flowi6_proto = IPPROTO_TCP;
             /* Link-local destinations need an explicit egress interface. */
  863        if (rt6_need_strict(&fl6.daddr) && !oif)
  864                fl6.flowi6_oif = inet6_iif(skb);
  865        else
  866                fl6.flowi6_oif = oif;
  867        fl6.fl6_dport = t1->dest;
  868        fl6.fl6_sport = t1->source;
  869        security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
  870
  871        /* Pass the control socket to ip6_dst_lookup_flow even for a RST;
  872         * the underlying function uses it to retrieve the network
  873         * namespace.
  874         */
  875        dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
  876        if (!IS_ERR(dst)) {
  877                skb_dst_set(buff, dst);
  878                ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
  879                TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
  880                if (rst)
  881                        TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
  882                return;
  883        }
  884
  885        kfree_skb(buff);
  886}
 887
/* Send a RST in reply to @skb.
 *
 * @sk may be NULL (no matching socket was found), a full socket, or a
 * request/timewait socket; only a full socket is used directly for the
 * MD5 key lookup.  The reply is built from the addresses in @skb itself.
 */
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	/* Never answer a RST with another RST (RFC 793). */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/*
		 * The active side is lost. Try to find a listening socket
		 * through the source port, and then find the MD5 key through
		 * that listening socket. This does not loosen security:
		 * the incoming packet is checked against the MD5 hash of the
		 * key we find, and no RST is generated if the hash doesn't
		 * match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					   &tcp_hashinfo, &ipv6h->saddr,
					   th->source, &ipv6h->daddr,
					   ntohs(th->source), inet6_iif(skb));
		if (!sk1)
			return;

		/* MD5 keys on sk1 are RCU protected. */
		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	}
#endif

	/* Pick the sequence numbers per RFC 793: echo the peer's ACK as our
	 * SEQ, or ACK everything the offending segment occupied.
	 */
	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
 958
/* Send a bare ACK built from the addresses in @skb.
 * Thin wrapper around tcp_v6_send_response() with rst == 0.
 */
static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq,
                            u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
                            struct tcp_md5sig_key *key, u8 tclass,
                            u32 label)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label);
}
 967
 968static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 969{
 970        struct inet_timewait_sock *tw = inet_twsk(sk);
 971        struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 972
 973        tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 974                        tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
 975                        tcp_time_stamp + tcptw->tw_ts_offset,
 976                        tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
 977                        tw->tw_tclass, (tw->tw_flowlabel << 12));
 978
 979        inet_twsk_put(tw);
 980}
 981
/* ACK a segment arriving for a connection still in SYN_RECV (or a Fast Open
 * child), using the sequence state stored in the request socket.
 */
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			0, 0);
}
 995
 996
/* For a segment arriving on a listening socket, locate the state it
 * belongs to.  Returns:
 *  - the result of tcp_check_req() if a pending request_sock matches,
 *  - a locked established child socket (caller sees nsk != sk),
 *  - NULL if only a stale TIME_WAIT entry matched (its ref is dropped),
 *  - otherwise @sk itself, possibly replaced by a syncookie-validated
 *    child when SYN cookies are enabled.
 */
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev, false);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	/* A non-SYN segment with no matching state may be the ACK that
	 * completes a syncookie handshake.
	 */
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
1029
1030static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1031{
1032        if (skb->protocol == htons(ETH_P_IP))
1033                return tcp_v4_conn_request(sk, skb);
1034
1035        if (!ipv6_unicast_destination(skb))
1036                goto drop;
1037
1038        return tcp_conn_request(&tcp6_request_sock_ops,
1039                                &tcp_request_sock_ipv6_ops, sk, skb);
1040drop:
1041        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1042        return 0; /* don't send reset */
1043}
1044
/* Create the child socket that completes a passive open.
 *
 * Two paths:
 *  - an IPv4 (v6-mapped) connection: delegate to tcp_v4_syn_recv_sock()
 *    and then re-point the child's af_ops/backlog handlers at the mapped
 *    variants;
 *  - a native IPv6 connection: create the child, copy addresses and IPv6
 *    options from the request/listener, and hash it into the established
 *    table.
 *
 * Returns the new socket, or NULL on failure (accept queue full, no
 * route, out of memory, or port inheritance failure).
 */
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                                         struct request_sock *req,
                                         struct dst_entry *dst)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		/* Expose the IPv4 endpoints as ::ffff:a.b.c.d addresses. */
		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newsk->sk_v6_rcv_saddr = newnp->saddr;

		/* From here on the child behaves like an IPv4 socket. */
		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		/* Clear IPv6 state blindly inherited from the listener via
		 * the memcpy() of *np above; the child owns none of it.
		 */
		newnp->ipv6_mc_list = NULL;
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt         = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	/* Addresses come from the request socket built at SYN time. */
	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	sk_set_txhash(newsk);

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_mc_list = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(ireq->pktopts,
					      sk_gfp_atomic(sk, GFP_ATOMIC));
		consume_skb(ireq->pktopts);
		ireq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt        = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more thing here: reattach optmem
	   to newsk.
	 */
	opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

	/* Fill the IPv4 address fields with a recognizable dummy so stray
	 * IPv4-side users don't see garbage.
	 */
	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_atomic(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
1245
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	/* Apply the socket filter; a non-zero return means drop. */
	if (tcp_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		/* Drop a cached rx route that no longer matches the input
		 * interface or has been invalidated.
		 */
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			/* Latch the clone; free whatever was there before. */
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
1385
/* IPv6 TCP protocol handler entry point: validate the segment, find the
 * owning socket and deliver (directly, via prequeue, or via the backlog).
 */
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	/* doff is in 32-bit words; reject headers shorter than the
	 * fixed TCP header, then pull the full header incl. options.
	 */
	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	/* Reload header pointers: pskb_may_pull() may have moved data. */
	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	/* Enforce IP_MINTTL-style hop limit policy (RFC 5082 GTSM). */
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;
#endif

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	/* The filter may have trimmed the skb; re-derive the headers. */
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

	sk_mark_napi_id(sk, skb);
	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
csum_error:
		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		/* Valid segment, no socket: answer with a RST. */
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2)) {
		inet_twsk_put(inet_twsk(sk));
		goto bad_packet;
	}
	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		/* A new SYN may legitimately reuse a TIME_WAIT 4-tuple:
		 * hand it to a current listener and retire the twsk.
		 */
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}
1539
1540static void tcp_v6_early_demux(struct sk_buff *skb)
1541{
1542        const struct ipv6hdr *hdr;
1543        const struct tcphdr *th;
1544        struct sock *sk;
1545
1546        if (skb->pkt_type != PACKET_HOST)
1547                return;
1548
1549        if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1550                return;
1551
1552        hdr = ipv6_hdr(skb);
1553        th = tcp_hdr(skb);
1554
1555        if (th->doff < sizeof(struct tcphdr) / 4)
1556                return;
1557
1558        sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1559                                        &hdr->saddr, th->source,
1560                                        &hdr->daddr, ntohs(th->dest),
1561                                        inet6_iif(skb));
1562        if (sk) {
1563                skb->sk = sk;
1564                skb->destructor = sock_edemux;
1565                if (sk->sk_state != TCP_TIME_WAIT) {
1566                        struct dst_entry *dst = sk->sk_rx_dst;
1567
1568                        if (dst)
1569                                dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
1570                        if (dst &&
1571                            inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1572                                skb_dst_set_noref(skb, dst);
1573                }
1574        }
1575}
1576
/* TIME_WAIT socket bookkeeping for TCPv6. */
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
1582
/* Address-family specific connection ops for native IPv6 TCP sockets. */
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};
1603
#ifdef CONFIG_TCP_MD5SIG
/* TCP-MD5 (RFC 2385) helpers for native IPv6 sockets. */
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
1611
1612/*
1613 *      TCP over IPv4 via INET6 API
1614 */
1615static const struct inet_connection_sock_af_ops ipv6_mapped = {
1616        .queue_xmit        = ip_queue_xmit,
1617        .send_check        = tcp_v4_send_check,
1618        .rebuild_header    = inet_sk_rebuild_header,
1619        .sk_rx_dst_set     = inet_sk_rx_dst_set,
1620        .conn_request      = tcp_v6_conn_request,
1621        .syn_recv_sock     = tcp_v6_syn_recv_sock,
1622        .net_header_len    = sizeof(struct iphdr),
1623        .setsockopt        = ipv6_setsockopt,
1624        .getsockopt        = ipv6_getsockopt,
1625        .addr2sockaddr     = inet6_csk_addr2sockaddr,
1626        .sockaddr_len      = sizeof(struct sockaddr_in6),
1627        .bind_conflict     = inet6_csk_bind_conflict,
1628#ifdef CONFIG_COMPAT
1629        .compat_setsockopt = compat_ipv6_setsockopt,
1630        .compat_getsockopt = compat_ipv6_getsockopt,
1631#endif
1632        .mtu_reduced       = tcp_v4_mtu_reduced,
1633};
1634
#ifdef CONFIG_TCP_MD5SIG
/* TCP-MD5 helpers for v4-mapped sockets: IPv4 hashing, IPv6 key parsing. */
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
1642
1643/* NOTE: A lot of things set to zero explicitly by call to
1644 *       sk_alloc() so need not be done here.
1645 */
1646static int tcp_v6_init_sock(struct sock *sk)
1647{
1648        struct inet_connection_sock *icsk = inet_csk(sk);
1649
1650        tcp_init_sock(sk);
1651
1652        icsk->icsk_af_ops = &ipv6_specific;
1653
1654#ifdef CONFIG_TCP_MD5SIG
1655        tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1656#endif
1657
1658        return 0;
1659}
1660
static void tcp_v6_destroy_sock(struct sock *sk)
{
	/* Generic TCP teardown first, then the IPv6-specific state. */
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
1666
1667#ifdef CONFIG_PROC_FS
1668/* Proc filesystem TCPv6 sock list dumping. */
/* Dump one SYN_RECV request socket as a single /proc/net/tcp6 line.
 * @i is the row index printed in the "sl" column; @uid is the owning
 * listener's (namespace-munged) uid.
 */
static void get_openreq6(struct seq_file *seq,
                         const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
{
        /* Remaining lifetime of the request in jiffies; clamped below so
         * an already-expired entry prints 0 instead of a negative delta.
         */
        int ttd = req->expires - jiffies;
        const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
        const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

        if (ttd < 0)
                ttd = 0;

        seq_printf(seq,
                   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
                   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
                   i,
                   src->s6_addr32[0], src->s6_addr32[1],
                   src->s6_addr32[2], src->s6_addr32[3],
                   inet_rsk(req)->ir_num,
                   dest->s6_addr32[0], dest->s6_addr32[1],
                   dest->s6_addr32[2], dest->s6_addr32[3],
                   ntohs(inet_rsk(req)->ir_rmt_port),
                   TCP_SYN_RECV,
                   0, 0, /* could print option size, but that is af dependent. */
                   1,   /* timers active (only the expire timer) */
                   jiffies_to_clock_t(ttd),
                   req->num_timeout,
                   from_kuid_munged(seq_user_ns(seq), uid),
                   0,  /* non standard timer */
                   0, /* open_requests have no inode */
                   0, req);
}
1699
/* Dump one established/listening TCPv6 socket as a single
 * /proc/net/tcp6 line.  @i is the row index for the "sl" column.
 */
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
        const struct in6_addr *dest, *src;
        __u16 destp, srcp;
        int timer_active;
        unsigned long timer_expires;
        const struct inet_sock *inet = inet_sk(sp);
        const struct tcp_sock *tp = tcp_sk(sp);
        const struct inet_connection_sock *icsk = inet_csk(sp);
        struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;

        dest  = &sp->sk_v6_daddr;
        src   = &sp->sk_v6_rcv_saddr;
        destp = ntohs(inet->inet_dport);
        srcp  = ntohs(inet->inet_sport);

        /* Encode the pending timer into the "tr" column: 1 = retransmit,
         * 4 = zero-window probe, 2 = keepalive (sk_timer), 0 = none.
         */
        if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
                timer_active    = 1;
                timer_expires   = icsk->icsk_timeout;
        } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
                timer_active    = 4;
                timer_expires   = icsk->icsk_timeout;
        } else if (timer_pending(&sp->sk_timer)) {
                timer_active    = 2;
                timer_expires   = sp->sk_timer.expires;
        } else {
                timer_active    = 0;
                timer_expires = jiffies;        /* prints a zero delta below */
        }

        seq_printf(seq,
                   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
                   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
                   i,
                   src->s6_addr32[0], src->s6_addr32[1],
                   src->s6_addr32[2], src->s6_addr32[3], srcp,
                   dest->s6_addr32[0], dest->s6_addr32[1],
                   dest->s6_addr32[2], dest->s6_addr32[3], destp,
                   sp->sk_state,
                   tp->write_seq-tp->snd_una,
                   /* rx_queue: accept backlog for listeners, unread bytes otherwise */
                   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
                   timer_active,
                   jiffies_delta_to_clock_t(timer_expires - jiffies),
                   icsk->icsk_retransmits,
                   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
                   icsk->icsk_probes_out,
                   sock_i_ino(sp),
                   atomic_read(&sp->sk_refcnt), sp,
                   jiffies_to_clock_t(icsk->icsk_rto),
                   jiffies_to_clock_t(icsk->icsk_ack.ato),
                   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
                   tp->snd_cwnd,
                   /* last column: fastopen max_qlen for listeners, else
                    * ssthresh (-1 while still in initial slow start).
                    */
                   sp->sk_state == TCP_LISTEN ?
                        (fastopenq ? fastopenq->max_qlen : 0) :
                        (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
                   );
}
1757
/* Dump one TIME_WAIT (or FIN_WAIT2 substate) socket as a single
 * /proc/net/tcp6 line.  Most columns are fixed placeholders since a
 * timewait sock carries no queues, uid or inode.
 */
static void get_timewait6_sock(struct seq_file *seq,
                               struct inet_timewait_sock *tw, int i)
{
        const struct in6_addr *dest, *src;
        __u16 destp, srcp;
        /* time-to-death relative to the timewait clock, may be negative */
        s32 delta = tw->tw_ttd - inet_tw_time_stamp();

        dest = &tw->tw_v6_daddr;
        src  = &tw->tw_v6_rcv_saddr;
        destp = ntohs(tw->tw_dport);
        srcp  = ntohs(tw->tw_sport);

        seq_printf(seq,
                   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
                   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
                   i,
                   src->s6_addr32[0], src->s6_addr32[1],
                   src->s6_addr32[2], src->s6_addr32[3], srcp,
                   dest->s6_addr32[0], dest->s6_addr32[1],
                   dest->s6_addr32[2], dest->s6_addr32[3], destp,
                   tw->tw_substate, 0, 0,
                   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
                   atomic_read(&tw->tw_refcnt), tw);
}
1782
1783static int tcp6_seq_show(struct seq_file *seq, void *v)
1784{
1785        struct tcp_iter_state *st;
1786        struct sock *sk = v;
1787
1788        if (v == SEQ_START_TOKEN) {
1789                seq_puts(seq,
1790                         "  sl  "
1791                         "local_address                         "
1792                         "remote_address                        "
1793                         "st tx_queue rx_queue tr tm->when retrnsmt"
1794                         "   uid  timeout inode\n");
1795                goto out;
1796        }
1797        st = seq->private;
1798
1799        switch (st->state) {
1800        case TCP_SEQ_STATE_LISTENING:
1801        case TCP_SEQ_STATE_ESTABLISHED:
1802                if (sk->sk_state == TCP_TIME_WAIT)
1803                        get_timewait6_sock(seq, v, st->num);
1804                else
1805                        get_tcp6_sock(seq, v, st->num);
1806                break;
1807        case TCP_SEQ_STATE_OPENREQ:
1808                get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
1809                break;
1810        }
1811out:
1812        return 0;
1813}
1814
/* file_operations for /proc/net/tcp6; generic seq_file plumbing with
 * the TCP iterator supplied via tcp_seq_open().
 */
static const struct file_operations tcp6_afinfo_seq_fops = {
        .owner   = THIS_MODULE,
        .open    = tcp_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_net
};
1822
/* AF_INET6 descriptor handed to the shared TCP /proc code; only .show
 * needs to be provided, the iteration callbacks are common.
 */
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
        .name           = "tcp6",
        .family         = AF_INET6,
        .seq_fops       = &tcp6_afinfo_seq_fops,
        .seq_ops        = {
                .show           = tcp6_seq_show,
        },
};
1831
/* Register /proc/net/tcp6 in @net; returns 0 or a negative errno. */
int __net_init tcp6_proc_init(struct net *net)
{
        return tcp_proc_register(net, &tcp6_seq_afinfo);
}
1836
/* Remove /proc/net/tcp6 from @net. */
void tcp6_proc_exit(struct net *net)
{
        tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
1841#endif
1842
/* Clear a recycled sock for reuse while keeping inet_sk(sk)->pinet6
 * intact: concurrent RCU lookups may still dereference it.  Zeroes
 * everything up to pinet6 (minus the lookup-critical nulls fields)
 * and everything after it, leaving only the pointer itself untouched.
 */
static void tcp_v6_clear_sk(struct sock *sk, int size)
{
        struct inet_sock *inet = inet_sk(sk);

        /* we do not want to clear pinet6 field, because of RCU lookups */
        sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));

        /* bytes remaining after the pinet6 member */
        size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
        memset(&inet->pinet6 + 1, 0, size);
}
1853
/* Protocol operations for SOCK_STREAM/IPPROTO_TCP over AF_INET6.
 * Mostly the shared TCP implementation; the v6-specific entries are
 * connect, init/destroy, backlog_rcv, hash and clear_sk.  Memory
 * accounting state (sockets_allocated etc.) is shared with IPv4 TCP.
 */
struct proto tcpv6_prot = {
        .name                   = "TCPv6",
        .owner                  = THIS_MODULE,
        .close                  = tcp_close,
        .connect                = tcp_v6_connect,
        .disconnect             = tcp_disconnect,
        .accept                 = inet_csk_accept,
        .ioctl                  = tcp_ioctl,
        .init                   = tcp_v6_init_sock,
        .destroy                = tcp_v6_destroy_sock,
        .shutdown               = tcp_shutdown,
        .setsockopt             = tcp_setsockopt,
        .getsockopt             = tcp_getsockopt,
        .recvmsg                = tcp_recvmsg,
        .sendmsg                = tcp_sendmsg,
        .sendpage               = tcp_sendpage,
        .backlog_rcv            = tcp_v6_do_rcv,
        .release_cb             = tcp_release_cb,
        .hash                   = tcp_v6_hash,
        .unhash                 = inet_unhash,
        .get_port               = inet_csk_get_port,
        .enter_memory_pressure  = tcp_enter_memory_pressure,
        .stream_memory_free     = tcp_stream_memory_free,
        .sockets_allocated      = &tcp_sockets_allocated,
        .memory_allocated       = &tcp_memory_allocated,
        .memory_pressure        = &tcp_memory_pressure,
        .orphan_count           = &tcp_orphan_count,
        .sysctl_wmem            = sysctl_tcp_wmem,
        .sysctl_rmem            = sysctl_tcp_rmem,
        .max_header             = MAX_TCP_HEADER,
        .obj_size               = sizeof(struct tcp6_sock),
        .slab_flags             = SLAB_DESTROY_BY_RCU,
        .twsk_prot              = &tcp6_timewait_sock_ops,
        .rsk_prot               = &tcp6_request_sock_ops,
        .h.hashinfo             = &tcp_hashinfo,
        .no_autobind            = true,
#ifdef CONFIG_COMPAT
        .compat_setsockopt      = compat_tcp_setsockopt,
        .compat_getsockopt      = compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
        .proto_cgroup           = tcp_proto_cgroup,
#endif
        .clear_sk               = tcp_v6_clear_sk,
};
1899
/* IPv6 transport hook for protocol number IPPROTO_TCP: receive path,
 * ICMPv6 error handling and early demux.
 */
static const struct inet6_protocol tcpv6_protocol = {
        .early_demux    =       tcp_v6_early_demux,
        .handler        =       tcp_v6_rcv,
        .err_handler    =       tcp_v6_err,
        .flags          =       INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
1906
/* socket()-layer registration: maps SOCK_STREAM/IPPROTO_TCP on
 * AF_INET6 to tcpv6_prot and the generic inet6 stream ops.
 */
static struct inet_protosw tcpv6_protosw = {
        .type           =       SOCK_STREAM,
        .protocol       =       IPPROTO_TCP,
        .prot           =       &tcpv6_prot,
        .ops            =       &inet6_stream_ops,
        .flags          =       INET_PROTOSW_PERMANENT |
                                INET_PROTOSW_ICSK,
};
1915
/* Per-netns setup: create the kernel control socket used for sending
 * resets/ACKs on behalf of this namespace.  Returns 0 or -errno.
 */
static int __net_init tcpv6_net_init(struct net *net)
{
        return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
                                    SOCK_RAW, IPPROTO_TCP, net);
}
1921
/* Per-netns teardown: destroy the control socket created above. */
static void __net_exit tcpv6_net_exit(struct net *net)
{
        inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}
1926
/* Batched netns exit: flush AF_INET6 timewait socks in one pass for
 * the whole batch (the list argument is unused; the purge scans the
 * global hash).
 */
static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
        inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}
1931
/* Network-namespace lifecycle hooks for TCPv6. */
static struct pernet_operations tcpv6_net_ops = {
        .init       = tcpv6_net_init,
        .exit       = tcpv6_net_exit,
        .exit_batch = tcpv6_net_exit_batch,
};
1937
1938int __init tcpv6_init(void)
1939{
1940        int ret;
1941
1942        ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1943        if (ret)
1944                goto out;
1945
1946        /* register inet6 protocol */
1947        ret = inet6_register_protosw(&tcpv6_protosw);
1948        if (ret)
1949                goto out_tcpv6_protocol;
1950
1951        ret = register_pernet_subsys(&tcpv6_net_ops);
1952        if (ret)
1953                goto out_tcpv6_protosw;
1954out:
1955        return ret;
1956
1957out_tcpv6_protosw:
1958        inet6_unregister_protosw(&tcpv6_protosw);
1959out_tcpv6_protocol:
1960        inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
1961        goto out;
1962}
1963
/* Module teardown: undo tcpv6_init() registrations in reverse order. */
void tcpv6_exit(void)
{
        unregister_pernet_subsys(&tcpv6_net_ops);
        inet6_unregister_protosw(&tcpv6_protosw);
        inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}
1970