linux/net/ipv6/udp.c
   1/*
   2 *      UDP over IPv6
   3 *      Linux INET6 implementation
   4 *
   5 *      Authors:
   6 *      Pedro Roque             <roque@di.fc.ul.pt>
   7 *
   8 *      Based on linux/ipv4/udp.c
   9 *
  10 *      Fixes:
  11 *      Hideaki YOSHIFUJI       :       sin6_scope_id support
  12 *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
   13 *      Alexey Kuznetsov                allows both IPv4 and IPv6 sockets to bind
   14 *                                      to a single port at the same time.
  15 *      Kazunori MIYAZAWA @USAGI:       change process style to use ip6_append_data
  16 *      YOSHIFUJI Hideaki @USAGI:       convert /proc/net/udp6 to seq_file.
  17 *
  18 *      This program is free software; you can redistribute it and/or
  19 *      modify it under the terms of the GNU General Public License
  20 *      as published by the Free Software Foundation; either version
  21 *      2 of the License, or (at your option) any later version.
  22 */
  23
  24#include <linux/errno.h>
  25#include <linux/types.h>
  26#include <linux/socket.h>
  27#include <linux/sockios.h>
  28#include <linux/net.h>
  29#include <linux/in6.h>
  30#include <linux/netdevice.h>
  31#include <linux/if_arp.h>
  32#include <linux/ipv6.h>
  33#include <linux/icmpv6.h>
  34#include <linux/init.h>
  35#include <linux/module.h>
  36#include <linux/skbuff.h>
  37#include <linux/slab.h>
  38#include <asm/uaccess.h>
  39
  40#include <net/ndisc.h>
  41#include <net/protocol.h>
  42#include <net/transp_v6.h>
  43#include <net/ip6_route.h>
  44#include <net/raw.h>
  45#include <net/tcp_states.h>
  46#include <net/ip6_checksum.h>
  47#include <net/xfrm.h>
  48
  49#include <linux/proc_fs.h>
  50#include <linux/seq_file.h>
  51#include "udp_impl.h"
  52
  53int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
  54{
  55        const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
  56        const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
  57        __be32 sk1_rcv_saddr = sk_rcv_saddr(sk);
  58        __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
  59        int sk_ipv6only = ipv6_only_sock(sk);
  60        int sk2_ipv6only = inet_v6_ipv6only(sk2);
  61        int addr_type = ipv6_addr_type(sk_rcv_saddr6);
  62        int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
  63
  64        /* if both are mapped, treat as IPv4 */
  65        if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED)
  66                return (!sk2_ipv6only &&
  67                        (!sk1_rcv_saddr || !sk2_rcv_saddr ||
  68                          sk1_rcv_saddr == sk2_rcv_saddr));
  69
  70        if (addr_type2 == IPV6_ADDR_ANY &&
  71            !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
  72                return 1;
  73
  74        if (addr_type == IPV6_ADDR_ANY &&
  75            !(sk_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
  76                return 1;
  77
  78        if (sk2_rcv_saddr6 &&
  79            ipv6_addr_equal(sk_rcv_saddr6, sk2_rcv_saddr6))
  80                return 1;
  81
  82        return 0;
  83}
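
/*
 * Illustrative note: the rules above boil down to "two sockets conflict
 * on a port whenever both could match the same incoming packet", and
 * IPV6_V6ONLY removes the conflict between an IPv6 wildcard bind and an
 * IPv4 (or v4-mapped) bind.  A minimal userspace sketch, assuming a
 * Linux host; the port number is arbitrary:
 *
 *	int v6 = socket(AF_INET6, SOCK_DGRAM, 0);
 *	int on = 1;
 *
 *	setsockopt(v6, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));
 *	struct sockaddr_in6 a6 = { .sin6_family = AF_INET6,
 *				   .sin6_port   = htons(5000),
 *				   .sin6_addr   = in6addr_any };
 *	bind(v6, (struct sockaddr *)&a6, sizeof(a6));
 *
 *	int v4 = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct sockaddr_in a4 = { .sin_family = AF_INET,
 *				  .sin_port   = htons(5000) };
 *	bind(v4, (struct sockaddr *)&a4, sizeof(a4));
 *
 * The second bind() succeeds because the first socket is v6-only.
 * Without IPV6_V6ONLY on the first socket it would normally fail with
 * EADDRINUSE.
 */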
  84
  85static unsigned int udp6_portaddr_hash(struct net *net,
  86                                       const struct in6_addr *addr6,
  87                                       unsigned int port)
  88{
  89        unsigned int hash, mix = net_hash_mix(net);
  90
  91        if (ipv6_addr_any(addr6))
  92                hash = jhash_1word(0, mix);
  93        else if (ipv6_addr_v4mapped(addr6))
  94                hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix);
  95        else
  96                hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix);
  97
  98        return hash ^ port;
  99}
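
/*
 * Note on the hash layout: the port is folded in with a final XOR, so a
 * "partial" hash computed with port 0 can later be completed once a
 * port has been chosen, e.g.
 *
 *	full = udp6_portaddr_hash(net, addr, 0) ^ port;
 *
 * and v4-mapped addresses (::ffff:a.b.c.d) hash only the embedded IPv4
 * word, keeping them consistent with the port/address hash used on the
 * IPv4 side for the same address.
 */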
 100
 101
 102int udp_v6_get_port(struct sock *sk, unsigned short snum)
 103{
 104        unsigned int hash2_nulladdr =
 105                udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
 106        unsigned int hash2_partial = 
 107                udp6_portaddr_hash(sock_net(sk), &inet6_sk(sk)->rcv_saddr, 0);
 108
 109        /* precompute partial secondary hash */
 110        udp_sk(sk)->udp_portaddr_hash = hash2_partial;
 111        return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr);
 112}
 113
 114static void udp_v6_rehash(struct sock *sk)
 115{
 116        u16 new_hash = udp6_portaddr_hash(sock_net(sk),
 117                                          &inet6_sk(sk)->rcv_saddr,
 118                                          inet_sk(sk)->inet_num);
 119
 120        udp_lib_rehash(sk, new_hash);
 121}
 122
 123static inline int compute_score(struct sock *sk, struct net *net,
 124                                unsigned short hnum,
 125                                const struct in6_addr *saddr, __be16 sport,
 126                                const struct in6_addr *daddr, __be16 dport,
 127                                int dif)
 128{
 129        int score = -1;
 130
 131        if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
 132                        sk->sk_family == PF_INET6) {
 133                struct ipv6_pinfo *np = inet6_sk(sk);
 134                struct inet_sock *inet = inet_sk(sk);
 135
 136                score = 0;
 137                if (inet->inet_dport) {
 138                        if (inet->inet_dport != sport)
 139                                return -1;
 140                        score++;
 141                }
 142                if (!ipv6_addr_any(&np->rcv_saddr)) {
 143                        if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
 144                                return -1;
 145                        score++;
 146                }
 147                if (!ipv6_addr_any(&np->daddr)) {
 148                        if (!ipv6_addr_equal(&np->daddr, saddr))
 149                                return -1;
 150                        score++;
 151                }
 152                if (sk->sk_bound_dev_if) {
 153                        if (sk->sk_bound_dev_if != dif)
 154                                return -1;
 155                        score++;
 156                }
 157        }
 158        return score;
 159}
 160
 161#define SCORE2_MAX (1 + 1 + 1)
 162static inline int compute_score2(struct sock *sk, struct net *net,
 163                                const struct in6_addr *saddr, __be16 sport,
 164                                const struct in6_addr *daddr, unsigned short hnum,
 165                                int dif)
 166{
 167        int score = -1;
 168
 169        if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
 170                        sk->sk_family == PF_INET6) {
 171                struct ipv6_pinfo *np = inet6_sk(sk);
 172                struct inet_sock *inet = inet_sk(sk);
 173
 174                if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
 175                        return -1;
 176                score = 0;
 177                if (inet->inet_dport) {
 178                        if (inet->inet_dport != sport)
 179                                return -1;
 180                        score++;
 181                }
 182                if (!ipv6_addr_any(&np->daddr)) {
 183                        if (!ipv6_addr_equal(&np->daddr, saddr))
 184                                return -1;
 185                        score++;
 186                }
 187                if (sk->sk_bound_dev_if) {
 188                        if (sk->sk_bound_dev_if != dif)
 189                                return -1;
 190                        score++;
 191                }
 192        }
 193        return score;
 194}
 195
 196
  197/* called with rcu_read_lock() */
 198static struct sock *udp6_lib_lookup2(struct net *net,
 199                const struct in6_addr *saddr, __be16 sport,
 200                const struct in6_addr *daddr, unsigned int hnum, int dif,
 201                struct udp_hslot *hslot2, unsigned int slot2)
 202{
 203        struct sock *sk, *result;
 204        struct hlist_nulls_node *node;
 205        int score, badness;
 206
 207begin:
 208        result = NULL;
 209        badness = -1;
 210        udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
 211                score = compute_score2(sk, net, saddr, sport,
 212                                      daddr, hnum, dif);
 213                if (score > badness) {
 214                        result = sk;
 215                        badness = score;
 216                        if (score == SCORE2_MAX)
 217                                goto exact_match;
 218                }
 219        }
 220        /*
 221         * if the nulls value we got at the end of this lookup is
 222         * not the expected one, we must restart lookup.
 223         * We probably met an item that was moved to another chain.
 224         */
 225        if (get_nulls_value(node) != slot2)
 226                goto begin;
 227
 228        if (result) {
 229exact_match:
 230                if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
 231                        result = NULL;
 232                else if (unlikely(compute_score2(result, net, saddr, sport,
 233                                  daddr, hnum, dif) < badness)) {
 234                        sock_put(result);
 235                        goto begin;
 236                }
 237        }
 238        return result;
 239}
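
/*
 * Why the extra work after a match: sockets sit on nulls-terminated
 * lists and their slab uses SLAB_DESTROY_BY_RCU (see udpv6_prot below),
 * so a lockless walker can land on a socket that is being freed and
 * reused.  Hence the lookup (a) restarts if the terminating nulls value
 * does not belong to this slot, (b) takes a reference only with
 * atomic_inc_not_zero(), and (c) re-runs compute_score2() after taking
 * the reference to confirm the socket still matches as well as it did
 * during the walk.
 */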
 240
 241static struct sock *__udp6_lib_lookup(struct net *net,
 242                                      const struct in6_addr *saddr, __be16 sport,
 243                                      const struct in6_addr *daddr, __be16 dport,
 244                                      int dif, struct udp_table *udptable)
 245{
 246        struct sock *sk, *result;
 247        struct hlist_nulls_node *node;
 248        unsigned short hnum = ntohs(dport);
 249        unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
 250        struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
 251        int score, badness;
 252
 253        rcu_read_lock();
 254        if (hslot->count > 10) {
 255                hash2 = udp6_portaddr_hash(net, daddr, hnum);
 256                slot2 = hash2 & udptable->mask;
 257                hslot2 = &udptable->hash2[slot2];
 258                if (hslot->count < hslot2->count)
 259                        goto begin;
 260
 261                result = udp6_lib_lookup2(net, saddr, sport,
 262                                          daddr, hnum, dif,
 263                                          hslot2, slot2);
 264                if (!result) {
 265                        hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum);
 266                        slot2 = hash2 & udptable->mask;
 267                        hslot2 = &udptable->hash2[slot2];
 268                        if (hslot->count < hslot2->count)
 269                                goto begin;
 270
 271                        result = udp6_lib_lookup2(net, saddr, sport,
 272                                                  &in6addr_any, hnum, dif,
 273                                                  hslot2, slot2);
 274                }
 275                rcu_read_unlock();
 276                return result;
 277        }
 278begin:
 279        result = NULL;
 280        badness = -1;
 281        sk_nulls_for_each_rcu(sk, node, &hslot->head) {
 282                score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif);
 283                if (score > badness) {
 284                        result = sk;
 285                        badness = score;
 286                }
 287        }
 288        /*
 289         * if the nulls value we got at the end of this lookup is
 290         * not the expected one, we must restart lookup.
 291         * We probably met an item that was moved to another chain.
 292         */
 293        if (get_nulls_value(node) != slot)
 294                goto begin;
 295
 296        if (result) {
 297                if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
 298                        result = NULL;
 299                else if (unlikely(compute_score(result, net, hnum, saddr, sport,
 300                                        daddr, dport, dif) < badness)) {
 301                        sock_put(result);
 302                        goto begin;
 303                }
 304        }
 305        rcu_read_unlock();
 306        return result;
 307}
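
/*
 * Lookup strategy in short: when the port-only chain is busy (more than
 * 10 entries), try the secondary table hashed on (destination address,
 * port), first with the packet's destination and then with the
 * any-address, falling back to the primary chain whenever it is in fact
 * the shorter of the two.
 */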
 308
 309static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
 310                                          __be16 sport, __be16 dport,
 311                                          struct udp_table *udptable)
 312{
 313        struct sock *sk;
 314        struct ipv6hdr *iph = ipv6_hdr(skb);
 315
 316        if (unlikely(sk = skb_steal_sock(skb)))
 317                return sk;
 318        return __udp6_lib_lookup(dev_net(skb_dst(skb)->dev), &iph->saddr, sport,
 319                                 &iph->daddr, dport, inet6_iif(skb),
 320                                 udptable);
 321}
 322
 323struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
 324                             const struct in6_addr *daddr, __be16 dport, int dif)
 325{
 326        return __udp6_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table);
 327}
 328EXPORT_SYMBOL_GPL(udp6_lib_lookup);
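
/*
 * udp6_lib_lookup() is exported for use by other kernel code.  A hedged
 * sketch of how a caller might use it; net, src, dst, sport, dport and
 * ifindex are placeholders, and the caller owns the reference returned
 * on success:
 *
 *	struct sock *sk;
 *
 *	sk = udp6_lib_lookup(net, &src, sport, &dst, dport, ifindex);
 *	if (sk) {
 *		... use sk ...
 *		sock_put(sk);
 *	}
 */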
 329
 330
 331/*
  332 *      This should be easy: if there is something there, we
  333 *      return it; otherwise we block.
 334 */
 335
 336int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
 337                  struct msghdr *msg, size_t len,
 338                  int noblock, int flags, int *addr_len)
 339{
 340        struct ipv6_pinfo *np = inet6_sk(sk);
 341        struct inet_sock *inet = inet_sk(sk);
 342        struct sk_buff *skb;
 343        unsigned int ulen;
 344        int peeked;
 345        int err;
 346        int is_udplite = IS_UDPLITE(sk);
 347        int is_udp4;
 348        bool slow;
 349
 350        if (addr_len)
 351                *addr_len=sizeof(struct sockaddr_in6);
 352
 353        if (flags & MSG_ERRQUEUE)
 354                return ipv6_recv_error(sk, msg, len);
 355
 356        if (np->rxpmtu && np->rxopt.bits.rxpmtu)
 357                return ipv6_recv_rxpmtu(sk, msg, len);
 358
 359try_again:
 360        skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
 361                                  &peeked, &err);
 362        if (!skb)
 363                goto out;
 364
 365        ulen = skb->len - sizeof(struct udphdr);
 366        if (len > ulen)
 367                len = ulen;
 368        else if (len < ulen)
 369                msg->msg_flags |= MSG_TRUNC;
 370
 371        is_udp4 = (skb->protocol == htons(ETH_P_IP));
 372
 373        /*
 374         * If checksum is needed at all, try to do it while copying the
 375         * data.  If the data is truncated, or if we only want a partial
 376         * coverage checksum (UDP-Lite), do it before the copy.
 377         */
 378
 379        if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
 380                if (udp_lib_checksum_complete(skb))
 381                        goto csum_copy_err;
 382        }
 383
 384        if (skb_csum_unnecessary(skb))
 385                err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
 386                                              msg->msg_iov,len);
 387        else {
 388                err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
 389                if (err == -EINVAL)
 390                        goto csum_copy_err;
 391        }
 392        if (err)
 393                goto out_free;
 394
 395        if (!peeked) {
 396                if (is_udp4)
 397                        UDP_INC_STATS_USER(sock_net(sk),
 398                                        UDP_MIB_INDATAGRAMS, is_udplite);
 399                else
 400                        UDP6_INC_STATS_USER(sock_net(sk),
 401                                        UDP_MIB_INDATAGRAMS, is_udplite);
 402        }
 403
 404        sock_recv_ts_and_drops(msg, sk, skb);
 405
 406        /* Copy the address. */
 407        if (msg->msg_name) {
 408                struct sockaddr_in6 *sin6;
 409
 410                sin6 = (struct sockaddr_in6 *) msg->msg_name;
 411                sin6->sin6_family = AF_INET6;
 412                sin6->sin6_port = udp_hdr(skb)->source;
 413                sin6->sin6_flowinfo = 0;
 414                sin6->sin6_scope_id = 0;
 415
 416                if (is_udp4)
 417                        ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
 418                                               &sin6->sin6_addr);
 419                else {
 420                        ipv6_addr_copy(&sin6->sin6_addr,
 421                                       &ipv6_hdr(skb)->saddr);
 422                        if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
 423                                sin6->sin6_scope_id = IP6CB(skb)->iif;
 424                }
 425
 426        }
 427        if (is_udp4) {
 428                if (inet->cmsg_flags)
 429                        ip_cmsg_recv(msg, skb);
 430        } else {
 431                if (np->rxopt.all)
 432                        datagram_recv_ctl(sk, msg, skb);
 433        }
 434
 435        err = len;
 436        if (flags & MSG_TRUNC)
 437                err = ulen;
 438
 439out_free:
 440        skb_free_datagram_locked(sk, skb);
 441out:
 442        return err;
 443
 444csum_copy_err:
 445        slow = lock_sock_fast(sk);
 446        if (!skb_kill_datagram(sk, skb, flags)) {
 447                if (is_udp4)
 448                        UDP_INC_STATS_USER(sock_net(sk),
 449                                        UDP_MIB_INERRORS, is_udplite);
 450                else
 451                        UDP6_INC_STATS_USER(sock_net(sk),
 452                                        UDP_MIB_INERRORS, is_udplite);
 453        }
 454        unlock_sock_fast(sk, slow);
 455
 456        if (flags & MSG_DONTWAIT)
 457                return -EAGAIN;
 458        goto try_again;
 459}
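
/*
 * Receive-side view from userspace, as a hedged sketch with error
 * handling omitted: on a dual-stack UDPv6 socket the source address is
 * returned as a sockaddr_in6; IPv4 senders show up as v4-mapped
 * addresses and link-local IPv6 senders carry the arrival interface in
 * sin6_scope_id, matching the code above.
 *
 *	char buf[1500];
 *	struct sockaddr_in6 from;
 *	socklen_t fromlen = sizeof(from);
 *	ssize_t n;
 *
 *	n = recvfrom(fd, buf, sizeof(buf), 0,
 *		     (struct sockaddr *)&from, &fromlen);
 *	if (n >= 0 && IN6_IS_ADDR_V4MAPPED(&from.sin6_addr))
 *		... the sender used IPv4 ...
 */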
 460
 461void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 462                    u8 type, u8 code, int offset, __be32 info,
 463                    struct udp_table *udptable)
 464{
 465        struct ipv6_pinfo *np;
 466        struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
 467        struct in6_addr *saddr = &hdr->saddr;
 468        struct in6_addr *daddr = &hdr->daddr;
 469        struct udphdr *uh = (struct udphdr*)(skb->data+offset);
 470        struct sock *sk;
 471        int err;
 472
 473        sk = __udp6_lib_lookup(dev_net(skb->dev), daddr, uh->dest,
 474                               saddr, uh->source, inet6_iif(skb), udptable);
 475        if (sk == NULL)
 476                return;
 477
 478        np = inet6_sk(sk);
 479
 480        if (!icmpv6_err_convert(type, code, &err) && !np->recverr)
 481                goto out;
 482
 483        if (sk->sk_state != TCP_ESTABLISHED && !np->recverr)
 484                goto out;
 485
 486        if (np->recverr)
 487                ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
 488
 489        sk->sk_err = err;
 490        sk->sk_error_report(sk);
 491out:
 492        sock_put(sk);
 493}
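
/*
 * The ICMPv6 errors handled above are queued to the socket error queue
 * only when IPV6_RECVERR is enabled; without it, a connected socket
 * just sees the error as errno on its next system call.  A minimal
 * userspace sketch, assuming <linux/errqueue.h> definitions:
 *
 *	int on = 1;
 *	char cbuf[512], data[1];
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVERR, &on, sizeof(on));
 *	...
 *	recvmsg(fd, &msg, MSG_ERRQUEUE);
 *
 * The error itself arrives as a sock_extended_err control message.
 */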
 494
 495static __inline__ void udpv6_err(struct sk_buff *skb,
 496                                 struct inet6_skb_parm *opt, u8 type,
 497                                 u8 code, int offset, __be32 info     )
 498{
 499        __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
 500}
 501
 502int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
 503{
 504        struct udp_sock *up = udp_sk(sk);
 505        int rc;
 506        int is_udplite = IS_UDPLITE(sk);
 507
 508        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
 509                goto drop;
 510
 511        /*
 512         * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
 513         */
 514        if ((is_udplite & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {
 515
 516                if (up->pcrlen == 0) {          /* full coverage was set  */
 517                        LIMIT_NETDEBUG(KERN_WARNING "UDPLITE6: partial coverage"
 518                                " %d while full coverage %d requested\n",
 519                                UDP_SKB_CB(skb)->cscov, skb->len);
 520                        goto drop;
 521                }
 522                if (UDP_SKB_CB(skb)->cscov  <  up->pcrlen) {
 523                        LIMIT_NETDEBUG(KERN_WARNING "UDPLITE6: coverage %d "
 524                                                    "too small, need min %d\n",
 525                                       UDP_SKB_CB(skb)->cscov, up->pcrlen);
 526                        goto drop;
 527                }
 528        }
 529
 530        if (rcu_dereference_raw(sk->sk_filter)) {
 531                if (udp_lib_checksum_complete(skb))
 532                        goto drop;
 533        }
 534
 535        if ((rc = ip_queue_rcv_skb(sk, skb)) < 0) {
 536                /* Note that an ENOMEM error is charged twice */
 537                if (rc == -ENOMEM)
 538                        UDP6_INC_STATS_BH(sock_net(sk),
 539                                        UDP_MIB_RCVBUFERRORS, is_udplite);
 540                goto drop_no_sk_drops_inc;
 541        }
 542
 543        return 0;
 544drop:
 545        atomic_inc(&sk->sk_drops);
 546drop_no_sk_drops_inc:
 547        UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 548        kfree_skb(skb);
 549        return -1;
 550}
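
/*
 * The coverage checks above correspond to the UDP-Lite socket options a
 * receiver can set from userspace.  A hedged sketch (constants from
 * <linux/udp.h>; the 20-byte minimum coverage is arbitrary):
 *
 *	int cov = 20;
 *	int fd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_UDPLITE);
 *
 *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV,
 *		   &cov, sizeof(cov));
 *
 * Datagrams whose checksum coverage is smaller than this are dropped by
 * the pcrlen test above.
 */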
 551
 552static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
 553                                      __be16 loc_port, struct in6_addr *loc_addr,
 554                                      __be16 rmt_port, struct in6_addr *rmt_addr,
 555                                      int dif)
 556{
 557        struct hlist_nulls_node *node;
 558        struct sock *s = sk;
 559        unsigned short num = ntohs(loc_port);
 560
 561        sk_nulls_for_each_from(s, node) {
 562                struct inet_sock *inet = inet_sk(s);
 563
 564                if (!net_eq(sock_net(s), net))
 565                        continue;
 566
 567                if (udp_sk(s)->udp_port_hash == num &&
 568                    s->sk_family == PF_INET6) {
 569                        struct ipv6_pinfo *np = inet6_sk(s);
 570                        if (inet->inet_dport) {
 571                                if (inet->inet_dport != rmt_port)
 572                                        continue;
 573                        }
 574                        if (!ipv6_addr_any(&np->daddr) &&
 575                            !ipv6_addr_equal(&np->daddr, rmt_addr))
 576                                continue;
 577
 578                        if (s->sk_bound_dev_if && s->sk_bound_dev_if != dif)
 579                                continue;
 580
 581                        if (!ipv6_addr_any(&np->rcv_saddr)) {
 582                                if (!ipv6_addr_equal(&np->rcv_saddr, loc_addr))
 583                                        continue;
 584                        }
 585                        if (!inet6_mc_check(s, loc_addr, rmt_addr))
 586                                continue;
 587                        return s;
 588                }
 589        }
 590        return NULL;
 591}
 592
 593static void flush_stack(struct sock **stack, unsigned int count,
 594                        struct sk_buff *skb, unsigned int final)
 595{
 596        unsigned int i;
 597        struct sock *sk;
 598        struct sk_buff *skb1;
 599
 600        for (i = 0; i < count; i++) {
 601                skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
 602
 603                sk = stack[i];
 604                if (skb1) {
 605                        if (sk_rcvqueues_full(sk, skb1)) {
 606                                kfree_skb(skb1);
 607                                goto drop;
 608                        }
 609                        bh_lock_sock(sk);
 610                        if (!sock_owned_by_user(sk))
 611                                udpv6_queue_rcv_skb(sk, skb1);
 612                        else if (sk_add_backlog(sk, skb1)) {
 613                                kfree_skb(skb1);
 614                                bh_unlock_sock(sk);
 615                                goto drop;
 616                        }
 617                        bh_unlock_sock(sk);
 618                        continue;
 619                }
 620drop:
 621                atomic_inc(&sk->sk_drops);
 622                UDP6_INC_STATS_BH(sock_net(sk),
 623                                UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
 624                UDP6_INC_STATS_BH(sock_net(sk),
 625                                UDP_MIB_INERRORS, IS_UDPLITE(sk));
 626        }
 627}
 628/*
 629 * Note: called only from the BH handler context,
 630 * so we don't need to lock the hashes.
 631 */
 632static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 633                struct in6_addr *saddr, struct in6_addr *daddr,
 634                struct udp_table *udptable)
 635{
 636        struct sock *sk, *stack[256 / sizeof(struct sock *)];
 637        const struct udphdr *uh = udp_hdr(skb);
 638        struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest));
 639        int dif;
 640        unsigned int i, count = 0;
 641
 642        spin_lock(&hslot->lock);
 643        sk = sk_nulls_head(&hslot->head);
 644        dif = inet6_iif(skb);
 645        sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
 646        while (sk) {
 647                stack[count++] = sk;
 648                sk = udp_v6_mcast_next(net, sk_nulls_next(sk), uh->dest, daddr,
 649                                       uh->source, saddr, dif);
 650                if (unlikely(count == ARRAY_SIZE(stack))) {
 651                        if (!sk)
 652                                break;
 653                        flush_stack(stack, count, skb, ~0);
 654                        count = 0;
 655                }
 656        }
 657        /*
  658         * before releasing the lock, we must take a reference on the sockets
 659         */
 660        for (i = 0; i < count; i++)
 661                sock_hold(stack[i]);
 662
 663        spin_unlock(&hslot->lock);
 664
 665        if (count) {
 666                flush_stack(stack, count, skb, count - 1);
 667
 668                for (i = 0; i < count; i++)
 669                        sock_put(stack[i]);
 670        } else {
 671                kfree_skb(skb);
 672        }
 673        return 0;
 674}
 675
 676static inline int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh,
 677                                 int proto)
 678{
 679        int err;
 680
 681        UDP_SKB_CB(skb)->partial_cov = 0;
 682        UDP_SKB_CB(skb)->cscov = skb->len;
 683
 684        if (proto == IPPROTO_UDPLITE) {
 685                err = udplite_checksum_init(skb, uh);
 686                if (err)
 687                        return err;
 688        }
 689
 690        if (uh->check == 0) {
 691                /* RFC 2460 section 8.1 says that we SHOULD log
 692                   this error. Well, it is reasonable.
 693                 */
 694                LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0\n");
 695                return 1;
 696        }
 697        if (skb->ip_summed == CHECKSUM_COMPLETE &&
 698            !csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
 699                             skb->len, proto, skb->csum))
 700                skb->ip_summed = CHECKSUM_UNNECESSARY;
 701
 702        if (!skb_csum_unnecessary(skb))
 703                skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
 704                                                         &ipv6_hdr(skb)->daddr,
 705                                                         skb->len, proto, 0));
 706
 707        return 0;
 708}
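
/*
 * Unlike IPv4, a zero UDP checksum is not allowed over IPv6 (RFC 2460,
 * section 8.1), which is why a zero uh->check is rejected above rather
 * than treated as "no checksum".  For CHECKSUM_COMPLETE hardware the
 * pseudo-header is verified here; otherwise the pseudo-header sum is
 * precomputed so that udp_lib_checksum_complete() can finish the job
 * later.
 */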
 709
 710int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 711                   int proto)
 712{
 713        struct net *net = dev_net(skb->dev);
 714        struct sock *sk;
 715        struct udphdr *uh;
 716        struct in6_addr *saddr, *daddr;
 717        u32 ulen = 0;
 718
 719        if (!pskb_may_pull(skb, sizeof(struct udphdr)))
 720                goto discard;
 721
 722        saddr = &ipv6_hdr(skb)->saddr;
 723        daddr = &ipv6_hdr(skb)->daddr;
 724        uh = udp_hdr(skb);
 725
 726        ulen = ntohs(uh->len);
 727        if (ulen > skb->len)
 728                goto short_packet;
 729
 730        if (proto == IPPROTO_UDP) {
 731                /* UDP validates ulen. */
 732
 733                /* Check for jumbo payload */
 734                if (ulen == 0)
 735                        ulen = skb->len;
 736
 737                if (ulen < sizeof(*uh))
 738                        goto short_packet;
 739
 740                if (ulen < skb->len) {
 741                        if (pskb_trim_rcsum(skb, ulen))
 742                                goto short_packet;
 743                        saddr = &ipv6_hdr(skb)->saddr;
 744                        daddr = &ipv6_hdr(skb)->daddr;
 745                        uh = udp_hdr(skb);
 746                }
 747        }
 748
 749        if (udp6_csum_init(skb, uh, proto))
 750                goto discard;
 751
 752        /*
 753         *      Multicast receive code
 754         */
 755        if (ipv6_addr_is_multicast(daddr))
 756                return __udp6_lib_mcast_deliver(net, skb,
 757                                saddr, daddr, udptable);
 758
 759        /* Unicast */
 760
 761        /*
 762         * check socket cache ... must talk to Alan about his plans
 763         * for sock caches... i'll skip this for now.
 764         */
 765        sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
 766
 767        if (sk == NULL) {
 768                if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
 769                        goto discard;
 770
 771                if (udp_lib_checksum_complete(skb))
 772                        goto discard;
 773                UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
 774                                proto == IPPROTO_UDPLITE);
 775
 776                icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
 777
 778                kfree_skb(skb);
 779                return 0;
 780        }
 781
 782        /* deliver */
 783
 784        if (sk_rcvqueues_full(sk, skb)) {
 785                sock_put(sk);
 786                goto discard;
 787        }
 788        bh_lock_sock(sk);
 789        if (!sock_owned_by_user(sk))
 790                udpv6_queue_rcv_skb(sk, skb);
 791        else if (sk_add_backlog(sk, skb)) {
 792                atomic_inc(&sk->sk_drops);
 793                bh_unlock_sock(sk);
 794                sock_put(sk);
 795                goto discard;
 796        }
 797        bh_unlock_sock(sk);
 798        sock_put(sk);
 799        return 0;
 800
 801short_packet:
 802        LIMIT_NETDEBUG(KERN_DEBUG "UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
 803                       proto == IPPROTO_UDPLITE ? "-Lite" : "",
 804                       saddr,
 805                       ntohs(uh->source),
 806                       ulen,
 807                       skb->len,
 808                       daddr,
 809                       ntohs(uh->dest));
 810
 811discard:
 812        UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
 813        kfree_skb(skb);
 814        return 0;
 815}
 816
 817static __inline__ int udpv6_rcv(struct sk_buff *skb)
 818{
 819        return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
 820}
 821
 822/*
 823 * Throw away all pending data and cancel the corking. Socket is locked.
 824 */
 825static void udp_v6_flush_pending_frames(struct sock *sk)
 826{
 827        struct udp_sock *up = udp_sk(sk);
 828
 829        if (up->pending == AF_INET)
 830                udp_flush_pending_frames(sk);
 831        else if (up->pending) {
 832                up->len = 0;
 833                up->pending = 0;
 834                ip6_flush_pending_frames(sk);
 835        }
 836}
 837
 838/**
 839 *      udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
 840 *      @sk:    socket we are sending on
 841 *      @skb:   sk_buff containing the filled-in UDP header
 842 *              (checksum field must be zeroed out)
 843 */
 844static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
 845                                 const struct in6_addr *saddr,
 846                                 const struct in6_addr *daddr, int len)
 847{
 848        unsigned int offset;
 849        struct udphdr *uh = udp_hdr(skb);
 850        __wsum csum = 0;
 851
 852        if (skb_queue_len(&sk->sk_write_queue) == 1) {
 853                /* Only one fragment on the socket.  */
 854                skb->csum_start = skb_transport_header(skb) - skb->head;
 855                skb->csum_offset = offsetof(struct udphdr, check);
 856                uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
 857        } else {
 858                /*
  859                 * HW checksum won't work, as there are two or more
  860                 * fragments on the socket, so the checksums of all the
  861                 * sk_buffs must be combined here in software.
 862                 */
 863                offset = skb_transport_offset(skb);
 864                skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
 865
 866                skb->ip_summed = CHECKSUM_NONE;
 867
 868                skb_queue_walk(&sk->sk_write_queue, skb) {
 869                        csum = csum_add(csum, skb->csum);
 870                }
 871
 872                uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
 873                                            csum);
 874                if (uh->check == 0)
 875                        uh->check = CSUM_MANGLED_0;
 876        }
 877}
 878
 879/*
 880 *      Sending
 881 */
 882
 883static int udp_v6_push_pending_frames(struct sock *sk)
 884{
 885        struct sk_buff *skb;
 886        struct udphdr *uh;
 887        struct udp_sock  *up = udp_sk(sk);
 888        struct inet_sock *inet = inet_sk(sk);
 889        struct flowi *fl = &inet->cork.fl;
 890        int err = 0;
 891        int is_udplite = IS_UDPLITE(sk);
 892        __wsum csum = 0;
 893
 894        /* Grab the skbuff where UDP header space exists. */
 895        if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
 896                goto out;
 897
 898        /*
 899         * Create a UDP header
 900         */
 901        uh = udp_hdr(skb);
 902        uh->source = fl->fl_ip_sport;
 903        uh->dest = fl->fl_ip_dport;
 904        uh->len = htons(up->len);
 905        uh->check = 0;
 906
 907        if (is_udplite)
 908                csum = udplite_csum_outgoing(sk, skb);
 909        else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
 910                udp6_hwcsum_outgoing(sk, skb, &fl->fl6_src, &fl->fl6_dst,
 911                                     up->len);
 912                goto send;
 913        } else
 914                csum = udp_csum_outgoing(sk, skb);
 915
 916        /* add protocol-dependent pseudo-header */
 917        uh->check = csum_ipv6_magic(&fl->fl6_src, &fl->fl6_dst,
 918                                    up->len, fl->proto, csum   );
 919        if (uh->check == 0)
 920                uh->check = CSUM_MANGLED_0;
 921
 922send:
 923        err = ip6_push_pending_frames(sk);
 924        if (err) {
 925                if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
 926                        UDP6_INC_STATS_USER(sock_net(sk),
 927                                            UDP_MIB_SNDBUFERRORS, is_udplite);
 928                        err = 0;
 929                }
 930        } else
 931                UDP6_INC_STATS_USER(sock_net(sk),
 932                                    UDP_MIB_OUTDATAGRAMS, is_udplite);
 933out:
 934        up->len = 0;
 935        up->pending = 0;
 936        return err;
 937}
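
/*
 * This is the flush side of UDP corking: ip6_append_data() gathers data
 * while the socket is corked, and the single datagram is pushed out
 * here.  A hedged userspace sketch of the sender side, assuming fd is a
 * connected UDPv6 socket:
 *
 *	int on = 1, off = 0;
 *
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
 *	send(fd, part1, len1, 0);
 *	send(fd, part2, len2, 0);
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
 *
 * (or MSG_MORE on all but the last send); both parts leave the host as
 * one UDP datagram.
 */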
 938
 939int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
 940                  struct msghdr *msg, size_t len)
 941{
 942        struct ipv6_txoptions opt_space;
 943        struct udp_sock *up = udp_sk(sk);
 944        struct inet_sock *inet = inet_sk(sk);
 945        struct ipv6_pinfo *np = inet6_sk(sk);
 946        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name;
 947        struct in6_addr *daddr, *final_p, final;
 948        struct ipv6_txoptions *opt = NULL;
 949        struct ip6_flowlabel *flowlabel = NULL;
 950        struct flowi fl;
 951        struct dst_entry *dst;
 952        int addr_len = msg->msg_namelen;
 953        int ulen = len;
 954        int hlimit = -1;
 955        int tclass = -1;
 956        int dontfrag = -1;
 957        int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
 958        int err;
 959        int connected = 0;
 960        int is_udplite = IS_UDPLITE(sk);
 961        int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
 962
 963        /* destination address check */
 964        if (sin6) {
 965                if (addr_len < offsetof(struct sockaddr, sa_data))
 966                        return -EINVAL;
 967
 968                switch (sin6->sin6_family) {
 969                case AF_INET6:
 970                        if (addr_len < SIN6_LEN_RFC2133)
 971                                return -EINVAL;
 972                        daddr = &sin6->sin6_addr;
 973                        break;
 974                case AF_INET:
 975                        goto do_udp_sendmsg;
 976                case AF_UNSPEC:
 977                        msg->msg_name = sin6 = NULL;
 978                        msg->msg_namelen = addr_len = 0;
 979                        daddr = NULL;
 980                        break;
 981                default:
 982                        return -EINVAL;
 983                }
 984        } else if (!up->pending) {
 985                if (sk->sk_state != TCP_ESTABLISHED)
 986                        return -EDESTADDRREQ;
 987                daddr = &np->daddr;
 988        } else
 989                daddr = NULL;
 990
 991        if (daddr) {
 992                if (ipv6_addr_v4mapped(daddr)) {
 993                        struct sockaddr_in sin;
 994                        sin.sin_family = AF_INET;
 995                        sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
 996                        sin.sin_addr.s_addr = daddr->s6_addr32[3];
 997                        msg->msg_name = &sin;
 998                        msg->msg_namelen = sizeof(sin);
 999do_udp_sendmsg:
1000                        if (__ipv6_only_sock(sk))
1001                                return -ENETUNREACH;
1002                        return udp_sendmsg(iocb, sk, msg, len);
1003                }
1004        }
1005
1006        if (up->pending == AF_INET)
1007                return udp_sendmsg(iocb, sk, msg, len);
1008
 1009        /* Rough check on arithmetic overflow;
 1010           a better check is made in ip6_append_data().
1011           */
1012        if (len > INT_MAX - sizeof(struct udphdr))
1013                return -EMSGSIZE;
1014
1015        if (up->pending) {
1016                /*
1017                 * There are pending frames.
1018                 * The socket lock must be held while it's corked.
1019                 */
1020                lock_sock(sk);
1021                if (likely(up->pending)) {
1022                        if (unlikely(up->pending != AF_INET6)) {
1023                                release_sock(sk);
1024                                return -EAFNOSUPPORT;
1025                        }
1026                        dst = NULL;
1027                        goto do_append_data;
1028                }
1029                release_sock(sk);
1030        }
1031        ulen += sizeof(struct udphdr);
1032
1033        memset(&fl, 0, sizeof(fl));
1034
1035        if (sin6) {
1036                if (sin6->sin6_port == 0)
1037                        return -EINVAL;
1038
1039                fl.fl_ip_dport = sin6->sin6_port;
1040                daddr = &sin6->sin6_addr;
1041
1042                if (np->sndflow) {
1043                        fl.fl6_flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
1044                        if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
1045                                flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
1046                                if (flowlabel == NULL)
1047                                        return -EINVAL;
1048                                daddr = &flowlabel->dst;
1049                        }
1050                }
1051
1052                /*
1053                 * Otherwise it will be difficult to maintain
1054                 * sk->sk_dst_cache.
1055                 */
1056                if (sk->sk_state == TCP_ESTABLISHED &&
1057                    ipv6_addr_equal(daddr, &np->daddr))
1058                        daddr = &np->daddr;
1059
1060                if (addr_len >= sizeof(struct sockaddr_in6) &&
1061                    sin6->sin6_scope_id &&
1062                    ipv6_addr_type(daddr)&IPV6_ADDR_LINKLOCAL)
1063                        fl.oif = sin6->sin6_scope_id;
1064        } else {
1065                if (sk->sk_state != TCP_ESTABLISHED)
1066                        return -EDESTADDRREQ;
1067
1068                fl.fl_ip_dport = inet->inet_dport;
1069                daddr = &np->daddr;
1070                fl.fl6_flowlabel = np->flow_label;
1071                connected = 1;
1072        }
1073
1074        if (!fl.oif)
1075                fl.oif = sk->sk_bound_dev_if;
1076
1077        if (!fl.oif)
1078                fl.oif = np->sticky_pktinfo.ipi6_ifindex;
1079
1080        fl.mark = sk->sk_mark;
1081
1082        if (msg->msg_controllen) {
1083                opt = &opt_space;
1084                memset(opt, 0, sizeof(struct ipv6_txoptions));
1085                opt->tot_len = sizeof(*opt);
1086
1087                err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit,
1088                                        &tclass, &dontfrag);
1089                if (err < 0) {
1090                        fl6_sock_release(flowlabel);
1091                        return err;
1092                }
1093                if ((fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
1094                        flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
1095                        if (flowlabel == NULL)
1096                                return -EINVAL;
1097                }
1098                if (!(opt->opt_nflen|opt->opt_flen))
1099                        opt = NULL;
1100                connected = 0;
1101        }
1102        if (opt == NULL)
1103                opt = np->opt;
1104        if (flowlabel)
1105                opt = fl6_merge_options(&opt_space, flowlabel, opt);
1106        opt = ipv6_fixup_options(&opt_space, opt);
1107
1108        fl.proto = sk->sk_protocol;
1109        if (!ipv6_addr_any(daddr))
1110                ipv6_addr_copy(&fl.fl6_dst, daddr);
1111        else
1112                fl.fl6_dst.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
1113        if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr))
1114                ipv6_addr_copy(&fl.fl6_src, &np->saddr);
1115        fl.fl_ip_sport = inet->inet_sport;
1116
1117        final_p = fl6_update_dst(&fl, opt, &final);
1118        if (final_p)
1119                connected = 0;
1120
1121        if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) {
1122                fl.oif = np->mcast_oif;
1123                connected = 0;
1124        }
1125
1126        security_sk_classify_flow(sk, &fl);
1127
1128        err = ip6_sk_dst_lookup(sk, &dst, &fl);
1129        if (err)
1130                goto out;
1131        if (final_p)
1132                ipv6_addr_copy(&fl.fl6_dst, final_p);
1133
1134        err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
1135        if (err < 0) {
1136                if (err == -EREMOTE)
1137                        err = ip6_dst_blackhole(sk, &dst, &fl);
1138                if (err < 0)
1139                        goto out;
1140        }
1141
1142        if (hlimit < 0) {
1143                if (ipv6_addr_is_multicast(&fl.fl6_dst))
1144                        hlimit = np->mcast_hops;
1145                else
1146                        hlimit = np->hop_limit;
1147                if (hlimit < 0)
1148                        hlimit = ip6_dst_hoplimit(dst);
1149        }
1150
1151        if (tclass < 0)
1152                tclass = np->tclass;
1153
1154        if (dontfrag < 0)
1155                dontfrag = np->dontfrag;
1156
1157        if (msg->msg_flags&MSG_CONFIRM)
1158                goto do_confirm;
1159back_from_confirm:
1160
1161        lock_sock(sk);
1162        if (unlikely(up->pending)) {
1163                /* The socket is already corked while preparing it. */
1164                /* ... which is an evident application bug. --ANK */
1165                release_sock(sk);
1166
1167                LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n");
1168                err = -EINVAL;
1169                goto out;
1170        }
1171
1172        up->pending = AF_INET6;
1173
1174do_append_data:
1175        up->len += ulen;
1176        getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;
1177        err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen,
1178                sizeof(struct udphdr), hlimit, tclass, opt, &fl,
1179                (struct rt6_info*)dst,
1180                corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag);
1181        if (err)
1182                udp_v6_flush_pending_frames(sk);
1183        else if (!corkreq)
1184                err = udp_v6_push_pending_frames(sk);
1185        else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
1186                up->pending = 0;
1187
1188        if (dst) {
1189                if (connected) {
1190                        ip6_dst_store(sk, dst,
1191                                      ipv6_addr_equal(&fl.fl6_dst, &np->daddr) ?
1192                                      &np->daddr : NULL,
1193#ifdef CONFIG_IPV6_SUBTREES
1194                                      ipv6_addr_equal(&fl.fl6_src, &np->saddr) ?
1195                                      &np->saddr :
1196#endif
1197                                      NULL);
1198                } else {
1199                        dst_release(dst);
1200                }
1201                dst = NULL;
1202        }
1203
1204        if (err > 0)
1205                err = np->recverr ? net_xmit_errno(err) : 0;
1206        release_sock(sk);
1207out:
1208        dst_release(dst);
1209        fl6_sock_release(flowlabel);
1210        if (!err)
1211                return len;
1212        /*
1213         * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
1214         * ENOBUFS might not be good (it's not tunable per se), but otherwise
1215         * we don't have a good statistic (IpOutDiscards but it can be too many
1216         * things).  We could add another new stat but at least for now that
1217         * seems like overkill.
1218         */
1219        if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1220                UDP6_INC_STATS_USER(sock_net(sk),
1221                                UDP_MIB_SNDBUFERRORS, is_udplite);
1222        }
1223        return err;
1224
1225do_confirm:
1226        dst_confirm(dst);
1227        if (!(msg->msg_flags&MSG_PROBE) || len)
1228                goto back_from_confirm;
1229        err = 0;
1230        goto out;
1231}
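
/*
 * Ancillary data accepted above via datagram_send_ctl() includes
 * IPV6_PKTINFO, which lets a sender pin the outgoing interface (and
 * source address) per datagram.  A hedged userspace sketch; dst is a
 * filled-in sockaddr_in6 and ifindex the desired interface:
 *
 *	char cbuf[CMSG_SPACE(sizeof(struct in6_pktinfo))];
 *	struct iovec iov = { .iov_base = payload, .iov_len = plen };
 *	struct msghdr msg = { .msg_name = &dst, .msg_namelen = sizeof(dst),
 *			      .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cm;
 *	struct in6_pktinfo *pi;
 *
 *	memset(cbuf, 0, sizeof(cbuf));
 *	cm = CMSG_FIRSTHDR(&msg);
 *	cm->cmsg_level = IPPROTO_IPV6;
 *	cm->cmsg_type  = IPV6_PKTINFO;
 *	cm->cmsg_len   = CMSG_LEN(sizeof(*pi));
 *	pi = (struct in6_pktinfo *)CMSG_DATA(cm);
 *	pi->ipi6_ifindex = ifindex;
 *	sendmsg(fd, &msg, 0);
 */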
1232
1233void udpv6_destroy_sock(struct sock *sk)
1234{
1235        lock_sock(sk);
1236        udp_v6_flush_pending_frames(sk);
1237        release_sock(sk);
1238
1239        inet6_destroy_sock(sk);
1240}
1241
1242/*
1243 *      Socket option code for UDP
1244 */
1245int udpv6_setsockopt(struct sock *sk, int level, int optname,
1246                     char __user *optval, unsigned int optlen)
1247{
1248        if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1249                return udp_lib_setsockopt(sk, level, optname, optval, optlen,
1250                                          udp_v6_push_pending_frames);
1251        return ipv6_setsockopt(sk, level, optname, optval, optlen);
1252}
1253
1254#ifdef CONFIG_COMPAT
1255int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
1256                            char __user *optval, unsigned int optlen)
1257{
1258        if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1259                return udp_lib_setsockopt(sk, level, optname, optval, optlen,
1260                                          udp_v6_push_pending_frames);
1261        return compat_ipv6_setsockopt(sk, level, optname, optval, optlen);
1262}
1263#endif
1264
1265int udpv6_getsockopt(struct sock *sk, int level, int optname,
1266                     char __user *optval, int __user *optlen)
1267{
1268        if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1269                return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1270        return ipv6_getsockopt(sk, level, optname, optval, optlen);
1271}
1272
1273#ifdef CONFIG_COMPAT
1274int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
1275                            char __user *optval, int __user *optlen)
1276{
1277        if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1278                return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1279        return compat_ipv6_getsockopt(sk, level, optname, optval, optlen);
1280}
1281#endif
1282
1283static int udp6_ufo_send_check(struct sk_buff *skb)
1284{
1285        struct ipv6hdr *ipv6h;
1286        struct udphdr *uh;
1287
1288        if (!pskb_may_pull(skb, sizeof(*uh)))
1289                return -EINVAL;
1290
1291        ipv6h = ipv6_hdr(skb);
1292        uh = udp_hdr(skb);
1293
1294        uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
1295                                     IPPROTO_UDP, 0);
1296        skb->csum_start = skb_transport_header(skb) - skb->head;
1297        skb->csum_offset = offsetof(struct udphdr, check);
1298        skb->ip_summed = CHECKSUM_PARTIAL;
1299        return 0;
1300}
1301
1302static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, int features)
1303{
1304        struct sk_buff *segs = ERR_PTR(-EINVAL);
1305        unsigned int mss;
1306        unsigned int unfrag_ip6hlen, unfrag_len;
1307        struct frag_hdr *fptr;
1308        u8 *mac_start, *prevhdr;
1309        u8 nexthdr;
1310        u8 frag_hdr_sz = sizeof(struct frag_hdr);
1311        int offset;
1312        __wsum csum;
1313
1314        mss = skb_shinfo(skb)->gso_size;
1315        if (unlikely(skb->len <= mss))
1316                goto out;
1317
1318        if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
1319                /* Packet is from an untrusted source, reset gso_segs. */
1320                int type = skb_shinfo(skb)->gso_type;
1321
1322                if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY) ||
1323                             !(type & (SKB_GSO_UDP))))
1324                        goto out;
1325
1326                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
1327
1328                segs = NULL;
1329                goto out;
1330        }
1331
1332        /* Do software UFO. Complete and fill in the UDP checksum as HW cannot
1333         * do checksum of UDP packets sent as multiple IP fragments.
1334         */
1335        offset = skb->csum_start - skb_headroom(skb);
1336        csum = skb_checksum(skb, offset, skb->len- offset, 0);
1337        offset += skb->csum_offset;
1338        *(__sum16 *)(skb->data + offset) = csum_fold(csum);
1339        skb->ip_summed = CHECKSUM_NONE;
1340
1341        /* Check if there is enough headroom to insert fragment header. */
1342        if ((skb_headroom(skb) < frag_hdr_sz) &&
1343            pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC))
1344                goto out;
1345
1346        /* Find the unfragmentable header and shift it left by frag_hdr_sz
1347         * bytes to insert fragment header.
1348         */
1349        unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
1350        nexthdr = *prevhdr;
1351        *prevhdr = NEXTHDR_FRAGMENT;
1352        unfrag_len = skb_network_header(skb) - skb_mac_header(skb) +
1353                     unfrag_ip6hlen;
1354        mac_start = skb_mac_header(skb);
1355        memmove(mac_start-frag_hdr_sz, mac_start, unfrag_len);
1356
1357        skb->mac_header -= frag_hdr_sz;
1358        skb->network_header -= frag_hdr_sz;
1359
1360        fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
1361        fptr->nexthdr = nexthdr;
1362        fptr->reserved = 0;
1363        ipv6_select_ident(fptr);
1364
1365        /* Fragment the skb. ipv6 header and the remaining fields of the
1366         * fragment header are updated in ipv6_gso_segment()
1367         */
1368        segs = skb_segment(skb, features);
1369
1370out:
1371        return segs;
1372}
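
/*
 * In short: for UDP fragmentation offload the checksum is finalized in
 * software here (hardware cannot checksum a datagram that will leave as
 * several IP fragments), room is made for an IPv6 fragment header in
 * front of the unfragmentable part, and skb_segment() then slices the
 * payload; ipv6_gso_segment() fills in the remaining per-fragment
 * fields.
 */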
1373
1374static const struct inet6_protocol udpv6_protocol = {
1375        .handler        =       udpv6_rcv,
1376        .err_handler    =       udpv6_err,
1377        .gso_send_check =       udp6_ufo_send_check,
1378        .gso_segment    =       udp6_ufo_fragment,
1379        .flags          =       INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1380};
1381
1382/* ------------------------------------------------------------------------ */
1383#ifdef CONFIG_PROC_FS
1384
1385static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket)
1386{
1387        struct inet_sock *inet = inet_sk(sp);
1388        struct ipv6_pinfo *np = inet6_sk(sp);
1389        struct in6_addr *dest, *src;
1390        __u16 destp, srcp;
1391
1392        dest  = &np->daddr;
1393        src   = &np->rcv_saddr;
1394        destp = ntohs(inet->inet_dport);
1395        srcp  = ntohs(inet->inet_sport);
1396        seq_printf(seq,
1397                   "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1398                   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n",
1399                   bucket,
1400                   src->s6_addr32[0], src->s6_addr32[1],
1401                   src->s6_addr32[2], src->s6_addr32[3], srcp,
1402                   dest->s6_addr32[0], dest->s6_addr32[1],
1403                   dest->s6_addr32[2], dest->s6_addr32[3], destp,
1404                   sp->sk_state,
1405                   sk_wmem_alloc_get(sp),
1406                   sk_rmem_alloc_get(sp),
1407                   0, 0L, 0,
1408                   sock_i_uid(sp), 0,
1409                   sock_i_ino(sp),
1410                   atomic_read(&sp->sk_refcnt), sp,
1411                   atomic_read(&sp->sk_drops));
1412}
1413
1414int udp6_seq_show(struct seq_file *seq, void *v)
1415{
1416        if (v == SEQ_START_TOKEN)
1417                seq_printf(seq,
1418                           "  sl  "
1419                           "local_address                         "
1420                           "remote_address                        "
1421                           "st tx_queue rx_queue tr tm->when retrnsmt"
1422                           "   uid  timeout inode ref pointer drops\n");
1423        else
1424                udp6_sock_seq_show(seq, v, ((struct udp_iter_state *)seq->private)->bucket);
1425        return 0;
1426}
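
/*
 * For reference, a line of /proc/net/udp6 produced by the format above
 * looks roughly like the following (fields: slot, local and remote
 * address:port, state, tx/rx queues, timer info, uid, timeout, inode,
 * refcount, socket pointer, drops; the values are purely illustrative):
 *
 *  1785: 00000000000000000000000000000000:14E9 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000   102        0 7765 2 ffff88003d0b4400 0
 */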
1427
1428static struct udp_seq_afinfo udp6_seq_afinfo = {
1429        .name           = "udp6",
1430        .family         = AF_INET6,
1431        .udp_table      = &udp_table,
1432        .seq_fops       = {
1433                .owner  =       THIS_MODULE,
1434        },
1435        .seq_ops        = {
1436                .show           = udp6_seq_show,
1437        },
1438};
1439
1440int __net_init udp6_proc_init(struct net *net)
1441{
1442        return udp_proc_register(net, &udp6_seq_afinfo);
1443}
1444
1445void udp6_proc_exit(struct net *net) {
1446        udp_proc_unregister(net, &udp6_seq_afinfo);
1447}
1448#endif /* CONFIG_PROC_FS */
1449
1450/* ------------------------------------------------------------------------ */
1451
1452struct proto udpv6_prot = {
1453        .name              = "UDPv6",
1454        .owner             = THIS_MODULE,
1455        .close             = udp_lib_close,
1456        .connect           = ip6_datagram_connect,
1457        .disconnect        = udp_disconnect,
1458        .ioctl             = udp_ioctl,
1459        .destroy           = udpv6_destroy_sock,
1460        .setsockopt        = udpv6_setsockopt,
1461        .getsockopt        = udpv6_getsockopt,
1462        .sendmsg           = udpv6_sendmsg,
1463        .recvmsg           = udpv6_recvmsg,
1464        .backlog_rcv       = udpv6_queue_rcv_skb,
1465        .hash              = udp_lib_hash,
1466        .unhash            = udp_lib_unhash,
1467        .rehash            = udp_v6_rehash,
1468        .get_port          = udp_v6_get_port,
1469        .memory_allocated  = &udp_memory_allocated,
1470        .sysctl_mem        = sysctl_udp_mem,
1471        .sysctl_wmem       = &sysctl_udp_wmem_min,
1472        .sysctl_rmem       = &sysctl_udp_rmem_min,
1473        .obj_size          = sizeof(struct udp6_sock),
1474        .slab_flags        = SLAB_DESTROY_BY_RCU,
1475        .h.udp_table       = &udp_table,
1476#ifdef CONFIG_COMPAT
1477        .compat_setsockopt = compat_udpv6_setsockopt,
1478        .compat_getsockopt = compat_udpv6_getsockopt,
1479#endif
1480        .clear_sk          = sk_prot_clear_portaddr_nulls,
1481};
1482
1483static struct inet_protosw udpv6_protosw = {
1484        .type =      SOCK_DGRAM,
1485        .protocol =  IPPROTO_UDP,
1486        .prot =      &udpv6_prot,
1487        .ops =       &inet6_dgram_ops,
1488        .no_check =  UDP_CSUM_DEFAULT,
1489        .flags =     INET_PROTOSW_PERMANENT,
1490};
1491
1492
1493int __init udpv6_init(void)
1494{
1495        int ret;
1496
1497        ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
1498        if (ret)
1499                goto out;
1500
1501        ret = inet6_register_protosw(&udpv6_protosw);
1502        if (ret)
1503                goto out_udpv6_protocol;
1504out:
1505        return ret;
1506
1507out_udpv6_protocol:
1508        inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
1509        goto out;
1510}
1511
1512void udpv6_exit(void)
1513{
1514        inet6_unregister_protosw(&udpv6_protosw);
1515        inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
1516}
1517