linux/net/sctp/input.c
   1/* SCTP kernel implementation
   2 * Copyright (c) 1999-2000 Cisco, Inc.
   3 * Copyright (c) 1999-2001 Motorola, Inc.
   4 * Copyright (c) 2001-2003 International Business Machines, Corp.
   5 * Copyright (c) 2001 Intel Corp.
   6 * Copyright (c) 2001 Nokia, Inc.
   7 * Copyright (c) 2001 La Monte H.P. Yarroll
   8 *
   9 * This file is part of the SCTP kernel implementation
  10 *
  11 * These functions handle all input from the IP layer into SCTP.
  12 *
  13 * This SCTP implementation is free software;
  14 * you can redistribute it and/or modify it under the terms of
  15 * the GNU General Public License as published by
  16 * the Free Software Foundation; either version 2, or (at your option)
  17 * any later version.
  18 *
  19 * This SCTP implementation is distributed in the hope that it
  20 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
  21 *                 ************************
  22 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  23 * See the GNU General Public License for more details.
  24 *
  25 * You should have received a copy of the GNU General Public License
  26 * along with GNU CC; see the file COPYING.  If not, write to
  27 * the Free Software Foundation, 59 Temple Place - Suite 330,
  28 * Boston, MA 02111-1307, USA.
  29 *
  30 * Please send any bug reports or fixes you make to the
  31 * email address(es):
  32 *    lksctp developers <linux-sctp@vger.kernel.org>
  33 *
  34 * Written or modified by:
  35 *    La Monte H.P. Yarroll <piggy@acm.org>
  36 *    Karl Knutson <karl@athena.chicago.il.us>
  37 *    Xingang Guo <xingang.guo@intel.com>
  38 *    Jon Grimm <jgrimm@us.ibm.com>
  39 *    Hui Huang <hui.huang@nokia.com>
  40 *    Daisy Chang <daisyc@us.ibm.com>
  41 *    Sridhar Samudrala <sri@us.ibm.com>
  42 *    Ardelle Fan <ardelle.fan@intel.com>
  43 */
  44
  45#include <linux/types.h>
  46#include <linux/list.h> /* For struct list_head */
  47#include <linux/socket.h>
  48#include <linux/ip.h>
  49#include <linux/time.h> /* For struct timeval */
  50#include <linux/slab.h>
  51#include <net/ip.h>
  52#include <net/icmp.h>
  53#include <net/snmp.h>
  54#include <net/sock.h>
  55#include <net/xfrm.h>
  56#include <net/sctp/sctp.h>
  57#include <net/sctp/sm.h>
  58#include <net/sctp/checksum.h>
  59#include <net/net_namespace.h>
  60
  61/* Forward declarations for internal helpers. */
  62static int sctp_rcv_ootb(struct sk_buff *);
  63static struct sctp_association *__sctp_rcv_lookup(struct net *net,
  64                                      struct sk_buff *skb,
  65                                      const union sctp_addr *paddr,
  66                                      const union sctp_addr *laddr,
  67                                      struct sctp_transport **transportp);
  68static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net,
  69                                                const union sctp_addr *laddr);
  70static struct sctp_association *__sctp_lookup_association(
  71                                        struct net *net,
  72                                        const union sctp_addr *local,
  73                                        const union sctp_addr *peer,
  74                                        struct sctp_transport **pt);
  75
  76static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
  77
  78
  79/* Verify the SCTP checksum of an incoming packet. */
  80static inline int sctp_rcv_checksum(struct net *net, struct sk_buff *skb)
  81{
  82        struct sctphdr *sh = sctp_hdr(skb);
  83        __le32 cmp = sh->checksum;
  84        __le32 val = sctp_compute_cksum(skb, 0);
  85
  86        if (val != cmp) {
  87                /* CRC failure, dump it. */
  88                SCTP_INC_STATS_BH(net, SCTP_MIB_CHECKSUMERRORS);
  89                return -1;
  90        }
  91        return 0;
  92}
  93
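    /* Per-packet state kept in skb->cb while the packet moves through
     * SCTP input.  The header union reserves room for the IPv4/IPv6
     * control-block data, and the chunk pointer lets sctp_backlog_rcv()
     * recover the parsed chunk after the skb has sat on a socket backlog.
     */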
  94struct sctp_input_cb {
  95        union {
  96                struct inet_skb_parm    h4;
  97#if IS_ENABLED(CONFIG_IPV6)
  98                struct inet6_skb_parm   h6;
  99#endif
 100        } header;
 101        struct sctp_chunk *chunk;
 102};
 103#define SCTP_INPUT_CB(__skb)    ((struct sctp_input_cb *)&((__skb)->cb[0]))
 104
 105/*
 106 * This is the routine which IP calls when receiving an SCTP packet.
 107 */
 108int sctp_rcv(struct sk_buff *skb)
 109{
 110        struct sock *sk;
 111        struct sctp_association *asoc;
 112        struct sctp_endpoint *ep = NULL;
 113        struct sctp_ep_common *rcvr;
 114        struct sctp_transport *transport = NULL;
 115        struct sctp_chunk *chunk;
 116        struct sctphdr *sh;
 117        union sctp_addr src;
 118        union sctp_addr dest;
 119        int family;
 120        struct sctp_af *af;
 121        struct net *net = dev_net(skb->dev);
 122
 123        if (skb->pkt_type != PACKET_HOST)
 124                goto discard_it;
 125
 126        SCTP_INC_STATS_BH(net, SCTP_MIB_INSCTPPACKS);
 127
 128        if (skb_linearize(skb))
 129                goto discard_it;
 130
 131        sh = sctp_hdr(skb);
 132
 133        /* Pull up the IP and SCTP headers. */
 134        __skb_pull(skb, skb_transport_offset(skb));
 135        if (skb->len < sizeof(struct sctphdr))
 136                goto discard_it;
 137        if (!sctp_checksum_disable && !skb_csum_unnecessary(skb) &&
 138                  sctp_rcv_checksum(net, skb) < 0)
 139                goto discard_it;
 140
 141        skb_pull(skb, sizeof(struct sctphdr));
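            /* From here on, skb->data points at the first chunk header. */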
 142
 143        /* Make sure we at least have a chunk header's worth of data left. */
 144        if (skb->len < sizeof(struct sctp_chunkhdr))
 145                goto discard_it;
 146
 147        family = ipver2af(ip_hdr(skb)->version);
 148        af = sctp_get_af_specific(family);
 149        if (unlikely(!af))
 150                goto discard_it;
 151
 152        /* Initialize the source and destination addresses for lookups. */
 153        af->from_skb(&src, skb, 1);
 154        af->from_skb(&dest, skb, 0);
 155
 156        /* If the packet is to or from a non-unicast address,
 157         * silently discard the packet.
 158         *
 159         * This is not clearly defined in the RFC except in section
 160         * 8.4 - OOTB handling.  However, based on the book "Stream Control
 161         * Transmission Protocol" 2.1, "It is important to note that the
 162         * IP address of an SCTP transport address must be a routable
 163         * unicast address.  In other words, IP multicast addresses and
 164         * IP broadcast addresses cannot be used in an SCTP transport
 165         * address."
 166         */
 167        if (!af->addr_valid(&src, NULL, skb) ||
 168            !af->addr_valid(&dest, NULL, skb))
 169                goto discard_it;
 170
 171        asoc = __sctp_rcv_lookup(net, skb, &src, &dest, &transport);
 172
 173        if (!asoc)
 174                ep = __sctp_rcv_lookup_endpoint(net, &dest);
 175
 176        /* Retrieve the common input handling substructure. */
 177        rcvr = asoc ? &asoc->base : &ep->base;
 178        sk = rcvr->sk;
 179
 180        /*
 181         * If a frame arrives on an interface and the receiving socket is
 182         * bound to another interface via SO_BINDTODEVICE, treat it as OOTB.
 183         */
 184        if (sk->sk_bound_dev_if &&
 185            (sk->sk_bound_dev_if != af->skb_iif(skb))) {
 186                if (asoc) {
 187                        sctp_association_put(asoc);
 188                        asoc = NULL;
 189                } else {
 190                        sctp_endpoint_put(ep);
 191                        ep = NULL;
 192                }
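                    /* Treat the packet as OOTB from here on: fall back to
                     * the control socket's endpoint for the rest of the
                     * input processing.
                     */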
 193                sk = net->sctp.ctl_sock;
 194                ep = sctp_sk(sk)->ep;
 195                sctp_endpoint_hold(ep);
 196                rcvr = &ep->base;
 197        }
 198
 199        /*
 200         * RFC 2960, 8.4 - Handle "Out of the blue" Packets.
 201         * An SCTP packet is called an "out of the blue" (OOTB)
 202         * packet if it is correctly formed, i.e., passed the
 203         * receiver's checksum check, but the receiver is not
 204         * able to identify the association to which this
 205         * packet belongs.
 206         */
 207        if (!asoc) {
 208                if (sctp_rcv_ootb(skb)) {
 209                        SCTP_INC_STATS_BH(net, SCTP_MIB_OUTOFBLUES);
 210                        goto discard_release;
 211                }
 212        }
 213
 214        if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family))
 215                goto discard_release;
 216        nf_reset(skb);
 217
 218        if (sk_filter(sk, skb))
 219                goto discard_release;
 220
 221        /* Create an SCTP packet structure. */
 222        chunk = sctp_chunkify(skb, asoc, sk);
 223        if (!chunk)
 224                goto discard_release;
 225        SCTP_INPUT_CB(skb)->chunk = chunk;
 226
 227        /* Remember what endpoint is to handle this packet. */
 228        chunk->rcvr = rcvr;
 229
 230        /* Remember the SCTP header. */
 231        chunk->sctp_hdr = sh;
 232
 233        /* Set the source and destination addresses of the incoming chunk.  */
 234        sctp_init_addrs(chunk, &src, &dest);
 235
 236        /* Remember where we came from.  */
 237        chunk->transport = transport;
 238
 239        /* Acquire access to the sock lock. Note: We are safe from other
 240         * bottom halves on this lock, but a user context may hold the lock
 241         * too, so check whether it is busy.
 242         */
 243        sctp_bh_lock_sock(sk);
 244
 245        if (sk != rcvr->sk) {
 246                /* Our cached sk is different from the rcvr->sk.  This is
 247                 * because migrate()/accept() may have moved the association
 248                 * to a new socket and released all the sockets.  So now we
 249                 * are holding a lock on the old socket while the user may
 250                 * be doing something with the new socket.  Switch our view
 251                 * of the current sk.
 252                 */
 253                sctp_bh_unlock_sock(sk);
 254                sk = rcvr->sk;
 255                sctp_bh_lock_sock(sk);
 256        }
 257
 258        if (sock_owned_by_user(sk)) {
 259                if (sctp_add_backlog(sk, skb)) {
 260                        sctp_bh_unlock_sock(sk);
 261                        sctp_chunk_free(chunk);
 262                        skb = NULL; /* sctp_chunk_free already freed the skb */
 263                        goto discard_release;
 264                }
 265                SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_BACKLOG);
 266        } else {
 267                SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_SOFTIRQ);
 268                sctp_inq_push(&chunk->rcvr->inqueue, chunk);
 269        }
 270
 271        sctp_bh_unlock_sock(sk);
 272
 273        /* Release the asoc/ep ref we took in the lookup calls. */
 274        if (asoc)
 275                sctp_association_put(asoc);
 276        else
 277                sctp_endpoint_put(ep);
 278
 279        return 0;
 280
 281discard_it:
 282        SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_DISCARDS);
 283        kfree_skb(skb);
 284        return 0;
 285
 286discard_release:
 287        /* Release the asoc/ep ref we took in the lookup calls. */
 288        if (asoc)
 289                sctp_association_put(asoc);
 290        else
 291                sctp_endpoint_put(ep);
 292
 293        goto discard_it;
 294}
 295
 296/* Process the backlog queue of the socket.  Every skb on
 297 * the backlog holds a ref on an association or endpoint.
 298 * We hold this ref throughout the state machine to make
 299 * sure that the structure we need is still around.
 300 */
 301int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 302{
 303        struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
 304        struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
 305        struct sctp_ep_common *rcvr = NULL;
 306        int backloged = 0;
 307
 308        rcvr = chunk->rcvr;
 309
 310        /* If the rcvr is dead then the association or endpoint
 311         * has been deleted and we can safely drop the chunk
 312         * and refs that we are holding.
 313         */
 314        if (rcvr->dead) {
 315                sctp_chunk_free(chunk);
 316                goto done;
 317        }
 318
 319        if (unlikely(rcvr->sk != sk)) {
 320                /* In this case, the association moved from one socket to
 321                 * another.  We are currently sitting on the backlog of the
 322                 * old socket, so we need to move.
 323                 * However, since we are here in the process context we
 324                 * need to make sure that the user doesn't own
 325                 * the new socket when we process the packet.
 326                 * If the new socket is user-owned, queue the chunk to the
 327                 * backlog of the new socket without dropping any refs.
 328                 * Otherwise, we can safely push the chunk on the inqueue.
 329                 */
 330
 331                sk = rcvr->sk;
 332                sctp_bh_lock_sock(sk);
 333
 334                if (sock_owned_by_user(sk)) {
 335                        if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
 336                                sctp_chunk_free(chunk);
 337                        else
 338                                backloged = 1;
 339                } else
 340                        sctp_inq_push(inqueue, chunk);
 341
 342                sctp_bh_unlock_sock(sk);
 343
 344                /* If the chunk was backlogged again, don't drop the refs */
 345                if (backloged)
 346                        return 0;
 347        } else {
 348                sctp_inq_push(inqueue, chunk);
 349        }
 350
 351done:
 352        /* Release the refs we took in sctp_add_backlog */
 353        if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
 354                sctp_association_put(sctp_assoc(rcvr));
 355        else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
 356                sctp_endpoint_put(sctp_ep(rcvr));
 357        else
 358                BUG();
 359
 360        return 0;
 361}
 362
 363static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
 364{
 365        struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
 366        struct sctp_ep_common *rcvr = chunk->rcvr;
 367        int ret;
 368
 369        ret = sk_add_backlog(sk, skb, sk->sk_rcvbuf);
 370        if (!ret) {
 371                /* Hold the assoc/ep while hanging on the backlog queue.
 372                 * This way, we know structures we need will not disappear
 373                 * from under us.
 374                 */
 375                if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
 376                        sctp_association_hold(sctp_assoc(rcvr));
 377                else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
 378                        sctp_endpoint_hold(sctp_ep(rcvr));
 379                else
 380                        BUG();
 381        }
 382        return ret;
 383
 384}
 385
 386/* Handle icmp frag needed error. */
 387void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
 388                           struct sctp_transport *t, __u32 pmtu)
 389{
 390        if (!t || (t->pathmtu <= pmtu))
 391                return;
 392
 393        if (sock_owned_by_user(sk)) {
 394                asoc->pmtu_pending = 1;
 395                t->pmtu_pending = 1;
 396                return;
 397        }
 398
 399        if (t->param_flags & SPP_PMTUD_ENABLE) {
 400                /* Update the transport's view of the MTU. */
 401                sctp_transport_update_pmtu(sk, t, pmtu);
 402
 403                /* Update association pmtu. */
 404                sctp_assoc_sync_pmtu(sk, asoc);
 405        }
 406
 407        /* Retransmit with the new pmtu setting.
 408         * Normally, if PMTU discovery is disabled, an ICMP Fragmentation
 409         * Needed will never be sent, but if a message was sent before
 410         * PMTU discovery was disabled that was larger than the PMTU, it
 411         * would not be fragmented, so it must be re-transmitted fragmented.
 412         */
 413        sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
 414}
 415
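    /* Handle an ICMP Redirect: refresh the cached route of the transport
     * that the error refers to, if one is set.
     */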
 416void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t,
 417                        struct sk_buff *skb)
 418{
 419        struct dst_entry *dst;
 420
 421        if (!t)
 422                return;
 423        dst = sctp_transport_dst_check(t);
 424        if (dst)
 425                dst->ops->redirect(dst, sk, skb);
 426}
 427
 428/*
 429 * SCTP Implementer's Guide, 2.37 ICMP handling procedures
 430 *
 431 * ICMP8) If the ICMP code is an "Unrecognized next header type encountered"
 432 *        or a "Protocol Unreachable", treat this message as an abort
 433 *        with the T bit set.
 434 *
 435 * This function sends an event to the state machine, which will abort the
 436 * association.
 437 *
 438 */
 439void sctp_icmp_proto_unreachable(struct sock *sk,
 440                           struct sctp_association *asoc,
 441                           struct sctp_transport *t)
 442{
 443        if (sock_owned_by_user(sk)) {
 444                if (timer_pending(&t->proto_unreach_timer))
 445                        return;
 446                else {
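                            /* Socket is owned by the user: defer the event
                             * by arming a short (HZ/20) timer.  mod_timer()
                             * returns 0 when the timer was not already
                             * pending, so take a ref on the association for
                             * the armed timer.
                             */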
 447                        if (!mod_timer(&t->proto_unreach_timer,
 448                                                jiffies + (HZ/20)))
 449                                sctp_association_hold(asoc);
 450                }
 451        } else {
 452                struct net *net = sock_net(sk);
 453
 454                pr_debug("%s: unrecognized next header type "
 455                         "encountered!\n", __func__);
 456
 457                if (del_timer(&t->proto_unreach_timer))
 458                        sctp_association_put(asoc);
 459
 460                sctp_do_sm(net, SCTP_EVENT_T_OTHER,
 461                           SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
 462                           asoc->state, asoc->ep, asoc, t,
 463                           GFP_ATOMIC);
 464        }
 465}
 466
 467/* Common lookup code for icmp/icmpv6 error handler. */
 468struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
 469                             struct sctphdr *sctphdr,
 470                             struct sctp_association **app,
 471                             struct sctp_transport **tpp)
 472{
 473        union sctp_addr saddr;
 474        union sctp_addr daddr;
 475        struct sctp_af *af;
 476        struct sock *sk = NULL;
 477        struct sctp_association *asoc;
 478        struct sctp_transport *transport = NULL;
 479        struct sctp_init_chunk *chunkhdr;
 480        __u32 vtag = ntohl(sctphdr->vtag);
 481        int len = skb->len - ((void *)sctphdr - (void *)skb->data);
 482
 483        *app = NULL; *tpp = NULL;
 484
 485        af = sctp_get_af_specific(family);
 486        if (unlikely(!af)) {
 487                return NULL;
 488        }
 489
 490        /* Initialize the source and destination addresses for lookups. */
 491        af->from_skb(&saddr, skb, 1);
 492        af->from_skb(&daddr, skb, 0);
 493
 494        /* Look for an association that matches the incoming ICMP error
 495         * packet.
 496         */
 497        asoc = __sctp_lookup_association(net, &saddr, &daddr, &transport);
 498        if (!asoc)
 499                return NULL;
 500
 501        sk = asoc->base.sk;
 502
 503        /* RFC 4960, Appendix C. ICMP Handling
 504         *
 505         * ICMP6) An implementation MUST validate that the Verification Tag
 506         * contained in the ICMP message matches the Verification Tag of
 507         * the peer.  If the Verification Tag is not 0 and does NOT
 508         * match, discard the ICMP message.  If it is 0 and the ICMP
 509         * message contains enough bytes to verify that the chunk type is
 510         * an INIT chunk and that the Initiate Tag matches the tag of the
 511         * peer, continue with ICMP7.  If the ICMP message is too short
 512         * or the chunk type or the Initiate Tag does not match, silently
 513         * discard the packet.
 514         */
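            /* 'len', computed above, is how many bytes of the offending
             * SCTP packet the ICMP payload actually carries; the INIT
             * check below needs the chunk header plus the initiate tag.
             */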
 515        if (vtag == 0) {
 516                chunkhdr = (void *)sctphdr + sizeof(struct sctphdr);
 517                if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t)
 518                          + sizeof(__be32) ||
 519                    chunkhdr->chunk_hdr.type != SCTP_CID_INIT ||
 520                    ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) {
 521                        goto out;
 522                }
 523        } else if (vtag != asoc->c.peer_vtag) {
 524                goto out;
 525        }
 526
 527        sctp_bh_lock_sock(sk);
 528
 529        /* If too many ICMPs get dropped on busy
 530         * servers this needs to be solved differently.
 531         */
 532        if (sock_owned_by_user(sk))
 533                NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
 534
 535        *app = asoc;
 536        *tpp = transport;
 537        return sk;
 538
 539out:
 540        if (asoc)
 541                sctp_association_put(asoc);
 542        return NULL;
 543}
 544
 545/* Common cleanup code for icmp/icmpv6 error handler. */
 546void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
 547{
 548        sctp_bh_unlock_sock(sk);
 549        if (asoc)
 550                sctp_association_put(asoc);
 551}
 552
 553/*
 554 * This routine is called by the ICMP module when it gets some
 555 * sort of error condition.  If err < 0 then the socket should
 556 * be closed and the error returned to the user.  If err > 0
 557 * it's just the icmp type << 8 | icmp code.  After adjustment
 558 * header points to the first 8 bytes of the sctp header.  We need
 559 * to find the appropriate port.
 560 *
 561 * The locking strategy used here is very "optimistic". When
 562 * someone else accesses the socket the ICMP is just dropped
 563 * and for some paths there is no check at all.
 564 * A more general error queue to queue errors for later handling
 565 * is probably better.
 566 *
 567 */
 568void sctp_v4_err(struct sk_buff *skb, __u32 info)
 569{
 570        const struct iphdr *iph = (const struct iphdr *)skb->data;
 571        const int ihlen = iph->ihl * 4;
 572        const int type = icmp_hdr(skb)->type;
 573        const int code = icmp_hdr(skb)->code;
 574        struct sock *sk;
 575        struct sctp_association *asoc = NULL;
 576        struct sctp_transport *transport;
 577        struct inet_sock *inet;
 578        __u16 saveip, savesctp;
 579        int err;
 580        struct net *net = dev_net(skb->dev);
 581
 582        if (skb->len < ihlen + 8) {
 583                ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
 584                return;
 585        }
 586
 587        /* Fix up skb to look at the embedded net header. */
 588        saveip = skb->network_header;
 589        savesctp = skb->transport_header;
 590        skb_reset_network_header(skb);
 591        skb_set_transport_header(skb, ihlen);
 592        sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &transport);
 593        /* Put back the original values. */
 594        skb->network_header = saveip;
 595        skb->transport_header = savesctp;
 596        if (!sk) {
 597                ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
 598                return;
 599        }
 600        /* Warning:  The sock lock is held.  Remember to call
 601         * sctp_err_finish!
 602         */
 603
 604        switch (type) {
 605        case ICMP_PARAMETERPROB:
 606                err = EPROTO;
 607                break;
 608        case ICMP_DEST_UNREACH:
 609                if (code > NR_ICMP_UNREACH)
 610                        goto out_unlock;
 611
 612                /* PMTU discovery (RFC1191) */
 613                if (ICMP_FRAG_NEEDED == code) {
 614                        sctp_icmp_frag_needed(sk, asoc, transport, info);
 615                        goto out_unlock;
 616                }
 617                else {
 618                        if (ICMP_PROT_UNREACH == code) {
 619                                sctp_icmp_proto_unreachable(sk, asoc,
 620                                                            transport);
 621                                goto out_unlock;
 622                        }
 623                }
 624                err = icmp_err_convert[code].errno;
 625                break;
 626        case ICMP_TIME_EXCEEDED:
 627                /* Ignore any time exceeded errors due to fragment reassembly
 628                 * timeouts.
 629                 */
 630                if (ICMP_EXC_FRAGTIME == code)
 631                        goto out_unlock;
 632
 633                err = EHOSTUNREACH;
 634                break;
 635        case ICMP_REDIRECT:
 636                sctp_icmp_redirect(sk, transport, skb);
 637                /* Fall through to out_unlock. */
 638        default:
 639                goto out_unlock;
 640        }
 641
 642        inet = inet_sk(sk);
 643        if (!sock_owned_by_user(sk) && inet->recverr) {
 644                sk->sk_err = err;
 645                sk->sk_error_report(sk);
 646        } else {  /* Only an error on timeout */
 647                sk->sk_err_soft = err;
 648        }
 649
 650out_unlock:
 651        sctp_err_finish(sk, asoc);
 652}
 653
 654/*
 655 * RFC 2960, 8.4 - Handle "Out of the blue" Packets.
 656 *
 657 * This function scans all the chunks in the OOTB packet to determine if
 658 * the packet should be discarded right away.  If a response might be needed
 659 * for this packet, or if further processing is possible, the packet will
 660 * be queued to a proper inqueue for the next phase of handling.
 661 *
 662 * Output:
 663 * Return 0 - If further processing is needed.
 664 * Return 1 - If the packet can be discarded right away.
 665 */
 666static int sctp_rcv_ootb(struct sk_buff *skb)
 667{
 668        sctp_chunkhdr_t *ch;
 669        __u8 *ch_end;
 670
 671        ch = (sctp_chunkhdr_t *) skb->data;
 672
 673        /* Scan through all the chunks in the packet.  */
 674        do {
 675                /* Break out if the chunk length is less than the minimum. */
 676                if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
 677                        break;
 678
 679                ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
 680                if (ch_end > skb_tail_pointer(skb))
 681                        break;
 682
 683                /* RFC 2960 8.4, 2) If the OOTB packet contains an ABORT chunk, the
 684                 * receiver MUST silently discard the OOTB packet and take no
 685                 * further action.
 686                 */
 687                if (SCTP_CID_ABORT == ch->type)
 688                        goto discard;
 689
 690                /* RFC 2960 8.4, 6) If the packet contains a SHUTDOWN COMPLETE
 691                 * chunk, the receiver should silently discard the packet
 692                 * and take no further action.
 693                 */
 694                if (SCTP_CID_SHUTDOWN_COMPLETE == ch->type)
 695                        goto discard;
 696
 697                /* RFC 4460, 2.11.2
 698                 * This will discard packets with INIT chunk bundled as
 699                 * subsequent chunks in the packet.  When INIT is first,
 700                 * the normal INIT processing will discard the chunk.
 701                 */
 702                if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data)
 703                        goto discard;
 704
 705                ch = (sctp_chunkhdr_t *) ch_end;
 706        } while (ch_end < skb_tail_pointer(skb));
 707
 708        return 0;
 709
 710discard:
 711        return 1;
 712}
 713
 714/* Insert endpoint into the hash table.  */
 715static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
 716{
 717        struct net *net = sock_net(ep->base.sk);
 718        struct sctp_ep_common *epb;
 719        struct sctp_hashbucket *head;
 720
 721        epb = &ep->base;
 722
 723        epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
 724        head = &sctp_ep_hashtable[epb->hashent];
 725
 726        sctp_write_lock(&head->lock);
 727        hlist_add_head(&epb->node, &head->chain);
 728        sctp_write_unlock(&head->lock);
 729}
 730
 731/* Add an endpoint to the hash. Local BH-safe. */
 732void sctp_hash_endpoint(struct sctp_endpoint *ep)
 733{
 734        sctp_local_bh_disable();
 735        __sctp_hash_endpoint(ep);
 736        sctp_local_bh_enable();
 737}
 738
 739/* Remove endpoint from the hash table.  */
 740static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
 741{
 742        struct net *net = sock_net(ep->base.sk);
 743        struct sctp_hashbucket *head;
 744        struct sctp_ep_common *epb;
 745
 746        epb = &ep->base;
 747
 748        epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
 749
 750        head = &sctp_ep_hashtable[epb->hashent];
 751
 752        sctp_write_lock(&head->lock);
 753        hlist_del_init(&epb->node);
 754        sctp_write_unlock(&head->lock);
 755}
 756
 757/* Remove endpoint from the hash.  Local BH-safe. */
 758void sctp_unhash_endpoint(struct sctp_endpoint *ep)
 759{
 760        sctp_local_bh_disable();
 761        __sctp_unhash_endpoint(ep);
 762        sctp_local_bh_enable();
 763}
 764
 765/* Look up an endpoint. */
 766static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net,
 767                                                const union sctp_addr *laddr)
 768{
 769        struct sctp_hashbucket *head;
 770        struct sctp_ep_common *epb;
 771        struct sctp_endpoint *ep;
 772        int hash;
 773
 774        hash = sctp_ep_hashfn(net, ntohs(laddr->v4.sin_port));
 775        head = &sctp_ep_hashtable[hash];
 776        read_lock(&head->lock);
 777        sctp_for_each_hentry(epb, &head->chain) {
 778                ep = sctp_ep(epb);
 779                if (sctp_endpoint_is_match(ep, net, laddr))
 780                        goto hit;
 781        }
 782
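            /* No matching endpoint is bound to this address, so fall back
             * to the control socket's endpoint and let the caller apply
             * OOTB handling.
             */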
 783        ep = sctp_sk(net->sctp.ctl_sock)->ep;
 784
 785hit:
 786        sctp_endpoint_hold(ep);
 787        read_unlock(&head->lock);
 788        return ep;
 789}
 790
 791/* Insert association into the hash table.  */
 792static void __sctp_hash_established(struct sctp_association *asoc)
 793{
 794        struct net *net = sock_net(asoc->base.sk);
 795        struct sctp_ep_common *epb;
 796        struct sctp_hashbucket *head;
 797
 798        epb = &asoc->base;
 799
 800        /* Calculate which chain this entry will belong to. */
 801        epb->hashent = sctp_assoc_hashfn(net, epb->bind_addr.port,
 802                                         asoc->peer.port);
 803
 804        head = &sctp_assoc_hashtable[epb->hashent];
 805
 806        sctp_write_lock(&head->lock);
 807        hlist_add_head(&epb->node, &head->chain);
 808        sctp_write_unlock(&head->lock);
 809}
 810
 811/* Add an association to the hash. Local BH-safe. */
 812void sctp_hash_established(struct sctp_association *asoc)
 813{
 814        if (asoc->temp)
 815                return;
 816
 817        sctp_local_bh_disable();
 818        __sctp_hash_established(asoc);
 819        sctp_local_bh_enable();
 820}
 821
 822/* Remove association from the hash table.  */
 823static void __sctp_unhash_established(struct sctp_association *asoc)
 824{
 825        struct net *net = sock_net(asoc->base.sk);
 826        struct sctp_hashbucket *head;
 827        struct sctp_ep_common *epb;
 828
 829        epb = &asoc->base;
 830
 831        epb->hashent = sctp_assoc_hashfn(net, epb->bind_addr.port,
 832                                         asoc->peer.port);
 833
 834        head = &sctp_assoc_hashtable[epb->hashent];
 835
 836        sctp_write_lock(&head->lock);
 837        hlist_del_init(&epb->node);
 838        sctp_write_unlock(&head->lock);
 839}
 840
 841/* Remove association from the hash table.  Local BH-safe. */
 842void sctp_unhash_established(struct sctp_association *asoc)
 843{
 844        if (asoc->temp)
 845                return;
 846
 847        sctp_local_bh_disable();
 848        __sctp_unhash_established(asoc);
 849        sctp_local_bh_enable();
 850}
 851
 852/* Look up an association. */
 853static struct sctp_association *__sctp_lookup_association(
 854                                        struct net *net,
 855                                        const union sctp_addr *local,
 856                                        const union sctp_addr *peer,
 857                                        struct sctp_transport **pt)
 858{
 859        struct sctp_hashbucket *head;
 860        struct sctp_ep_common *epb;
 861        struct sctp_association *asoc;
 862        struct sctp_transport *transport;
 863        int hash;
 864
 865        /* Optimize here for a direct hit; only listening connections can
 866         * have wildcards anyway.
 867         */
 868        hash = sctp_assoc_hashfn(net, ntohs(local->v4.sin_port),
 869                                 ntohs(peer->v4.sin_port));
 870        head = &sctp_assoc_hashtable[hash];
 871        read_lock(&head->lock);
 872        sctp_for_each_hentry(epb, &head->chain) {
 873                asoc = sctp_assoc(epb);
 874                transport = sctp_assoc_is_match(asoc, net, local, peer);
 875                if (transport)
 876                        goto hit;
 877        }
 878
 879        read_unlock(&head->lock);
 880
 881        return NULL;
 882
 883hit:
 884        *pt = transport;
 885        sctp_association_hold(asoc);
 886        read_unlock(&head->lock);
 887        return asoc;
 888}
 889
 890/* Look up an association. BH-safe. */
 891static
 892struct sctp_association *sctp_lookup_association(struct net *net,
 893                                                 const union sctp_addr *laddr,
 894                                                 const union sctp_addr *paddr,
 895                                                 struct sctp_transport **transportp)
 896{
 897        struct sctp_association *asoc;
 898
 899        sctp_local_bh_disable();
 900        asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
 901        sctp_local_bh_enable();
 902
 903        return asoc;
 904}
 905
 906/* Is there an association matching the given local and peer addresses? */
 907int sctp_has_association(struct net *net,
 908                         const union sctp_addr *laddr,
 909                         const union sctp_addr *paddr)
 910{
 911        struct sctp_association *asoc;
 912        struct sctp_transport *transport;
 913
 914        if ((asoc = sctp_lookup_association(net, laddr, paddr, &transport))) {
 915                sctp_association_put(asoc);
 916                return 1;
 917        }
 918
 919        return 0;
 920}
 921
 922/*
 923 * SCTP Implementors Guide, 2.18 Handling of address
 924 * parameters within the INIT or INIT-ACK.
 925 *
 926 * D) When searching for a matching TCB upon reception of an INIT
 927 *    or INIT-ACK chunk the receiver SHOULD use not only the
 928 *    source address of the packet (containing the INIT or
 929 *    INIT-ACK) but the receiver SHOULD also use all valid
 930 *    address parameters contained within the chunk.
 931 *
 932 * 2.18.3 Solution description
 933 *
 934 * This new text clearly specifies to an implementor the need
 935 * to look within the INIT or INIT-ACK. Any implementation that
 936 * does not do this may not be able to establish associations
 937 * in certain circumstances.
 938 *
 939 */
 940static struct sctp_association *__sctp_rcv_init_lookup(struct net *net,
 941        struct sk_buff *skb,
 942        const union sctp_addr *laddr, struct sctp_transport **transportp)
 943{
 944        struct sctp_association *asoc;
 945        union sctp_addr addr;
 946        union sctp_addr *paddr = &addr;
 947        struct sctphdr *sh = sctp_hdr(skb);
 948        union sctp_params params;
 949        sctp_init_chunk_t *init;
 950        struct sctp_transport *transport;
 951        struct sctp_af *af;
 952
 953        /*
 954         * This code will NOT touch anything inside the chunk--it is
 955         * strictly READ-ONLY.
 956         *
 957         * RFC 2960 3  SCTP packet Format
 958         *
 959         * Multiple chunks can be bundled into one SCTP packet up to
 960         * the MTU size, except for the INIT, INIT ACK, and SHUTDOWN
 961         * COMPLETE chunks.  These chunks MUST NOT be bundled with any
 962         * other chunk in a packet.  See Section 6.10 for more details
 963         * on chunk bundling.
 964         */
 965
 966        /* Find the start of the TLVs and the end of the chunk.  This is
 967         * the region we search for address parameters.
 968         */
 969        init = (sctp_init_chunk_t *)skb->data;
 970
 971        /* Walk the parameters looking for embedded addresses. */
 972        sctp_walk_params(params, init, init_hdr.params) {
 973
 974                /* Note: Ignoring hostname addresses. */
 975                af = sctp_get_af_specific(param_type2af(params.p->type));
 976                if (!af)
 977                        continue;
 978
 979                af->from_addr_param(paddr, params.addr, sh->source, 0);
 980
 981                asoc = __sctp_lookup_association(net, laddr, paddr, &transport);
 982                if (asoc)
 983                        return asoc;
 984        }
 985
 986        return NULL;
 987}
 988
 989/* ADD-IP, Section 5.2
 990 * When an endpoint receives an ASCONF Chunk from the remote peer
 991 * special procedures may be needed to identify the association the
 992 * ASCONF Chunk is associated with. To properly find the association
 993 * the following procedures SHOULD be followed:
 994 *
 995 * D2) If the association is not found, use the address found in the
 996 * Address Parameter TLV combined with the port number found in the
 997 * SCTP common header. If found proceed to rule D4.
 998 *
 999 * D2-ext) If more than one ASCONF Chunks are packed together, use the
1000 * address found in the ASCONF Address Parameter TLV of each of the
1001 * subsequent ASCONF Chunks. If found, proceed to rule D4.
1002 */
1003static struct sctp_association *__sctp_rcv_asconf_lookup(
1004                                        struct net *net,
1005                                        sctp_chunkhdr_t *ch,
1006                                        const union sctp_addr *laddr,
1007                                        __be16 peer_port,
1008                                        struct sctp_transport **transportp)
1009{
1010        sctp_addip_chunk_t *asconf = (struct sctp_addip_chunk *)ch;
1011        struct sctp_af *af;
1012        union sctp_addr_param *param;
1013        union sctp_addr paddr;
1014
1015        /* Skip over the ADDIP header and find the Address parameter */
1016        param = (union sctp_addr_param *)(asconf + 1);
1017
1018        af = sctp_get_af_specific(param_type2af(param->p.type));
1019        if (unlikely(!af))
1020                return NULL;
1021
1022        af->from_addr_param(&paddr, param, peer_port, 0);
1023
1024        return __sctp_lookup_association(net, laddr, &paddr, transportp);
1025}
1026
1027
1028/* SCTP-AUTH, Section 6.3:
1029*    If the receiver does not find a STCB for a packet containing an AUTH
1030*    chunk as the first chunk and not a COOKIE-ECHO chunk as the second
1031*    chunk, it MUST use the chunks after the AUTH chunk to look up an existing
1032*    association.
1033*
1034* This means that any chunks that can help us identify the association need
1035* to be looked at to find this association.
1036*/
1037static struct sctp_association *__sctp_rcv_walk_lookup(struct net *net,
1038                                      struct sk_buff *skb,
1039                                      const union sctp_addr *laddr,
1040                                      struct sctp_transport **transportp)
1041{
1042        struct sctp_association *asoc = NULL;
1043        sctp_chunkhdr_t *ch;
1044        int have_auth = 0;
1045        unsigned int chunk_num = 1;
1046        __u8 *ch_end;
1047
1048        /* Walk through the chunks looking for AUTH or ASCONF chunks
1049         * to help us find the association.
1050         */
1051        ch = (sctp_chunkhdr_t *) skb->data;
1052        do {
1053                /* Break out if the chunk length is less than the minimum. */
1054                if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
1055                        break;
1056
1057                ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
1058                if (ch_end > skb_tail_pointer(skb))
1059                        break;
1060
1061                switch (ch->type) {
1062                    case SCTP_CID_AUTH:
1063                            have_auth = chunk_num;
1064                            break;
1065
1066                    case SCTP_CID_COOKIE_ECHO:
1067                            /* If a packet arrives containing an AUTH chunk as
1068                             * a first chunk, a COOKIE-ECHO chunk as the second
1069                             * chunk, and possibly more chunks after them, and
1070                             * the receiver does not have an STCB for that
1071                             * packet, then authentication is based on
1072                             * the contents of the COOKIE-ECHO chunk.
1073                             */
1074                            if (have_auth == 1 && chunk_num == 2)
1075                                    return NULL;
1076                            break;
1077
1078                    case SCTP_CID_ASCONF:
1079                            if (have_auth || net->sctp.addip_noauth)
1080                                    asoc = __sctp_rcv_asconf_lookup(
1081                                                        net, ch, laddr,
1082                                                        sctp_hdr(skb)->source,
1083                                                        transportp);
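                                    /* No break: fall through to the default case. */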
1084                    default:
1085                            break;
1086                }
1087
1088                if (asoc)
1089                        break;
1090
1091                ch = (sctp_chunkhdr_t *) ch_end;
1092                chunk_num++;
1093        } while (ch_end < skb_tail_pointer(skb));
1094
1095        return asoc;
1096}
1097
1098/*
1099 * There are circumstances when we need to look inside the SCTP packet
1100 * for information to help us find the association.   Examples
1101 * include looking inside of INIT/INIT-ACK chunks or after the AUTH
1102 * chunks.
1103 */
1104static struct sctp_association *__sctp_rcv_lookup_harder(struct net *net,
1105                                      struct sk_buff *skb,
1106                                      const union sctp_addr *laddr,
1107                                      struct sctp_transport **transportp)
1108{
1109        sctp_chunkhdr_t *ch;
1110
1111        ch = (sctp_chunkhdr_t *) skb->data;
1112
1113        /* The code below will attempt to walk the chunk and extract
1114         * parameter information.  Before we do that, we need to verify
1115         * that the chunk length doesn't cause overflow.  Otherwise, we'll
1116         * walk off the end.
1117         */
1118        if (WORD_ROUND(ntohs(ch->length)) > skb->len)
1119                return NULL;
1120
1121        /* If this is INIT/INIT-ACK look inside the chunk too. */
1122        switch (ch->type) {
1123        case SCTP_CID_INIT:
1124        case SCTP_CID_INIT_ACK:
1125                return __sctp_rcv_init_lookup(net, skb, laddr, transportp);
1126                break;
1127
1128        default:
1129                return __sctp_rcv_walk_lookup(net, skb, laddr, transportp);
1130                break;
1131        }
1132
1133
1134        return NULL;
1135}
1136
1137/* Lookup an association for an inbound skb. */
1138static struct sctp_association *__sctp_rcv_lookup(struct net *net,
1139                                      struct sk_buff *skb,
1140                                      const union sctp_addr *paddr,
1141                                      const union sctp_addr *laddr,
1142                                      struct sctp_transport **transportp)
1143{
1144        struct sctp_association *asoc;
1145
1146        asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
1147
1148        /* Further lookup for INIT/INIT-ACK packets.
1149         * SCTP Implementors Guide, 2.18 Handling of address
1150         * parameters within the INIT or INIT-ACK.
1151         */
1152        if (!asoc)
1153                asoc = __sctp_rcv_lookup_harder(net, skb, laddr, transportp);
1154
1155        return asoc;
1156}
1157