linux/net/sctp/input.c
   1/* SCTP kernel implementation
   2 * Copyright (c) 1999-2000 Cisco, Inc.
   3 * Copyright (c) 1999-2001 Motorola, Inc.
   4 * Copyright (c) 2001-2003 International Business Machines, Corp.
   5 * Copyright (c) 2001 Intel Corp.
   6 * Copyright (c) 2001 Nokia, Inc.
   7 * Copyright (c) 2001 La Monte H.P. Yarroll
   8 *
   9 * This file is part of the SCTP kernel implementation
  10 *
  11 * These functions handle all input from the IP layer into SCTP.
  12 *
  13 * This SCTP implementation is free software;
  14 * you can redistribute it and/or modify it under the terms of
  15 * the GNU General Public License as published by
  16 * the Free Software Foundation; either version 2, or (at your option)
  17 * any later version.
  18 *
  19 * This SCTP implementation is distributed in the hope that it
  20 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
  21 *                 ************************
  22 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  23 * See the GNU General Public License for more details.
  24 *
  25 * You should have received a copy of the GNU General Public License
  26 * along with GNU CC; see the file COPYING.  If not, see
  27 * <http://www.gnu.org/licenses/>.
  28 *
  29 * Please send any bug reports or fixes you make to the
  30 * email address(es):
  31 *    lksctp developers <linux-sctp@vger.kernel.org>
  32 *
  33 * Written or modified by:
  34 *    La Monte H.P. Yarroll <piggy@acm.org>
  35 *    Karl Knutson <karl@athena.chicago.il.us>
  36 *    Xingang Guo <xingang.guo@intel.com>
  37 *    Jon Grimm <jgrimm@us.ibm.com>
  38 *    Hui Huang <hui.huang@nokia.com>
  39 *    Daisy Chang <daisyc@us.ibm.com>
  40 *    Sridhar Samudrala <sri@us.ibm.com>
  41 *    Ardelle Fan <ardelle.fan@intel.com>
  42 */
  43
  44#include <linux/types.h>
  45#include <linux/list.h> /* For struct list_head */
  46#include <linux/socket.h>
  47#include <linux/ip.h>
  48#include <linux/time.h> /* For struct timeval */
  49#include <linux/slab.h>
  50#include <net/ip.h>
  51#include <net/icmp.h>
  52#include <net/snmp.h>
  53#include <net/sock.h>
  54#include <net/xfrm.h>
  55#include <net/sctp/sctp.h>
  56#include <net/sctp/sm.h>
  57#include <net/sctp/checksum.h>
  58#include <net/net_namespace.h>
  59
  60/* Forward declarations for internal helpers. */
  61static int sctp_rcv_ootb(struct sk_buff *);
  62static struct sctp_association *__sctp_rcv_lookup(struct net *net,
  63                                      struct sk_buff *skb,
  64                                      const union sctp_addr *paddr,
  65                                      const union sctp_addr *laddr,
  66                                      struct sctp_transport **transportp);
  67static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net,
  68                                                const union sctp_addr *laddr);
  69static struct sctp_association *__sctp_lookup_association(
  70                                        struct net *net,
  71                                        const union sctp_addr *local,
  72                                        const union sctp_addr *peer,
  73                                        struct sctp_transport **pt);
  74
  75static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
  76
  77
  78/* Verify the SCTP checksum (CRC32c) of an incoming packet.  */
  79static inline int sctp_rcv_checksum(struct net *net, struct sk_buff *skb)
  80{
  81        struct sctphdr *sh = sctp_hdr(skb);
  82        __le32 cmp = sh->checksum;
  83        __le32 val = sctp_compute_cksum(skb, 0);
  84
  85        if (val != cmp) {
  86                /* CRC failure, dump it. */
  87                __SCTP_INC_STATS(net, SCTP_MIB_CHECKSUMERRORS);
  88                return -1;
  89        }
  90        return 0;
  91}
  92
  93struct sctp_input_cb {
  94        union {
  95                struct inet_skb_parm    h4;
  96#if IS_ENABLED(CONFIG_IPV6)
  97                struct inet6_skb_parm   h6;
  98#endif
  99        } header;
 100        struct sctp_chunk *chunk;
 101};
 102#define SCTP_INPUT_CB(__skb)    ((struct sctp_input_cb *)&((__skb)->cb[0]))
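
/* Illustrative sketch (the helper below is hypothetical and unused): struct
 * sctp_input_cb is stashed in skb->cb[], a small fixed-size scratch area, so
 * it must never outgrow it.  Assuming BUILD_BUG_ON() (from <linux/bug.h>) is
 * reachable through the existing includes, the invariant could be asserted
 * at build time like this:
 */
static inline void sctp_input_cb_fits_in_skb_cb(void)
{
        BUILD_BUG_ON(sizeof(struct sctp_input_cb) >
                     sizeof(((struct sk_buff *)0)->cb));
}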
 103
 104/*
 105 * This is the routine which IP calls when receiving an SCTP packet.
 106 */
 107int sctp_rcv(struct sk_buff *skb)
 108{
 109        struct sock *sk;
 110        struct sctp_association *asoc;
 111        struct sctp_endpoint *ep = NULL;
 112        struct sctp_ep_common *rcvr;
 113        struct sctp_transport *transport = NULL;
 114        struct sctp_chunk *chunk;
 115        union sctp_addr src;
 116        union sctp_addr dest;
 117        int family;
 118        struct sctp_af *af;
 119        struct net *net = dev_net(skb->dev);
 120
 121        if (skb->pkt_type != PACKET_HOST)
 122                goto discard_it;
 123
 124        __SCTP_INC_STATS(net, SCTP_MIB_INSCTPPACKS);
 125
 126        if (skb_linearize(skb))
 127                goto discard_it;
 128
 129        /* Pull up the IP and SCTP headers. */
 130        __skb_pull(skb, skb_transport_offset(skb));
 131        if (skb->len < sizeof(struct sctphdr))
 132                goto discard_it;
 133
 134        skb->csum_valid = 0; /* Previous value not applicable */
 135        if (skb_csum_unnecessary(skb))
 136                __skb_decr_checksum_unnecessary(skb);
 137        else if (!sctp_checksum_disable && sctp_rcv_checksum(net, skb) < 0)
 138                goto discard_it;
 139        skb->csum_valid = 1;
 140
 141        skb_pull(skb, sizeof(struct sctphdr));
 142
 143        /* Make sure we at least have a chunk header's worth of data left. */
 144        if (skb->len < sizeof(struct sctp_chunkhdr))
 145                goto discard_it;
 146
 147        family = ipver2af(ip_hdr(skb)->version);
 148        af = sctp_get_af_specific(family);
 149        if (unlikely(!af))
 150                goto discard_it;
 151
 152        /* Initialize the source and destination addresses for lookups. */
 153        af->from_skb(&src, skb, 1);
 154        af->from_skb(&dest, skb, 0);
 155
 156        /* If the packet is to or from a non-unicast address,
 157         * silently discard the packet.
 158         *
 159         * This is not clearly defined in the RFC except in section
 160         * 8.4 - OOTB handling.  However, based on the book "Stream Control
 161         * Transmission Protocol" 2.1, "It is important to note that the
 162         * IP address of an SCTP transport address must be a routable
 163         * unicast address.  In other words, IP multicast addresses and
 164         * IP broadcast addresses cannot be used in an SCTP transport
 165         * address."
 166         */
 167        if (!af->addr_valid(&src, NULL, skb) ||
 168            !af->addr_valid(&dest, NULL, skb))
 169                goto discard_it;
 170
 171        asoc = __sctp_rcv_lookup(net, skb, &src, &dest, &transport);
 172
 173        if (!asoc)
 174                ep = __sctp_rcv_lookup_endpoint(net, &dest);
 175
 176        /* Retrieve the common input handling substructure. */
 177        rcvr = asoc ? &asoc->base : &ep->base;
 178        sk = rcvr->sk;
 179
 180        /*
 181         * If a frame arrives on an interface and the receiving socket is
 182         * bound to another interface, via SO_BINDTODEVICE, treat it as OOTB
 183         */
 184        if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) {
 185                if (asoc) {
 186                        sctp_association_put(asoc);
 187                        asoc = NULL;
 188                } else {
 189                        sctp_endpoint_put(ep);
 190                        ep = NULL;
 191                }
 192                sk = net->sctp.ctl_sock;
 193                ep = sctp_sk(sk)->ep;
 194                sctp_endpoint_hold(ep);
 195                rcvr = &ep->base;
 196        }
 197
 198        /*
 199         * RFC 2960, 8.4 - Handle "Out of the blue" Packets.
 200         * An SCTP packet is called an "out of the blue" (OOTB)
 201         * packet if it is correctly formed, i.e., passed the
 202         * receiver's checksum check, but the receiver is not
 203         * able to identify the association to which this
 204         * packet belongs.
 205         */
 206        if (!asoc) {
 207                if (sctp_rcv_ootb(skb)) {
 208                        __SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
 209                        goto discard_release;
 210                }
 211        }
 212
 213        if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family))
 214                goto discard_release;
 215        nf_reset(skb);
 216
 217        if (sk_filter(sk, skb))
 218                goto discard_release;
 219
 220        /* Create an SCTP packet structure. */
 221        chunk = sctp_chunkify(skb, asoc, sk, GFP_ATOMIC);
 222        if (!chunk)
 223                goto discard_release;
 224        SCTP_INPUT_CB(skb)->chunk = chunk;
 225
 226        /* Remember what endpoint is to handle this packet. */
 227        chunk->rcvr = rcvr;
 228
 229        /* Remember the SCTP header. */
 230        chunk->sctp_hdr = sctp_hdr(skb);
 231
 232        /* Set the source and destination addresses of the incoming chunk.  */
 233        sctp_init_addrs(chunk, &src, &dest);
 234
 235        /* Remember where we came from.  */
 236        chunk->transport = transport;
 237
 238        /* Acquire access to the sock lock. Note: We are safe from other
 239         * bottom halves on this lock, but a user may be in the lock too,
 240         * so check if it is busy.
 241         */
 242        bh_lock_sock(sk);
 243
 244        if (sk != rcvr->sk) {
 245                /* Our cached sk is different from the rcvr->sk.  This is
 246                 * because migrate()/accept() may have moved the association
 247                 * to a new socket and released all the sockets.  So now we
 248                 * are holding a lock on the old socket while the user may
 249                 * be doing something with the new socket.  Switch our view
 250                 * of the current sk.
 251                 */
 252                bh_unlock_sock(sk);
 253                sk = rcvr->sk;
 254                bh_lock_sock(sk);
 255        }
 256
 257        if (sock_owned_by_user(sk)) {
 258                if (sctp_add_backlog(sk, skb)) {
 259                        bh_unlock_sock(sk);
 260                        sctp_chunk_free(chunk);
 261                        skb = NULL; /* sctp_chunk_free already freed the skb */
 262                        goto discard_release;
 263                }
 264                __SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_BACKLOG);
 265        } else {
 266                __SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_SOFTIRQ);
 267                sctp_inq_push(&chunk->rcvr->inqueue, chunk);
 268        }
 269
 270        bh_unlock_sock(sk);
 271
 272        /* Release the asoc/ep ref we took in the lookup calls. */
 273        if (asoc)
 274                sctp_association_put(asoc);
 275        else
 276                sctp_endpoint_put(ep);
 277
 278        return 0;
 279
 280discard_it:
 281        __SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_DISCARDS);
 282        kfree_skb(skb);
 283        return 0;
 284
 285discard_release:
 286        /* Release the asoc/ep ref we took in the lookup calls. */
 287        if (asoc)
 288                sctp_association_put(asoc);
 289        else
 290                sctp_endpoint_put(ep);
 291
 292        goto discard_it;
 293}
 294
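/* For context, a hedged sketch rather than a definitive quote: sctp_rcv()
 * above and sctp_v4_err() below are the IPv4 entry points for SCTP.  Their
 * registration lives in net/sctp/protocol.c, roughly along these lines
 * (exact field settings should be checked against that file):
 *
 *      static const struct net_protocol sctp_protocol = {
 *              .handler     = sctp_rcv,
 *              .err_handler = sctp_v4_err,
 *              .no_policy   = 1,
 *      };
 *
 *      inet_add_protocol(&sctp_protocol, IPPROTO_SCTP);
 */
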
 295/* Process the backlog queue of the socket.  Every skb on
 296 * the backlog holds a ref on an association or endpoint.
 297 * We hold this ref throughout the state machine to make
 298 * sure that the structure we need is still around.
 299 */
 300int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 301{
 302        struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
 303        struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
 304        struct sctp_ep_common *rcvr = NULL;
 305        int backloged = 0;
 306
 307        rcvr = chunk->rcvr;
 308
 309        /* If the rcvr is dead then the association or endpoint
 310         * has been deleted and we can safely drop the chunk
 311         * and refs that we are holding.
 312         */
 313        if (rcvr->dead) {
 314                sctp_chunk_free(chunk);
 315                goto done;
 316        }
 317
 318        if (unlikely(rcvr->sk != sk)) {
 319                /* In this case, the association moved from one socket to
 320                 * another.  We are currently sitting on the backlog of the
 321                 * old socket, so we need to move.
 322                 * However, since we are here in the process context we
 323                 * need to make sure that the user doesn't own
 324                 * the new socket when we process the packet.
 325                 * If the new socket is user-owned, queue the chunk to the
 326                 * backlog of the new socket without dropping any refs.
 327                 * Otherwise, we can safely push the chunk on the inqueue.
 328                 */
 329
 330                sk = rcvr->sk;
 331                bh_lock_sock(sk);
 332
 333                if (sock_owned_by_user(sk)) {
 334                        if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
 335                                sctp_chunk_free(chunk);
 336                        else
 337                                backloged = 1;
 338                } else
 339                        sctp_inq_push(inqueue, chunk);
 340
 341                bh_unlock_sock(sk);
 342
 343                /* If the chunk was backlogged again, don't drop refs */
 344                if (backloged)
 345                        return 0;
 346        } else {
 347                sctp_inq_push(inqueue, chunk);
 348        }
 349
 350done:
 351        /* Release the refs we took in sctp_add_backlog */
 352        if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
 353                sctp_association_put(sctp_assoc(rcvr));
 354        else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
 355                sctp_endpoint_put(sctp_ep(rcvr));
 356        else
 357                BUG();
 358
 359        return 0;
 360}
 361
 362static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
 363{
 364        struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
 365        struct sctp_ep_common *rcvr = chunk->rcvr;
 366        int ret;
 367
 368        ret = sk_add_backlog(sk, skb, sk->sk_rcvbuf);
 369        if (!ret) {
 370                /* Hold the assoc/ep while hanging on the backlog queue.
 371                 * This way, we know structures we need will not disappear
 372                 * from us
 373                 */
 374                if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
 375                        sctp_association_hold(sctp_assoc(rcvr));
 376                else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
 377                        sctp_endpoint_hold(sctp_ep(rcvr));
 378                else
 379                        BUG();
 380        }
 381        return ret;
 382
 383}
 384
 385/* Handle icmp frag needed error. */
 386void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
 387                           struct sctp_transport *t, __u32 pmtu)
 388{
 389        if (!t || (t->pathmtu <= pmtu))
 390                return;
 391
 392        if (sock_owned_by_user(sk)) {
 393                asoc->pmtu_pending = 1;
 394                t->pmtu_pending = 1;
 395                return;
 396        }
 397
 398        if (t->param_flags & SPP_PMTUD_ENABLE) {
 399                /* Update the transport's view of the MTU */
 400                sctp_transport_update_pmtu(sk, t, pmtu);
 401
 402                /* Update association pmtu. */
 403                sctp_assoc_sync_pmtu(sk, asoc);
 404        }
 405
 406        /* Retransmit with the new pmtu setting.
 407         * Normally, if PMTU discovery is disabled, an ICMP Fragmentation
 408         * Needed will never be sent, but if a message was sent before
 409         * PMTU discovery was disabled that was larger than the PMTU, it
 410         * would not be fragmented, so it must be re-transmitted fragmented.
 411         */
 412        sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
 413}
 414
 415void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t,
 416                        struct sk_buff *skb)
 417{
 418        struct dst_entry *dst;
 419
 420        if (!t)
 421                return;
 422        dst = sctp_transport_dst_check(t);
 423        if (dst)
 424                dst->ops->redirect(dst, sk, skb);
 425}
 426
 427/*
 428 * SCTP Implementer's Guide, 2.37 ICMP handling procedures
 429 *
 430 * ICMP8) If the ICMP code is an "Unrecognized next header type encountered"
 431 *        or a "Protocol Unreachable", treat this message as an abort
 432 *        with the T bit set.
 433 *
 434 * This function sends an event to the state machine, which will abort the
 435 * association.
 436 *
 437 */
 438void sctp_icmp_proto_unreachable(struct sock *sk,
 439                           struct sctp_association *asoc,
 440                           struct sctp_transport *t)
 441{
 442        if (sock_owned_by_user(sk)) {
 443                if (timer_pending(&t->proto_unreach_timer))
 444                        return;
 445                else {
 446                        if (!mod_timer(&t->proto_unreach_timer,
 447                                                jiffies + (HZ/20)))
 448                                sctp_association_hold(asoc);
 449                }
 450        } else {
 451                struct net *net = sock_net(sk);
 452
 453                pr_debug("%s: unrecognized next header type encountered!\n",
 454                         __func__);
 455
 456                if (del_timer(&t->proto_unreach_timer))
 457                        sctp_association_put(asoc);
 458
 459                sctp_do_sm(net, SCTP_EVENT_T_OTHER,
 460                           SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
 461                           asoc->state, asoc->ep, asoc, t,
 462                           GFP_ATOMIC);
 463        }
 464}
 465
 466/* Common lookup code for icmp/icmpv6 error handler. */
 467struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
 468                             struct sctphdr *sctphdr,
 469                             struct sctp_association **app,
 470                             struct sctp_transport **tpp)
 471{
 472        union sctp_addr saddr;
 473        union sctp_addr daddr;
 474        struct sctp_af *af;
 475        struct sock *sk = NULL;
 476        struct sctp_association *asoc;
 477        struct sctp_transport *transport = NULL;
 478        struct sctp_init_chunk *chunkhdr;
 479        __u32 vtag = ntohl(sctphdr->vtag);
 480        int len = skb->len - ((void *)sctphdr - (void *)skb->data);
 481
 482        *app = NULL; *tpp = NULL;
 483
 484        af = sctp_get_af_specific(family);
 485        if (unlikely(!af)) {
 486                return NULL;
 487        }
 488
 489        /* Initialize the source and destination addresses for lookups. */
 490        af->from_skb(&saddr, skb, 1);
 491        af->from_skb(&daddr, skb, 0);
 492
 493        /* Look for an association that matches the incoming ICMP error
 494         * packet.
 495         */
 496        asoc = __sctp_lookup_association(net, &saddr, &daddr, &transport);
 497        if (!asoc)
 498                return NULL;
 499
 500        sk = asoc->base.sk;
 501
 502        /* RFC 4960, Appendix C. ICMP Handling
 503         *
 504         * ICMP6) An implementation MUST validate that the Verification Tag
 505         * contained in the ICMP message matches the Verification Tag of
 506         * the peer.  If the Verification Tag is not 0 and does NOT
 507         * match, discard the ICMP message.  If it is 0 and the ICMP
 508         * message contains enough bytes to verify that the chunk type is
 509         * an INIT chunk and that the Initiate Tag matches the tag of the
 510         * peer, continue with ICMP7.  If the ICMP message is too short
 511         * or the chunk type or the Initiate Tag does not match, silently
 512         * discard the packet.
 513         */
 514        if (vtag == 0) {
 515                chunkhdr = (void *)sctphdr + sizeof(struct sctphdr);
 516                if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t)
 517                          + sizeof(__be32) ||
 518                    chunkhdr->chunk_hdr.type != SCTP_CID_INIT ||
 519                    ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) {
 520                        goto out;
 521                }
 522        } else if (vtag != asoc->c.peer_vtag) {
 523                goto out;
 524        }
 525
 526        bh_lock_sock(sk);
 527
 528        /* If too many ICMPs get dropped on busy
 529         * servers this needs to be solved differently.
 530         */
 531        if (sock_owned_by_user(sk))
 532                __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 533
 534        *app = asoc;
 535        *tpp = transport;
 536        return sk;
 537
 538out:
 539        sctp_association_put(asoc);
 540        return NULL;
 541}
 542
 543/* Common cleanup code for icmp/icmpv6 error handler. */
 544void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
 545{
 546        bh_unlock_sock(sk);
 547        sctp_association_put(asoc);
 548}
 549
 550/*
 551 * This routine is called by the ICMP module when it gets some
 552 * sort of error condition.  If err < 0 then the socket should
 553 * be closed and the error returned to the user.  If err > 0
 554 * it's just the icmp type << 8 | icmp code.  After adjustment
 555 * header points to the first 8 bytes of the sctp header.  We need
 556 * to find the appropriate port.
 557 *
 558 * The locking strategy used here is very "optimistic". When
 559 * someone else accesses the socket the ICMP is just dropped
 560 * and for some paths there is no check at all.
 561 * A more general error queue to queue errors for later handling
 562 * is probably better.
 563 *
 564 */
 565void sctp_v4_err(struct sk_buff *skb, __u32 info)
 566{
 567        const struct iphdr *iph = (const struct iphdr *)skb->data;
 568        const int ihlen = iph->ihl * 4;
 569        const int type = icmp_hdr(skb)->type;
 570        const int code = icmp_hdr(skb)->code;
 571        struct sock *sk;
 572        struct sctp_association *asoc = NULL;
 573        struct sctp_transport *transport;
 574        struct inet_sock *inet;
 575        __u16 saveip, savesctp;
 576        int err;
 577        struct net *net = dev_net(skb->dev);
 578
 579        /* Fix up skb to look at the embedded net header. */
 580        saveip = skb->network_header;
 581        savesctp = skb->transport_header;
 582        skb_reset_network_header(skb);
 583        skb_set_transport_header(skb, ihlen);
 584        sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &transport);
 585        /* Put back the original values. */
 586        skb->network_header = saveip;
 587        skb->transport_header = savesctp;
 588        if (!sk) {
 589                __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
 590                return;
 591        }
 592        /* Warning:  The sock lock is held.  Remember to call
 593         * sctp_err_finish!
 594         */
 595
 596        switch (type) {
 597        case ICMP_PARAMETERPROB:
 598                err = EPROTO;
 599                break;
 600        case ICMP_DEST_UNREACH:
 601                if (code > NR_ICMP_UNREACH)
 602                        goto out_unlock;
 603
 604                /* PMTU discovery (RFC1191) */
 605                if (ICMP_FRAG_NEEDED == code) {
 606                        sctp_icmp_frag_needed(sk, asoc, transport,
 607                                              WORD_TRUNC(info));
 608                        goto out_unlock;
 609                } else {
 610                        if (ICMP_PROT_UNREACH == code) {
 611                                sctp_icmp_proto_unreachable(sk, asoc,
 612                                                            transport);
 613                                goto out_unlock;
 614                        }
 615                }
 616                err = icmp_err_convert[code].errno;
 617                break;
 618        case ICMP_TIME_EXCEEDED:
 619                /* Ignore any time exceeded errors due to fragment reassembly
 620                 * timeouts.
 621                 */
 622                if (ICMP_EXC_FRAGTIME == code)
 623                        goto out_unlock;
 624
 625                err = EHOSTUNREACH;
 626                break;
 627        case ICMP_REDIRECT:
 628                sctp_icmp_redirect(sk, transport, skb);
 629                /* Fall through to out_unlock. */
 630        default:
 631                goto out_unlock;
 632        }
 633
 634        inet = inet_sk(sk);
 635        if (!sock_owned_by_user(sk) && inet->recverr) {
 636                sk->sk_err = err;
 637                sk->sk_error_report(sk);
 638        } else {  /* Only an error on timeout */
 639                sk->sk_err_soft = err;
 640        }
 641
 642out_unlock:
 643        sctp_err_finish(sk, asoc);
 644}
 645
 646/*
 647 * RFC 2960, 8.4 - Handle "Out of the blue" Packets.
 648 *
 649 * This function scans all the chunks in the OOTB packet to determine if
 650 * the packet should be discarded right away.  If a response might be needed
 651 * for this packet, or, if further processing is possible, the packet will
 652 * for this packet, or if further processing is possible, the packet will
 653 *
 654 * Output:
 655 * Return 0 - If further processing is needed.
 656 * Return 1 - If the packet can be discarded right away.
 657 */
 658static int sctp_rcv_ootb(struct sk_buff *skb)
 659{
 660        sctp_chunkhdr_t *ch;
 661        __u8 *ch_end;
 662
 663        ch = (sctp_chunkhdr_t *) skb->data;
 664
 665        /* Scan through all the chunks in the packet.  */
 666        do {
 667                /* Break out if the chunk length is less than the minimum. */
 668                if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
 669                        break;
 670
 671                ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
 672                if (ch_end > skb_tail_pointer(skb))
 673                        break;
 674
 675                /* RFC 2960, 8.4, 2) If the OOTB packet contains an ABORT chunk, the
 676                 * receiver MUST silently discard the OOTB packet and take no
 677                 * further action.
 678                 */
 679                if (SCTP_CID_ABORT == ch->type)
 680                        goto discard;
 681
 682                /* RFC 2960, 8.4, 6) If the packet contains a SHUTDOWN COMPLETE
 683                 * chunk, the receiver should silently discard the packet
 684                 * and take no further action.
 685                 */
 686                if (SCTP_CID_SHUTDOWN_COMPLETE == ch->type)
 687                        goto discard;
 688
 689                /* RFC 4460, 2.11.2
 690                 * This will discard packets with INIT chunk bundled as
 691                 * subsequent chunks in the packet.  When INIT is first,
 692                 * the normal INIT processing will discard the chunk.
 693                 */
 694                if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data)
 695                        goto discard;
 696
 697                ch = (sctp_chunkhdr_t *) ch_end;
 698        } while (ch_end < skb_tail_pointer(skb));
 699
 700        return 0;
 701
 702discard:
 703        return 1;
 704}
 705
 706/* Insert endpoint into the hash table.  */
 707static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
 708{
 709        struct net *net = sock_net(ep->base.sk);
 710        struct sctp_ep_common *epb;
 711        struct sctp_hashbucket *head;
 712
 713        epb = &ep->base;
 714
 715        epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
 716        head = &sctp_ep_hashtable[epb->hashent];
 717
 718        write_lock(&head->lock);
 719        hlist_add_head(&epb->node, &head->chain);
 720        write_unlock(&head->lock);
 721}
 722
 723/* Add an endpoint to the hash. Local BH-safe. */
 724void sctp_hash_endpoint(struct sctp_endpoint *ep)
 725{
 726        local_bh_disable();
 727        __sctp_hash_endpoint(ep);
 728        local_bh_enable();
 729}
 730
 731/* Remove endpoint from the hash table.  */
 732static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
 733{
 734        struct net *net = sock_net(ep->base.sk);
 735        struct sctp_hashbucket *head;
 736        struct sctp_ep_common *epb;
 737
 738        epb = &ep->base;
 739
 740        epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
 741
 742        head = &sctp_ep_hashtable[epb->hashent];
 743
 744        write_lock(&head->lock);
 745        hlist_del_init(&epb->node);
 746        write_unlock(&head->lock);
 747}
 748
 749/* Remove endpoint from the hash.  Local BH-safe. */
 750void sctp_unhash_endpoint(struct sctp_endpoint *ep)
 751{
 752        local_bh_disable();
 753        __sctp_unhash_endpoint(ep);
 754        local_bh_enable();
 755}
 756
 757/* Look up an endpoint. */
 758static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net,
 759                                                const union sctp_addr *laddr)
 760{
 761        struct sctp_hashbucket *head;
 762        struct sctp_ep_common *epb;
 763        struct sctp_endpoint *ep;
 764        int hash;
 765
 766        hash = sctp_ep_hashfn(net, ntohs(laddr->v4.sin_port));
 767        head = &sctp_ep_hashtable[hash];
 768        read_lock(&head->lock);
 769        sctp_for_each_hentry(epb, &head->chain) {
 770                ep = sctp_ep(epb);
 771                if (sctp_endpoint_is_match(ep, net, laddr))
 772                        goto hit;
 773        }
 774
 775        ep = sctp_sk(net->sctp.ctl_sock)->ep;
 776
 777hit:
 778        sctp_endpoint_hold(ep);
 779        read_unlock(&head->lock);
 780        return ep;
 781}
 782
 783/* rhashtable for transport */
 784struct sctp_hash_cmp_arg {
 785        const struct sctp_endpoint      *ep;
 786        const union sctp_addr           *laddr;
 787        const union sctp_addr           *paddr;
 788        const struct net                *net;
 789};
 790
 791static inline int sctp_hash_cmp(struct rhashtable_compare_arg *arg,
 792                                const void *ptr)
 793{
 794        const struct sctp_hash_cmp_arg *x = arg->key;
 795        const struct sctp_transport *t = ptr;
 796        struct sctp_association *asoc = t->asoc;
 797        const struct net *net = x->net;
 798
 799        if (!sctp_cmp_addr_exact(&t->ipaddr, x->paddr))
 800                return 1;
 801        if (!net_eq(sock_net(asoc->base.sk), net))
 802                return 1;
 803        if (x->ep) {
 804                if (x->ep != asoc->ep)
 805                        return 1;
 806        } else {
 807                if (x->laddr->v4.sin_port != htons(asoc->base.bind_addr.port))
 808                        return 1;
 809                if (!sctp_bind_addr_match(&asoc->base.bind_addr,
 810                                          x->laddr, sctp_sk(asoc->base.sk)))
 811                        return 1;
 812        }
 813
 814        return 0;
 815}
 816
 817static inline u32 sctp_hash_obj(const void *data, u32 len, u32 seed)
 818{
 819        const struct sctp_transport *t = data;
 820        const union sctp_addr *paddr = &t->ipaddr;
 821        const struct net *net = sock_net(t->asoc->base.sk);
 822        u16 lport = htons(t->asoc->base.bind_addr.port);
 823        u32 addr;
 824
 825        if (paddr->sa.sa_family == AF_INET6)
 826                addr = jhash(&paddr->v6.sin6_addr, 16, seed);
 827        else
 828                addr = paddr->v4.sin_addr.s_addr;
 829
 830        return  jhash_3words(addr, ((__u32)paddr->v4.sin_port) << 16 |
 831                             (__force __u32)lport, net_hash_mix(net), seed);
 832}
 833
 834static inline u32 sctp_hash_key(const void *data, u32 len, u32 seed)
 835{
 836        const struct sctp_hash_cmp_arg *x = data;
 837        const union sctp_addr *paddr = x->paddr;
 838        const struct net *net = x->net;
 839        u16 lport;
 840        u32 addr;
 841
 842        lport = x->ep ? htons(x->ep->base.bind_addr.port) :
 843                        x->laddr->v4.sin_port;
 844        if (paddr->sa.sa_family == AF_INET6)
 845                addr = jhash(&paddr->v6.sin6_addr, 16, seed);
 846        else
 847                addr = paddr->v4.sin_addr.s_addr;
 848
 849        return  jhash_3words(addr, ((__u32)paddr->v4.sin_port) << 16 |
 850                             (__force __u32)lport, net_hash_mix(net), seed);
 851}
 852
 853static const struct rhashtable_params sctp_hash_params = {
 854        .head_offset            = offsetof(struct sctp_transport, node),
 855        .hashfn                 = sctp_hash_key,
 856        .obj_hashfn             = sctp_hash_obj,
 857        .obj_cmpfn              = sctp_hash_cmp,
 858        .automatic_shrinking    = true,
 859};
 860
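/* Illustrative note: rhashtable requires that hashing a lookup key through
 * .hashfn and hashing a stored object through .obj_hashfn agree whenever
 * .obj_cmpfn would report a match; that is why sctp_hash_key() and
 * sctp_hash_obj() above reduce to the same jhash_3words() over the peer
 * address, the port pair and the netns mix.  A hedged, hypothetical helper
 * spelling the property out (the len argument is unused by both hashers):
 */
static inline bool sctp_transport_hashes_agree(const struct sctp_transport *t,
                                               const struct sctp_hash_cmp_arg *key,
                                               u32 seed)
{
        return sctp_hash_obj(t, 0, seed) == sctp_hash_key(key, 0, seed);
}
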
 861int sctp_transport_hashtable_init(void)
 862{
 863        return rhashtable_init(&sctp_transport_hashtable, &sctp_hash_params);
 864}
 865
 866void sctp_transport_hashtable_destroy(void)
 867{
 868        rhashtable_destroy(&sctp_transport_hashtable);
 869}
 870
 871void sctp_hash_transport(struct sctp_transport *t)
 872{
 873        struct sctp_hash_cmp_arg arg;
 874
 875        if (t->asoc->temp)
 876                return;
 877
 878        arg.ep = t->asoc->ep;
 879        arg.paddr = &t->ipaddr;
 880        arg.net   = sock_net(t->asoc->base.sk);
 881
 882reinsert:
 883        if (rhashtable_lookup_insert_key(&sctp_transport_hashtable, &arg,
 884                                         &t->node, sctp_hash_params) == -EBUSY)
 885                goto reinsert;
 886}
 887
 888void sctp_unhash_transport(struct sctp_transport *t)
 889{
 890        if (t->asoc->temp)
 891                return;
 892
 893        rhashtable_remove_fast(&sctp_transport_hashtable, &t->node,
 894                               sctp_hash_params);
 895}
 896
 897struct sctp_transport *sctp_addrs_lookup_transport(
 898                                struct net *net,
 899                                const union sctp_addr *laddr,
 900                                const union sctp_addr *paddr)
 901{
 902        struct sctp_hash_cmp_arg arg = {
 903                .ep    = NULL,
 904                .laddr = laddr,
 905                .paddr = paddr,
 906                .net   = net,
 907        };
 908
 909        return rhashtable_lookup_fast(&sctp_transport_hashtable, &arg,
 910                                      sctp_hash_params);
 911}
 912
 913struct sctp_transport *sctp_epaddr_lookup_transport(
 914                                const struct sctp_endpoint *ep,
 915                                const union sctp_addr *paddr)
 916{
 917        struct net *net = sock_net(ep->base.sk);
 918        struct sctp_hash_cmp_arg arg = {
 919                .ep    = ep,
 920                .paddr = paddr,
 921                .net   = net,
 922        };
 923
 924        return rhashtable_lookup_fast(&sctp_transport_hashtable, &arg,
 925                                      sctp_hash_params);
 926}
 927
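/* Hedged usage sketch (the helper name is hypothetical): a transport returned
 * by the lookups above is only RCU-protected, so a caller is expected to wrap
 * the call in rcu_read_lock()/rcu_read_unlock() and take its own reference
 * before leaving the read-side section, mirroring the pattern of
 * sctp_lookup_association()/__sctp_lookup_association() below.
 */
static inline struct sctp_transport *
sctp_example_get_transport(const struct sctp_endpoint *ep,
                           const union sctp_addr *paddr)
{
        struct sctp_transport *t;

        rcu_read_lock();
        t = sctp_epaddr_lookup_transport(ep, paddr);
        if (t && !sctp_transport_hold(t))
                t = NULL;       /* lost a race with transport teardown */
        rcu_read_unlock();

        return t;       /* caller drops the reference with sctp_transport_put() */
}
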
 928/* Look up an association. */
 929static struct sctp_association *__sctp_lookup_association(
 930                                        struct net *net,
 931                                        const union sctp_addr *local,
 932                                        const union sctp_addr *peer,
 933                                        struct sctp_transport **pt)
 934{
 935        struct sctp_transport *t;
 936        struct sctp_association *asoc = NULL;
 937
 938        t = sctp_addrs_lookup_transport(net, local, peer);
 939        if (!t || !sctp_transport_hold(t))
 940                goto out;
 941
 942        asoc = t->asoc;
 943        sctp_association_hold(asoc);
 944        *pt = t;
 945
 946        sctp_transport_put(t);
 947
 948out:
 949        return asoc;
 950}
 951
 952/* Look up an association, protected by the RCU read lock. */
 953static
 954struct sctp_association *sctp_lookup_association(struct net *net,
 955                                                 const union sctp_addr *laddr,
 956                                                 const union sctp_addr *paddr,
 957                                                 struct sctp_transport **transportp)
 958{
 959        struct sctp_association *asoc;
 960
 961        rcu_read_lock();
 962        asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
 963        rcu_read_unlock();
 964
 965        return asoc;
 966}
 967
 968/* Is there an association matching the given local and peer addresses? */
 969int sctp_has_association(struct net *net,
 970                         const union sctp_addr *laddr,
 971                         const union sctp_addr *paddr)
 972{
 973        struct sctp_association *asoc;
 974        struct sctp_transport *transport;
 975
 976        if ((asoc = sctp_lookup_association(net, laddr, paddr, &transport))) {
 977                sctp_association_put(asoc);
 978                return 1;
 979        }
 980
 981        return 0;
 982}
 983
 984/*
 985 * SCTP Implementer's Guide, 2.18 Handling of address
 986 * parameters within the INIT or INIT-ACK.
 987 *
 988 * D) When searching for a matching TCB upon reception of an INIT
 989 *    or INIT-ACK chunk the receiver SHOULD use not only the
 990 *    source address of the packet (containing the INIT or
 991 *    INIT-ACK) but the receiver SHOULD also use all valid
 992 *    address parameters contained within the chunk.
 993 *
 994 * 2.18.3 Solution description
 995 *
 996 * This new text clearly specifies to an implementor the need
 997 * to look within the INIT or INIT-ACK. Any implementation that
 998 * does not do this, may not be able to establish associations
 999 * in certain circumstances.
1000 *
1001 */
1002static struct sctp_association *__sctp_rcv_init_lookup(struct net *net,
1003        struct sk_buff *skb,
1004        const union sctp_addr *laddr, struct sctp_transport **transportp)
1005{
1006        struct sctp_association *asoc;
1007        union sctp_addr addr;
1008        union sctp_addr *paddr = &addr;
1009        struct sctphdr *sh = sctp_hdr(skb);
1010        union sctp_params params;
1011        sctp_init_chunk_t *init;
1012        struct sctp_transport *transport;
1013        struct sctp_af *af;
1014
1015        /*
1016         * This code will NOT touch anything inside the chunk--it is
1017         * strictly READ-ONLY.
1018         *
1019         * RFC 2960 3  SCTP packet Format
1020         *
1021         * Multiple chunks can be bundled into one SCTP packet up to
1022         * the MTU size, except for the INIT, INIT ACK, and SHUTDOWN
1023         * COMPLETE chunks.  These chunks MUST NOT be bundled with any
1024         * other chunk in a packet.  See Section 6.10 for more details
1025         * on chunk bundling.
1026         */
1027
1028        /* Find the start of the TLVs and the end of the chunk.  This is
1029         * the region we search for address parameters.
1030         */
1031        init = (sctp_init_chunk_t *)skb->data;
1032
1033        /* Walk the parameters looking for embedded addresses. */
1034        sctp_walk_params(params, init, init_hdr.params) {
1035
1036                /* Note: Ignoring hostname addresses. */
1037                af = sctp_get_af_specific(param_type2af(params.p->type));
1038                if (!af)
1039                        continue;
1040
1041                af->from_addr_param(paddr, params.addr, sh->source, 0);
1042
1043                asoc = __sctp_lookup_association(net, laddr, paddr, &transport);
1044                if (asoc)
1045                        return asoc;
1046        }
1047
1048        return NULL;
1049}
1050
1051/* ADD-IP, Section 5.2
1052 * When an endpoint receives an ASCONF Chunk from the remote peer
1053 * special procedures may be needed to identify the association the
1054 * ASCONF Chunk is associated with. To properly find the association
1055 * the following procedures SHOULD be followed:
1056 *
1057 * D2) If the association is not found, use the address found in the
1058 * Address Parameter TLV combined with the port number found in the
1059 * SCTP common header. If found proceed to rule D4.
1060 *
1061 * D2-ext) If more than one ASCONF Chunks are packed together, use the
1062 * address found in the ASCONF Address Parameter TLV of each of the
1063 * subsequent ASCONF Chunks. If found, proceed to rule D4.
1064 */
1065static struct sctp_association *__sctp_rcv_asconf_lookup(
1066                                        struct net *net,
1067                                        sctp_chunkhdr_t *ch,
1068                                        const union sctp_addr *laddr,
1069                                        __be16 peer_port,
1070                                        struct sctp_transport **transportp)
1071{
1072        sctp_addip_chunk_t *asconf = (struct sctp_addip_chunk *)ch;
1073        struct sctp_af *af;
1074        union sctp_addr_param *param;
1075        union sctp_addr paddr;
1076
1077        /* Skip over the ADDIP header and find the Address parameter */
1078        param = (union sctp_addr_param *)(asconf + 1);
1079
1080        af = sctp_get_af_specific(param_type2af(param->p.type));
1081        if (unlikely(!af))
1082                return NULL;
1083
1084        af->from_addr_param(&paddr, param, peer_port, 0);
1085
1086        return __sctp_lookup_association(net, laddr, &paddr, transportp);
1087}
1088
1089
1090/* SCTP-AUTH, Section 6.3:
1091*    If the receiver does not find a STCB for a packet containing an AUTH
1092*    chunk as the first chunk and not a COOKIE-ECHO chunk as the second
1093*    chunk, it MUST use the chunks after the AUTH chunk to look up an existing
1094*    association.
1095*
1096* This means that any chunks that can help us identify the association need
1097* to be looked at to find this association.
1098*/
1099static struct sctp_association *__sctp_rcv_walk_lookup(struct net *net,
1100                                      struct sk_buff *skb,
1101                                      const union sctp_addr *laddr,
1102                                      struct sctp_transport **transportp)
1103{
1104        struct sctp_association *asoc = NULL;
1105        sctp_chunkhdr_t *ch;
1106        int have_auth = 0;
1107        unsigned int chunk_num = 1;
1108        __u8 *ch_end;
1109
1110        /* Walk through the chunks looking for AUTH or ASCONF chunks
1111         * to help us find the association.
1112         */
1113        ch = (sctp_chunkhdr_t *) skb->data;
1114        do {
1115                /* Break out if the chunk length is less than the minimum. */
1116                if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
1117                        break;
1118
1119                ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
1120                if (ch_end > skb_tail_pointer(skb))
1121                        break;
1122
1123                switch (ch->type) {
1124                case SCTP_CID_AUTH:
1125                        have_auth = chunk_num;
1126                        break;
1127
1128                case SCTP_CID_COOKIE_ECHO:
1129                        /* If a packet arrives containing an AUTH chunk as
1130                         * a first chunk, a COOKIE-ECHO chunk as the second
1131                         * chunk, and possibly more chunks after them, and
1132                         * the receiver does not have an STCB for that
1133                         * packet, then authentication is based on
1134                         * the contents of the COOKIE-ECHO chunk.
1135                         */
1136                        if (have_auth == 1 && chunk_num == 2)
1137                                return NULL;
1138                        break;
1139
1140                case SCTP_CID_ASCONF:
1141                        if (have_auth || net->sctp.addip_noauth)
1142                                asoc = __sctp_rcv_asconf_lookup(
1143                                                net, ch, laddr,
1144                                                sctp_hdr(skb)->source,
1145                                                transportp);
1146                default:
1147                        break;
1148                }
1149
1150                if (asoc)
1151                        break;
1152
1153                ch = (sctp_chunkhdr_t *) ch_end;
1154                chunk_num++;
1155        } while (ch_end < skb_tail_pointer(skb));
1156
1157        return asoc;
1158}
1159
1160/*
1161 * There are circumstances when we need to look inside the SCTP packet
1162 * for information to help us find the association.   Examples
1163 * include looking inside of INIT/INIT-ACK chunks or after the AUTH
1164 * chunks.
1165 */
1166static struct sctp_association *__sctp_rcv_lookup_harder(struct net *net,
1167                                      struct sk_buff *skb,
1168                                      const union sctp_addr *laddr,
1169                                      struct sctp_transport **transportp)
1170{
1171        sctp_chunkhdr_t *ch;
1172
1173        ch = (sctp_chunkhdr_t *) skb->data;
1174
1175        /* The code below will attempt to walk the chunk and extract
1176         * parameter information.  Before we do that, we need to verify
1177         * that the chunk length doesn't cause overflow.  Otherwise, we'll
1178         * walk off the end.
1179         */
1180        if (WORD_ROUND(ntohs(ch->length)) > skb->len)
1181                return NULL;
1182
1183        /* If this is INIT/INIT-ACK look inside the chunk too. */
1184        if (ch->type == SCTP_CID_INIT || ch->type == SCTP_CID_INIT_ACK)
1185                return __sctp_rcv_init_lookup(net, skb, laddr, transportp);
1186
1187        return __sctp_rcv_walk_lookup(net, skb, laddr, transportp);
1188}
1189
1190/* Lookup an association for an inbound skb. */
1191static struct sctp_association *__sctp_rcv_lookup(struct net *net,
1192                                      struct sk_buff *skb,
1193                                      const union sctp_addr *paddr,
1194                                      const union sctp_addr *laddr,
1195                                      struct sctp_transport **transportp)
1196{
1197        struct sctp_association *asoc;
1198
1199        asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
1200
1201        /* Further lookup for INIT/INIT-ACK packets.
1202         * SCTP Implementer's Guide, 2.18 Handling of address
1203         * parameters within the INIT or INIT-ACK.
1204         */
1205        if (!asoc)
1206                asoc = __sctp_rcv_lookup_harder(net, skb, laddr, transportp);
1207
1208        return asoc;
1209}
1210