linux/net/sctp/input.c
   1/* SCTP kernel implementation
   2 * Copyright (c) 1999-2000 Cisco, Inc.
   3 * Copyright (c) 1999-2001 Motorola, Inc.
   4 * Copyright (c) 2001-2003 International Business Machines, Corp.
   5 * Copyright (c) 2001 Intel Corp.
   6 * Copyright (c) 2001 Nokia, Inc.
   7 * Copyright (c) 2001 La Monte H.P. Yarroll
   8 *
   9 * This file is part of the SCTP kernel implementation
  10 *
  11 * These functions handle all input from the IP layer into SCTP.
  12 *
  13 * This SCTP implementation is free software;
  14 * you can redistribute it and/or modify it under the terms of
  15 * the GNU General Public License as published by
  16 * the Free Software Foundation; either version 2, or (at your option)
  17 * any later version.
  18 *
  19 * This SCTP implementation is distributed in the hope that it
  20 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
  21 *                 ************************
  22 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  23 * See the GNU General Public License for more details.
  24 *
  25 * You should have received a copy of the GNU General Public License
  26 * along with GNU CC; see the file COPYING.  If not, see
  27 * <http://www.gnu.org/licenses/>.
  28 *
  29 * Please send any bug reports or fixes you make to the
  30 * email address(es):
  31 *    lksctp developers <linux-sctp@vger.kernel.org>
  32 *
  33 * Written or modified by:
  34 *    La Monte H.P. Yarroll <piggy@acm.org>
  35 *    Karl Knutson <karl@athena.chicago.il.us>
  36 *    Xingang Guo <xingang.guo@intel.com>
  37 *    Jon Grimm <jgrimm@us.ibm.com>
  38 *    Hui Huang <hui.huang@nokia.com>
  39 *    Daisy Chang <daisyc@us.ibm.com>
  40 *    Sridhar Samudrala <sri@us.ibm.com>
  41 *    Ardelle Fan <ardelle.fan@intel.com>
  42 */
  43
  44#include <linux/types.h>
  45#include <linux/list.h> /* For struct list_head */
  46#include <linux/socket.h>
  47#include <linux/ip.h>
  48#include <linux/time.h> /* For struct timeval */
  49#include <linux/slab.h>
  50#include <net/ip.h>
  51#include <net/icmp.h>
  52#include <net/snmp.h>
  53#include <net/sock.h>
  54#include <net/xfrm.h>
  55#include <net/sctp/sctp.h>
  56#include <net/sctp/sm.h>
  57#include <net/sctp/checksum.h>
  58#include <net/net_namespace.h>
  59#include <linux/rhashtable.h>
  60#include <net/sock_reuseport.h>
  61
  62/* Forward declarations for internal helpers. */
  63static int sctp_rcv_ootb(struct sk_buff *);
  64static struct sctp_association *__sctp_rcv_lookup(struct net *net,
  65                                      struct sk_buff *skb,
  66                                      const union sctp_addr *paddr,
  67                                      const union sctp_addr *laddr,
  68                                      struct sctp_transport **transportp);
  69static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(
  70                                        struct net *net, struct sk_buff *skb,
  71                                        const union sctp_addr *laddr,
   72                                        const union sctp_addr *paddr);
  73static struct sctp_association *__sctp_lookup_association(
  74                                        struct net *net,
  75                                        const union sctp_addr *local,
  76                                        const union sctp_addr *peer,
  77                                        struct sctp_transport **pt);
  78
  79static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
  80
  81
   82/* Verify the checksum of an incoming SCTP packet.  */
  83static inline int sctp_rcv_checksum(struct net *net, struct sk_buff *skb)
  84{
  85        struct sctphdr *sh = sctp_hdr(skb);
  86        __le32 cmp = sh->checksum;
  87        __le32 val = sctp_compute_cksum(skb, 0);
  88
  89        if (val != cmp) {
  90                /* CRC failure, dump it. */
  91                __SCTP_INC_STATS(net, SCTP_MIB_CHECKSUMERRORS);
  92                return -1;
  93        }
  94        return 0;
  95}
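/* Illustrative sketch (not part of the kernel sources): assuming a linear
 * skb, the verification above is roughly equivalent to recomputing the
 * CRC-32c over the whole SCTP packet with the checksum field zeroed:
 *
 *	sh->checksum = 0;
 *	crc = ~crc32c(~0, (const void *)sh, skb->len);
 *	sh->checksum = cmp;
 *	return cpu_to_le32(crc) == cmp ? 0 : -1;
 *
 * sctp_compute_cksum() implements this for possibly non-linear skbs without
 * the caller having to touch the packet.
 */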
  96
  97/*
  98 * This is the routine which IP calls when receiving an SCTP packet.
  99 */
 100int sctp_rcv(struct sk_buff *skb)
 101{
 102        struct sock *sk;
 103        struct sctp_association *asoc;
 104        struct sctp_endpoint *ep = NULL;
 105        struct sctp_ep_common *rcvr;
 106        struct sctp_transport *transport = NULL;
 107        struct sctp_chunk *chunk;
 108        union sctp_addr src;
 109        union sctp_addr dest;
 110        int family;
 111        struct sctp_af *af;
 112        struct net *net = dev_net(skb->dev);
 113        bool is_gso = skb_is_gso(skb) && skb_is_gso_sctp(skb);
 114
 115        if (skb->pkt_type != PACKET_HOST)
 116                goto discard_it;
 117
 118        __SCTP_INC_STATS(net, SCTP_MIB_INSCTPPACKS);
 119
 120        /* If packet is too small to contain a single chunk, let's not
 121         * waste time on it anymore.
 122         */
 123        if (skb->len < sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr) +
 124                       skb_transport_offset(skb))
 125                goto discard_it;
 126
  127        /* If the packet is fragmented and we need to do CRC checking,
  128         * it's better to just linearize it; otherwise CRC computation
 129         * takes longer.
 130         */
 131        if ((!is_gso && skb_linearize(skb)) ||
 132            !pskb_may_pull(skb, sizeof(struct sctphdr)))
 133                goto discard_it;
 134
 135        /* Pull up the IP header. */
 136        __skb_pull(skb, skb_transport_offset(skb));
 137
 138        skb->csum_valid = 0; /* Previous value not applicable */
 139        if (skb_csum_unnecessary(skb))
 140                __skb_decr_checksum_unnecessary(skb);
 141        else if (!sctp_checksum_disable &&
 142                 !is_gso &&
 143                 sctp_rcv_checksum(net, skb) < 0)
 144                goto discard_it;
 145        skb->csum_valid = 1;
 146
 147        __skb_pull(skb, sizeof(struct sctphdr));
 148
 149        family = ipver2af(ip_hdr(skb)->version);
 150        af = sctp_get_af_specific(family);
 151        if (unlikely(!af))
 152                goto discard_it;
 153        SCTP_INPUT_CB(skb)->af = af;
 154
 155        /* Initialize local addresses for lookups. */
 156        af->from_skb(&src, skb, 1);
 157        af->from_skb(&dest, skb, 0);
 158
 159        /* If the packet is to or from a non-unicast address,
 160         * silently discard the packet.
 161         *
 162         * This is not clearly defined in the RFC except in section
 163         * 8.4 - OOTB handling.  However, based on the book "Stream Control
 164         * Transmission Protocol" 2.1, "It is important to note that the
 165         * IP address of an SCTP transport address must be a routable
 166         * unicast address.  In other words, IP multicast addresses and
 167         * IP broadcast addresses cannot be used in an SCTP transport
 168         * address."
 169         */
 170        if (!af->addr_valid(&src, NULL, skb) ||
 171            !af->addr_valid(&dest, NULL, skb))
 172                goto discard_it;
 173
 174        asoc = __sctp_rcv_lookup(net, skb, &src, &dest, &transport);
 175
 176        if (!asoc)
 177                ep = __sctp_rcv_lookup_endpoint(net, skb, &dest, &src);
 178
 179        /* Retrieve the common input handling substructure. */
 180        rcvr = asoc ? &asoc->base : &ep->base;
 181        sk = rcvr->sk;
 182
 183        /*
 184         * If a frame arrives on an interface and the receiving socket is
 185         * bound to another interface, via SO_BINDTODEVICE, treat it as OOTB
 186         */
 187        if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) {
 188                if (transport) {
 189                        sctp_transport_put(transport);
 190                        asoc = NULL;
 191                        transport = NULL;
 192                } else {
 193                        sctp_endpoint_put(ep);
 194                        ep = NULL;
 195                }
 196                sk = net->sctp.ctl_sock;
 197                ep = sctp_sk(sk)->ep;
 198                sctp_endpoint_hold(ep);
 199                rcvr = &ep->base;
 200        }
 201
 202        /*
 203         * RFC 2960, 8.4 - Handle "Out of the blue" Packets.
 204         * An SCTP packet is called an "out of the blue" (OOTB)
 205         * packet if it is correctly formed, i.e., passed the
 206         * receiver's checksum check, but the receiver is not
 207         * able to identify the association to which this
 208         * packet belongs.
 209         */
 210        if (!asoc) {
 211                if (sctp_rcv_ootb(skb)) {
 212                        __SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
 213                        goto discard_release;
 214                }
 215        }
 216
 217        if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family))
 218                goto discard_release;
 219        nf_reset(skb);
 220
 221        if (sk_filter(sk, skb))
 222                goto discard_release;
 223
 224        /* Create an SCTP packet structure. */
 225        chunk = sctp_chunkify(skb, asoc, sk, GFP_ATOMIC);
 226        if (!chunk)
 227                goto discard_release;
 228        SCTP_INPUT_CB(skb)->chunk = chunk;
 229
 230        /* Remember what endpoint is to handle this packet. */
 231        chunk->rcvr = rcvr;
 232
 233        /* Remember the SCTP header. */
 234        chunk->sctp_hdr = sctp_hdr(skb);
 235
 236        /* Set the source and destination addresses of the incoming chunk.  */
 237        sctp_init_addrs(chunk, &src, &dest);
 238
 239        /* Remember where we came from.  */
 240        chunk->transport = transport;
 241
 242        /* Acquire access to the sock lock. Note: We are safe from other
 243         * bottom halves on this lock, but a user may be in the lock too,
 244         * so check if it is busy.
 245         */
 246        bh_lock_sock(sk);
 247
 248        if (sk != rcvr->sk) {
 249                /* Our cached sk is different from the rcvr->sk.  This is
 250                 * because migrate()/accept() may have moved the association
 251                 * to a new socket and released all the sockets.  So now we
 252                 * are holding a lock on the old socket while the user may
  253                 * be doing something with the new socket.  Switch our view
 254                 * of the current sk.
 255                 */
 256                bh_unlock_sock(sk);
 257                sk = rcvr->sk;
 258                bh_lock_sock(sk);
 259        }
 260
 261        if (sock_owned_by_user(sk)) {
 262                if (sctp_add_backlog(sk, skb)) {
 263                        bh_unlock_sock(sk);
 264                        sctp_chunk_free(chunk);
 265                        skb = NULL; /* sctp_chunk_free already freed the skb */
 266                        goto discard_release;
 267                }
 268                __SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_BACKLOG);
 269        } else {
 270                __SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_SOFTIRQ);
 271                sctp_inq_push(&chunk->rcvr->inqueue, chunk);
 272        }
 273
 274        bh_unlock_sock(sk);
 275
 276        /* Release the asoc/ep ref we took in the lookup calls. */
 277        if (transport)
 278                sctp_transport_put(transport);
 279        else
 280                sctp_endpoint_put(ep);
 281
 282        return 0;
 283
 284discard_it:
 285        __SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_DISCARDS);
 286        kfree_skb(skb);
 287        return 0;
 288
 289discard_release:
 290        /* Release the asoc/ep ref we took in the lookup calls. */
 291        if (transport)
 292                sctp_transport_put(transport);
 293        else
 294                sctp_endpoint_put(ep);
 295
 296        goto discard_it;
 297}
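/* In outline, the tail of sctp_rcv() above makes the following delivery
 * decision (an illustrative summary, not extra code):
 *
 *	bh_lock_sock(sk);
 *	if (sock_owned_by_user(sk))
 *		sctp_add_backlog(sk, skb);	- replayed later by sctp_backlog_rcv()
 *	else
 *		sctp_inq_push(&rcvr->inqueue, chunk);
 *	bh_unlock_sock(sk);
 */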
 298
 299/* Process the backlog queue of the socket.  Every skb on
 300 * the backlog holds a ref on an association or endpoint.
 301 * We hold this ref throughout the state machine to make
 302 * sure that the structure we need is still around.
 303 */
 304int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 305{
 306        struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
 307        struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
 308        struct sctp_transport *t = chunk->transport;
 309        struct sctp_ep_common *rcvr = NULL;
 310        int backloged = 0;
 311
 312        rcvr = chunk->rcvr;
 313
 314        /* If the rcvr is dead then the association or endpoint
 315         * has been deleted and we can safely drop the chunk
 316         * and refs that we are holding.
 317         */
 318        if (rcvr->dead) {
 319                sctp_chunk_free(chunk);
 320                goto done;
 321        }
 322
 323        if (unlikely(rcvr->sk != sk)) {
 324                /* In this case, the association moved from one socket to
 325                 * another.  We are currently sitting on the backlog of the
 326                 * old socket, so we need to move.
 327                 * However, since we are here in the process context we
  328                 * need to make sure that the user doesn't own
 329                 * the new socket when we process the packet.
 330                 * If the new socket is user-owned, queue the chunk to the
 331                 * backlog of the new socket without dropping any refs.
 332                 * Otherwise, we can safely push the chunk on the inqueue.
 333                 */
 334
 335                sk = rcvr->sk;
 336                local_bh_disable();
 337                bh_lock_sock(sk);
 338
 339                if (sock_owned_by_user(sk)) {
 340                        if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
 341                                sctp_chunk_free(chunk);
 342                        else
 343                                backloged = 1;
 344                } else
 345                        sctp_inq_push(inqueue, chunk);
 346
 347                bh_unlock_sock(sk);
 348                local_bh_enable();
 349
  350                /* If the chunk was backlogged again, don't drop refs */
 351                if (backloged)
 352                        return 0;
 353        } else {
 354                sctp_inq_push(inqueue, chunk);
 355        }
 356
 357done:
 358        /* Release the refs we took in sctp_add_backlog */
 359        if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
 360                sctp_transport_put(t);
 361        else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
 362                sctp_endpoint_put(sctp_ep(rcvr));
 363        else
 364                BUG();
 365
 366        return 0;
 367}
 368
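/* Queue the chunk's skb on the socket backlog.  On success a reference is
 * taken on the association's transport (or on the endpoint when no
 * association was found) so the receiver structure cannot disappear before
 * sctp_backlog_rcv() processes the chunk and drops that reference.
 */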
 369static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
 370{
 371        struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
 372        struct sctp_transport *t = chunk->transport;
 373        struct sctp_ep_common *rcvr = chunk->rcvr;
 374        int ret;
 375
 376        ret = sk_add_backlog(sk, skb, sk->sk_rcvbuf);
 377        if (!ret) {
 378                /* Hold the assoc/ep while hanging on the backlog queue.
 379                 * This way, we know structures we need will not disappear
  380                 * out from under us.
 381                 */
 382                if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
 383                        sctp_transport_hold(t);
 384                else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
 385                        sctp_endpoint_hold(sctp_ep(rcvr));
 386                else
 387                        BUG();
 388        }
 389        return ret;
 390
 391}
 392
 393/* Handle icmp frag needed error. */
 394void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
 395                           struct sctp_transport *t, __u32 pmtu)
 396{
 397        if (!t || (t->pathmtu <= pmtu))
 398                return;
 399
 400        if (sock_owned_by_user(sk)) {
 401                atomic_set(&t->mtu_info, pmtu);
 402                asoc->pmtu_pending = 1;
 403                t->pmtu_pending = 1;
 404                return;
 405        }
 406
 407        if (!(t->param_flags & SPP_PMTUD_ENABLE))
  408                /* We can't allow retransmitting in such a case, as the
 409                 * retransmission would be sized just as before, and thus we
 410                 * would get another icmp, and retransmit again.
 411                 */
 412                return;
 413
  414        /* Update the transport's view of the MTU. Return if no update was needed.
 415         * If an update wasn't needed/possible, it also doesn't make sense to
 416         * try to retransmit now.
 417         */
 418        if (!sctp_transport_update_pmtu(t, pmtu))
 419                return;
 420
 421        /* Update association pmtu. */
 422        sctp_assoc_sync_pmtu(asoc);
 423
 424        /* Retransmit with the new pmtu setting. */
 425        sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
 426}
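/* Worked example (illustrative): a router on the path reports a next-hop MTU
 * of 1400 while t->pathmtu is 1500.  Provided PMTU discovery is enabled and
 * the socket is not owned by the user, the transport's path MTU is lowered,
 * the association's MTU is re-synced, and outstanding DATA is retransmitted
 * sized to the new, smaller MTU.
 */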
 427
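/* Handle an ICMP redirect: if a transport is known and the socket is not
 * currently owned by the user, let the routing layer update the cached
 * destination entry for that transport.
 */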
 428void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t,
 429                        struct sk_buff *skb)
 430{
 431        struct dst_entry *dst;
 432
 433        if (sock_owned_by_user(sk) || !t)
 434                return;
 435        dst = sctp_transport_dst_check(t);
 436        if (dst)
 437                dst->ops->redirect(dst, sk, skb);
 438}
 439
 440/*
 441 * SCTP Implementer's Guide, 2.37 ICMP handling procedures
 442 *
  443 * ICMP8) If the ICMP code is an "Unrecognized next header type encountered"
  444 *        or a "Protocol Unreachable", treat this message as an abort
 445 *        with the T bit set.
 446 *
 447 * This function sends an event to the state machine, which will abort the
 448 * association.
 449 *
 450 */
 451void sctp_icmp_proto_unreachable(struct sock *sk,
 452                           struct sctp_association *asoc,
 453                           struct sctp_transport *t)
 454{
 455        if (sock_owned_by_user(sk)) {
 456                if (timer_pending(&t->proto_unreach_timer))
 457                        return;
 458                else {
 459                        if (!mod_timer(&t->proto_unreach_timer,
 460                                                jiffies + (HZ/20)))
 461                                sctp_association_hold(asoc);
 462                }
 463        } else {
 464                struct net *net = sock_net(sk);
 465
 466                pr_debug("%s: unrecognized next header type "
 467                         "encountered!\n", __func__);
 468
 469                if (del_timer(&t->proto_unreach_timer))
 470                        sctp_association_put(asoc);
 471
 472                sctp_do_sm(net, SCTP_EVENT_T_OTHER,
 473                           SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
 474                           asoc->state, asoc->ep, asoc, t,
 475                           GFP_ATOMIC);
 476        }
 477}
 478
 479/* Common lookup code for icmp/icmpv6 error handler. */
 480struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
 481                             struct sctphdr *sctphdr,
 482                             struct sctp_association **app,
 483                             struct sctp_transport **tpp)
 484{
 485        struct sctp_init_chunk *chunkhdr, _chunkhdr;
 486        union sctp_addr saddr;
 487        union sctp_addr daddr;
 488        struct sctp_af *af;
 489        struct sock *sk = NULL;
 490        struct sctp_association *asoc;
 491        struct sctp_transport *transport = NULL;
 492        __u32 vtag = ntohl(sctphdr->vtag);
 493
 494        *app = NULL; *tpp = NULL;
 495
 496        af = sctp_get_af_specific(family);
 497        if (unlikely(!af)) {
 498                return NULL;
 499        }
 500
 501        /* Initialize local addresses for lookups. */
 502        af->from_skb(&saddr, skb, 1);
 503        af->from_skb(&daddr, skb, 0);
 504
 505        /* Look for an association that matches the incoming ICMP error
 506         * packet.
 507         */
 508        asoc = __sctp_lookup_association(net, &saddr, &daddr, &transport);
 509        if (!asoc)
 510                return NULL;
 511
 512        sk = asoc->base.sk;
 513
 514        /* RFC 4960, Appendix C. ICMP Handling
 515         *
 516         * ICMP6) An implementation MUST validate that the Verification Tag
 517         * contained in the ICMP message matches the Verification Tag of
 518         * the peer.  If the Verification Tag is not 0 and does NOT
 519         * match, discard the ICMP message.  If it is 0 and the ICMP
 520         * message contains enough bytes to verify that the chunk type is
 521         * an INIT chunk and that the Initiate Tag matches the tag of the
 522         * peer, continue with ICMP7.  If the ICMP message is too short
 523         * or the chunk type or the Initiate Tag does not match, silently
 524         * discard the packet.
 525         */
 526        if (vtag == 0) {
  527                /* chunk header + first 4 octets of the init header */
 528                chunkhdr = skb_header_pointer(skb, skb_transport_offset(skb) +
 529                                              sizeof(struct sctphdr),
 530                                              sizeof(struct sctp_chunkhdr) +
 531                                              sizeof(__be32), &_chunkhdr);
 532                if (!chunkhdr ||
 533                    chunkhdr->chunk_hdr.type != SCTP_CID_INIT ||
 534                    ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag)
 535                        goto out;
 536
 537        } else if (vtag != asoc->c.peer_vtag) {
 538                goto out;
 539        }
 540
 541        bh_lock_sock(sk);
 542
 543        /* If too many ICMPs get dropped on busy
 544         * servers this needs to be solved differently.
 545         */
 546        if (sock_owned_by_user(sk))
 547                __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 548
 549        *app = asoc;
 550        *tpp = transport;
 551        return sk;
 552
 553out:
 554        sctp_transport_put(transport);
 555        return NULL;
 556}
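/* Typical usage (illustrative; it mirrors sctp_v4_err() below): the lookup
 * returns with the socket bh-locked and a transport reference held, both of
 * which are released by sctp_err_finish():
 *
 *	sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &t);
 *	if (!sk)
 *		return;
 *	... handle the ICMP error ...
 *	sctp_err_finish(sk, t);
 */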
 557
 558/* Common cleanup code for icmp/icmpv6 error handler. */
 559void sctp_err_finish(struct sock *sk, struct sctp_transport *t)
 560{
 561        bh_unlock_sock(sk);
 562        sctp_transport_put(t);
 563}
 564
 565/*
 566 * This routine is called by the ICMP module when it gets some
 567 * sort of error condition.  If err < 0 then the socket should
 568 * be closed and the error returned to the user.  If err > 0
 569 * it's just the icmp type << 8 | icmp code.  After adjustment
 570 * header points to the first 8 bytes of the sctp header.  We need
 571 * to find the appropriate port.
 572 *
 573 * The locking strategy used here is very "optimistic". When
 574 * someone else accesses the socket the ICMP is just dropped
 575 * and for some paths there is no check at all.
 576 * A more general error queue to queue errors for later handling
 577 * is probably better.
 578 *
 579 */
 580int sctp_v4_err(struct sk_buff *skb, __u32 info)
 581{
 582        const struct iphdr *iph = (const struct iphdr *)skb->data;
 583        const int ihlen = iph->ihl * 4;
 584        const int type = icmp_hdr(skb)->type;
 585        const int code = icmp_hdr(skb)->code;
 586        struct sock *sk;
 587        struct sctp_association *asoc = NULL;
 588        struct sctp_transport *transport;
 589        struct inet_sock *inet;
 590        __u16 saveip, savesctp;
 591        int err;
 592        struct net *net = dev_net(skb->dev);
 593
 594        /* Fix up skb to look at the embedded net header. */
 595        saveip = skb->network_header;
 596        savesctp = skb->transport_header;
 597        skb_reset_network_header(skb);
 598        skb_set_transport_header(skb, ihlen);
 599        sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &transport);
  600        /* Put back the original values. */
 601        skb->network_header = saveip;
 602        skb->transport_header = savesctp;
 603        if (!sk) {
 604                __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
 605                return -ENOENT;
 606        }
 607        /* Warning:  The sock lock is held.  Remember to call
 608         * sctp_err_finish!
 609         */
 610
 611        switch (type) {
 612        case ICMP_PARAMETERPROB:
 613                err = EPROTO;
 614                break;
 615        case ICMP_DEST_UNREACH:
 616                if (code > NR_ICMP_UNREACH)
 617                        goto out_unlock;
 618
 619                /* PMTU discovery (RFC1191) */
 620                if (ICMP_FRAG_NEEDED == code) {
 621                        sctp_icmp_frag_needed(sk, asoc, transport,
 622                                              SCTP_TRUNC4(info));
 623                        goto out_unlock;
 624                } else {
 625                        if (ICMP_PROT_UNREACH == code) {
 626                                sctp_icmp_proto_unreachable(sk, asoc,
 627                                                            transport);
 628                                goto out_unlock;
 629                        }
 630                }
 631                err = icmp_err_convert[code].errno;
 632                break;
 633        case ICMP_TIME_EXCEEDED:
 634                /* Ignore any time exceeded errors due to fragment reassembly
 635                 * timeouts.
 636                 */
 637                if (ICMP_EXC_FRAGTIME == code)
 638                        goto out_unlock;
 639
 640                err = EHOSTUNREACH;
 641                break;
 642        case ICMP_REDIRECT:
 643                sctp_icmp_redirect(sk, transport, skb);
 644                /* Fall through to out_unlock. */
 645        default:
 646                goto out_unlock;
 647        }
 648
 649        inet = inet_sk(sk);
 650        if (!sock_owned_by_user(sk) && inet->recverr) {
 651                sk->sk_err = err;
 652                sk->sk_error_report(sk);
 653        } else {  /* Only an error on timeout */
 654                sk->sk_err_soft = err;
 655        }
 656
 657out_unlock:
 658        sctp_err_finish(sk, transport);
 659        return 0;
 660}
 661
 662/*
 663 * RFC 2960, 8.4 - Handle "Out of the blue" Packets.
 664 *
 665 * This function scans all the chunks in the OOTB packet to determine if
 666 * the packet should be discarded right away.  If a response might be needed
 667 * for this packet, or, if further processing is possible, the packet will
 668 * be queued to a proper inqueue for the next phase of handling.
 669 *
 670 * Output:
 671 * Return 0 - If further processing is needed.
 672 * Return 1 - If the packet can be discarded right away.
 673 */
 674static int sctp_rcv_ootb(struct sk_buff *skb)
 675{
 676        struct sctp_chunkhdr *ch, _ch;
 677        int ch_end, offset = 0;
 678
 679        /* Scan through all the chunks in the packet.  */
 680        do {
 681                /* Make sure we have at least the header there */
 682                if (offset + sizeof(_ch) > skb->len)
 683                        break;
 684
 685                ch = skb_header_pointer(skb, offset, sizeof(*ch), &_ch);
 686
  687                /* Break out if the chunk length is less than the minimum. */
 688                if (ntohs(ch->length) < sizeof(_ch))
 689                        break;
 690
 691                ch_end = offset + SCTP_PAD4(ntohs(ch->length));
 692                if (ch_end > skb->len)
 693                        break;
 694
  695                /* RFC 2960 8.4, 2) If the OOTB packet contains an ABORT chunk, the
 696                 * receiver MUST silently discard the OOTB packet and take no
 697                 * further action.
 698                 */
 699                if (SCTP_CID_ABORT == ch->type)
 700                        goto discard;
 701
  702                /* RFC 2960 8.4, 6) If the packet contains a SHUTDOWN COMPLETE
 703                 * chunk, the receiver should silently discard the packet
 704                 * and take no further action.
 705                 */
 706                if (SCTP_CID_SHUTDOWN_COMPLETE == ch->type)
 707                        goto discard;
 708
 709                /* RFC 4460, 2.11.2
 710                 * This will discard packets with INIT chunk bundled as
 711                 * subsequent chunks in the packet.  When INIT is first,
 712                 * the normal INIT processing will discard the chunk.
 713                 */
 714                if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data)
 715                        goto discard;
 716
 717                offset = ch_end;
 718        } while (ch_end < skb->len);
 719
 720        return 0;
 721
 722discard:
 723        return 1;
 724}
 725
 726/* Insert endpoint into the hash table.  */
 727static int __sctp_hash_endpoint(struct sctp_endpoint *ep)
 728{
 729        struct sock *sk = ep->base.sk;
 730        struct net *net = sock_net(sk);
 731        struct sctp_hashbucket *head;
 732        struct sctp_ep_common *epb;
 733
 734        epb = &ep->base;
 735        epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
 736        head = &sctp_ep_hashtable[epb->hashent];
 737
 738        if (sk->sk_reuseport) {
 739                bool any = sctp_is_ep_boundall(sk);
 740                struct sctp_ep_common *epb2;
 741                struct list_head *list;
 742                int cnt = 0, err = 1;
 743
 744                list_for_each(list, &ep->base.bind_addr.address_list)
 745                        cnt++;
 746
 747                sctp_for_each_hentry(epb2, &head->chain) {
 748                        struct sock *sk2 = epb2->sk;
 749
 750                        if (!net_eq(sock_net(sk2), net) || sk2 == sk ||
 751                            !uid_eq(sock_i_uid(sk2), sock_i_uid(sk)) ||
 752                            !sk2->sk_reuseport)
 753                                continue;
 754
 755                        err = sctp_bind_addrs_check(sctp_sk(sk2),
 756                                                    sctp_sk(sk), cnt);
 757                        if (!err) {
 758                                err = reuseport_add_sock(sk, sk2, any);
 759                                if (err)
 760                                        return err;
 761                                break;
 762                        } else if (err < 0) {
 763                                return err;
 764                        }
 765                }
 766
 767                if (err) {
 768                        err = reuseport_alloc(sk, any);
 769                        if (err)
 770                                return err;
 771                }
 772        }
 773
 774        write_lock(&head->lock);
 775        hlist_add_head(&epb->node, &head->chain);
 776        write_unlock(&head->lock);
 777        return 0;
 778}
 779
 780/* Add an endpoint to the hash. Local BH-safe. */
 781int sctp_hash_endpoint(struct sctp_endpoint *ep)
 782{
 783        int err;
 784
 785        local_bh_disable();
 786        err = __sctp_hash_endpoint(ep);
 787        local_bh_enable();
 788
 789        return err;
 790}
 791
 792/* Remove endpoint from the hash table.  */
 793static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
 794{
 795        struct sock *sk = ep->base.sk;
 796        struct sctp_hashbucket *head;
 797        struct sctp_ep_common *epb;
 798
 799        epb = &ep->base;
 800
 801        epb->hashent = sctp_ep_hashfn(sock_net(sk), epb->bind_addr.port);
 802
 803        head = &sctp_ep_hashtable[epb->hashent];
 804
 805        if (rcu_access_pointer(sk->sk_reuseport_cb))
 806                reuseport_detach_sock(sk);
 807
 808        write_lock(&head->lock);
 809        hlist_del_init(&epb->node);
 810        write_unlock(&head->lock);
 811}
 812
 813/* Remove endpoint from the hash.  Local BH-safe. */
 814void sctp_unhash_endpoint(struct sctp_endpoint *ep)
 815{
 816        local_bh_disable();
 817        __sctp_unhash_endpoint(ep);
 818        local_bh_enable();
 819}
 820
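/* Hash a (net, local port, peer address) triple.  The same function is used
 * for reuseport socket selection in __sctp_rcv_lookup_endpoint() and, via the
 * rhashtable callbacks below, for the global transport hash table.
 */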
 821static inline __u32 sctp_hashfn(const struct net *net, __be16 lport,
 822                                const union sctp_addr *paddr, __u32 seed)
 823{
 824        __u32 addr;
 825
 826        if (paddr->sa.sa_family == AF_INET6)
 827                addr = jhash(&paddr->v6.sin6_addr, 16, seed);
 828        else
 829                addr = (__force __u32)paddr->v4.sin_addr.s_addr;
 830
 831        return  jhash_3words(addr, ((__force __u32)paddr->v4.sin_port) << 16 |
 832                             (__force __u32)lport, net_hash_mix(net), seed);
 833}
 834
 835/* Look up an endpoint. */
 836static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(
 837                                        struct net *net, struct sk_buff *skb,
 838                                        const union sctp_addr *laddr,
 839                                        const union sctp_addr *paddr)
 840{
 841        struct sctp_hashbucket *head;
 842        struct sctp_ep_common *epb;
 843        struct sctp_endpoint *ep;
 844        struct sock *sk;
 845        __be16 lport;
 846        int hash;
 847
 848        lport = laddr->v4.sin_port;
 849        hash = sctp_ep_hashfn(net, ntohs(lport));
 850        head = &sctp_ep_hashtable[hash];
 851        read_lock(&head->lock);
 852        sctp_for_each_hentry(epb, &head->chain) {
 853                ep = sctp_ep(epb);
 854                if (sctp_endpoint_is_match(ep, net, laddr))
 855                        goto hit;
 856        }
 857
 858        ep = sctp_sk(net->sctp.ctl_sock)->ep;
 859
 860hit:
 861        sk = ep->base.sk;
 862        if (sk->sk_reuseport) {
 863                __u32 phash = sctp_hashfn(net, lport, paddr, 0);
 864
 865                sk = reuseport_select_sock(sk, phash, skb,
 866                                           sizeof(struct sctphdr));
 867                if (sk)
 868                        ep = sctp_sk(sk)->ep;
 869        }
 870        sctp_endpoint_hold(ep);
 871        read_unlock(&head->lock);
 872        return ep;
 873}
 874
 875/* rhashtable for transport */
 876struct sctp_hash_cmp_arg {
 877        const union sctp_addr   *paddr;
 878        const struct net        *net;
 879        __be16                  lport;
 880};
 881
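/* rhashtable compare callback: return 0 when the transport at @ptr matches
 * the (net, peer address, local port) key, non-zero otherwise.  A temporary
 * reference is taken before t->asoc is dereferenced so the association
 * cannot go away under us.
 */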
 882static inline int sctp_hash_cmp(struct rhashtable_compare_arg *arg,
 883                                const void *ptr)
 884{
 885        struct sctp_transport *t = (struct sctp_transport *)ptr;
 886        const struct sctp_hash_cmp_arg *x = arg->key;
 887        int err = 1;
 888
 889        if (!sctp_cmp_addr_exact(&t->ipaddr, x->paddr))
 890                return err;
 891        if (!sctp_transport_hold(t))
 892                return err;
 893
 894        if (!net_eq(sock_net(t->asoc->base.sk), x->net))
 895                goto out;
 896        if (x->lport != htons(t->asoc->base.bind_addr.port))
 897                goto out;
 898
 899        err = 0;
 900out:
 901        sctp_transport_put(t);
 902        return err;
 903}
 904
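/* rhashtable hash callbacks: an inserted transport object and a lookup key
 * must hash identically, so both go through sctp_hashfn() over the same
 * (net, local port, peer address) triple.
 */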
 905static inline __u32 sctp_hash_obj(const void *data, u32 len, u32 seed)
 906{
 907        const struct sctp_transport *t = data;
 908
 909        return sctp_hashfn(sock_net(t->asoc->base.sk),
 910                           htons(t->asoc->base.bind_addr.port),
 911                           &t->ipaddr, seed);
 912}
 913
 914static inline __u32 sctp_hash_key(const void *data, u32 len, u32 seed)
 915{
 916        const struct sctp_hash_cmp_arg *x = data;
 917
 918        return sctp_hashfn(x->net, x->lport, x->paddr, seed);
 919}
 920
 921static const struct rhashtable_params sctp_hash_params = {
 922        .head_offset            = offsetof(struct sctp_transport, node),
 923        .hashfn                 = sctp_hash_key,
 924        .obj_hashfn             = sctp_hash_obj,
 925        .obj_cmpfn              = sctp_hash_cmp,
 926        .automatic_shrinking    = true,
 927};
 928
 929int sctp_transport_hashtable_init(void)
 930{
 931        return rhltable_init(&sctp_transport_hashtable, &sctp_hash_params);
 932}
 933
 934void sctp_transport_hashtable_destroy(void)
 935{
 936        rhltable_destroy(&sctp_transport_hashtable);
 937}
 938
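/* Insert a transport into the global transport hash table.  Temporary
 * associations are never hashed.  Returns -EEXIST if the endpoint already
 * has a transport to this peer address, 0 on success.
 */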
 939int sctp_hash_transport(struct sctp_transport *t)
 940{
 941        struct sctp_transport *transport;
 942        struct rhlist_head *tmp, *list;
 943        struct sctp_hash_cmp_arg arg;
 944        int err;
 945
 946        if (t->asoc->temp)
 947                return 0;
 948
 949        arg.net   = sock_net(t->asoc->base.sk);
 950        arg.paddr = &t->ipaddr;
 951        arg.lport = htons(t->asoc->base.bind_addr.port);
 952
 953        rcu_read_lock();
 954        list = rhltable_lookup(&sctp_transport_hashtable, &arg,
 955                               sctp_hash_params);
 956
 957        rhl_for_each_entry_rcu(transport, tmp, list, node)
 958                if (transport->asoc->ep == t->asoc->ep) {
 959                        rcu_read_unlock();
 960                        return -EEXIST;
 961                }
 962        rcu_read_unlock();
 963
 964        err = rhltable_insert_key(&sctp_transport_hashtable, &arg,
 965                                  &t->node, sctp_hash_params);
 966        if (err)
 967                pr_err_once("insert transport fail, errno %d\n", err);
 968
 969        return err;
 970}
 971
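/* Remove a transport from the global transport hash table (a no-op for
 * temporary associations, which were never hashed).
 */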
 972void sctp_unhash_transport(struct sctp_transport *t)
 973{
 974        if (t->asoc->temp)
 975                return;
 976
 977        rhltable_remove(&sctp_transport_hashtable, &t->node,
 978                        sctp_hash_params);
 979}
 980
  981/* Return a transport with a reference held; the caller must put it. */
 982struct sctp_transport *sctp_addrs_lookup_transport(
 983                                struct net *net,
 984                                const union sctp_addr *laddr,
 985                                const union sctp_addr *paddr)
 986{
 987        struct rhlist_head *tmp, *list;
 988        struct sctp_transport *t;
 989        struct sctp_hash_cmp_arg arg = {
 990                .paddr = paddr,
 991                .net   = net,
 992                .lport = laddr->v4.sin_port,
 993        };
 994
 995        list = rhltable_lookup(&sctp_transport_hashtable, &arg,
 996                               sctp_hash_params);
 997
 998        rhl_for_each_entry_rcu(t, tmp, list, node) {
 999                if (!sctp_transport_hold(t))
1000                        continue;
1001
1002                if (sctp_bind_addr_match(&t->asoc->base.bind_addr,
1003                                         laddr, sctp_sk(t->asoc->base.sk)))
1004                        return t;
1005                sctp_transport_put(t);
1006        }
1007
1008        return NULL;
1009}
1010
 1011/* Return a transport without holding it, as it's only used under the sock lock. */
1012struct sctp_transport *sctp_epaddr_lookup_transport(
1013                                const struct sctp_endpoint *ep,
1014                                const union sctp_addr *paddr)
1015{
1016        struct net *net = sock_net(ep->base.sk);
1017        struct rhlist_head *tmp, *list;
1018        struct sctp_transport *t;
1019        struct sctp_hash_cmp_arg arg = {
1020                .paddr = paddr,
1021                .net   = net,
1022                .lport = htons(ep->base.bind_addr.port),
1023        };
1024
1025        list = rhltable_lookup(&sctp_transport_hashtable, &arg,
1026                               sctp_hash_params);
1027
1028        rhl_for_each_entry_rcu(t, tmp, list, node)
1029                if (ep == t->asoc->ep)
1030                        return t;
1031
1032        return NULL;
1033}
1034
1035/* Look up an association. */
1036static struct sctp_association *__sctp_lookup_association(
1037                                        struct net *net,
1038                                        const union sctp_addr *local,
1039                                        const union sctp_addr *peer,
1040                                        struct sctp_transport **pt)
1041{
1042        struct sctp_transport *t;
1043        struct sctp_association *asoc = NULL;
1044
1045        t = sctp_addrs_lookup_transport(net, local, peer);
1046        if (!t)
1047                goto out;
1048
1049        asoc = t->asoc;
1050        *pt = t;
1051
1052out:
1053        return asoc;
1054}
1055
 1056/* Look up an association, protected by the RCU read lock. */
1057static
1058struct sctp_association *sctp_lookup_association(struct net *net,
1059                                                 const union sctp_addr *laddr,
1060                                                 const union sctp_addr *paddr,
1061                                                 struct sctp_transport **transportp)
1062{
1063        struct sctp_association *asoc;
1064
1065        rcu_read_lock();
1066        asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
1067        rcu_read_unlock();
1068
1069        return asoc;
1070}
1071
1072/* Is there an association matching the given local and peer addresses? */
1073bool sctp_has_association(struct net *net,
1074                          const union sctp_addr *laddr,
1075                          const union sctp_addr *paddr)
1076{
1077        struct sctp_transport *transport;
1078
1079        if (sctp_lookup_association(net, laddr, paddr, &transport)) {
1080                sctp_transport_put(transport);
1081                return true;
1082        }
1083
1084        return false;
1085}
1086
1087/*
1088 * SCTP Implementors Guide, 2.18 Handling of address
1089 * parameters within the INIT or INIT-ACK.
1090 *
1091 * D) When searching for a matching TCB upon reception of an INIT
1092 *    or INIT-ACK chunk the receiver SHOULD use not only the
1093 *    source address of the packet (containing the INIT or
1094 *    INIT-ACK) but the receiver SHOULD also use all valid
1095 *    address parameters contained within the chunk.
1096 *
1097 * 2.18.3 Solution description
1098 *
1099 * This new text clearly specifies to an implementor the need
1100 * to look within the INIT or INIT-ACK. Any implementation that
1101 * does not do this, may not be able to establish associations
1102 * in certain circumstances.
1103 *
1104 */
1105static struct sctp_association *__sctp_rcv_init_lookup(struct net *net,
1106        struct sk_buff *skb,
1107        const union sctp_addr *laddr, struct sctp_transport **transportp)
1108{
1109        struct sctp_association *asoc;
1110        union sctp_addr addr;
1111        union sctp_addr *paddr = &addr;
1112        struct sctphdr *sh = sctp_hdr(skb);
1113        union sctp_params params;
1114        struct sctp_init_chunk *init;
1115        struct sctp_af *af;
1116
1117        /*
1118         * This code will NOT touch anything inside the chunk--it is
1119         * strictly READ-ONLY.
1120         *
1121         * RFC 2960 3  SCTP packet Format
1122         *
1123         * Multiple chunks can be bundled into one SCTP packet up to
1124         * the MTU size, except for the INIT, INIT ACK, and SHUTDOWN
1125         * COMPLETE chunks.  These chunks MUST NOT be bundled with any
1126         * other chunk in a packet.  See Section 6.10 for more details
1127         * on chunk bundling.
1128         */
1129
1130        /* Find the start of the TLVs and the end of the chunk.  This is
1131         * the region we search for address parameters.
1132         */
1133        init = (struct sctp_init_chunk *)skb->data;
1134
1135        /* Walk the parameters looking for embedded addresses. */
1136        sctp_walk_params(params, init, init_hdr.params) {
1137
1138                /* Note: Ignoring hostname addresses. */
1139                af = sctp_get_af_specific(param_type2af(params.p->type));
1140                if (!af)
1141                        continue;
1142
1143                af->from_addr_param(paddr, params.addr, sh->source, 0);
1144
1145                asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
1146                if (asoc)
1147                        return asoc;
1148        }
1149
1150        return NULL;
1151}
1152
1153/* ADD-IP, Section 5.2
1154 * When an endpoint receives an ASCONF Chunk from the remote peer
1155 * special procedures may be needed to identify the association the
1156 * ASCONF Chunk is associated with. To properly find the association
1157 * the following procedures SHOULD be followed:
1158 *
1159 * D2) If the association is not found, use the address found in the
1160 * Address Parameter TLV combined with the port number found in the
1161 * SCTP common header. If found proceed to rule D4.
1162 *
1163 * D2-ext) If more than one ASCONF Chunks are packed together, use the
1164 * address found in the ASCONF Address Parameter TLV of each of the
1165 * subsequent ASCONF Chunks. If found, proceed to rule D4.
1166 */
1167static struct sctp_association *__sctp_rcv_asconf_lookup(
1168                                        struct net *net,
1169                                        struct sctp_chunkhdr *ch,
1170                                        const union sctp_addr *laddr,
1171                                        __be16 peer_port,
1172                                        struct sctp_transport **transportp)
1173{
1174        struct sctp_addip_chunk *asconf = (struct sctp_addip_chunk *)ch;
1175        struct sctp_af *af;
1176        union sctp_addr_param *param;
1177        union sctp_addr paddr;
1178
1179        /* Skip over the ADDIP header and find the Address parameter */
1180        param = (union sctp_addr_param *)(asconf + 1);
1181
1182        af = sctp_get_af_specific(param_type2af(param->p.type));
1183        if (unlikely(!af))
1184                return NULL;
1185
1186        af->from_addr_param(&paddr, param, peer_port, 0);
1187
1188        return __sctp_lookup_association(net, laddr, &paddr, transportp);
1189}
1190
1191
1192/* SCTP-AUTH, Section 6.3:
 1193 *    If the receiver does not find a STCB for a packet containing an AUTH
 1194 *    chunk as the first chunk and not a COOKIE-ECHO chunk as the second
 1195 *    chunk, it MUST use the chunks after the AUTH chunk to look up an existing
 1196 *    association.
 1197 *
 1198 * This means that any chunks that can help us identify the association need
 1199 * to be looked at to find this association.
 1200 */
1201static struct sctp_association *__sctp_rcv_walk_lookup(struct net *net,
1202                                      struct sk_buff *skb,
1203                                      const union sctp_addr *laddr,
1204                                      struct sctp_transport **transportp)
1205{
1206        struct sctp_association *asoc = NULL;
1207        struct sctp_chunkhdr *ch;
1208        int have_auth = 0;
1209        unsigned int chunk_num = 1;
1210        __u8 *ch_end;
1211
1212        /* Walk through the chunks looking for AUTH or ASCONF chunks
1213         * to help us find the association.
1214         */
1215        ch = (struct sctp_chunkhdr *)skb->data;
1216        do {
 1217                /* Break out if the chunk length is less than the minimum. */
1218                if (ntohs(ch->length) < sizeof(*ch))
1219                        break;
1220
1221                ch_end = ((__u8 *)ch) + SCTP_PAD4(ntohs(ch->length));
1222                if (ch_end > skb_tail_pointer(skb))
1223                        break;
1224
1225                switch (ch->type) {
1226                case SCTP_CID_AUTH:
1227                        have_auth = chunk_num;
1228                        break;
1229
1230                case SCTP_CID_COOKIE_ECHO:
1231                        /* If a packet arrives containing an AUTH chunk as
1232                         * a first chunk, a COOKIE-ECHO chunk as the second
1233                         * chunk, and possibly more chunks after them, and
1234                         * the receiver does not have an STCB for that
1235                         * packet, then authentication is based on
 1236                         * the contents of the COOKIE-ECHO chunk.
1237                         */
1238                        if (have_auth == 1 && chunk_num == 2)
1239                                return NULL;
1240                        break;
1241
1242                case SCTP_CID_ASCONF:
1243                        if (have_auth || net->sctp.addip_noauth)
1244                                asoc = __sctp_rcv_asconf_lookup(
1245                                                net, ch, laddr,
1246                                                sctp_hdr(skb)->source,
1247                                                transportp);
1248                default:
1249                        break;
1250                }
1251
1252                if (asoc)
1253                        break;
1254
1255                ch = (struct sctp_chunkhdr *)ch_end;
1256                chunk_num++;
1257        } while (ch_end < skb_tail_pointer(skb));
1258
1259        return asoc;
1260}
1261
1262/*
1263 * There are circumstances when we need to look inside the SCTP packet
1264 * for information to help us find the association.   Examples
1265 * include looking inside of INIT/INIT-ACK chunks or after the AUTH
1266 * chunks.
1267 */
1268static struct sctp_association *__sctp_rcv_lookup_harder(struct net *net,
1269                                      struct sk_buff *skb,
1270                                      const union sctp_addr *laddr,
1271                                      struct sctp_transport **transportp)
1272{
1273        struct sctp_chunkhdr *ch;
1274
1275        /* We do not allow GSO frames here as we need to linearize and
1276         * then cannot guarantee frame boundaries. This shouldn't be an
1277         * issue as packets hitting this are mostly INIT or INIT-ACK and
 1278         * those cannot arrive in GSO-style frames anyway.
1279         */
1280        if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
1281                return NULL;
1282
1283        ch = (struct sctp_chunkhdr *)skb->data;
1284
1285        /* The code below will attempt to walk the chunk and extract
1286         * parameter information.  Before we do that, we need to verify
1287         * that the chunk length doesn't cause overflow.  Otherwise, we'll
1288         * walk off the end.
1289         */
1290        if (SCTP_PAD4(ntohs(ch->length)) > skb->len)
1291                return NULL;
1292
1293        /* If this is INIT/INIT-ACK look inside the chunk too. */
1294        if (ch->type == SCTP_CID_INIT || ch->type == SCTP_CID_INIT_ACK)
1295                return __sctp_rcv_init_lookup(net, skb, laddr, transportp);
1296
1297        return __sctp_rcv_walk_lookup(net, skb, laddr, transportp);
1298}
1299
 1300/* Look up an association for an inbound skb. */
1301static struct sctp_association *__sctp_rcv_lookup(struct net *net,
1302                                      struct sk_buff *skb,
1303                                      const union sctp_addr *paddr,
1304                                      const union sctp_addr *laddr,
1305                                      struct sctp_transport **transportp)
1306{
1307        struct sctp_association *asoc;
1308
1309        asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
1310        if (asoc)
1311                goto out;
1312
1313        /* Further lookup for INIT/INIT-ACK packets.
1314         * SCTP Implementors Guide, 2.18 Handling of address
1315         * parameters within the INIT or INIT-ACK.
1316         */
1317        asoc = __sctp_rcv_lookup_harder(net, skb, laddr, transportp);
1318        if (asoc)
1319                goto out;
1320
1321        if (paddr->sa.sa_family == AF_INET)
1322                pr_debug("sctp: asoc not found for src:%pI4:%d dst:%pI4:%d\n",
1323                         &laddr->v4.sin_addr, ntohs(laddr->v4.sin_port),
1324                         &paddr->v4.sin_addr, ntohs(paddr->v4.sin_port));
1325        else
1326                pr_debug("sctp: asoc not found for src:%pI6:%d dst:%pI6:%d\n",
1327                         &laddr->v6.sin6_addr, ntohs(laddr->v6.sin6_port),
1328                         &paddr->v6.sin6_addr, ntohs(paddr->v6.sin6_port));
1329
1330out:
1331        return asoc;
1332}
1333