linux/net/sctp/associola.c
   1/* SCTP kernel implementation
   2 * (C) Copyright IBM Corp. 2001, 2004
   3 * Copyright (c) 1999-2000 Cisco, Inc.
   4 * Copyright (c) 1999-2001 Motorola, Inc.
   5 * Copyright (c) 2001 Intel Corp.
   6 * Copyright (c) 2001 La Monte H.P. Yarroll
   7 *
   8 * This file is part of the SCTP kernel implementation
   9 *
  10 * This module provides the abstraction for an SCTP association.
  11 *
  12 * This SCTP implementation is free software;
  13 * you can redistribute it and/or modify it under the terms of
  14 * the GNU General Public License as published by
  15 * the Free Software Foundation; either version 2, or (at your option)
  16 * any later version.
  17 *
  18 * This SCTP implementation is distributed in the hope that it
  19 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
  20 *                 ************************
  21 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  22 * See the GNU General Public License for more details.
  23 *
  24 * You should have received a copy of the GNU General Public License
  25 * along with GNU CC; see the file COPYING.  If not, write to
  26 * the Free Software Foundation, 59 Temple Place - Suite 330,
  27 * Boston, MA 02111-1307, USA.
  28 *
  29 * Please send any bug reports or fixes you make to the
  30 * email address(es):
  31 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
  32 *
  33 * Or submit a bug report through the following website:
  34 *    http://www.sf.net/projects/lksctp
  35 *
  36 * Written or modified by:
  37 *    La Monte H.P. Yarroll <piggy@acm.org>
  38 *    Karl Knutson          <karl@athena.chicago.il.us>
  39 *    Jon Grimm             <jgrimm@us.ibm.com>
  40 *    Xingang Guo           <xingang.guo@intel.com>
  41 *    Hui Huang             <hui.huang@nokia.com>
  42 *    Sridhar Samudrala     <sri@us.ibm.com>
  43 *    Daisy Chang           <daisyc@us.ibm.com>
  44 *    Ryan Layer            <rmlayer@us.ibm.com>
  45 *    Kevin Gao             <kevin.gao@intel.com>
  46 *
   47 * We will try to fix any bugs reported to us... any fixes shared will
  48 * be incorporated into the next SCTP release.
  49 */
  50
  51#include <linux/types.h>
  52#include <linux/fcntl.h>
  53#include <linux/poll.h>
  54#include <linux/init.h>
  55
  56#include <linux/slab.h>
  57#include <linux/in.h>
  58#include <net/ipv6.h>
  59#include <net/sctp/sctp.h>
  60#include <net/sctp/sm.h>
  61
  62/* Forward declarations for internal functions. */
  63static void sctp_assoc_bh_rcv(struct work_struct *work);
  64static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
  65
  66
  67/* 1st Level Abstractions. */
  68
  69/* Initialize a new association from provided memory. */
  70static struct sctp_association *sctp_association_init(struct sctp_association *asoc,
  71                                          const struct sctp_endpoint *ep,
  72                                          const struct sock *sk,
  73                                          sctp_scope_t scope,
  74                                          gfp_t gfp)
  75{
  76        struct sctp_sock *sp;
  77        int i;
  78        sctp_paramhdr_t *p;
  79        int err;
  80
  81        /* Retrieve the SCTP per socket area.  */
  82        sp = sctp_sk((struct sock *)sk);
  83
  84        /* Init all variables to a known value.  */
  85        memset(asoc, 0, sizeof(struct sctp_association));
  86
  87        /* Discarding const is appropriate here.  */
  88        asoc->ep = (struct sctp_endpoint *)ep;
  89        sctp_endpoint_hold(asoc->ep);
  90
  91        /* Hold the sock.  */
  92        asoc->base.sk = (struct sock *)sk;
  93        sock_hold(asoc->base.sk);
  94
  95        /* Initialize the common base substructure.  */
  96        asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;
  97
  98        /* Initialize the object handling fields.  */
  99        atomic_set(&asoc->base.refcnt, 1);
 100        asoc->base.dead = 0;
 101        asoc->base.malloced = 0;
 102
 103        /* Initialize the bind addr area.  */
 104        sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);
 105
 106        asoc->state = SCTP_STATE_CLOSED;
 107
  108        /* Set these values from the socket values; a conversion from
  109         * milliseconds to seconds/microseconds must also be done.
 110         */
 111        asoc->cookie_life.tv_sec = sp->assocparams.sasoc_cookie_life / 1000;
 112        asoc->cookie_life.tv_usec = (sp->assocparams.sasoc_cookie_life % 1000)
 113                                        * 1000;
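        /* Worked example (illustrative, not from the original source): with
         * sasoc_cookie_life = 60500 ms the split above yields
         *
         *      cookie_life.tv_sec  = 60500 / 1000          = 60
         *      cookie_life.tv_usec = (60500 % 1000) * 1000 = 500000
         */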
 114        asoc->frag_point = 0;
 115        asoc->user_frag = sp->user_frag;
 116
 117        /* Set the association max_retrans and RTO values from the
 118         * socket values.
 119         */
 120        asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
 121        asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
 122        asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
 123        asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);
 124
 125        asoc->overall_error_count = 0;
 126
 127        /* Initialize the association's heartbeat interval based on the
 128         * sock configured value.
 129         */
 130        asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);
 131
 132        /* Initialize path max retrans value. */
 133        asoc->pathmaxrxt = sp->pathmaxrxt;
 134
 135        /* Initialize default path MTU. */
 136        asoc->pathmtu = sp->pathmtu;
 137
 138        /* Set association default SACK delay */
 139        asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
 140        asoc->sackfreq = sp->sackfreq;
 141
 142        /* Set the association default flags controlling
 143         * Heartbeat, SACK delay, and Path MTU Discovery.
 144         */
 145        asoc->param_flags = sp->param_flags;
 146
  147        /* Initialize the maximum number of new data packets that can be sent
 148         * in a burst.
 149         */
 150        asoc->max_burst = sp->max_burst;
 151
 152        /* initialize association timers */
 153        asoc->timeouts[SCTP_EVENT_TIMEOUT_NONE] = 0;
 154        asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
 155        asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
 156        asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;
 157        asoc->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] = 0;
 158        asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = 0;
 159
 160        /* sctpimpguide Section 2.12.2
 161         * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
 162         * recommended value of 5 times 'RTO.Max'.
 163         */
 164        asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
 165                = 5 * asoc->rto_max;
 166
 167        asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
 168        asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
 169        asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
 170                sp->autoclose * HZ;
 171
  172        /* Initialize the timers */
 173        for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
 174                setup_timer(&asoc->timers[i], sctp_timer_events[i],
 175                                (unsigned long)asoc);
 176
 177        /* Pull default initialization values from the sock options.
 178         * Note: This assumes that the values have already been
 179         * validated in the sock.
 180         */
 181        asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
 182        asoc->c.sinit_num_ostreams  = sp->initmsg.sinit_num_ostreams;
 183        asoc->max_init_attempts = sp->initmsg.sinit_max_attempts;
 184
 185        asoc->max_init_timeo =
 186                 msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);
 187
 188        /* Allocate storage for the ssnmap after the inbound and outbound
 189         * streams have been negotiated during Init.
 190         */
 191        asoc->ssnmap = NULL;
 192
 193        /* Set the local window size for receive.
 194         * This is also the rcvbuf space per association.
  195         * RFC 2960 - A SCTP receiver MUST be able to receive a minimum of
 196         * 1500 bytes in one SCTP packet.
 197         */
 198        if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
 199                asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
 200        else
 201                asoc->rwnd = sk->sk_rcvbuf/2;
 202
 203        asoc->a_rwnd = asoc->rwnd;
 204
 205        asoc->rwnd_over = 0;
 206        asoc->rwnd_press = 0;
 207
 208        /* Use my own max window until I learn something better.  */
 209        asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;
 210
 211        /* Set the sndbuf size for transmit.  */
 212        asoc->sndbuf_used = 0;
 213
 214        /* Initialize the receive memory counter */
 215        atomic_set(&asoc->rmem_alloc, 0);
 216
 217        init_waitqueue_head(&asoc->wait);
 218
 219        asoc->c.my_vtag = sctp_generate_tag(ep);
 220        asoc->peer.i.init_tag = 0;     /* INIT needs a vtag of 0. */
 221        asoc->c.peer_vtag = 0;
 222        asoc->c.my_ttag   = 0;
 223        asoc->c.peer_ttag = 0;
 224        asoc->c.my_port = ep->base.bind_addr.port;
 225
 226        asoc->c.initial_tsn = sctp_generate_tsn(ep);
 227
 228        asoc->next_tsn = asoc->c.initial_tsn;
 229
 230        asoc->ctsn_ack_point = asoc->next_tsn - 1;
 231        asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
 232        asoc->highest_sacked = asoc->ctsn_ack_point;
 233        asoc->last_cwr_tsn = asoc->ctsn_ack_point;
 234        asoc->unack_data = 0;
 235
 236        /* ADDIP Section 4.1 Asconf Chunk Procedures
 237         *
 238         * When an endpoint has an ASCONF signaled change to be sent to the
 239         * remote endpoint it should do the following:
 240         * ...
 241         * A2) a serial number should be assigned to the chunk. The serial
 242         * number SHOULD be a monotonically increasing number. The serial
 243         * numbers SHOULD be initialized at the start of the
 244         * association to the same value as the initial TSN.
 245         */
 246        asoc->addip_serial = asoc->c.initial_tsn;
 247
 248        INIT_LIST_HEAD(&asoc->addip_chunk_list);
 249        INIT_LIST_HEAD(&asoc->asconf_ack_list);
 250
 251        /* Make an empty list of remote transport addresses.  */
 252        INIT_LIST_HEAD(&asoc->peer.transport_addr_list);
 253        asoc->peer.transport_count = 0;
 254
 255        /* RFC 2960 5.1 Normal Establishment of an Association
 256         *
 257         * After the reception of the first data chunk in an
 258         * association the endpoint must immediately respond with a
 259         * sack to acknowledge the data chunk.  Subsequent
 260         * acknowledgements should be done as described in Section
 261         * 6.2.
 262         *
 263         * [We implement this by telling a new association that it
 264         * already received one packet.]
 265         */
 266        asoc->peer.sack_needed = 1;
 267        asoc->peer.sack_cnt = 0;
 268
  269        /* Assume that the peer will tell us whether it recognizes ASCONF
  270         * as part of the INIT exchange.
  271         * The sctp_addip_noauth option is there for backward compatibility
  272         * and reverts to the old behavior.
 273         */
 274        asoc->peer.asconf_capable = 0;
 275        if (sctp_addip_noauth)
 276                asoc->peer.asconf_capable = 1;
 277
 278        /* Create an input queue.  */
 279        sctp_inq_init(&asoc->base.inqueue);
 280        sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);
 281
 282        /* Create an output queue.  */
 283        sctp_outq_init(asoc, &asoc->outqueue);
 284
 285        if (!sctp_ulpq_init(&asoc->ulpq, asoc))
 286                goto fail_init;
 287
 288        memset(&asoc->peer.tsn_map, 0, sizeof(struct sctp_tsnmap));
 289
 290        asoc->need_ecne = 0;
 291
 292        asoc->assoc_id = 0;
 293
  294        /* Assume that the peer supports both address types unless we are
 295         * told otherwise.
 296         */
 297        asoc->peer.ipv4_address = 1;
 298        if (asoc->base.sk->sk_family == PF_INET6)
 299                asoc->peer.ipv6_address = 1;
 300        INIT_LIST_HEAD(&asoc->asocs);
 301
 302        asoc->autoclose = sp->autoclose;
 303
 304        asoc->default_stream = sp->default_stream;
 305        asoc->default_ppid = sp->default_ppid;
 306        asoc->default_flags = sp->default_flags;
 307        asoc->default_context = sp->default_context;
 308        asoc->default_timetolive = sp->default_timetolive;
 309        asoc->default_rcv_context = sp->default_rcv_context;
 310
 311        /* AUTH related initializations */
 312        INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
 313        err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp);
 314        if (err)
 315                goto fail_init;
 316
 317        asoc->active_key_id = ep->active_key_id;
 318        asoc->asoc_shared_key = NULL;
 319
 320        asoc->default_hmac_id = 0;
 321        /* Save the hmacs and chunks list into this association */
 322        if (ep->auth_hmacs_list)
 323                memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
 324                        ntohs(ep->auth_hmacs_list->param_hdr.length));
 325        if (ep->auth_chunk_list)
 326                memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
 327                        ntohs(ep->auth_chunk_list->param_hdr.length));
 328
 329        /* Get the AUTH random number for this association */
 330        p = (sctp_paramhdr_t *)asoc->c.auth_random;
 331        p->type = SCTP_PARAM_RANDOM;
 332        p->length = htons(sizeof(sctp_paramhdr_t) + SCTP_AUTH_RANDOM_LENGTH);
 333        get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);
 334
 335        return asoc;
 336
 337fail_init:
 338        sctp_endpoint_put(asoc->ep);
 339        sock_put(asoc->base.sk);
 340        return NULL;
 341}
 342
 343/* Allocate and initialize a new association */
 344struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
 345                                         const struct sock *sk,
 346                                         sctp_scope_t scope,
 347                                         gfp_t gfp)
 348{
 349        struct sctp_association *asoc;
 350
 351        asoc = t_new(struct sctp_association, gfp);
 352        if (!asoc)
 353                goto fail;
 354
 355        if (!sctp_association_init(asoc, ep, sk, scope, gfp))
 356                goto fail_init;
 357
 358        asoc->base.malloced = 1;
 359        SCTP_DBG_OBJCNT_INC(assoc);
 360        SCTP_DEBUG_PRINTK("Created asoc %p\n", asoc);
 361
 362        return asoc;
 363
 364fail_init:
 365        kfree(asoc);
 366fail:
 367        return NULL;
 368}
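/* Illustrative usage sketch (not part of the original file): how a caller
 * that already holds a valid endpoint 'ep' and socket 'sk' might create and
 * later release an association.  Error handling is reduced to the minimum.
 *
 *      struct sctp_association *asoc;
 *
 *      asoc = sctp_association_new(ep, sk, SCTP_SCOPE_GLOBAL, GFP_KERNEL);
 *      if (!asoc)
 *              return -ENOMEM;
 *
 *      ... use the association ...
 *
 *      sctp_association_free(asoc);
 */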
 369
 370/* Free this association if possible.  There may still be users, so
 371 * the actual deallocation may be delayed.
 372 */
 373void sctp_association_free(struct sctp_association *asoc)
 374{
 375        struct sock *sk = asoc->base.sk;
 376        struct sctp_transport *transport;
 377        struct list_head *pos, *temp;
 378        int i;
 379
 380        /* Only real associations count against the endpoint, so
  381         * don't bother if this is a temporary association.
 382         */
 383        if (!asoc->temp) {
 384                list_del(&asoc->asocs);
 385
 386                /* Decrement the backlog value for a TCP-style listening
 387                 * socket.
 388                 */
 389                if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
 390                        sk->sk_ack_backlog--;
 391        }
 392
 393        /* Mark as dead, so other users can know this structure is
 394         * going away.
 395         */
 396        asoc->base.dead = 1;
 397
 398        /* Dispose of any data lying around in the outqueue. */
 399        sctp_outq_free(&asoc->outqueue);
 400
 401        /* Dispose of any pending messages for the upper layer. */
 402        sctp_ulpq_free(&asoc->ulpq);
 403
 404        /* Dispose of any pending chunks on the inqueue. */
 405        sctp_inq_free(&asoc->base.inqueue);
 406
 407        sctp_tsnmap_free(&asoc->peer.tsn_map);
 408
 409        /* Free ssnmap storage. */
 410        sctp_ssnmap_free(asoc->ssnmap);
 411
 412        /* Clean up the bound address list. */
 413        sctp_bind_addr_free(&asoc->base.bind_addr);
 414
  415        /* Do we need to go through all of our timers and
  416         * delete them?  To be safe we try to delete them all, though we
  417         * could probably guess which ones are pending based
  418         * on our state.
 419         */
 420        for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
 421                if (timer_pending(&asoc->timers[i]) &&
 422                    del_timer(&asoc->timers[i]))
 423                        sctp_association_put(asoc);
 424        }
 425
 426        /* Free peer's cached cookie. */
 427        kfree(asoc->peer.cookie);
 428        kfree(asoc->peer.peer_random);
 429        kfree(asoc->peer.peer_chunks);
 430        kfree(asoc->peer.peer_hmacs);
 431
 432        /* Release the transport structures. */
 433        list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
 434                transport = list_entry(pos, struct sctp_transport, transports);
 435                list_del(pos);
 436                sctp_transport_free(transport);
 437        }
 438
 439        asoc->peer.transport_count = 0;
 440
 441        /* Free any cached ASCONF_ACK chunk. */
 442        sctp_assoc_free_asconf_acks(asoc);
 443
 444        /* Free any cached ASCONF chunk. */
 445        if (asoc->addip_last_asconf)
 446                sctp_chunk_free(asoc->addip_last_asconf);
 447
 448        /* AUTH - Free the endpoint shared keys */
 449        sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
 450
 451        /* AUTH - Free the association shared key */
 452        sctp_auth_key_put(asoc->asoc_shared_key);
 453
 454        sctp_association_put(asoc);
 455}
 456
 457/* Cleanup and free up an association. */
 458static void sctp_association_destroy(struct sctp_association *asoc)
 459{
 460        SCTP_ASSERT(asoc->base.dead, "Assoc is not dead", return);
 461
 462        sctp_endpoint_put(asoc->ep);
 463        sock_put(asoc->base.sk);
 464
 465        if (asoc->assoc_id != 0) {
 466                spin_lock_bh(&sctp_assocs_id_lock);
 467                idr_remove(&sctp_assocs_id, asoc->assoc_id);
 468                spin_unlock_bh(&sctp_assocs_id_lock);
 469        }
 470
 471        WARN_ON(atomic_read(&asoc->rmem_alloc));
 472
 473        if (asoc->base.malloced) {
 474                kfree(asoc);
 475                SCTP_DBG_OBJCNT_DEC(assoc);
 476        }
 477}
 478
 479/* Change the primary destination address for the peer. */
 480void sctp_assoc_set_primary(struct sctp_association *asoc,
 481                            struct sctp_transport *transport)
 482{
 483        int changeover = 0;
 484
 485        /* it's a changeover only if we already have a primary path
 486         * that we are changing
 487         */
 488        if (asoc->peer.primary_path != NULL &&
 489            asoc->peer.primary_path != transport)
  490                changeover = 1;
 491
 492        asoc->peer.primary_path = transport;
 493
 494        /* Set a default msg_name for events. */
 495        memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
 496               sizeof(union sctp_addr));
 497
 498        /* If the primary path is changing, assume that the
 499         * user wants to use this new path.
 500         */
 501        if ((transport->state == SCTP_ACTIVE) ||
 502            (transport->state == SCTP_UNKNOWN))
 503                asoc->peer.active_path = transport;
 504
 505        /*
 506         * SFR-CACC algorithm:
 507         * Upon the receipt of a request to change the primary
 508         * destination address, on the data structure for the new
 509         * primary destination, the sender MUST do the following:
 510         *
 511         * 1) If CHANGEOVER_ACTIVE is set, then there was a switch
 512         * to this destination address earlier. The sender MUST set
 513         * CYCLING_CHANGEOVER to indicate that this switch is a
 514         * double switch to the same destination address.
 515         */
 516        if (transport->cacc.changeover_active)
 517                transport->cacc.cycling_changeover = changeover;
 518
 519        /* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
 520         * a changeover has occurred.
 521         */
 522        transport->cacc.changeover_active = changeover;
 523
 524        /* 3) The sender MUST store the next TSN to be sent in
 525         * next_tsn_at_change.
 526         */
 527        transport->cacc.next_tsn_at_change = asoc->next_tsn;
 528}
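/* Illustrative scenario (a sketch, not from the original source): with two
 * transports t1 and t2, where t1 is the current primary path.
 *
 *      sctp_assoc_set_primary(asoc, t2);   -> t2->cacc.changeover_active = 1
 *      sctp_assoc_set_primary(asoc, t1);   -> t1->cacc.changeover_active = 1
 *      sctp_assoc_set_primary(asoc, t2);   -> second switch to t2, so
 *                                             t2->cacc.cycling_changeover = 1 as well
 */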
 529
 530/* Remove a transport from an association.  */
 531void sctp_assoc_rm_peer(struct sctp_association *asoc,
 532                        struct sctp_transport *peer)
 533{
 534        struct list_head        *pos;
 535        struct sctp_transport   *transport;
 536
 537        SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_rm_peer:association %p addr: ",
 538                                 " port: %d\n",
 539                                 asoc,
 540                                 (&peer->ipaddr),
 541                                 ntohs(peer->ipaddr.v4.sin_port));
 542
 543        /* If we are to remove the current retran_path, update it
 544         * to the next peer before removing this peer from the list.
 545         */
 546        if (asoc->peer.retran_path == peer)
 547                sctp_assoc_update_retran_path(asoc);
 548
 549        /* Remove this peer from the list. */
 550        list_del(&peer->transports);
 551
 552        /* Get the first transport of asoc. */
 553        pos = asoc->peer.transport_addr_list.next;
 554        transport = list_entry(pos, struct sctp_transport, transports);
 555
 556        /* Update any entries that match the peer to be deleted. */
 557        if (asoc->peer.primary_path == peer)
 558                sctp_assoc_set_primary(asoc, transport);
 559        if (asoc->peer.active_path == peer)
 560                asoc->peer.active_path = transport;
 561        if (asoc->peer.last_data_from == peer)
 562                asoc->peer.last_data_from = transport;
 563
 564        /* If we remove the transport an INIT was last sent to, set it to
 565         * NULL. Combined with the update of the retran path above, this
 566         * will cause the next INIT to be sent to the next available
 567         * transport, maintaining the cycle.
 568         */
 569        if (asoc->init_last_sent_to == peer)
 570                asoc->init_last_sent_to = NULL;
 571
 572        /* If we remove the transport an SHUTDOWN was last sent to, set it
 573         * to NULL. Combined with the update of the retran path above, this
 574         * will cause the next SHUTDOWN to be sent to the next available
 575         * transport, maintaining the cycle.
 576         */
 577        if (asoc->shutdown_last_sent_to == peer)
 578                asoc->shutdown_last_sent_to = NULL;
 579
 580        /* If we remove the transport an ASCONF was last sent to, set it to
 581         * NULL.
 582         */
 583        if (asoc->addip_last_asconf &&
 584            asoc->addip_last_asconf->transport == peer)
 585                asoc->addip_last_asconf->transport = NULL;
 586
 587        /* If we have something on the transmitted list, we have to
 588         * save it off.  The best place is the active path.
 589         */
 590        if (!list_empty(&peer->transmitted)) {
 591                struct sctp_transport *active = asoc->peer.active_path;
 592                struct sctp_chunk *ch;
 593
 594                /* Reset the transport of each chunk on this list */
 595                list_for_each_entry(ch, &peer->transmitted,
 596                                        transmitted_list) {
 597                        ch->transport = NULL;
 598                        ch->rtt_in_progress = 0;
 599                }
 600
 601                list_splice_tail_init(&peer->transmitted,
 602                                        &active->transmitted);
 603
 604                /* Start a T3 timer here in case it wasn't running so
 605                 * that these migrated packets have a chance to get
  606                 * retransmitted.
 607                 */
 608                if (!timer_pending(&active->T3_rtx_timer))
 609                        if (!mod_timer(&active->T3_rtx_timer,
 610                                        jiffies + active->rto))
 611                                sctp_transport_hold(active);
 612        }
 613
 614        asoc->peer.transport_count--;
 615
 616        sctp_transport_free(peer);
 617}
 618
 619/* Add a transport address to an association.  */
 620struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
 621                                           const union sctp_addr *addr,
 622                                           const gfp_t gfp,
 623                                           const int peer_state)
 624{
 625        struct sctp_transport *peer;
 626        struct sctp_sock *sp;
 627        unsigned short port;
 628
 629        sp = sctp_sk(asoc->base.sk);
 630
 631        /* AF_INET and AF_INET6 share common port field. */
 632        port = ntohs(addr->v4.sin_port);
 633
 634        SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ",
 635                                 " port: %d state:%d\n",
 636                                 asoc,
 637                                 addr,
 638                                 port,
 639                                 peer_state);
 640
 641        /* Set the port if it has not been set yet.  */
 642        if (0 == asoc->peer.port)
 643                asoc->peer.port = port;
 644
 645        /* Check to see if this is a duplicate. */
 646        peer = sctp_assoc_lookup_paddr(asoc, addr);
 647        if (peer) {
 648                /* An UNKNOWN state is only set on transports added by
 649                 * user in sctp_connectx() call.  Such transports should be
 650                 * considered CONFIRMED per RFC 4960, Section 5.4.
 651                 */
 652                if (peer->state == SCTP_UNKNOWN) {
 653                        peer->state = SCTP_ACTIVE;
 654                }
 655                return peer;
 656        }
 657
 658        peer = sctp_transport_new(addr, gfp);
 659        if (!peer)
 660                return NULL;
 661
 662        sctp_transport_set_owner(peer, asoc);
 663
 664        /* Initialize the peer's heartbeat interval based on the
 665         * association configured value.
 666         */
 667        peer->hbinterval = asoc->hbinterval;
 668
 669        /* Set the path max_retrans.  */
 670        peer->pathmaxrxt = asoc->pathmaxrxt;
 671
 672        /* Initialize the peer's SACK delay timeout based on the
 673         * association configured value.
 674         */
 675        peer->sackdelay = asoc->sackdelay;
 676        peer->sackfreq = asoc->sackfreq;
 677
 678        /* Enable/disable heartbeat, SACK delay, and path MTU discovery
 679         * based on association setting.
 680         */
 681        peer->param_flags = asoc->param_flags;
 682
 683        sctp_transport_route(peer, NULL, sp);
 684
 685        /* Initialize the pmtu of the transport. */
 686        if (peer->param_flags & SPP_PMTUD_DISABLE) {
 687                if (asoc->pathmtu)
 688                        peer->pathmtu = asoc->pathmtu;
 689                else
 690                        peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
 691        }
 692
 693        /* If this is the first transport addr on this association,
 694         * initialize the association PMTU to the peer's PMTU.
 695         * If not and the current association PMTU is higher than the new
 696         * peer's PMTU, reset the association PMTU to the new peer's PMTU.
 697         */
 698        if (asoc->pathmtu)
 699                asoc->pathmtu = min_t(int, peer->pathmtu, asoc->pathmtu);
 700        else
 701                asoc->pathmtu = peer->pathmtu;
 702
 703        SCTP_DEBUG_PRINTK("sctp_assoc_add_peer:association %p PMTU set to "
 704                          "%d\n", asoc, asoc->pathmtu);
 705        peer->pmtu_pending = 0;
 706
 707        asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
 708
 709        /* The asoc->peer.port might not be meaningful yet, but
 710         * initialize the packet structure anyway.
 711         */
 712        sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
 713                         asoc->peer.port);
 714
 715        /* 7.2.1 Slow-Start
 716         *
 717         * o The initial cwnd before DATA transmission or after a sufficiently
 718         *   long idle period MUST be set to
 719         *      min(4*MTU, max(2*MTU, 4380 bytes))
 720         *
 721         * o The initial value of ssthresh MAY be arbitrarily high
 722         *   (for example, implementations MAY use the size of the
 723         *   receiver advertised window).
 724         */
 725        peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
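        /* Worked example (illustrative): with asoc->pathmtu = 1500 the line
         * above gives cwnd = min(4 * 1500, max(2 * 1500, 4380))
         *                  = min(6000, 4380) = 4380 bytes.
         */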
 726
 727        /* At this point, we may not have the receiver's advertised window,
 728         * so initialize ssthresh to the default value and it will be set
 729         * later when we process the INIT.
 730         */
 731        peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;
 732
 733        peer->partial_bytes_acked = 0;
 734        peer->flight_size = 0;
 735
 736        /* Set the transport's RTO.initial value */
 737        peer->rto = asoc->rto_initial;
 738
 739        /* Set the peer's active state. */
 740        peer->state = peer_state;
 741
 742        /* Attach the remote transport to our asoc.  */
 743        list_add_tail(&peer->transports, &asoc->peer.transport_addr_list);
 744        asoc->peer.transport_count++;
 745
 746        /* If we do not yet have a primary path, set one.  */
 747        if (!asoc->peer.primary_path) {
 748                sctp_assoc_set_primary(asoc, peer);
 749                asoc->peer.retran_path = peer;
 750        }
 751
 752        if (asoc->peer.active_path == asoc->peer.retran_path) {
 753                asoc->peer.retran_path = peer;
 754        }
 755
 756        return peer;
 757}
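/* Illustrative usage sketch (not part of the original file): adding a peer
 * address and removing it again.  'addr' is assumed to be a filled-in
 * union sctp_addr.
 *
 *      struct sctp_transport *t;
 *
 *      t = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_UNKNOWN);
 *      if (!t)
 *              return -ENOMEM;
 *      ...
 *      sctp_assoc_del_peer(asoc, &addr);
 */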
 758
 759/* Delete a transport address from an association.  */
 760void sctp_assoc_del_peer(struct sctp_association *asoc,
 761                         const union sctp_addr *addr)
 762{
 763        struct list_head        *pos;
 764        struct list_head        *temp;
 765        struct sctp_transport   *transport;
 766
 767        list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
 768                transport = list_entry(pos, struct sctp_transport, transports);
 769                if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
  770                        /* Do the bookkeeping for removing the peer and free it. */
 771                        sctp_assoc_rm_peer(asoc, transport);
 772                        break;
 773                }
 774        }
 775}
 776
 777/* Lookup a transport by address. */
 778struct sctp_transport *sctp_assoc_lookup_paddr(
 779                                        const struct sctp_association *asoc,
 780                                        const union sctp_addr *address)
 781{
 782        struct sctp_transport *t;
 783
 784        /* Cycle through all transports searching for a peer address. */
 785
 786        list_for_each_entry(t, &asoc->peer.transport_addr_list,
 787                        transports) {
 788                if (sctp_cmp_addr_exact(address, &t->ipaddr))
 789                        return t;
 790        }
 791
 792        return NULL;
 793}
 794
  795/* Remove all transports except a given one. */
 796void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
 797                                     struct sctp_transport *primary)
 798{
 799        struct sctp_transport   *temp;
 800        struct sctp_transport   *t;
 801
 802        list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
 803                                 transports) {
 804                /* if the current transport is not the primary one, delete it */
 805                if (t != primary)
 806                        sctp_assoc_rm_peer(asoc, t);
 807        }
 808
 809        return;
 810}
 811
 812/* Engage in transport control operations.
 813 * Mark the transport up or down and send a notification to the user.
 814 * Select and update the new active and retran paths.
 815 */
 816void sctp_assoc_control_transport(struct sctp_association *asoc,
 817                                  struct sctp_transport *transport,
 818                                  sctp_transport_cmd_t command,
 819                                  sctp_sn_error_t error)
 820{
 821        struct sctp_transport *t = NULL;
 822        struct sctp_transport *first;
 823        struct sctp_transport *second;
 824        struct sctp_ulpevent *event;
 825        struct sockaddr_storage addr;
 826        int spc_state = 0;
 827
 828        /* Record the transition on the transport.  */
 829        switch (command) {
 830        case SCTP_TRANSPORT_UP:
 831                /* If we are moving from UNCONFIRMED state due
 832                 * to heartbeat success, report the SCTP_ADDR_CONFIRMED
 833                 * state to the user, otherwise report SCTP_ADDR_AVAILABLE.
 834                 */
 835                if (SCTP_UNCONFIRMED == transport->state &&
 836                    SCTP_HEARTBEAT_SUCCESS == error)
 837                        spc_state = SCTP_ADDR_CONFIRMED;
 838                else
 839                        spc_state = SCTP_ADDR_AVAILABLE;
 840                transport->state = SCTP_ACTIVE;
 841                break;
 842
 843        case SCTP_TRANSPORT_DOWN:
 844                /* If the transport was never confirmed, do not transition it
 845                 * to inactive state.  Also, release the cached route since
 846                 * there may be a better route next time.
 847                 */
 848                if (transport->state != SCTP_UNCONFIRMED)
 849                        transport->state = SCTP_INACTIVE;
 850                else {
 851                        dst_release(transport->dst);
 852                        transport->dst = NULL;
 853                }
 854
 855                spc_state = SCTP_ADDR_UNREACHABLE;
 856                break;
 857
 858        default:
 859                return;
 860        }
 861
 862        /* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the
 863         * user.
 864         */
 865        memset(&addr, 0, sizeof(struct sockaddr_storage));
 866        memcpy(&addr, &transport->ipaddr, transport->af_specific->sockaddr_len);
 867        event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
 868                                0, spc_state, error, GFP_ATOMIC);
 869        if (event)
 870                sctp_ulpq_tail_event(&asoc->ulpq, event);
 871
 872        /* Select new active and retran paths. */
 873
 874        /* Look for the two most recently used active transports.
 875         *
 876         * This code produces the wrong ordering whenever jiffies
 877         * rolls over, but we still get usable transports, so we don't
 878         * worry about it.
 879         */
 880        first = NULL; second = NULL;
 881
 882        list_for_each_entry(t, &asoc->peer.transport_addr_list,
 883                        transports) {
 884
 885                if ((t->state == SCTP_INACTIVE) ||
 886                    (t->state == SCTP_UNCONFIRMED))
 887                        continue;
 888                if (!first || t->last_time_heard > first->last_time_heard) {
 889                        second = first;
 890                        first = t;
 891                }
 892                if (!second || t->last_time_heard > second->last_time_heard)
 893                        second = t;
 894        }
 895
 896        /* RFC 2960 6.4 Multi-Homed SCTP Endpoints
 897         *
 898         * By default, an endpoint should always transmit to the
 899         * primary path, unless the SCTP user explicitly specifies the
 900         * destination transport address (and possibly source
 901         * transport address) to use.
 902         *
 903         * [If the primary is active but not most recent, bump the most
 904         * recently used transport.]
 905         */
 906        if (((asoc->peer.primary_path->state == SCTP_ACTIVE) ||
 907             (asoc->peer.primary_path->state == SCTP_UNKNOWN)) &&
 908            first != asoc->peer.primary_path) {
 909                second = first;
 910                first = asoc->peer.primary_path;
 911        }
 912
 913        /* If we failed to find a usable transport, just camp on the
 914         * primary, even if it is inactive.
 915         */
 916        if (!first) {
 917                first = asoc->peer.primary_path;
 918                second = asoc->peer.primary_path;
 919        }
 920
 921        /* Set the active and retran transports.  */
 922        asoc->peer.active_path = first;
 923        asoc->peer.retran_path = second;
 924}
 925
 926/* Hold a reference to an association. */
 927void sctp_association_hold(struct sctp_association *asoc)
 928{
 929        atomic_inc(&asoc->base.refcnt);
 930}
 931
 932/* Release a reference to an association and cleanup
 933 * if there are no more references.
 934 */
 935void sctp_association_put(struct sctp_association *asoc)
 936{
 937        if (atomic_dec_and_test(&asoc->base.refcnt))
 938                sctp_association_destroy(asoc);
 939}
 940
 941/* Allocate the next TSN, Transmission Sequence Number, for the given
 942 * association.
 943 */
 944__u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
 945{
 946        /* From Section 1.6 Serial Number Arithmetic:
 947         * Transmission Sequence Numbers wrap around when they reach
 948         * 2**32 - 1.  That is, the next TSN a DATA chunk MUST use
  949         * after transmitting TSN = 2**32 - 1 is TSN = 0.
 950         */
 951        __u32 retval = asoc->next_tsn;
 952        asoc->next_tsn++;
 953        asoc->unack_data++;
 954
 955        return retval;
 956}
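/* Worked example (illustrative): starting from asoc->next_tsn == 0xffffffff,
 *
 *      sctp_association_get_next_tsn(asoc)  -> returns 0xffffffff
 *      sctp_association_get_next_tsn(asoc)  -> returns 0x00000000
 *
 * because the __u32 increment wraps modulo 2**32, matching the serial
 * number arithmetic described above.
 */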
 957
 958/* Compare two addresses to see if they match.  Wildcard addresses
 959 * only match themselves.
 960 */
 961int sctp_cmp_addr_exact(const union sctp_addr *ss1,
 962                        const union sctp_addr *ss2)
 963{
 964        struct sctp_af *af;
 965
 966        af = sctp_get_af_specific(ss1->sa.sa_family);
 967        if (unlikely(!af))
 968                return 0;
 969
 970        return af->cmp_addr(ss1, ss2);
 971}
 972
 973/* Return an ecne chunk to get prepended to a packet.
  974 * Note:  We are sly and return a shared, preallocated chunk.  FIXME:
 975 * No we don't, but we could/should.
 976 */
 977struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
 978{
 979        struct sctp_chunk *chunk;
 980
 981        /* Send ECNE if needed.
 982         * Not being able to allocate a chunk here is not deadly.
 983         */
 984        if (asoc->need_ecne)
 985                chunk = sctp_make_ecne(asoc, asoc->last_ecne_tsn);
 986        else
 987                chunk = NULL;
 988
 989        return chunk;
 990}
 991
 992/*
 993 * Find which transport this TSN was sent on.
 994 */
 995struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
 996                                             __u32 tsn)
 997{
 998        struct sctp_transport *active;
 999        struct sctp_transport *match;
1000        struct sctp_transport *transport;
1001        struct sctp_chunk *chunk;
1002        __be32 key = htonl(tsn);
1003
1004        match = NULL;
1005
1006        /*
1007         * FIXME: In general, find a more efficient data structure for
1008         * searching.
1009         */
1010
1011        /*
1012         * The general strategy is to search each transport's transmitted
1013         * list.   Return which transport this TSN lives on.
1014         *
1015         * Let's be hopeful and check the active_path first.
1016         * Another optimization would be to know if there is only one
1017         * outbound path and not have to look for the TSN at all.
1018         *
1019         */
1020
1021        active = asoc->peer.active_path;
1022
1023        list_for_each_entry(chunk, &active->transmitted,
1024                        transmitted_list) {
1025
1026                if (key == chunk->subh.data_hdr->tsn) {
1027                        match = active;
1028                        goto out;
1029                }
1030        }
1031
1032        /* If not found, go search all the other transports. */
1033        list_for_each_entry(transport, &asoc->peer.transport_addr_list,
1034                        transports) {
1035
1036                if (transport == active)
1037                        break;
1038                list_for_each_entry(chunk, &transport->transmitted,
1039                                transmitted_list) {
1040                        if (key == chunk->subh.data_hdr->tsn) {
1041                                match = transport;
1042                                goto out;
1043                        }
1044                }
1045        }
1046out:
1047        return match;
1048}
1049
1050/* Is this the association we are looking for? */
1051struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
1052                                           const union sctp_addr *laddr,
1053                                           const union sctp_addr *paddr)
1054{
1055        struct sctp_transport *transport;
1056
1057        if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
1058            (htons(asoc->peer.port) == paddr->v4.sin_port)) {
1059                transport = sctp_assoc_lookup_paddr(asoc, paddr);
1060                if (!transport)
1061                        goto out;
1062
1063                if (sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
1064                                         sctp_sk(asoc->base.sk)))
1065                        goto out;
1066        }
1067        transport = NULL;
1068
1069out:
1070        return transport;
1071}
1072
1073/* Do delayed input processing.  This is scheduled by sctp_rcv(). */
1074static void sctp_assoc_bh_rcv(struct work_struct *work)
1075{
1076        struct sctp_association *asoc =
1077                container_of(work, struct sctp_association,
1078                             base.inqueue.immediate);
1079        struct sctp_endpoint *ep;
1080        struct sctp_chunk *chunk;
1081        struct sock *sk;
1082        struct sctp_inq *inqueue;
1083        int state;
1084        sctp_subtype_t subtype;
1085        int error = 0;
1086
1087        /* The association should be held so we should be safe. */
1088        ep = asoc->ep;
1089        sk = asoc->base.sk;
1090
1091        inqueue = &asoc->base.inqueue;
1092        sctp_association_hold(asoc);
1093        while (NULL != (chunk = sctp_inq_pop(inqueue))) {
1094                state = asoc->state;
1095                subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
1096
1097                /* SCTP-AUTH, Section 6.3:
1098                 *    The receiver has a list of chunk types which it expects
1099                 *    to be received only after an AUTH-chunk.  This list has
1100                 *    been sent to the peer during the association setup.  It
1101                 *    MUST silently discard these chunks if they are not placed
1102                 *    after an AUTH chunk in the packet.
1103                 */
1104                if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
1105                        continue;
1106
1107                /* Remember where the last DATA chunk came from so we
1108                 * know where to send the SACK.
1109                 */
1110                if (sctp_chunk_is_data(chunk))
1111                        asoc->peer.last_data_from = chunk->transport;
1112                else
1113                        SCTP_INC_STATS(SCTP_MIB_INCTRLCHUNKS);
1114
1115                if (chunk->transport)
1116                        chunk->transport->last_time_heard = jiffies;
1117
1118                /* Run through the state machine. */
1119                error = sctp_do_sm(SCTP_EVENT_T_CHUNK, subtype,
1120                                   state, ep, asoc, chunk, GFP_ATOMIC);
1121
1122                /* Check to see if the association is freed in response to
1123                 * the incoming chunk.  If so, get out of the while loop.
1124                 */
1125                if (asoc->base.dead)
1126                        break;
1127
1128                /* If there is an error on chunk, discard this packet. */
1129                if (error && chunk)
1130                        chunk->pdiscard = 1;
1131        }
1132        sctp_association_put(asoc);
1133}
1134
1135/* This routine moves an association from its old sk to a new sk.  */
1136void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
1137{
1138        struct sctp_sock *newsp = sctp_sk(newsk);
1139        struct sock *oldsk = assoc->base.sk;
1140
1141        /* Delete the association from the old endpoint's list of
1142         * associations.
1143         */
1144        list_del_init(&assoc->asocs);
1145
1146        /* Decrement the backlog value for a TCP-style socket. */
1147        if (sctp_style(oldsk, TCP))
1148                oldsk->sk_ack_backlog--;
1149
1150        /* Release references to the old endpoint and the sock.  */
1151        sctp_endpoint_put(assoc->ep);
1152        sock_put(assoc->base.sk);
1153
1154        /* Get a reference to the new endpoint.  */
1155        assoc->ep = newsp->ep;
1156        sctp_endpoint_hold(assoc->ep);
1157
1158        /* Get a reference to the new sock.  */
1159        assoc->base.sk = newsk;
1160        sock_hold(assoc->base.sk);
1161
1162        /* Add the association to the new endpoint's list of associations.  */
1163        sctp_endpoint_add_asoc(newsp->ep, assoc);
1164}
1165
1166/* Update an association (possibly from unexpected COOKIE-ECHO processing).  */
1167void sctp_assoc_update(struct sctp_association *asoc,
1168                       struct sctp_association *new)
1169{
1170        struct sctp_transport *trans;
1171        struct list_head *pos, *temp;
1172
1173        /* Copy in new parameters of peer. */
1174        asoc->c = new->c;
1175        asoc->peer.rwnd = new->peer.rwnd;
1176        asoc->peer.sack_needed = new->peer.sack_needed;
1177        asoc->peer.i = new->peer.i;
1178        sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
1179                         asoc->peer.i.initial_tsn, GFP_ATOMIC);
1180
1181        /* Remove any peer addresses not present in the new association. */
1182        list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
1183                trans = list_entry(pos, struct sctp_transport, transports);
1184                if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr))
1185                        sctp_assoc_del_peer(asoc, &trans->ipaddr);
1186
1187                if (asoc->state >= SCTP_STATE_ESTABLISHED)
1188                        sctp_transport_reset(trans);
1189        }
1190
 1191        /* If this is case A (association restart, RFC 2960 5.2.4), use
 1192         * initial_tsn as next_tsn. If this is case B, use the
 1193         * current next_tsn in case data sent to the peer
 1194         * has been discarded and needs retransmission.
1195         */
1196        if (asoc->state >= SCTP_STATE_ESTABLISHED) {
1197                asoc->next_tsn = new->next_tsn;
1198                asoc->ctsn_ack_point = new->ctsn_ack_point;
1199                asoc->adv_peer_ack_point = new->adv_peer_ack_point;
1200
1201                /* Reinitialize SSN for both local streams
1202                 * and peer's streams.
1203                 */
1204                sctp_ssnmap_clear(asoc->ssnmap);
1205
1206                /* Flush the ULP reassembly and ordered queue.
1207                 * Any data there will now be stale and will
1208                 * cause problems.
1209                 */
1210                sctp_ulpq_flush(&asoc->ulpq);
1211
1212                /* reset the overall association error count so
1213                 * that the restarted association doesn't get torn
1214                 * down on the next retransmission timer.
1215                 */
1216                asoc->overall_error_count = 0;
1217
1218        } else {
1219                /* Add any peer addresses from the new association. */
1220                list_for_each_entry(trans, &new->peer.transport_addr_list,
1221                                transports) {
1222                        if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))
1223                                sctp_assoc_add_peer(asoc, &trans->ipaddr,
1224                                                    GFP_ATOMIC, trans->state);
1225                }
1226
1227                asoc->ctsn_ack_point = asoc->next_tsn - 1;
1228                asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
1229                if (!asoc->ssnmap) {
1230                        /* Move the ssnmap. */
1231                        asoc->ssnmap = new->ssnmap;
1232                        new->ssnmap = NULL;
1233                }
1234
1235                if (!asoc->assoc_id) {
1236                        /* get a new association id since we don't have one
1237                         * yet.
1238                         */
1239                        sctp_assoc_set_id(asoc, GFP_ATOMIC);
1240                }
1241        }
1242
 1243        /* SCTP-AUTH: Save the peer parameters from the new association
1244         * and also move the association shared keys over
1245         */
1246        kfree(asoc->peer.peer_random);
1247        asoc->peer.peer_random = new->peer.peer_random;
1248        new->peer.peer_random = NULL;
1249
1250        kfree(asoc->peer.peer_chunks);
1251        asoc->peer.peer_chunks = new->peer.peer_chunks;
1252        new->peer.peer_chunks = NULL;
1253
1254        kfree(asoc->peer.peer_hmacs);
1255        asoc->peer.peer_hmacs = new->peer.peer_hmacs;
1256        new->peer.peer_hmacs = NULL;
1257
1258        sctp_auth_key_put(asoc->asoc_shared_key);
1259        sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
1260}
1261
1262/* Update the retran path for sending a retransmitted packet.
1263 * Round-robin through the active transports, else round-robin
1264 * through the inactive transports as this is the next best thing
1265 * we can try.
1266 */
1267void sctp_assoc_update_retran_path(struct sctp_association *asoc)
1268{
1269        struct sctp_transport *t, *next;
1270        struct list_head *head = &asoc->peer.transport_addr_list;
1271        struct list_head *pos;
1272
1273        if (asoc->peer.transport_count == 1)
1274                return;
1275
1276        /* Find the next transport in a round-robin fashion. */
1277        t = asoc->peer.retran_path;
1278        pos = &t->transports;
1279        next = NULL;
1280
1281        while (1) {
1282                /* Skip the head. */
1283                if (pos->next == head)
1284                        pos = head->next;
1285                else
1286                        pos = pos->next;
1287
1288                t = list_entry(pos, struct sctp_transport, transports);
1289
 1290                /* We have exhausted the list without finding any
 1291                 * other active transport, so fall back to the next
 1292                 * transport we remembered.
1293                 */
1294                if (t == asoc->peer.retran_path) {
1295                        t = next;
1296                        break;
1297                }
1298
1299                /* Try to find an active transport. */
1300
1301                if ((t->state == SCTP_ACTIVE) ||
1302                    (t->state == SCTP_UNKNOWN)) {
1303                        break;
1304                } else {
1305                        /* Keep track of the next transport in case
1306                         * we don't find any active transport.
1307                         */
1308                        if (!next)
1309                                next = t;
1310                }
1311        }
1312
1313        asoc->peer.retran_path = t;
1314
1315        SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association"
1316                                 " %p addr: ",
1317                                 " port: %d\n",
1318                                 asoc,
1319                                 (&t->ipaddr),
1320                                 ntohs(t->ipaddr.v4.sin_port));
1321}
1322
1323/* Choose the transport for sending retransmit packet.  */
1324struct sctp_transport *sctp_assoc_choose_alter_transport(
1325        struct sctp_association *asoc, struct sctp_transport *last_sent_to)
1326{
1327        /* If this is the first time packet is sent, use the active path,
1328         * else use the retran path. If the last packet was sent over the
1329         * retran path, update the retran path and use it.
1330         */
1331        if (!last_sent_to)
1332                return asoc->peer.active_path;
1333        else {
1334                if (last_sent_to == asoc->peer.retran_path)
1335                        sctp_assoc_update_retran_path(asoc);
1336                return asoc->peer.retran_path;
1337        }
1338}
1339
1340/* Update the association's pmtu and frag_point by going through all the
1341 * transports. This routine is called when a transport's PMTU has changed.
1342 */
1343void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
1344{
1345        struct sctp_transport *t;
1346        __u32 pmtu = 0;
1347
1348        if (!asoc)
1349                return;
1350
1351        /* Get the lowest pmtu of all the transports. */
1352        list_for_each_entry(t, &asoc->peer.transport_addr_list,
1353                                transports) {
1354                if (t->pmtu_pending && t->dst) {
1355                        sctp_transport_update_pmtu(t, dst_mtu(t->dst));
1356                        t->pmtu_pending = 0;
1357                }
1358                if (!pmtu || (t->pathmtu < pmtu))
1359                        pmtu = t->pathmtu;
1360        }
1361
1362        if (pmtu) {
1363                asoc->pathmtu = pmtu;
1364                asoc->frag_point = sctp_frag_point(asoc, pmtu);
1365        }
1366
1367        SCTP_DEBUG_PRINTK("%s: asoc:%p, pmtu:%d, frag_point:%d\n",
1368                          __func__, asoc, asoc->pathmtu, asoc->frag_point);
1369}
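/* Worked example (illustrative): with two transports whose pathmtu values
 * are 1500 and 1280, the loop above picks the lowest, so asoc->pathmtu
 * becomes 1280 and frag_point is recomputed from that value.
 */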
1370
1371/* Should we send a SACK to update our peer? */
1372static inline int sctp_peer_needs_update(struct sctp_association *asoc)
1373{
1374        switch (asoc->state) {
1375        case SCTP_STATE_ESTABLISHED:
1376        case SCTP_STATE_SHUTDOWN_PENDING:
1377        case SCTP_STATE_SHUTDOWN_RECEIVED:
1378        case SCTP_STATE_SHUTDOWN_SENT:
1379                if ((asoc->rwnd > asoc->a_rwnd) &&
1380                    ((asoc->rwnd - asoc->a_rwnd) >=
1381                     min_t(__u32, (asoc->base.sk->sk_rcvbuf >> 1), asoc->pathmtu)))
1382                        return 1;
1383                break;
1384        default:
1385                break;
1386        }
1387        return 0;
1388}
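/* Worked example (illustrative): with sk_rcvbuf = 65536 and pathmtu = 1500,
 * the update threshold is min(65536 / 2, 1500) = 1500, so a window update
 * SACK is warranted once rwnd exceeds the last advertised a_rwnd by at
 * least 1500 bytes.
 */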
1389
1390/* Increase asoc's rwnd by len and send any window update SACK if needed. */
1391void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned len)
1392{
1393        struct sctp_chunk *sack;
1394        struct timer_list *timer;
1395
1396        if (asoc->rwnd_over) {
1397                if (asoc->rwnd_over >= len) {
1398                        asoc->rwnd_over -= len;
1399                } else {
1400                        asoc->rwnd += (len - asoc->rwnd_over);
1401                        asoc->rwnd_over = 0;
1402                }
1403        } else {
1404                asoc->rwnd += len;
1405        }
1406
1407        /* If we had window pressure, start recovering it
1408         * once our rwnd had reached the accumulated pressure
1409         * threshold.  The idea is to recover slowly, but up
1410         * to the initial advertised window.
1411         */
1412        if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
1413                int change = min(asoc->pathmtu, asoc->rwnd_press);
1414                asoc->rwnd += change;
1415                asoc->rwnd_press -= change;
1416        }
1417
1418        SCTP_DEBUG_PRINTK("%s: asoc %p rwnd increased by %d to (%u, %u) "
1419                          "- %u\n", __func__, asoc, len, asoc->rwnd,
1420                          asoc->rwnd_over, asoc->a_rwnd);
1421
1422        /* Send a window update SACK if the rwnd has increased by at least the
1423         * minimum of the association's PMTU and half of the receive buffer.
1424         * The algorithm used is similar to the one described in
1425         * Section 4.2.3.3 of RFC 1122.
1426         */
1427        if (sctp_peer_needs_update(asoc)) {
1428                asoc->a_rwnd = asoc->rwnd;
1429                SCTP_DEBUG_PRINTK("%s: Sending window update SACK- asoc: %p "
1430                                  "rwnd: %u a_rwnd: %u\n", __func__,
1431                                  asoc, asoc->rwnd, asoc->a_rwnd);
1432                sack = sctp_make_sack(asoc);
1433                if (!sack)
1434                        return;
1435
1436                asoc->peer.sack_needed = 0;
1437
1438                sctp_outq_tail(&asoc->outqueue, sack);
1439
1440                /* Stop the SACK timer.  */
1441                timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
1442                if (timer_pending(timer) && del_timer(timer))
1443                        sctp_association_put(asoc);
1444        }
1445}
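/* Illustrative sketch, not from the original file: the increase path is
 * typically driven by received data being consumed on the socket side; the
 * wrapper name and parameter are invented.
 */
static void example_data_consumed(struct sctp_association *asoc,
                                  unsigned int bytes_freed)
{
        /* Return the freed bytes to the receive window; this may emit a
         * window-update SACK via sctp_peer_needs_update() above.
         */
        sctp_assoc_rwnd_increase(asoc, bytes_freed);
}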
1446
1447/* Decrease asoc's rwnd by len. */
1448void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len)
1449{
1450        int rx_count;
1451        int over = 0;
1452
1453        SCTP_ASSERT(asoc->rwnd, "rwnd zero", return);
1454        SCTP_ASSERT(!asoc->rwnd_over, "rwnd_over not zero", return);
1455
1456        if (asoc->ep->rcvbuf_policy)
1457                rx_count = atomic_read(&asoc->rmem_alloc);
1458        else
1459                rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
1460
1461        /* If we've reached or overflowed our receive buffer, announce
1462         * a 0 rwnd if rwnd would still be positive.  Store the
1463         * potential pressure overflow so that the window can be restored
1464         * back to its original value.
1465         */
1466        if (rx_count >= asoc->base.sk->sk_rcvbuf)
1467                over = 1;
1468
1469        if (asoc->rwnd >= len) {
1470                asoc->rwnd -= len;
1471                if (over) {
1472                        asoc->rwnd_press = asoc->rwnd;
1473                        asoc->rwnd = 0;
1474                }
1475        } else {
1476                asoc->rwnd_over = len - asoc->rwnd;
1477                asoc->rwnd = 0;
1478        }
1479        SCTP_DEBUG_PRINTK("%s: asoc %p rwnd decreased by %d to (%u, %u, %u)\n",
1480                          __func__, asoc, len, asoc->rwnd,
1481                          asoc->rwnd_over, asoc->rwnd_press);
1482}
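/* Worked example for the window accounting above (values invented): starting
 * from rwnd = 1000 and rwnd_over = 0, a 1500-byte decrease leaves rwnd = 0
 * and rwnd_over = 500; a later sctp_assoc_rwnd_increase(asoc, 1500) first
 * absorbs the 500-byte overshoot and then restores rwnd to 1000.
 */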
1483
1484/* Build the bind address list for the association based on info from the
1485 * local endpoint and the remote peer.
1486 */
1487int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
1488                                     sctp_scope_t scope, gfp_t gfp)
1489{
1490        int flags;
1491
1492        /* Use scoping rules to determine the subset of addresses from
1493         * the endpoint.
1494         */
1495        flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
1496        if (asoc->peer.ipv4_address)
1497                flags |= SCTP_ADDR4_PEERSUPP;
1498        if (asoc->peer.ipv6_address)
1499                flags |= SCTP_ADDR6_PEERSUPP;
1500
1501        return sctp_bind_addr_copy(&asoc->base.bind_addr,
1502                                   &asoc->ep->base.bind_addr,
1503                                   scope, gfp, flags);
1504}
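/* Worked example for the flag computation above: for a PF_INET6 socket whose
 * peer advertised support for both address families, flags ends up as
 * SCTP_ADDR6_ALLOWED | SCTP_ADDR4_PEERSUPP | SCTP_ADDR6_PEERSUPP, so the copy
 * keeps both v4 and v6 endpoint addresses that fit the given scope.
 */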
1505
1506/* Build the association's bind address list from the cookie.  */
1507int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
1508                                         struct sctp_cookie *cookie,
1509                                         gfp_t gfp)
1510{
1511        int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
1512        int var_size3 = cookie->raw_addr_list_len;
1513        __u8 *raw = (__u8 *)cookie->peer_init + var_size2;
1514
1515        return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
1516                                      asoc->ep->base.bind_addr.port, gfp);
1517}
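/* Cookie layout assumed by the helper above (illustrative diagram only):
 *
 *   cookie->peer_init                  raw
 *   |                                  |
 *   v                                  v
 *   +----------------------------------+--------------------------------+
 *   | peer INIT chunk (var_size2 bytes)| packed addresses (var_size3)   |
 *   +----------------------------------+--------------------------------+
 */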
1518
1519/* Lookup laddr in the bind address list of an association. */
1520int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
1521                            const union sctp_addr *laddr)
1522{
1523        int found = 0;
1524
1525        if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
1526            sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
1527                                 sctp_sk(asoc->base.sk)))
1528                found = 1;
1529
1530        return found;
1531}
1532
1533/* Set an association id for a given association */
1534int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
1535{
1536        int assoc_id;
1537        int error = 0;
1538
1539        /* If the id is already assigned, keep it. */
1540        if (asoc->assoc_id)
1541                return error;
1542retry:
1543        if (unlikely(!idr_pre_get(&sctp_assocs_id, gfp)))
1544                return -ENOMEM;
1545
1546        spin_lock_bh(&sctp_assocs_id_lock);
1547        error = idr_get_new_above(&sctp_assocs_id, (void *)asoc,
1548                                    1, &assoc_id);
1549        spin_unlock_bh(&sctp_assocs_id_lock);
1550        if (error == -EAGAIN)
1551                goto retry;
1552        else if (error)
1553                return error;
1554
1555        asoc->assoc_id = (sctp_assoc_t) assoc_id;
1556        return error;
1557}
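/* Illustrative sketch, not from the original file: the id allocated above can
 * be mapped back to its association through the same IDR, under the same lock
 * used for allocation.  The wrapper name is invented; a real lookup would also
 * validate the result before use.
 */
static struct sctp_association *example_id_to_assoc(sctp_assoc_t id)
{
        struct sctp_association *asoc;

        spin_lock_bh(&sctp_assocs_id_lock);
        asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
        spin_unlock_bh(&sctp_assocs_id_lock);

        return asoc;
}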
1558
1559/* Free asconf_ack cache */
1560static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
1561{
1562        struct sctp_chunk *ack;
1563        struct sctp_chunk *tmp;
1564
1565        list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
1566                                transmitted_list) {
1567                list_del_init(&ack->transmitted_list);
1568                sctp_chunk_free(ack);
1569        }
1570}
1571
1572/* Clean up the ASCONF_ACK queue */
1573void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
1574{
1575        struct sctp_chunk *ack;
1576        struct sctp_chunk *tmp;
1577
1578        /* We can remove all the entries from the queue up to
1579         * the "Peer-Sequence-Number".
1580         */
1581        list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
1582                                transmitted_list) {
1583                if (ack->subh.addip_hdr->serial ==
1584                                htonl(asoc->peer.addip_serial))
1585                        break;
1586
1587                list_del_init(&ack->transmitted_list);
1588                sctp_chunk_free(ack);
1589        }
1590}
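/* Worked example for the cleanup above (serials invented): with cached
 * ASCONF-ACKs for serials 5, 6 and 7 and peer.addip_serial = 6, the entry for
 * serial 5 is freed and the walk stops at 6, so the ACKs for 6 and 7 stay
 * cached for possible retransmission lookups.
 */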
1591
1592/* Find the ASCONF_ACK whose serial number matches ASCONF */
1593struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
1594                                        const struct sctp_association *asoc,
1595                                        __be32 serial)
1596{
1597        struct sctp_chunk *ack;
1598
1599        /* Walk through the list of cached ASCONF-ACKs and find the
1600         * ack chunk whose serial number matches that of the request.
1601         */
1602        list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
1603                if (ack->subh.addip_hdr->serial == serial) {
1604                        sctp_chunk_hold(ack);
1605                        return ack;
1606                }
1607        }
1608
1609        return NULL;
1610}
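/* Illustrative sketch, not from the original file: answering a retransmitted
 * ASCONF from the cache instead of processing it again.  The wrapper name is
 * invented; error handling and the actual enqueueing of the reply are omitted.
 */
static struct sctp_chunk *
example_duplicate_asconf_reply(const struct sctp_association *asoc,
                               __be32 serial)
{
        /* A hit carries an extra reference from sctp_chunk_hold() above,
         * which the caller is expected to release once done with the chunk.
         */
        return sctp_assoc_lookup_asconf_ack(asoc, serial);
}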
1611