linux/net/sctp/ulpqueue.c
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers.  */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
                                             struct sctp_ulpevent *);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
                                             struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);

/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory.  */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
                                 struct sctp_association *asoc)
{
        memset(ulpq, 0, sizeof(struct sctp_ulpq));

        ulpq->asoc = asoc;
        skb_queue_head_init(&ulpq->reasm);
        skb_queue_head_init(&ulpq->lobby);
        ulpq->pd_mode = 0;

        return ulpq;
}
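
/* Usage sketch (illustrative, not part of this file): the association
 * setup code is expected to embed a struct sctp_ulpq and initialize it
 * once the association exists, roughly:
 *
 *      sctp_ulpq_init(&asoc->ulpq, asoc);
 *      ...
 *      sctp_ulpq_free(&asoc->ulpq);    (on association teardown)
 *
 * Initialization only zeroes the struct and readies the two queues; it
 * cannot fail, which is why it simply returns the ulpq it was given.
 */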

/* Flush the reassembly and ordering queues.  */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
        struct sk_buff *skb;
        struct sctp_ulpevent *event;

        while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
                event = sctp_skb2event(skb);
                sctp_ulpevent_free(event);
        }

        while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
                event = sctp_skb2event(skb);
                sctp_ulpevent_free(event);
        }
}

/* Dispose of a ulpqueue.  */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
        sctp_ulpq_flush(ulpq);
}

/* Process an incoming DATA chunk.  */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                        gfp_t gfp)
{
        struct sk_buff_head temp;
        struct sctp_ulpevent *event;
        int event_eor = 0;

        /* Create an event from the incoming chunk. */
        event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
        if (!event)
                return -ENOMEM;

        /* Do reassembly if needed.  */
        event = sctp_ulpq_reasm(ulpq, event);

        /* Do ordering if needed.  */
        if (event && (event->msg_flags & MSG_EOR)) {
                /* Create a temporary list to collect chunks on.  */
                skb_queue_head_init(&temp);
                __skb_queue_tail(&temp, sctp_event2skb(event));

                event = sctp_ulpq_order(ulpq, event);
        }

        /* Send event to the ULP.  'event' is the sctp_ulpevent for the
         * very first SKB on the 'temp' list.
         */
        if (event) {
                event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
                sctp_ulpq_tail_event(ulpq, event);
        }

        return event_eor;
}
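
/* Return-value sketch for sctp_ulpq_tail_data() (derived from the code
 * above): -ENOMEM if no event could be built, 0 if the chunk was absorbed
 * without completing a message (e.g. a middle fragment parked on the
 * reasm queue), and 1 if an event carrying MSG_EOR reached the ULP.  The
 * renege path below keys off this to decide between entering partial
 * delivery and draining the reassembly queue.
 */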

/* Clear the partial delivery mode for this socket.  Note: This
 * assumes that no association is currently in partial delivery mode.
 */
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
        struct sctp_sock *sp = sctp_sk(sk);

        if (atomic_dec_and_test(&sp->pd_mode)) {
                /* This means there are no other associations in PD, so
                 * we can go ahead and clear out the lobby in one shot.
                 */
                if (!skb_queue_empty(&sp->pd_lobby)) {
                        struct list_head *list;
                        sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
                        list = (struct list_head *)&sp->pd_lobby;
                        INIT_LIST_HEAD(list);
                        return 1;
                }
        } else {
                /* There are other associations in PD, so we only need to
                 * pull stuff out of the lobby that belongs to the
                 * association that is exiting PD (all of its notifications
                 * are posted here).
                 */
                if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
                        struct sk_buff *skb, *tmp;
                        struct sctp_ulpevent *event;

                        sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
                                event = sctp_skb2event(skb);
                                if (event->asoc == asoc) {
                                        __skb_unlink(skb, &sp->pd_lobby);
                                        __skb_queue_tail(&sk->sk_receive_queue,
                                                         skb);
                                }
                        }
                }
        }

        return 0;
}
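
/* A note on the accounting (inferred from the code above): sp->pd_mode is
 * an atomic count of associations on this socket currently in partial
 * delivery, while ulpq->pd_mode is a per-association flag.  For example,
 * with two associations in PD, the first sctp_clear_pd() only filters its
 * own events out of pd_lobby; the second one, dropping the count to zero,
 * splices the whole lobby onto sk_receive_queue in one shot.
 */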

/* Set the pd_mode on the socket and ulpq */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
        struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

        atomic_inc(&sp->pd_mode);
        ulpq->pd_mode = 1;
}

/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
        ulpq->pd_mode = 0;
        sctp_ulpq_reasm_drain(ulpq);
        return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}

/* Add a new event for propagation to the ULP.  If the SKB of 'event'
 * is on a list, it is the first such member of that list.
 */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
        struct sock *sk = ulpq->asoc->base.sk;
        struct sk_buff_head *queue, *skb_list;
        struct sk_buff *skb = sctp_event2skb(event);
        int clear_pd = 0;

        skb_list = (struct sk_buff_head *) skb->prev;

        /* If the socket is just going to throw this away, do not
         * even try to deliver it.
         */
        if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
                goto out_free;

        /* Check if the user wishes to receive this event.  */
        if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
                goto out_free;

        /* If we are in partial delivery mode, post to the lobby until
         * partial delivery is cleared, unless, of course, _this_
         * association is the cause of the partial delivery.
         */
        if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
                queue = &sk->sk_receive_queue;
        } else {
                if (ulpq->pd_mode) {
                        /* If the association is in partial delivery, we
                         * need to finish delivering the partially processed
                         * packet before passing any other data.  This is
                         * because we don't truly support stream interleaving.
                         */
                        if ((event->msg_flags & MSG_NOTIFICATION) ||
                            (SCTP_DATA_NOT_FRAG ==
                                    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
                                queue = &sctp_sk(sk)->pd_lobby;
                        else {
                                clear_pd = event->msg_flags & MSG_EOR;
                                queue = &sk->sk_receive_queue;
                        }
                } else {
                        /*
                         * If fragment interleave is enabled, we
                         * can queue this to the receive queue instead
                         * of the lobby.
                         */
                        if (sctp_sk(sk)->frag_interleave)
                                queue = &sk->sk_receive_queue;
                        else
                                queue = &sctp_sk(sk)->pd_lobby;
                }
        }

        /* If we are harvesting multiple skbs they will be
         * collected on a list.
         */
        if (skb_list)
                sctp_skb_list_tail(skb_list, queue);
        else
                __skb_queue_tail(queue, skb);

        /* Did we just complete partial delivery and need to get
         * rolling again?  Move pending data to the receive
         * queue.
         */
        if (clear_pd)
                sctp_ulpq_clear_pd(ulpq);

        if (queue == &sk->sk_receive_queue)
                sk->sk_data_ready(sk, 0);
        return 1;

out_free:
        if (skb_list)
                sctp_queue_purge_ulpevents(skb_list);
        else
                sctp_ulpevent_free(event);

        return 0;
}
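
/* How the skb_list trick works (a reading of the code above, not new
 * behaviour): when the caller has queued 'event' on a temporary
 * sk_buff_head, the first skb's prev pointer aims back at that list head,
 * so casting skb->prev recovers the whole burst and lets
 * sctp_skb_list_tail() splice every gathered skb in one operation.  An
 * event that was never queued has skb->prev == NULL and is queued alone.
 */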

/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled.  */
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
                                  struct sctp_ulpevent *event)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *cevent;
        __u32 tsn, ctsn;

        tsn = event->tsn;

        /* See if it belongs at the end. */
        pos = skb_peek_tail(&ulpq->reasm);
        if (!pos) {
                __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
                return;
        }

        /* Short circuit just dropping it at the end. */
        cevent = sctp_skb2event(pos);
        ctsn = cevent->tsn;
        if (TSN_lt(ctsn, tsn)) {
                __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
                return;
        }

        /* Find the right place in this list. We store them by TSN.  */
        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                if (TSN_lt(tsn, ctsn))
                        break;
        }

        /* Insert before pos. */
        __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}
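
/* Worked example (hypothetical TSNs): if the reasm queue holds events
 * with TSNs 5, 6 and 9, storing TSN 7 first fails the tail short-circuit
 * (9 is not less than 7), then walks until it hits 9 and inserts before
 * it, leaving 5, 6, 7, 9.  TSN 10 would instead take the short-circuit
 * and land directly at the tail.
 */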

/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skb's
 * as stored in the reassembly queue. The skb's may be non-linear if the
 * sctp payload was fragmented on the way and IP had to reassemble them.
 * We add the rest of skb's to the first skb's fraglist.
 */
static struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
        struct sk_buff_head *queue, struct sk_buff *f_frag,
        struct sk_buff *l_frag)
{
        struct sk_buff *pos;
        struct sk_buff *new = NULL;
        struct sctp_ulpevent *event;
        struct sk_buff *pnext, *last;
        struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

        /* Store the pointer to the 2nd skb */
        if (f_frag == l_frag)
                pos = NULL;
        else
                pos = f_frag->next;

        /* Get the last skb in the f_frag's frag_list if present. */
        for (last = list; list; last = list, list = list->next)
                ;

        /* Add the list of remaining fragments to the first fragment's
         * frag_list.
         */
        if (last)
                last->next = pos;
        else {
                if (skb_cloned(f_frag)) {
                        /* This is a cloned skb, we can't just modify
                         * the frag_list.  We need a new skb to do that.
                         * Instead of calling skb_unshare(), we'll do it
                         * ourselves since we need to delay the free.
                         */
                        new = skb_copy(f_frag, GFP_ATOMIC);
                        if (!new)
                                return NULL;    /* try again later */

                        sctp_skb_set_owner_r(new, f_frag->sk);

                        skb_shinfo(new)->frag_list = pos;
                } else
                        skb_shinfo(f_frag)->frag_list = pos;
        }

        /* Remove the first fragment from the reassembly queue.  */
        __skb_unlink(f_frag, queue);

        /* If we did unshare, then free the old skb and re-assign. */
        if (new) {
                kfree_skb(f_frag);
                f_frag = new;
        }

        while (pos) {
                pnext = pos->next;

                /* Update the len and data_len fields of the first fragment. */
                f_frag->len += pos->len;
                f_frag->data_len += pos->len;

                /* Remove the fragment from the reassembly queue.  */
                __skb_unlink(pos, queue);

                /* Break if we have reached the last fragment.  */
                if (pos == l_frag)
                        break;
                pos->next = pnext;
                pos = pnext;
        }

        event = sctp_skb2event(f_frag);
        SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);

        return event;
}
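
/* Shape of the result (a sketch of what the loop above builds): the first
 * fragment's skb becomes the head of the message and every later fragment
 * hangs off skb_shinfo(f_frag)->frag_list:
 *
 *      f_frag:  [FIRST]   len/data_len grow by each fragment's len
 *                  \-> frag_list: [MIDDLE] -> [MIDDLE] -> [LAST]
 *
 * so the receive path can treat the reassembled message as one non-linear
 * skb.  Only the head skb is copied, and only when it was cloned; the
 * remaining fragments are relinked, never copied.
 */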

/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in an SCTP datagram and return the corresponding event.
 */
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *cevent;
        struct sk_buff *first_frag = NULL;
        __u32 ctsn, next_tsn;
        struct sctp_ulpevent *retval = NULL;
        struct sk_buff *pd_first = NULL;
        struct sk_buff *pd_last = NULL;
        size_t pd_len = 0;
        struct sctp_association *asoc;
        u32 pd_point;

        /* Initialized to 0 just to avoid compiler warning message.  Will
         * never be used with this value. It is referenced only after it
         * is set when we find the first fragment of a message.
         */
        next_tsn = 0;

        /* The chunks are held in the reasm queue sorted by TSN.
         * Walk through the queue sequentially and look for a sequence of
         * fragmented chunks that complete a datagram.
         * 'first_frag' and next_tsn are reset when we find a chunk which
         * is the first fragment of a datagram. Once these 2 fields are set
         * we expect to find the remaining middle fragments and the last
         * fragment in order. If not, first_frag is reset to NULL and we
         * start the next pass when we find another first fragment.
         *
         * There is a potential to do partial delivery if the user sets the
         * SCTP_PARTIAL_DELIVERY_POINT option. Let's count some things here
         * to see if we can do PD.
         */
        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        /* If this "FIRST_FRAG" is the first
                         * element in the queue, then count it towards
                         * possible PD.
                         */
                        if (pos == ulpq->reasm.next) {
                                pd_first = pos;
                                pd_last = pos;
                                pd_len = pos->len;
                        } else {
                                pd_first = NULL;
                                pd_last = NULL;
                                pd_len = 0;
                        }

                        first_frag = pos;
                        next_tsn = ctsn + 1;
                        break;

                case SCTP_DATA_MIDDLE_FRAG:
                        if (first_frag && (ctsn == next_tsn)) {
                                next_tsn++;
                                if (pd_first) {
                                        pd_last = pos;
                                        pd_len += pos->len;
                                }
                        } else
                                first_frag = NULL;
                        break;

                case SCTP_DATA_LAST_FRAG:
                        if (first_frag && (ctsn == next_tsn))
                                goto found;
                        else
                                first_frag = NULL;
                        break;
                }
        }

        asoc = ulpq->asoc;
        if (pd_first) {
                /* Make sure we can enter partial delivery.
                 * We can trigger partial delivery only if fragment
                 * interleave is set, or the socket is not already
                 * in partial delivery.
                 */
                if (!sctp_sk(asoc->base.sk)->frag_interleave &&
                    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
                        goto done;

                cevent = sctp_skb2event(pd_first);
                pd_point = sctp_sk(asoc->base.sk)->pd_point;
                if (pd_point && pd_point <= pd_len) {
                        retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
                                                             &ulpq->reasm,
                                                             pd_first,
                                                             pd_last);
                        if (retval)
                                sctp_ulpq_set_pd(ulpq);
                }
        }
done:
        return retval;
found:
        retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
                                             &ulpq->reasm, first_frag, pos);
        if (retval)
                retval->msg_flags |= MSG_EOR;
        goto done;
}
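
/* Worked example (hypothetical values): with pd_point = 4096 and the
 * queue holding FIRST(tsn 100, 2000 bytes) and MIDDLE(tsn 101, 3000
 * bytes), the walk finds no LAST fragment, but pd_first is set and
 * pd_len = 5000 >= pd_point, so the two fragments are handed up early
 * and the association enters partial delivery.  Had a LAST(tsn 102)
 * been present, the 'found' path would instead return the complete
 * message with MSG_EOR set.
 */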

/* Retrieve the next set of fragments of a partial message. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos, *last_frag, *first_frag;
        struct sctp_ulpevent *cevent;
        __u32 ctsn, next_tsn;
        int is_last;
        struct sctp_ulpevent *retval;

        /* The chunks are held in the reasm queue sorted by TSN.
         * Walk through the queue sequentially and look for the first
         * sequence of fragmented chunks.
         */

        if (skb_queue_empty(&ulpq->reasm))
                return NULL;

        last_frag = first_frag = NULL;
        retval = NULL;
        next_tsn = 0;
        is_last = 0;

        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        if (!first_frag)
                                return NULL;
                        goto done;
                case SCTP_DATA_MIDDLE_FRAG:
                        if (!first_frag) {
                                first_frag = pos;
                                next_tsn = ctsn + 1;
                                last_frag = pos;
                        } else if (next_tsn == ctsn) {
                                next_tsn++;
                                last_frag = pos;
                        } else
                                goto done;
                        break;
                case SCTP_DATA_LAST_FRAG:
                        if (!first_frag)
                                first_frag = pos;
                        else if (ctsn != next_tsn)
                                goto done;
                        last_frag = pos;
                        is_last = 1;
                        goto done;
                default:
                        return NULL;
                }
        }

        /* We have the reassembled event. There is no need to look
         * further.
         */
done:
        retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
                                        &ulpq->reasm, first_frag, last_frag);
        if (retval && is_last)
                retval->msg_flags |= MSG_EOR;

        return retval;
}

/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
                                             struct sctp_ulpevent *event)
{
        struct sctp_ulpevent *retval = NULL;

        /* Check if this is part of a fragmented message.  */
        if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
                event->msg_flags |= MSG_EOR;
                return event;
        }

        sctp_ulpq_store_reasm(ulpq, event);
        if (!ulpq->pd_mode)
                retval = sctp_ulpq_retrieve_reassembled(ulpq);
        else {
                __u32 ctsn, ctsnap;

                /* Do not even bother unless this is the next tsn to
                 * be delivered.
                 */
                ctsn = event->tsn;
                ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
                if (TSN_lte(ctsn, ctsnap))
                        retval = sctp_ulpq_retrieve_partial(ulpq);
        }

        return retval;
}

/* Retrieve the first part (sequential fragments) for partial delivery.  */
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos, *last_frag, *first_frag;
        struct sctp_ulpevent *cevent;
        __u32 ctsn, next_tsn;
        struct sctp_ulpevent *retval;

        /* The chunks are held in the reasm queue sorted by TSN.
         * Walk through the queue sequentially and look for a sequence of
         * fragmented chunks that start a datagram.
         */

        if (skb_queue_empty(&ulpq->reasm))
                return NULL;

        last_frag = first_frag = NULL;
        retval = NULL;
        next_tsn = 0;

        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        if (!first_frag) {
                                first_frag = pos;
                                next_tsn = ctsn + 1;
                                last_frag = pos;
                        } else
                                goto done;
                        break;

                case SCTP_DATA_MIDDLE_FRAG:
                        if (!first_frag)
                                return NULL;
                        if (ctsn == next_tsn) {
                                next_tsn++;
                                last_frag = pos;
                        } else
                                goto done;
                        break;

                case SCTP_DATA_LAST_FRAG:
                        if (!first_frag)
                                return NULL;
                        else
                                goto done;
                        break;

                default:
                        return NULL;
                }
        }

        /* We have the reassembled event. There is no need to look
         * further.
         */
done:
        retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
                                        &ulpq->reasm, first_frag, last_frag);
        return retval;
}

/*
 * Flush out stale fragments from the reassembly queue when processing
 * a Forward TSN.
 *
 * RFC 3758, Section 3.6
 *
 * After receiving and processing a FORWARD TSN, the data receiver MUST
 * take cautions in updating its re-assembly queue.  The receiver MUST
 * remove any partially reassembled message, which is still missing one
 * or more TSNs earlier than or equal to the new cumulative TSN point.
 * In the event that the receiver has invoked the partial delivery API,
 * a notification SHOULD also be generated to inform the upper layer API
 * that the message being partially delivered will NOT be completed.
 */
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *event;
        __u32 tsn;

        if (skb_queue_empty(&ulpq->reasm))
                return;

        skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
                event = sctp_skb2event(pos);
                tsn = event->tsn;

                /* Since the entire message must be abandoned by the
                 * sender (item A3 in Section 3.5, RFC 3758), we can
                 * free all fragments on the list that are less than
                 * or equal to ctsn_point
                 */
                if (TSN_lte(tsn, fwd_tsn)) {
                        __skb_unlink(pos, &ulpq->reasm);
                        sctp_ulpevent_free(event);
                } else
                        break;
        }
}
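
/* Example (hypothetical TSNs): if the reasm queue holds fragments with
 * TSNs 8, 9 and 12 and a FORWARD TSN moves the cumulative point to 10,
 * the walk frees 8 and 9 (TSN_lte against 10) and stops at 12, which may
 * still be completed by later arrivals.  The early break is safe because
 * the queue is kept sorted by TSN.
 */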

/*
 * Drain the reassembly queue.  If we just cleared partial delivery, it
 * is possible that the reassembly queue will contain already reassembled
 * messages.  Retrieve any such messages and give them to the user.
 */
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
        struct sctp_ulpevent *event = NULL;
        struct sk_buff_head temp;

        if (skb_queue_empty(&ulpq->reasm))
                return;

        while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
                /* Do ordering if needed.  */
                if (event->msg_flags & MSG_EOR) {
                        skb_queue_head_init(&temp);
                        __skb_queue_tail(&temp, sctp_event2skb(event));

                        event = sctp_ulpq_order(ulpq, event);
                }

                /* Send event to the ULP.  'event' is the
                 * sctp_ulpevent for the very first SKB on the 'temp' list.
                 */
                if (event)
                        sctp_ulpq_tail_event(ulpq, event);
        }
}

/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
                                       struct sctp_ulpevent *event)
{
        struct sk_buff_head *event_list;
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *cevent;
        struct sctp_stream *in;
        __u16 sid, csid, cssn;

        sid = event->stream;
        in  = &ulpq->asoc->ssnmap->in;

        event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

        /* We are holding the chunks by stream, by SSN.  */
        sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                /* Have we gone too far?  */
                if (csid > sid)
                        break;

                /* Have we not gone far enough?  */
                if (csid < sid)
                        continue;

                if (cssn != sctp_ssn_peek(in, sid))
                        break;

                /* Found it, so mark in the ssnmap. */
                sctp_ssn_next(in, sid);

                __skb_unlink(pos, &ulpq->lobby);

                /* Attach all gathered skbs to the event.  */
                __skb_queue_tail(event_list, pos);
        }
}

/* Helper function to store chunks needing ordering.  */
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
                                    struct sctp_ulpevent *event)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *cevent;
        __u16 sid, csid;
        __u16 ssn, cssn;

        pos = skb_peek_tail(&ulpq->lobby);
        if (!pos) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        sid = event->stream;
        ssn = event->ssn;

        cevent = (struct sctp_ulpevent *) pos->cb;
        csid = cevent->stream;
        cssn = cevent->ssn;
        if (sid > csid) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        if ((sid == csid) && SSN_lt(cssn, ssn)) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        /* Find the right place in this list.  We store them by
         * stream ID and then by SSN.
         */
        skb_queue_walk(&ulpq->lobby, pos) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                if (csid > sid)
                        break;
                if (csid == sid && SSN_lt(ssn, cssn))
                        break;
        }

        /* Insert before pos. */
        __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}

static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
                                             struct sctp_ulpevent *event)
{
        __u16 sid, ssn;
        struct sctp_stream *in;

        /* Check if this message needs ordering.  */
        if (SCTP_DATA_UNORDERED & event->msg_flags)
                return event;

        /* Note: The stream ID must be verified before this routine.  */
        sid = event->stream;
        ssn = event->ssn;
        in  = &ulpq->asoc->ssnmap->in;

        /* Is this the expected SSN for this stream ID?  */
        if (ssn != sctp_ssn_peek(in, sid)) {
                /* We've received something out of order, so find where it
                 * needs to be placed.  We order by stream and then by SSN.
                 */
                sctp_ulpq_store_ordered(ulpq, event);
                return NULL;
        }

        /* Mark that the next chunk has been found.  */
        sctp_ssn_next(in, sid);

        /* Go find any other chunks that were waiting for
         * ordering.
         */
        sctp_ulpq_retrieve_ordered(ulpq, event);

        return event;
}
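
/* Worked example (hypothetical stream state): suppose stream 3 expects
 * SSN 5.  If SSN 6 arrives first, sctp_ulpq_order() parks it in the
 * lobby and returns NULL.  When SSN 5 then arrives, it matches
 * sctp_ssn_peek(), the expected SSN advances to 6, and
 * sctp_ulpq_retrieve_ordered() pulls the parked SSN 6 onto the same
 * event list, so both messages reach the ULP in order in one pass.
 */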

/* Helper function to gather skbs that have possibly become
 * ordered by forward tsn skipping their dependencies.
 */
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *cevent;
        struct sctp_ulpevent *event;
        struct sctp_stream *in;
        struct sk_buff_head temp;
        struct sk_buff_head *lobby = &ulpq->lobby;
        __u16 csid, cssn;

        in  = &ulpq->asoc->ssnmap->in;

        /* We are holding the chunks by stream, by SSN.  */
        skb_queue_head_init(&temp);
        event = NULL;
        sctp_skb_for_each(pos, lobby, tmp) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                /* Have we gone too far?  */
                if (csid > sid)
                        break;

                /* Have we not gone far enough?  */
                if (csid < sid)
                        continue;

                /* see if this ssn has been marked by skipping */
                if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
                        break;

                __skb_unlink(pos, lobby);
                if (!event)
                        /* Create a temporary list to collect chunks on.  */
                        event = sctp_skb2event(pos);

                /* Attach all gathered skbs to the event.  */
                __skb_queue_tail(&temp, pos);
        }

        /* If we didn't reap any data, see if the next expected SSN
         * is next on the queue and if so, use that.
         */
        if (event == NULL && pos != (struct sk_buff *)lobby) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                if (csid == sid && cssn == sctp_ssn_peek(in, csid)) {
                        sctp_ssn_next(in, csid);
                        __skb_unlink(pos, lobby);
                        __skb_queue_tail(&temp, pos);
                        event = sctp_skb2event(pos);
                }
        }

        /* Send event to the ULP.  'event' is the sctp_ulpevent for the
         * very first SKB on the 'temp' list.
         */
        if (event) {
                /* see if we have more ordered that we can deliver */
                sctp_ulpq_retrieve_ordered(ulpq, event);
                sctp_ulpq_tail_event(ulpq, event);
        }
}

/* Skip over an SSN. This is used during the processing of a
 * Forward TSN chunk to skip over the abandoned ordered data.
 */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
        struct sctp_stream *in;

        /* Note: The stream ID must be verified before this routine.  */
        in  = &ulpq->asoc->ssnmap->in;

        /* Is this an old SSN?  If so ignore. */
        if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
                return;

        /* Mark that we are no longer expecting this SSN or lower. */
        sctp_ssn_skip(in, sid, ssn);

        /* Go find any other chunks that were waiting for
         * ordering and deliver them if needed.
         */
        sctp_ulpq_reap_ordered(ulpq, sid);
}
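
/* Example (hypothetical values): if stream 2 expects SSN 4 and a
 * FORWARD TSN indicates the sender abandoned everything through SSN 6,
 * sctp_ssn_skip() advances the expected SSN past 6, so
 * sctp_ulpq_reap_ordered() can now release a parked SSN 5 or 6 (their
 * cssn is below the new peek value) plus anything that becomes
 * deliverable after them.
 */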

static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
                                   struct sk_buff_head *list, __u16 needed)
{
        __u16 freed = 0;
        __u32 tsn, last_tsn;
        struct sk_buff *skb, *flist, *last;
        struct sctp_ulpevent *event;
        struct sctp_tsnmap *tsnmap;

        tsnmap = &ulpq->asoc->peer.tsn_map;

        while ((skb = skb_peek_tail(list)) != NULL) {
                event = sctp_skb2event(skb);
                tsn = event->tsn;

                /* Don't renege below the Cumulative TSN ACK Point. */
                if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
                        break;

                /* Events in ordering queue may have multiple fragments
                 * corresponding to additional TSNs.  Sum the total
                 * freed space; find the last TSN.
                 */
                freed += skb_headlen(skb);
                flist = skb_shinfo(skb)->frag_list;
                for (last = flist; flist; flist = flist->next) {
                        last = flist;
                        freed += skb_headlen(last);
                }
                if (last)
                        last_tsn = sctp_skb2event(last)->tsn;
                else
                        last_tsn = tsn;

                /* Unlink the event, then renege all applicable TSNs. */
                __skb_unlink(skb, list);
                sctp_ulpevent_free(event);
                while (TSN_lte(tsn, last_tsn)) {
                        sctp_tsnmap_renege(tsnmap, tsn);
                        tsn++;
                }
                if (freed >= needed)
                        return freed;
        }

        return freed;
}
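
/* A note on the direction of this walk (as coded above): reneging starts
 * from the tail of the list, i.e. the newest data, and never touches
 * anything at or below the Cumulative TSN ACK Point, since those TSNs
 * have already been committed to the peer via the cumulative ack.  For
 * an ordering-queue entry that already carries a frag_list, every
 * fragment's TSN is reneged, not just the head's.
 */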

/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
        return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
        return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}

/* Partially deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
                                gfp_t gfp)
{
        struct sctp_ulpevent *event;
        struct sctp_association *asoc;
        struct sctp_sock *sp;
        __u32 ctsn;
        struct sk_buff *skb;

        asoc = ulpq->asoc;
        sp = sctp_sk(asoc->base.sk);

        /* If the association is already in Partial Delivery mode
         * we have nothing to do.
         */
        if (ulpq->pd_mode)
                return;

        /* Data must be at or below the Cumulative TSN ACK Point to
         * start partial delivery.
         */
        skb = skb_peek(&asoc->ulpq.reasm);
        if (skb != NULL) {
                ctsn = sctp_skb2event(skb)->tsn;
                if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
                        return;
        }

        /* If the user enabled the fragment interleave socket option,
         * multiple associations can enter partial delivery.
         * Otherwise, we can only enter partial delivery if the
         * socket is not in partial delivery mode.
         */
        if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
                /* Is partial delivery possible?  */
                event = sctp_ulpq_retrieve_first(ulpq);
                /* Send event to the ULP.  */
                if (event) {
                        sctp_ulpq_tail_event(ulpq, event);
                        sctp_ulpq_set_pd(ulpq);
                        return;
                }
        }
}

/* Renege some packets to make room for an incoming chunk.  */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                      gfp_t gfp)
{
        struct sctp_association *asoc;
        __u16 needed, freed;

        asoc = ulpq->asoc;

        if (chunk) {
                needed = ntohs(chunk->chunk_hdr->length);
                needed -= sizeof(sctp_data_chunk_t);
        } else
                needed = SCTP_DEFAULT_MAXWINDOW;

        freed = 0;

        if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
                freed = sctp_ulpq_renege_order(ulpq, needed);
                if (freed < needed)
                        freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
        }
        /* If able to free enough room, accept this chunk. */
        if (chunk && (freed >= needed)) {
                int retval;

                retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
                /*
                 * Enter partial delivery if chunk has not been
                 * delivered; otherwise, drain the reassembly queue.
                 */
                if (retval <= 0)
                        sctp_ulpq_partial_delivery(ulpq, gfp);
                else if (retval == 1)
                        sctp_ulpq_reasm_drain(ulpq);
        }

        sk_mem_reclaim(asoc->base.sk);
}

/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
        struct sctp_ulpevent *ev = NULL;
        struct sock *sk;

        if (!ulpq->pd_mode)
                return;

        sk = ulpq->asoc->base.sk;
        if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
                                       &sctp_sk(sk)->subscribe))
                ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
                                              SCTP_PARTIAL_DELIVERY_ABORTED,
                                              gfp);
        if (ev)
                __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

        /* If there is data waiting, send it up the socket now. */
        if (sctp_ulpq_clear_pd(ulpq) || ev)
                sk->sk_data_ready(sk, 0);
}