linux/net/sctp/ulpqueue.c
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers.  */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
                                             struct sctp_ulpevent *);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
                                             struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);

/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory.  */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
                                 struct sctp_association *asoc)
{
        memset(ulpq, 0, sizeof(struct sctp_ulpq));

        ulpq->asoc = asoc;
        skb_queue_head_init(&ulpq->reasm);
        skb_queue_head_init(&ulpq->lobby);
        ulpq->pd_mode  = 0;

        return ulpq;
}


/* Flush the reassembly and ordering queues.  */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
        struct sk_buff *skb;
        struct sctp_ulpevent *event;

        while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
                event = sctp_skb2event(skb);
                sctp_ulpevent_free(event);
        }

        while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
                event = sctp_skb2event(skb);
                sctp_ulpevent_free(event);
        }
}

/* Dispose of a ulpqueue.  */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
        sctp_ulpq_flush(ulpq);
}

/* Process an incoming DATA chunk.  */
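/* Returns 1 if a complete message was delivered to the ULP with MSG_EOR
 * set, 0 otherwise, or -ENOMEM if no event could be allocated.
 */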
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                        gfp_t gfp)
{
        struct sk_buff_head temp;
        struct sctp_ulpevent *event;
        int event_eor = 0;

        /* Create an event from the incoming chunk. */
        event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
        if (!event)
                return -ENOMEM;

        /* Do reassembly if needed.  */
        event = sctp_ulpq_reasm(ulpq, event);

        /* Do ordering if needed.  */
        if (event && (event->msg_flags & MSG_EOR)) {
                /* Create a temporary list to collect chunks on.  */
                skb_queue_head_init(&temp);
                __skb_queue_tail(&temp, sctp_event2skb(event));

                event = sctp_ulpq_order(ulpq, event);
        }

        /* Send event to the ULP.  'event' is the sctp_ulpevent for the
         * very first SKB on the 'temp' list.
         */
        if (event) {
                event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
                sctp_ulpq_tail_event(ulpq, event);
        }

        return event_eor;
}

/* Clear the partial delivery mode for this socket.  Note: This
 * assumes that no association is currently in partial delivery mode.
 */
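/* Returns 1 if the pd_lobby was flushed to the socket receive queue in
 * one shot (no association remains in partial delivery), 0 otherwise.
 */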
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
        struct sctp_sock *sp = sctp_sk(sk);

        if (atomic_dec_and_test(&sp->pd_mode)) {
                /* This means there are no other associations in PD, so
                 * we can go ahead and clear out the lobby in one shot.
                 */
                if (!skb_queue_empty(&sp->pd_lobby)) {
                        struct list_head *list;
                        sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
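                        /* The pd_lobby head still points at the skbs we
                         * just spliced away (a sk_buff_head starts with
                         * the same next/prev pair as a list_head), so
                         * reinitialize it to make the queue empty again.
                         */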
                        list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
                        INIT_LIST_HEAD(list);
                        return 1;
                }
        } else {
                /* There are other associations in PD, so we only need to
                 * pull stuff out of the lobby that belongs to the
                 * association that is exiting PD (all of its notifications
                 * are posted here).
                 */
                if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
                        struct sk_buff *skb, *tmp;
                        struct sctp_ulpevent *event;

                        sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
                                event = sctp_skb2event(skb);
                                if (event->asoc == asoc) {
                                        __skb_unlink(skb, &sp->pd_lobby);
                                        __skb_queue_tail(&sk->sk_receive_queue,
                                                         skb);
                                }
                        }
                }
        }

        return 0;
}

/* Set the pd_mode on the socket and ulpq */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
        struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

        atomic_inc(&sp->pd_mode);
        ulpq->pd_mode = 1;
}

/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
        ulpq->pd_mode = 0;
        sctp_ulpq_reasm_drain(ulpq);
        return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}

/* If the SKB of 'event' is on a list, it is the first such member
 * of that list.
 */
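/* Returns 1 if the event (and any list of events queued behind it) was
 * queued for delivery to the ULP, 0 if it was freed instead.
 */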
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
        struct sock *sk = ulpq->asoc->base.sk;
        struct sk_buff_head *queue, *skb_list;
        struct sk_buff *skb = sctp_event2skb(event);
        int clear_pd = 0;

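        /* If the caller queued this event's skb on a temporary list, the
         * first skb's prev pointer points back at that list head, which
         * lets us recover the whole list here.  If the skb is not on a
         * list (prev is NULL), only this single event is delivered.
         */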
        skb_list = (struct sk_buff_head *) skb->prev;

        /* If the socket is just going to throw this away, do not
         * even try to deliver it.
         */
        if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
                goto out_free;

        /* Check if the user wishes to receive this event.  */
        if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
                goto out_free;

        /* If we are in partial delivery mode, post to the lobby until
         * partial delivery is cleared, unless, of course, _this_
         * association is the cause of the partial delivery.
         */
        if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
                queue = &sk->sk_receive_queue;
        } else {
                if (ulpq->pd_mode) {
                        /* If the association is in partial delivery, we
                         * need to finish delivering the partially processed
                         * packet before passing any other data.  This is
                         * because we don't truly support stream interleaving.
                         */
                        if ((event->msg_flags & MSG_NOTIFICATION) ||
                            (SCTP_DATA_NOT_FRAG ==
                                    (event->msg_flags & SCTP_DATA_FRAG_MASK))) {
                                queue = &sctp_sk(sk)->pd_lobby;
                        } else {
                                clear_pd = event->msg_flags & MSG_EOR;
                                queue = &sk->sk_receive_queue;
                        }
                } else {
                        /*
                         * If fragment interleave is enabled, we
                         * can queue this to the receive queue instead
                         * of the lobby.
                         */
                        if (sctp_sk(sk)->frag_interleave)
                                queue = &sk->sk_receive_queue;
                        else
                                queue = &sctp_sk(sk)->pd_lobby;
                }
        }

        /* If we are harvesting multiple skbs they will be
         * collected on a list.
         */
        if (skb_list)
                sctp_skb_list_tail(skb_list, queue);
        else
                __skb_queue_tail(queue, skb);

        /* Did we just complete partial delivery and need to get
         * rolling again?  Move pending data to the receive
         * queue.
         */
        if (clear_pd)
                sctp_ulpq_clear_pd(ulpq);

        if (queue == &sk->sk_receive_queue)
                sk->sk_data_ready(sk, 0);
        return 1;

out_free:
        if (skb_list)
                sctp_queue_purge_ulpevents(skb_list);
        else
                sctp_ulpevent_free(event);

        return 0;
}

/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled.  */
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
                                         struct sctp_ulpevent *event)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *cevent;
        __u32 tsn, ctsn;

        tsn = event->tsn;

        /* See if it belongs at the end. */
        pos = skb_peek_tail(&ulpq->reasm);
        if (!pos) {
                __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
                return;
        }

        /* Short circuit: if it belongs after the current tail, just
         * append it there.
         */
        cevent = sctp_skb2event(pos);
        ctsn = cevent->tsn;
        if (TSN_lt(ctsn, tsn)) {
                __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
                return;
        }

        /* Find the right place in this list. We store them by TSN.  */
        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                if (TSN_lt(tsn, ctsn))
                        break;
        }

        /* Insert before pos. */
        __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}

/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skbs
 * as stored in the reassembly queue.  The skbs may be non-linear if the
 * SCTP payload was fragmented on the way and IP had to reassemble them.
 * We add the rest of the skbs to the first skb's frag_list.
 */
static struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
        struct sk_buff_head *queue, struct sk_buff *f_frag,
        struct sk_buff *l_frag)
{
        struct sk_buff *pos;
        struct sk_buff *new = NULL;
        struct sctp_ulpevent *event;
        struct sk_buff *pnext, *last;
        struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

        /* Store the pointer to the 2nd skb */
        if (f_frag == l_frag)
                pos = NULL;
        else
                pos = f_frag->next;

        /* Get the last skb in the f_frag's frag_list if present. */
        for (last = list; list; last = list, list = list->next)
                ;

        /* Add the list of remaining fragments to the first fragment's
         * frag_list.
         */
        if (last)
                last->next = pos;
        else {
                if (skb_cloned(f_frag)) {
                        /* This is a cloned skb, we can't just modify
                         * the frag_list.  We need a new skb to do that.
                         * Instead of calling skb_unshare(), we'll do it
                         * ourselves since we need to delay the free.
                         */
                        new = skb_copy(f_frag, GFP_ATOMIC);
                        if (!new)
                                return NULL;    /* try again later */

                        sctp_skb_set_owner_r(new, f_frag->sk);

                        skb_shinfo(new)->frag_list = pos;
                } else
                        skb_shinfo(f_frag)->frag_list = pos;
        }

        /* Remove the first fragment from the reassembly queue.  */
        __skb_unlink(f_frag, queue);

        /* If we did unshare, then free the old skb and re-assign. */
        if (new) {
                kfree_skb(f_frag);
                f_frag = new;
        }

        while (pos) {
                pnext = pos->next;

                /* Update the len and data_len fields of the first fragment. */
                f_frag->len += pos->len;
                f_frag->data_len += pos->len;

                /* Remove the fragment from the reassembly queue.  */
                __skb_unlink(pos, queue);

                /* Break if we have reached the last fragment.  */
                if (pos == l_frag)
                        break;
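                /* Restore the forward link that __skb_unlink() cleared
                 * so the frag_list chain stays intact; the last fragment
                 * keeps a NULL next and terminates the list.
                 */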
                pos->next = pnext;
                pos = pnext;
        }

        event = sctp_skb2event(f_frag);
        SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);

        return event;
}


/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in a SCTP datagram and return the corresponding event.
 */
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *cevent;
        struct sk_buff *first_frag = NULL;
        __u32 ctsn, next_tsn;
        struct sctp_ulpevent *retval = NULL;
        struct sk_buff *pd_first = NULL;
        struct sk_buff *pd_last = NULL;
        size_t pd_len = 0;
        struct sctp_association *asoc;
        u32 pd_point;

        /* Initialized to 0 just to avoid compiler warning message.  Will
         * never be used with this value. It is referenced only after it
         * is set when we find the first fragment of a message.
         */
        next_tsn = 0;

        /* The chunks are held in the reasm queue sorted by TSN.
         * Walk through the queue sequentially and look for a sequence of
         * fragmented chunks that complete a datagram.
         * 'first_frag' and 'next_tsn' are reset when we find a chunk which
         * is the first fragment of a datagram. Once these 2 fields are set
         * we expect to find the remaining middle fragments and the last
         * fragment in order. If not, first_frag is reset to NULL and we
         * start the next pass when we find another first fragment.
         *
         * There is a potential to do partial delivery if the user sets the
         * SCTP_PARTIAL_DELIVERY_POINT option. Let's count some things here
         * to see if we can do PD.
         */
        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        /* If this "FIRST_FRAG" is the first
                         * element in the queue, then count it towards
                         * possible PD.
                         */
                        if (pos == ulpq->reasm.next) {
                                pd_first = pos;
                                pd_last = pos;
                                pd_len = pos->len;
                        } else {
                                pd_first = NULL;
                                pd_last = NULL;
                                pd_len = 0;
                        }

                        first_frag = pos;
                        next_tsn = ctsn + 1;
                        break;

                case SCTP_DATA_MIDDLE_FRAG:
                        if ((first_frag) && (ctsn == next_tsn)) {
                                next_tsn++;
                                if (pd_first) {
                                        pd_last = pos;
                                        pd_len += pos->len;
                                }
                        } else
                                first_frag = NULL;
                        break;

                case SCTP_DATA_LAST_FRAG:
                        if (first_frag && (ctsn == next_tsn))
                                goto found;
                        else
                                first_frag = NULL;
                        break;
                }
        }

        asoc = ulpq->asoc;
        if (pd_first) {
                /* Make sure we can enter partial delivery.
                 * We can trigger partial delivery only if fragment
                 * interleave is set, or the socket is not already
                 * in partial delivery.
                 */
                if (!sctp_sk(asoc->base.sk)->frag_interleave &&
                    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
                        goto done;

                cevent = sctp_skb2event(pd_first);
                pd_point = sctp_sk(asoc->base.sk)->pd_point;
                if (pd_point && pd_point <= pd_len) {
                        retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
                                                             &ulpq->reasm,
                                                             pd_first,
                                                             pd_last);
                        if (retval)
                                sctp_ulpq_set_pd(ulpq);
                }
        }
done:
        return retval;
found:
        retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
                                             &ulpq->reasm, first_frag, pos);
        if (retval)
                retval->msg_flags |= MSG_EOR;
        goto done;
}

/* Retrieve the next set of fragments of a partial message. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos, *last_frag, *first_frag;
        struct sctp_ulpevent *cevent;
        __u32 ctsn, next_tsn;
        int is_last;
        struct sctp_ulpevent *retval;

        /* The chunks are held in the reasm queue sorted by TSN.
         * Walk through the queue sequentially and look for the first
         * sequence of fragmented chunks.
         */

        if (skb_queue_empty(&ulpq->reasm))
                return NULL;

        last_frag = first_frag = NULL;
        retval = NULL;
        next_tsn = 0;
        is_last = 0;

        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
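                        /* A new message starts here, so the continuation
                         * we are collecting ends.  Deliver whatever has
                         * been gathered, or nothing if no fragments have
                         * been collected yet.
                         */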
                        if (!first_frag)
                                return NULL;
                        goto done;
                case SCTP_DATA_MIDDLE_FRAG:
                        if (!first_frag) {
                                first_frag = pos;
                                next_tsn = ctsn + 1;
                                last_frag = pos;
                        } else if (next_tsn == ctsn) {
                                next_tsn++;
                                last_frag = pos;
                        } else
                                goto done;
                        break;
                case SCTP_DATA_LAST_FRAG:
                        if (!first_frag)
                                first_frag = pos;
                        else if (ctsn != next_tsn)
                                goto done;
                        last_frag = pos;
                        is_last = 1;
                        goto done;
                default:
                        return NULL;
                }
        }

        /* We have the reassembled event. There is no need to look
         * further.
         */
done:
        retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
                                        &ulpq->reasm, first_frag, last_frag);
        if (retval && is_last)
                retval->msg_flags |= MSG_EOR;

        return retval;
}


/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
                                                struct sctp_ulpevent *event)
{
        struct sctp_ulpevent *retval = NULL;

        /* Check if this is part of a fragmented message.  */
        if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
                event->msg_flags |= MSG_EOR;
                return event;
        }

        sctp_ulpq_store_reasm(ulpq, event);
        if (!ulpq->pd_mode)
                retval = sctp_ulpq_retrieve_reassembled(ulpq);
        else {
                __u32 ctsn, ctsnap;

                /* Do not even bother unless this is the next TSN to
                 * be delivered.
                 */
                ctsn = event->tsn;
                ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
                if (TSN_lte(ctsn, ctsnap))
                        retval = sctp_ulpq_retrieve_partial(ulpq);
        }

        return retval;
}

/* Retrieve the first part (sequential fragments) for partial delivery.  */
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos, *last_frag, *first_frag;
        struct sctp_ulpevent *cevent;
        __u32 ctsn, next_tsn;
        struct sctp_ulpevent *retval;

        /* The chunks are held in the reasm queue sorted by TSN.
         * Walk through the queue sequentially and look for a sequence of
         * fragmented chunks that start a datagram.
         */

        if (skb_queue_empty(&ulpq->reasm))
                return NULL;

        last_frag = first_frag = NULL;
        retval = NULL;
        next_tsn = 0;

        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        if (!first_frag) {
                                first_frag = pos;
                                next_tsn = ctsn + 1;
                                last_frag = pos;
                        } else
                                goto done;
                        break;

                case SCTP_DATA_MIDDLE_FRAG:
                        if (!first_frag)
                                return NULL;
                        if (ctsn == next_tsn) {
                                next_tsn++;
                                last_frag = pos;
                        } else
                                goto done;
                        break;

                case SCTP_DATA_LAST_FRAG:
                        if (!first_frag)
                                return NULL;
                        else
                                goto done;
                        break;

                default:
                        return NULL;
                }
        }

        /* We have the reassembled event. There is no need to look
         * further.
         */
done:
        retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
                                        &ulpq->reasm, first_frag, last_frag);
        return retval;
}

/*
 * Flush out stale fragments from the reassembly queue when processing
 * a Forward TSN.
 *
 * RFC 3758, Section 3.6
 *
 * After receiving and processing a FORWARD TSN, the data receiver MUST
 * take cautions in updating its re-assembly queue.  The receiver MUST
 * remove any partially reassembled message, which is still missing one
 * or more TSNs earlier than or equal to the new cumulative TSN point.
 * In the event that the receiver has invoked the partial delivery API,
 * a notification SHOULD also be generated to inform the upper layer API
 * that the message being partially delivered will NOT be completed.
 */
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *event;
        __u32 tsn;

        if (skb_queue_empty(&ulpq->reasm))
                return;

        skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
                event = sctp_skb2event(pos);
                tsn = event->tsn;

                /* Since the entire message must be abandoned by the
                 * sender (item A3 in Section 3.5, RFC 3758), we can
                 * free all fragments on the list that are less than
                 * or equal to ctsn_point.
                 */
                if (TSN_lte(tsn, fwd_tsn)) {
                        __skb_unlink(pos, &ulpq->reasm);
                        sctp_ulpevent_free(event);
                } else
                        break;
        }
}

/*
 * Drain the reassembly queue.  If we just cleared partial delivery, it
 * is possible that the reassembly queue will contain already reassembled
 * messages.  Retrieve any such messages and give them to the user.
 */
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
        struct sctp_ulpevent *event = NULL;
        struct sk_buff_head temp;

        if (skb_queue_empty(&ulpq->reasm))
                return;

        while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
                /* Do ordering if needed.  */
                if (event && (event->msg_flags & MSG_EOR)) {
                        skb_queue_head_init(&temp);
                        __skb_queue_tail(&temp, sctp_event2skb(event));

                        event = sctp_ulpq_order(ulpq, event);
                }

                /* Send event to the ULP.  'event' is the sctp_ulpevent
                 * for the very first SKB on the 'temp' list.
                 */
                if (event)
                        sctp_ulpq_tail_event(ulpq, event);
        }
}


/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
                                              struct sctp_ulpevent *event)
{
        struct sk_buff_head *event_list;
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *cevent;
        struct sctp_stream *in;
        __u16 sid, csid, cssn;

        sid = event->stream;
        in  = &ulpq->asoc->ssnmap->in;

        event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

        /* We are holding the chunks by stream, by SSN.  */
        sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                /* Have we gone too far?  */
                if (csid > sid)
                        break;

                /* Have we not gone far enough?  */
                if (csid < sid)
                        continue;

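                /* Stop at the first gap: anything that is not the next
                 * expected SSN is still blocked.
                 */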
                if (cssn != sctp_ssn_peek(in, sid))
                        break;

                /* Found it, so mark in the ssnmap. */
                sctp_ssn_next(in, sid);

                __skb_unlink(pos, &ulpq->lobby);

                /* Attach all gathered skbs to the event.  */
                __skb_queue_tail(event_list, pos);
        }
}

/* Helper function to store chunks needing ordering.  */
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
                                           struct sctp_ulpevent *event)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *cevent;
        __u16 sid, csid;
        __u16 ssn, cssn;

        pos = skb_peek_tail(&ulpq->lobby);
        if (!pos) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        sid = event->stream;
        ssn = event->ssn;

        cevent = (struct sctp_ulpevent *) pos->cb;
        csid = cevent->stream;
        cssn = cevent->ssn;
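
        /* If the new event sorts after the current tail, just append it. */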
        if (sid > csid) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        if ((sid == csid) && SSN_lt(cssn, ssn)) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        /* Find the right place in this list.  We store them by
         * stream ID and then by SSN.
         */
        skb_queue_walk(&ulpq->lobby, pos) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                if (csid > sid)
                        break;
                if (csid == sid && SSN_lt(ssn, cssn))
                        break;
        }

        /* Insert before pos. */
        __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}

static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
                                             struct sctp_ulpevent *event)
{
        __u16 sid, ssn;
        struct sctp_stream *in;

        /* Check if this message needs ordering.  */
        if (SCTP_DATA_UNORDERED & event->msg_flags)
                return event;

        /* Note: The stream ID must be verified before this routine.  */
        sid = event->stream;
        ssn = event->ssn;
        in  = &ulpq->asoc->ssnmap->in;

        /* Is this the expected SSN for this stream ID?  */
        if (ssn != sctp_ssn_peek(in, sid)) {
                /* We've received something out of order, so find where it
                 * needs to be placed.  We order by stream and then by SSN.
                 */
                sctp_ulpq_store_ordered(ulpq, event);
                return NULL;
        }

        /* Mark that the next chunk has been found.  */
        sctp_ssn_next(in, sid);

        /* Go find any other chunks that were waiting for
         * ordering.
         */
        sctp_ulpq_retrieve_ordered(ulpq, event);

        return event;
}

/* Helper function to gather skbs that have possibly become
 * ordered because a Forward TSN skipped over their dependencies.
 */
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *cevent;
        struct sctp_ulpevent *event;
        struct sctp_stream *in;
        struct sk_buff_head temp;
        struct sk_buff_head *lobby = &ulpq->lobby;
        __u16 csid, cssn;

        in  = &ulpq->asoc->ssnmap->in;

        /* We are holding the chunks by stream, by SSN.  */
        skb_queue_head_init(&temp);
        event = NULL;
        sctp_skb_for_each(pos, lobby, tmp) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                /* Have we gone too far?  */
                if (csid > sid)
                        break;

                /* Have we not gone far enough?  */
                if (csid < sid)
                        continue;

                /* See if this SSN has been marked by skipping. */
                if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
                        break;

                __skb_unlink(pos, lobby);
                if (!event)
                        /* Remember the first skb as the event to
                         * deliver; the rest are gathered behind it.
                         */
                        event = sctp_skb2event(pos);

                /* Attach all gathered skbs to the event.  */
                __skb_queue_tail(&temp, pos);
        }

        /* If we didn't reap any data, see if the next expected SSN
         * is next on the queue and if so, use that.
         */
        if (event == NULL && pos != (struct sk_buff *)lobby) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                if (csid == sid && cssn == sctp_ssn_peek(in, csid)) {
                        sctp_ssn_next(in, csid);
                        __skb_unlink(pos, lobby);
                        __skb_queue_tail(&temp, pos);
                        event = sctp_skb2event(pos);
                }
        }

        /* Send event to the ULP.  'event' is the sctp_ulpevent for the
         * very first SKB on the 'temp' list.
         */
        if (event) {
                /* See if we have more ordered that we can deliver. */
                sctp_ulpq_retrieve_ordered(ulpq, event);
                sctp_ulpq_tail_event(ulpq, event);
        }
}

/* Skip over an SSN.  This is used during the processing of a
 * Forward TSN chunk to skip over the abandoned ordered data.
 */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
        struct sctp_stream *in;

        /* Note: The stream ID must be verified before this routine.  */
        in  = &ulpq->asoc->ssnmap->in;

        /* Is this an old SSN?  If so ignore. */
        if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
                return;

        /* Mark that we are no longer expecting this SSN or lower. */
        sctp_ssn_skip(in, sid, ssn);

        /* Go find any other chunks that were waiting for
         * ordering and deliver them if needed.
         */
        sctp_ulpq_reap_ordered(ulpq, sid);
}

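/* Renege events from the tail of 'list' until 'needed' bytes have been
 * freed, never reneging at or below the Cumulative TSN ACK Point.
 * Returns the number of bytes actually freed.
 */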
static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
                struct sk_buff_head *list, __u16 needed)
{
        __u16 freed = 0;
        __u32 tsn, last_tsn;
        struct sk_buff *skb, *flist, *last;
        struct sctp_ulpevent *event;
        struct sctp_tsnmap *tsnmap;

        tsnmap = &ulpq->asoc->peer.tsn_map;

        while ((skb = skb_peek_tail(list)) != NULL) {
                event = sctp_skb2event(skb);
                tsn = event->tsn;

                /* Don't renege below the Cumulative TSN ACK Point. */
                if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
                        break;

                /* Events in ordering queue may have multiple fragments
                 * corresponding to additional TSNs.  Sum the total
                 * freed space; find the last TSN.
                 */
                freed += skb_headlen(skb);
                flist = skb_shinfo(skb)->frag_list;
                for (last = flist; flist; flist = flist->next) {
                        last = flist;
                        freed += skb_headlen(last);
                }
                if (last)
                        last_tsn = sctp_skb2event(last)->tsn;
                else
                        last_tsn = tsn;

                /* Unlink the event, then renege all applicable TSNs. */
                __skb_unlink(skb, list);
                sctp_ulpevent_free(event);
                while (TSN_lte(tsn, last_tsn)) {
                        sctp_tsnmap_renege(tsnmap, tsn);
                        tsn++;
                }
                if (freed >= needed)
                        return freed;
        }

        return freed;
}

/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
        return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
        return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}

/* Partially deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
                                gfp_t gfp)
{
        struct sctp_ulpevent *event;
        struct sctp_association *asoc;
        struct sctp_sock *sp;
        __u32 ctsn;
        struct sk_buff *skb;

        asoc = ulpq->asoc;
        sp = sctp_sk(asoc->base.sk);

        /* If the association is already in Partial Delivery mode
         * we have nothing to do.
         */
        if (ulpq->pd_mode)
                return;

        /* Data must be at or below the Cumulative TSN ACK Point to
         * start partial delivery.
         */
        skb = skb_peek(&asoc->ulpq.reasm);
        if (skb != NULL) {
                ctsn = sctp_skb2event(skb)->tsn;
                if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
                        return;
        }

        /* If the user enabled the fragment interleave socket option,
         * multiple associations can enter partial delivery.
         * Otherwise, we can only enter partial delivery if the
         * socket is not in partial delivery mode.
         */
        if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
                /* Is partial delivery possible?  */
                event = sctp_ulpq_retrieve_first(ulpq);
                /* Send event to the ULP.  */
                if (event) {
                        sctp_ulpq_tail_event(ulpq, event);
                        sctp_ulpq_set_pd(ulpq);
                        return;
                }
        }
}

/* Renege some packets to make room for an incoming chunk.  */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                      gfp_t gfp)
{
        struct sctp_association *asoc;
        __u16 needed, freed;

        asoc = ulpq->asoc;

        if (chunk) {
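                /* We need room for the chunk's payload: the chunk
                 * length less the DATA chunk header.
                 */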
                needed = ntohs(chunk->chunk_hdr->length);
                needed -= sizeof(sctp_data_chunk_t);
        } else
                needed = SCTP_DEFAULT_MAXWINDOW;

        freed = 0;

        if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
                freed = sctp_ulpq_renege_order(ulpq, needed);
                if (freed < needed)
                        freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
        }
        /* If able to free enough room, accept this chunk. */
        if (chunk && (freed >= needed)) {
                int retval;
                retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
                /*
                 * Enter partial delivery if chunk has not been
                 * delivered; otherwise, drain the reassembly queue.
                 */
                if (retval <= 0)
                        sctp_ulpq_partial_delivery(ulpq, gfp);
                else if (retval == 1)
                        sctp_ulpq_reasm_drain(ulpq);
        }

        sk_mem_reclaim(asoc->base.sk);
}

/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
        struct sctp_ulpevent *ev = NULL;
        struct sock *sk;

        if (!ulpq->pd_mode)
                return;

        sk = ulpq->asoc->base.sk;
        if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
                                       &sctp_sk(sk)->subscribe))
                ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
                                              SCTP_PARTIAL_DELIVERY_ABORTED,
                                              gfp);
        if (ev)
                __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

        /* If there is data waiting, send it up the socket now. */
        if (sctp_ulpq_clear_pd(ulpq) || ev)
                sk->sk_data_ready(sk, 0);
}