linux/net/sctp/ulpqueue.c
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers.  */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
                                             struct sctp_ulpevent *);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
                                             struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);

/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory.  */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
                                 struct sctp_association *asoc)
{
        memset(ulpq, 0, sizeof(struct sctp_ulpq));

        ulpq->asoc = asoc;
        skb_queue_head_init(&ulpq->reasm);
        skb_queue_head_init(&ulpq->reasm_uo);
        skb_queue_head_init(&ulpq->lobby);
        ulpq->pd_mode  = 0;

        return ulpq;
}

/* Flush the reassembly and ordering queues.  */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
        struct sk_buff *skb;
        struct sctp_ulpevent *event;

        while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
                event = sctp_skb2event(skb);
                sctp_ulpevent_free(event);
        }

        while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
                event = sctp_skb2event(skb);
                sctp_ulpevent_free(event);
        }

        while ((skb = __skb_dequeue(&ulpq->reasm_uo)) != NULL) {
                event = sctp_skb2event(skb);
                sctp_ulpevent_free(event);
        }
}

/* Dispose of a ulpqueue.  */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
        sctp_ulpq_flush(ulpq);
}
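
/* Editor's note: a minimal usage sketch, not compiled.  The ulpq is
 * embedded in struct sctp_association; the owner initializes it at
 * association setup and frees it at teardown, roughly as
 * sctp_association_init() and sctp_association_free() do.  The helper
 * name below is hypothetical.
 */
#if 0
static void example_ulpq_lifetime(struct sctp_association *asoc)
{
        /* Initialization zeroes the struct and preps all three queues;
         * it cannot fail and returns the queue it was given.
         */
        sctp_ulpq_init(&asoc->ulpq, asoc);

        /* ... receive traffic ... */

        /* Teardown frees every event still parked on reasm/reasm_uo/lobby. */
        sctp_ulpq_free(&asoc->ulpq);
}
#endif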

/* Process an incoming DATA chunk.  */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                        gfp_t gfp)
{
        struct sk_buff_head temp;
        struct sctp_ulpevent *event;
        int event_eor = 0;

        /* Create an event from the incoming chunk. */
        event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
        if (!event)
                return -ENOMEM;

        event->ssn = ntohs(chunk->subh.data_hdr->ssn);
        event->ppid = chunk->subh.data_hdr->ppid;

        /* Do reassembly if needed.  */
        event = sctp_ulpq_reasm(ulpq, event);

        /* Do ordering if needed.  */
        if ((event) && (event->msg_flags & MSG_EOR)) {
                /* Create a temporary list to collect chunks on.  */
                skb_queue_head_init(&temp);
                __skb_queue_tail(&temp, sctp_event2skb(event));

                event = sctp_ulpq_order(ulpq, event);
        }

        /* Send the event to the ULP.  'event' is the sctp_ulpevent for
         * the very first SKB on the 'temp' list.
         */
        if (event) {
                event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
                sctp_ulpq_tail_event(ulpq, event);
        }

        return event_eor;
}
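
/* Editor's note: this file round-trips between events and skbs with
 * sctp_event2skb()/sctp_skb2event().  A sketch of the idea, assuming
 * the definitions in include/net/sctp/ulpevent.h: the sctp_ulpevent
 * lives inside the skb's control buffer (skb->cb), so no separate
 * allocation or lookup is needed.
 */
#if 0
static inline struct sctp_ulpevent *example_skb2event(struct sk_buff *skb)
{
        /* The event is stored directly in the skb->cb scratch area. */
        return (struct sctp_ulpevent *)skb->cb;
}
#endif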

/* Clear the partial delivery mode for this socket.  Note: This
 * assumes that no association is currently in partial delivery mode.
 */
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
        struct sctp_sock *sp = sctp_sk(sk);

        if (atomic_dec_and_test(&sp->pd_mode)) {
                /* This means there are no other associations in PD, so
                 * we can go ahead and clear out the lobby in one shot.
                 */
                if (!skb_queue_empty(&sp->pd_lobby)) {
                        skb_queue_splice_tail_init(&sp->pd_lobby,
                                                   &sk->sk_receive_queue);
                        return 1;
                }
        } else {
                /* There are other associations in PD, so we only need to
                 * pull stuff out of the lobby that belongs to the
                 * association that is exiting PD (all of its notifications
                 * are posted here).
                 */
                if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
                        struct sk_buff *skb, *tmp;
                        struct sctp_ulpevent *event;

                        sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
                                event = sctp_skb2event(skb);
                                if (event->asoc == asoc) {
                                        __skb_unlink(skb, &sp->pd_lobby);
                                        __skb_queue_tail(&sk->sk_receive_queue,
                                                         skb);
                                }
                        }
                }
        }

        return 0;
}

/* Set the pd_mode on the socket and ulpq */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
        struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

        atomic_inc(&sp->pd_mode);
        ulpq->pd_mode = 1;
}

/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
        ulpq->pd_mode = 0;
        sctp_ulpq_reasm_drain(ulpq);
        return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}

/* If the SKB of 'event' is on a list, it is the first such member
 * of that list.
 */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
        struct sock *sk = ulpq->asoc->base.sk;
        struct sctp_sock *sp = sctp_sk(sk);
        struct sk_buff_head *queue, *skb_list;
        struct sk_buff *skb = sctp_event2skb(event);
        int clear_pd = 0;

        skb_list = (struct sk_buff_head *) skb->prev;

        /* If the socket is just going to throw this away, do not
         * even try to deliver it.
         */
        if (sk->sk_shutdown & RCV_SHUTDOWN &&
            (sk->sk_shutdown & SEND_SHUTDOWN ||
             !sctp_ulpevent_is_notification(event)))
                goto out_free;

        if (!sctp_ulpevent_is_notification(event)) {
                sk_mark_napi_id(sk, skb);
                sk_incoming_cpu_update(sk);
        }
        /* Check if the user wishes to receive this event.  */
        if (!sctp_ulpevent_is_enabled(event, &sp->subscribe))
                goto out_free;

        /* If we are in partial delivery mode, post to the lobby until
         * partial delivery is cleared, unless, of course, _this_
         * association is the cause of the partial delivery.
         */

        if (atomic_read(&sp->pd_mode) == 0) {
                queue = &sk->sk_receive_queue;
        } else {
                if (ulpq->pd_mode) {
                        /* If the association is in partial delivery, we
                         * need to finish delivering the partially processed
                         * packet before passing any other data.  This is
                         * because we don't truly support stream interleaving.
                         */
                        if ((event->msg_flags & MSG_NOTIFICATION) ||
                            (SCTP_DATA_NOT_FRAG ==
                                    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
                                queue = &sp->pd_lobby;
                        else {
                                clear_pd = event->msg_flags & MSG_EOR;
                                queue = &sk->sk_receive_queue;
                        }
                } else {
                        /*
                         * If fragment interleave is enabled, we
                         * can queue this to the receive queue instead
                         * of the lobby.
                         */
                        if (sp->frag_interleave)
                                queue = &sk->sk_receive_queue;
                        else
                                queue = &sp->pd_lobby;
                }
        }

        /* If we are harvesting multiple skbs, they will be
         * collected on a list.
         */
        if (skb_list)
                skb_queue_splice_tail_init(skb_list, queue);
        else
                __skb_queue_tail(queue, skb);

        /* Did we just complete partial delivery and need to get
         * rolling again?  Move pending data to the receive
         * queue.
         */
        if (clear_pd)
                sctp_ulpq_clear_pd(ulpq);

        if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
                if (!sock_owned_by_user(sk))
                        sp->data_ready_signalled = 1;
                sk->sk_data_ready(sk);
        }
        return 1;

out_free:
        if (skb_list)
                sctp_queue_purge_ulpevents(skb_list);
        else
                sctp_ulpevent_free(event);

        return 0;
}
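
/* Editor's note: the MSG_EOR bookkeeping above is what userspace sees
 * on the other side of recvmsg().  A hedged userspace sketch ('fd' and
 * the buffer are hypothetical): under partial delivery a large message
 * is returned in pieces, and only the final piece carries MSG_EOR.
 */
#if 0
#include <sys/socket.h>

static void example_read_whole_message(int fd)
{
        char buf[4096];
        struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
        struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
        ssize_t n;

        do {
                n = recvmsg(fd, &msg, 0);
                /* Each call may return one fragment of a single SCTP
                 * message; MSG_EOR in msg_flags marks the final fragment.
                 */
        } while (n > 0 && !(msg.msg_flags & MSG_EOR));
}
#endif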

/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled.  */
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
                                         struct sctp_ulpevent *event)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *cevent;
        __u32 tsn, ctsn;

        tsn = event->tsn;

        /* See if it belongs at the end. */
        pos = skb_peek_tail(&ulpq->reasm);
        if (!pos) {
                __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
                return;
        }

        /* Short-circuit: just drop it at the end if it belongs there. */
        cevent = sctp_skb2event(pos);
        ctsn = cevent->tsn;
        if (TSN_lt(ctsn, tsn)) {
                __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
                return;
        }

        /* Find the right place in this list.  We store them by TSN.  */
        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                if (TSN_lt(tsn, ctsn))
                        break;
        }

        /* Insert before pos. */
        __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}
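
/* Editor's note: TSN_lt()/TSN_lte() (from <net/sctp/sm.h>) compare
 * 32-bit serial numbers, so the sort above stays correct across TSN
 * wraparound.  A minimal sketch of the comparison they perform:
 */
#if 0
static inline int example_tsn_lt(__u32 a, __u32 b)
{
        /* Signed difference: e.g. 0xfffffffe is "less than" 0x00000001
         * because the sequence space has wrapped.
         */
        return (__s32)(a - b) < 0;
}
#endif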

/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a reassembled skb given the first and last skbs
 * as stored in the reassembly queue.  The skbs may be non-linear if the
 * SCTP payload was fragmented on the way and IP had to reassemble them.
 * We add the rest of the skbs to the first skb's fraglist.
 */
struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
                                                  struct sk_buff_head *queue,
                                                  struct sk_buff *f_frag,
                                                  struct sk_buff *l_frag)
{
        struct sk_buff *pos;
        struct sk_buff *new = NULL;
        struct sctp_ulpevent *event;
        struct sk_buff *pnext, *last;
        struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

        /* Store the pointer to the 2nd skb */
        if (f_frag == l_frag)
                pos = NULL;
        else
                pos = f_frag->next;

        /* Get the last skb in the f_frag's frag_list if present. */
        for (last = list; list; last = list, list = list->next)
                ;

        /* Add the list of remaining fragments to the first fragment's
         * frag_list.
         */
        if (last)
                last->next = pos;
        else {
                if (skb_cloned(f_frag)) {
                        /* This is a cloned skb, we can't just modify
                         * the frag_list.  We need a new skb to do that.
                         * Instead of calling skb_unshare(), we'll do it
                         * ourselves since we need to delay the free.
                         */
                        new = skb_copy(f_frag, GFP_ATOMIC);
                        if (!new)
                                return NULL;    /* try again later */

                        sctp_skb_set_owner_r(new, f_frag->sk);

                        skb_shinfo(new)->frag_list = pos;
                } else
                        skb_shinfo(f_frag)->frag_list = pos;
        }

        /* Remove the first fragment from the reassembly queue.  */
        __skb_unlink(f_frag, queue);

        /* If we did unshare, then free the old skb and re-assign. */
        if (new) {
                kfree_skb(f_frag);
                f_frag = new;
        }

        while (pos) {
                pnext = pos->next;

                /* Update the len and data_len fields of the first fragment. */
                f_frag->len += pos->len;
                f_frag->data_len += pos->len;

                /* Remove the fragment from the reassembly queue.  */
                __skb_unlink(pos, queue);

                /* Break if we have reached the last fragment.  */
                if (pos == l_frag)
                        break;
                pos->next = pnext;
                pos = pnext;
        }

        event = sctp_skb2event(f_frag);
        SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);

        return event;
}
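
/* Editor's note: the resulting layout, for orientation.  After
 * sctp_make_reassembled_event() the message is a single skb whose
 * frag_list chains the remaining fragments in TSN order:
 *
 *   f_frag (first fragment; len/data_len cover the whole message)
 *      +-> frag_list: middle frag -> middle frag -> ... -> l_frag
 *
 * The event returned is the one embedded in f_frag's control buffer.
 */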

/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in an SCTP datagram and return the corresponding event.
 */
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *cevent;
        struct sk_buff *first_frag = NULL;
        __u32 ctsn, next_tsn;
        struct sctp_ulpevent *retval = NULL;
        struct sk_buff *pd_first = NULL;
        struct sk_buff *pd_last = NULL;
        size_t pd_len = 0;
        struct sctp_association *asoc;
        u32 pd_point;

        /* Initialized to 0 just to avoid a compiler warning.  It will
         * never be used with this value; it is referenced only after it
         * is set when we find the first fragment of a message.
         */
        next_tsn = 0;

        /* The chunks are held in the reasm queue sorted by TSN.
         * Walk through the queue sequentially and look for a sequence of
         * fragmented chunks that complete a datagram.
         * 'first_frag' and 'next_tsn' are reset when we find a chunk which
         * is the first fragment of a datagram.  Once these 2 fields are set
         * we expect to find the remaining middle fragments and the last
         * fragment in order.  If not, first_frag is reset to NULL and we
         * start the next pass when we find another first fragment.
         *
         * There is a potential to do partial delivery if the user sets the
         * SCTP_PARTIAL_DELIVERY_POINT option.  Let's count some things here
         * to see if we can do PD.
         */
        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        /* If this "FIRST_FRAG" is the first
                         * element in the queue, then count it towards
                         * possible PD.
                         */
                        if (skb_queue_is_first(&ulpq->reasm, pos)) {
                                pd_first = pos;
                                pd_last = pos;
                                pd_len = pos->len;
                        } else {
                                pd_first = NULL;
                                pd_last = NULL;
                                pd_len = 0;
                        }

                        first_frag = pos;
                        next_tsn = ctsn + 1;
                        break;

                case SCTP_DATA_MIDDLE_FRAG:
                        if ((first_frag) && (ctsn == next_tsn)) {
                                next_tsn++;
                                if (pd_first) {
                                        pd_last = pos;
                                        pd_len += pos->len;
                                }
                        } else
                                first_frag = NULL;
                        break;

                case SCTP_DATA_LAST_FRAG:
                        if (first_frag && (ctsn == next_tsn))
                                goto found;
                        else
                                first_frag = NULL;
                        break;
                }
        }

        asoc = ulpq->asoc;
        if (pd_first) {
                /* Make sure we can enter partial delivery.
                 * We can trigger partial delivery only if fragment
                 * interleave is set, or the socket is not already
                 * in partial delivery.
                 */
                if (!sctp_sk(asoc->base.sk)->frag_interleave &&
                    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
                        goto done;

                cevent = sctp_skb2event(pd_first);
                pd_point = sctp_sk(asoc->base.sk)->pd_point;
                if (pd_point && pd_point <= pd_len) {
                        retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
                                                             &ulpq->reasm,
                                                             pd_first,
                                                             pd_last);
                        if (retval)
                                sctp_ulpq_set_pd(ulpq);
                }
        }
done:
        return retval;
found:
        retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
                                             &ulpq->reasm, first_frag, pos);
        if (retval)
                retval->msg_flags |= MSG_EOR;
        goto done;
}
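
/* Editor's note: pd_point above is set from userspace.  A hedged sketch
 * ('fd' is a hypothetical SCTP socket, headers assume lksctp-tools)
 * using the standard SCTP_PARTIAL_DELIVERY_POINT socket option; once at
 * least this many bytes of a still-incomplete message are queued, the
 * code above may start partial delivery.
 */
#if 0
#include <netinet/in.h>
#include <netinet/sctp.h>

static int example_set_pd_point(int fd)
{
        __u32 pd_point = 8192;  /* bytes */

        return setsockopt(fd, IPPROTO_SCTP, SCTP_PARTIAL_DELIVERY_POINT,
                          &pd_point, sizeof(pd_point));
}
#endif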

/* Retrieve the next set of fragments of a partial message. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos, *last_frag, *first_frag;
        struct sctp_ulpevent *cevent;
        __u32 ctsn, next_tsn;
        int is_last;
        struct sctp_ulpevent *retval;

        /* The chunks are held in the reasm queue sorted by TSN.
         * Walk through the queue sequentially and look for the first
         * sequence of fragmented chunks.
         */

        if (skb_queue_empty(&ulpq->reasm))
                return NULL;

        last_frag = first_frag = NULL;
        retval = NULL;
        next_tsn = 0;
        is_last = 0;

        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        if (!first_frag)
                                return NULL;
                        goto done;
                case SCTP_DATA_MIDDLE_FRAG:
                        if (!first_frag) {
                                first_frag = pos;
                                next_tsn = ctsn + 1;
                                last_frag = pos;
                        } else if (next_tsn == ctsn) {
                                next_tsn++;
                                last_frag = pos;
                        } else
                                goto done;
                        break;
                case SCTP_DATA_LAST_FRAG:
                        if (!first_frag)
                                first_frag = pos;
                        else if (ctsn != next_tsn)
                                goto done;
                        last_frag = pos;
                        is_last = 1;
                        goto done;
                default:
                        return NULL;
                }
        }

        /* We have the reassembled event.  There is no need to look
         * further.
         */
done:
        retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
                                        &ulpq->reasm, first_frag, last_frag);
        if (retval && is_last)
                retval->msg_flags |= MSG_EOR;

        return retval;
}

/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
                                                struct sctp_ulpevent *event)
{
        struct sctp_ulpevent *retval = NULL;

        /* Check if this is part of a fragmented message.  */
        if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
                event->msg_flags |= MSG_EOR;
                return event;
        }

        sctp_ulpq_store_reasm(ulpq, event);
        if (!ulpq->pd_mode)
                retval = sctp_ulpq_retrieve_reassembled(ulpq);
        else {
                __u32 ctsn, ctsnap;

                /* Do not even bother unless this is the next tsn to
                 * be delivered.
                 */
                ctsn = event->tsn;
                ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
                if (TSN_lte(ctsn, ctsnap))
                        retval = sctp_ulpq_retrieve_partial(ulpq);
        }

        return retval;
}

/* Retrieve the first part (sequential fragments) for partial delivery.  */
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos, *last_frag, *first_frag;
        struct sctp_ulpevent *cevent;
        __u32 ctsn, next_tsn;
        struct sctp_ulpevent *retval;

        /* The chunks are held in the reasm queue sorted by TSN.
         * Walk through the queue sequentially and look for a sequence of
         * fragmented chunks that start a datagram.
         */

        if (skb_queue_empty(&ulpq->reasm))
                return NULL;

        last_frag = first_frag = NULL;
        retval = NULL;
        next_tsn = 0;

        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        if (!first_frag) {
                                first_frag = pos;
                                next_tsn = ctsn + 1;
                                last_frag = pos;
                        } else
                                goto done;
                        break;

                case SCTP_DATA_MIDDLE_FRAG:
                        if (!first_frag)
                                return NULL;
                        if (ctsn == next_tsn) {
                                next_tsn++;
                                last_frag = pos;
                        } else
                                goto done;
                        break;

                case SCTP_DATA_LAST_FRAG:
                        if (!first_frag)
                                return NULL;
                        else
                                goto done;
                        break;

                default:
                        return NULL;
                }
        }

        /* We have the reassembled event.  There is no need to look
         * further.
         */
done:
        retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
                                        &ulpq->reasm, first_frag, last_frag);
        return retval;
}

/*
 * Flush out stale fragments from the reassembly queue when processing
 * a Forward TSN.
 *
 * RFC 3758, Section 3.6
 *
 * After receiving and processing a FORWARD TSN, the data receiver MUST
 * take cautions in updating its re-assembly queue.  The receiver MUST
 * remove any partially reassembled message, which is still missing one
 * or more TSNs earlier than or equal to the new cumulative TSN point.
 * In the event that the receiver has invoked the partial delivery API,
 * a notification SHOULD also be generated to inform the upper layer API
 * that the message being partially delivered will NOT be completed.
 */
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *event;
        __u32 tsn;

        if (skb_queue_empty(&ulpq->reasm))
                return;

        skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
                event = sctp_skb2event(pos);
                tsn = event->tsn;

                /* Since the entire message must be abandoned by the
                 * sender (item A3 in Section 3.5, RFC 3758), we can
                 * free all fragments on the list that are less than
                 * or equal to ctsn_point.
                 */
                if (TSN_lte(tsn, fwd_tsn)) {
                        __skb_unlink(pos, &ulpq->reasm);
                        sctp_ulpevent_free(event);
                } else
                        break;
        }
}

/*
 * Drain the reassembly queue.  If we just cleared partial delivery, it
 * is possible that the reassembly queue will contain already reassembled
 * messages.  Retrieve any such messages and give them to the user.
 */
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
        struct sctp_ulpevent *event = NULL;
        struct sk_buff_head temp;

        if (skb_queue_empty(&ulpq->reasm))
                return;

        while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
                /* Do ordering if needed.  */
                if ((event) && (event->msg_flags & MSG_EOR)) {
                        skb_queue_head_init(&temp);
                        __skb_queue_tail(&temp, sctp_event2skb(event));

                        event = sctp_ulpq_order(ulpq, event);
                }

                /* Send the event to the ULP.  'event' is the
                 * sctp_ulpevent for the very first SKB on the 'temp' list.
                 */
                if (event)
                        sctp_ulpq_tail_event(ulpq, event);
        }
}

/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
                                              struct sctp_ulpevent *event)
{
        struct sk_buff_head *event_list;
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *cevent;
        struct sctp_stream *stream;
        __u16 sid, csid, cssn;

        sid = event->stream;
        stream  = &ulpq->asoc->stream;

        event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

        /* We are holding the chunks by stream, by SSN.  */
        sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                /* Have we gone too far?  */
                if (csid > sid)
                        break;

                /* Have we not gone far enough?  */
                if (csid < sid)
                        continue;

                if (cssn != sctp_ssn_peek(stream, in, sid))
                        break;

                /* Found it, so mark in the stream. */
                sctp_ssn_next(stream, in, sid);

                __skb_unlink(pos, &ulpq->lobby);

                /* Attach all gathered skbs to the event.  */
                __skb_queue_tail(event_list, pos);
        }
}

/* Helper function to store chunks needing ordering.  */
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
                                           struct sctp_ulpevent *event)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *cevent;
        __u16 sid, csid;
        __u16 ssn, cssn;

        pos = skb_peek_tail(&ulpq->lobby);
        if (!pos) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        sid = event->stream;
        ssn = event->ssn;

        cevent = (struct sctp_ulpevent *) pos->cb;
        csid = cevent->stream;
        cssn = cevent->ssn;
        if (sid > csid) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        if ((sid == csid) && SSN_lt(cssn, ssn)) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        /* Find the right place in this list.  We store them by
         * stream ID and then by SSN.
         */
        skb_queue_walk(&ulpq->lobby, pos) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                if (csid > sid)
                        break;
                if (csid == sid && SSN_lt(ssn, cssn))
                        break;
        }

        /* Insert before pos. */
        __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}

static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
                                             struct sctp_ulpevent *event)
{
        __u16 sid, ssn;
        struct sctp_stream *stream;

        /* Check if this message needs ordering.  */
        if (event->msg_flags & SCTP_DATA_UNORDERED)
                return event;

        /* Note: The stream ID must be verified before this routine.  */
        sid = event->stream;
        ssn = event->ssn;
        stream  = &ulpq->asoc->stream;

        /* Is this the expected SSN for this stream ID?  */
        if (ssn != sctp_ssn_peek(stream, in, sid)) {
                /* We've received something out of order, so find where it
                 * needs to be placed.  We order by stream and then by SSN.
                 */
                sctp_ulpq_store_ordered(ulpq, event);
                return NULL;
        }

        /* Mark that the next chunk has been found.  */
        sctp_ssn_next(stream, in, sid);

        /* Go find any other chunks that were waiting for
         * ordering.
         */
        sctp_ulpq_retrieve_ordered(ulpq, event);

        return event;
}
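
/* Editor's note: SSN_lt() is the 16-bit analogue of TSN_lt(); stream
 * sequence numbers also wrap, so the ordering above uses serial-number
 * comparison.  A minimal sketch:
 */
#if 0
static inline int example_ssn_lt(__u16 a, __u16 b)
{
        /* Same signed-difference trick as TSNs, narrowed to 16 bits. */
        return (__s16)(a - b) < 0;
}
#endif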

/* Helper function to gather skbs that have possibly become
 * ordered by forward TSN skipping their dependencies.
 */
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *cevent;
        struct sctp_ulpevent *event;
        struct sctp_stream *stream;
        struct sk_buff_head temp;
        struct sk_buff_head *lobby = &ulpq->lobby;
        __u16 csid, cssn;

        stream = &ulpq->asoc->stream;

        /* We are holding the chunks by stream, by SSN.  */
        skb_queue_head_init(&temp);
        event = NULL;
        sctp_skb_for_each(pos, lobby, tmp) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                /* Have we gone too far?  */
                if (csid > sid)
                        break;

                /* Have we not gone far enough?  */
                if (csid < sid)
                        continue;

                /* See if this ssn has been marked by skipping. */
                if (!SSN_lt(cssn, sctp_ssn_peek(stream, in, csid)))
                        break;

                __skb_unlink(pos, lobby);
                if (!event)
                        /* Create a temporary list to collect chunks on.  */
                        event = sctp_skb2event(pos);

                /* Attach all gathered skbs to the event.  */
                __skb_queue_tail(&temp, pos);
        }

        /* If we didn't reap any data, see if the next expected SSN
         * is next on the queue and if so, use that.
         */
        if (event == NULL && pos != (struct sk_buff *)lobby) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                if (csid == sid && cssn == sctp_ssn_peek(stream, in, csid)) {
                        sctp_ssn_next(stream, in, csid);
                        __skb_unlink(pos, lobby);
                        __skb_queue_tail(&temp, pos);
                        event = sctp_skb2event(pos);
                }
        }

        /* Send the event to the ULP.  'event' is the sctp_ulpevent for
         * the very first SKB on the 'temp' list.
         */
        if (event) {
                /* See if we have more ordered data that we can deliver. */
                sctp_ulpq_retrieve_ordered(ulpq, event);
                sctp_ulpq_tail_event(ulpq, event);
        }
}

/* Skip over an SSN.  This is used during the processing of a
 * Forward TSN chunk to skip over the abandoned ordered data.
 */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
        struct sctp_stream *stream;

        /* Note: The stream ID must be verified before this routine.  */
        stream  = &ulpq->asoc->stream;

        /* Is this an old SSN?  If so ignore. */
        if (SSN_lt(ssn, sctp_ssn_peek(stream, in, sid)))
                return;

        /* Mark that we are no longer expecting this SSN or lower. */
        sctp_ssn_skip(stream, in, sid, ssn);

        /* Go find any other chunks that were waiting for
         * ordering and deliver them if needed.
         */
        sctp_ulpq_reap_ordered(ulpq, sid);
}
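
/* Editor's note: a worked example of the skip path.  Suppose stream 3 is
 * waiting for SSN 5 while SSNs 6 and 7 sit in the lobby.  A Forward TSN
 * covering (sid=3, ssn=5) makes sctp_ssn_skip() advance the expected SSN
 * to 6; sctp_ulpq_reap_ordered() then picks up the queued SSN 6 (the new
 * head of the stream) and sctp_ulpq_retrieve_ordered() pulls SSN 7 in
 * behind it, so both are delivered in one pass.
 */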

__u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, struct sk_buff_head *list,
                            __u16 needed)
{
        __u16 freed = 0;
        __u32 tsn, last_tsn;
        struct sk_buff *skb, *flist, *last;
        struct sctp_ulpevent *event;
        struct sctp_tsnmap *tsnmap;

        tsnmap = &ulpq->asoc->peer.tsn_map;

        while ((skb = skb_peek_tail(list)) != NULL) {
                event = sctp_skb2event(skb);
                tsn = event->tsn;

                /* Don't renege below the Cumulative TSN ACK Point. */
                if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
                        break;

                /* Events in ordering queue may have multiple fragments
                 * corresponding to additional TSNs.  Sum the total
                 * freed space; find the last TSN.
                 */
                freed += skb_headlen(skb);
                flist = skb_shinfo(skb)->frag_list;
                for (last = flist; flist; flist = flist->next) {
                        last = flist;
                        freed += skb_headlen(last);
                }
                if (last)
                        last_tsn = sctp_skb2event(last)->tsn;
                else
                        last_tsn = tsn;

                /* Unlink the event, then renege all applicable TSNs. */
                __skb_unlink(skb, list);
                sctp_ulpevent_free(event);
                while (TSN_lte(tsn, last_tsn)) {
                        sctp_tsnmap_renege(tsnmap, tsn);
                        tsn++;
                }
                if (freed >= needed)
                        return freed;
        }

        return freed;
}
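
/* Editor's note: a worked example of the accounting above.  If the event
 * at the tail of the queue is a reassembled message whose skb chain
 * covers TSNs 1000..1003, reneging it frees the headlen of all four skbs
 * and calls sctp_tsnmap_renege() for each of TSNs 1000 through 1003, so
 * the peer will see them as missing again and retransmit.
 */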

/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
        return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
        return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}

/* Partially deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
                                gfp_t gfp)
{
        struct sctp_ulpevent *event;
        struct sctp_association *asoc;
        struct sctp_sock *sp;
        __u32 ctsn;
        struct sk_buff *skb;

        asoc = ulpq->asoc;
        sp = sctp_sk(asoc->base.sk);

        /* If the association is already in Partial Delivery mode
         * we have nothing to do.
         */
        if (ulpq->pd_mode)
                return;

        /* Data must be at or below the Cumulative TSN ACK Point to
         * start partial delivery.
         */
        skb = skb_peek(&asoc->ulpq.reasm);
        if (skb != NULL) {
                ctsn = sctp_skb2event(skb)->tsn;
                if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
                        return;
        }

        /* If the user enabled the fragment interleave socket option,
         * multiple associations can enter partial delivery.
         * Otherwise, we can only enter partial delivery if the
         * socket is not in partial delivery mode.
         */
        if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
                /* Is partial delivery possible?  */
                event = sctp_ulpq_retrieve_first(ulpq);
                /* Send event to the ULP.  */
                if (event) {
                        sctp_ulpq_tail_event(ulpq, event);
                        sctp_ulpq_set_pd(ulpq);
                        return;
                }
        }
}
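
/* Editor's note: frag_interleave above corresponds to the
 * SCTP_FRAGMENT_INTERLEAVE socket option.  A hedged userspace sketch
 * ('fd' is a hypothetical SCTP socket, headers assume lksctp-tools):
 * level 1 allows multiple associations on a one-to-many socket to be in
 * partial delivery at once, which is what the check above permits.
 */
#if 0
#include <netinet/in.h>
#include <netinet/sctp.h>

static int example_enable_frag_interleave(int fd)
{
        int level = 1;  /* 0, 1 or 2; see the SCTP socket API */

        return setsockopt(fd, IPPROTO_SCTP, SCTP_FRAGMENT_INTERLEAVE,
                          &level, sizeof(level));
}
#endif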

/* Renege some packets to make room for an incoming chunk.  */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                      gfp_t gfp)
{
        struct sctp_association *asoc = ulpq->asoc;
        __u32 freed = 0;
        __u16 needed;

        needed = ntohs(chunk->chunk_hdr->length) -
                 sizeof(struct sctp_data_chunk);

        if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
                freed = sctp_ulpq_renege_order(ulpq, needed);
                if (freed < needed)
                        freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
        }
        /* If able to free enough room, accept this chunk. */
        if (freed >= needed) {
                int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
                /*
                 * Enter partial delivery if the chunk has not been
                 * delivered; otherwise, drain the reassembly queue.
                 */
                if (retval <= 0)
                        sctp_ulpq_partial_delivery(ulpq, gfp);
                else if (retval == 1)
                        sctp_ulpq_reasm_drain(ulpq);
        }

        sk_mem_reclaim(asoc->base.sk);
}

/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
        struct sctp_ulpevent *ev = NULL;
        struct sock *sk;
        struct sctp_sock *sp;

        if (!ulpq->pd_mode)
                return;

        sk = ulpq->asoc->base.sk;
        sp = sctp_sk(sk);
        if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
                                       &sctp_sk(sk)->subscribe))
                ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
                                              SCTP_PARTIAL_DELIVERY_ABORTED,
                                              0, 0, 0, gfp);
        if (ev)
                __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

        /* If there is data waiting, send it up the socket now. */
        if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
                sp->data_ready_signalled = 1;
                sk->sk_data_ready(sk);
        }
}
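
/* Editor's note: the PDAPI_ABORTED notification above is only queued if
 * the user subscribed to partial-delivery events.  A hedged userspace
 * sketch ('fd' is a hypothetical SCTP socket, headers assume
 * lksctp-tools) using the classic SCTP_EVENTS option and
 * struct sctp_event_subscribe:
 */
#if 0
#include <string.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static int example_subscribe_pdapi(int fd)
{
        struct sctp_event_subscribe ev;

        memset(&ev, 0, sizeof(ev));
        ev.sctp_partial_delivery_event = 1;

        return setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS, &ev, sizeof(ev));
}
#endif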