linux/net/sctp/ulpqueue.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/* SCTP kernel implementation
   3 * (C) Copyright IBM Corp. 2001, 2004
   4 * Copyright (c) 1999-2000 Cisco, Inc.
   5 * Copyright (c) 1999-2001 Motorola, Inc.
   6 * Copyright (c) 2001 Intel Corp.
   7 * Copyright (c) 2001 Nokia, Inc.
   8 * Copyright (c) 2001 La Monte H.P. Yarroll
   9 *
  10 * This abstraction carries sctp events to the ULP (sockets).
  11 *
  12 * Please send any bug reports or fixes you make to the
  13 * email address(es):
  14 *    lksctp developers <linux-sctp@vger.kernel.org>
  15 *
  16 * Written or modified by:
  17 *    Jon Grimm             <jgrimm@us.ibm.com>
  18 *    La Monte H.P. Yarroll <piggy@acm.org>
  19 *    Sridhar Samudrala     <sri@us.ibm.com>
  20 */
  21
  22#include <linux/slab.h>
  23#include <linux/types.h>
  24#include <linux/skbuff.h>
  25#include <net/sock.h>
  26#include <net/busy_poll.h>
  27#include <net/sctp/structs.h>
  28#include <net/sctp/sctp.h>
  29#include <net/sctp/sm.h>
  30
  31/* Forward declarations for internal helpers.  */
  32static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
  33                                              struct sctp_ulpevent *);
  34static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
  35                                              struct sctp_ulpevent *);
  36static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
  37
  38/* 1st Level Abstractions */
  39
  40/* Initialize a ULP queue from a block of memory.  */
  41struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
  42                                 struct sctp_association *asoc)
  43{
  44        memset(ulpq, 0, sizeof(struct sctp_ulpq));
  45
  46        ulpq->asoc = asoc;
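             /* reasm holds fragments awaiting reassembly, reasm_uo holds
              * unordered fragments (used with the stream interleaving
              * extension), and the lobby holds events waiting on stream
              * ordering.
              */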
  47        skb_queue_head_init(&ulpq->reasm);
  48        skb_queue_head_init(&ulpq->reasm_uo);
  49        skb_queue_head_init(&ulpq->lobby);
  50        ulpq->pd_mode  = 0;
  51
  52        return ulpq;
  53}
  54
  55
  56/* Flush the reassembly and ordering queues.  */
  57void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
  58{
  59        struct sk_buff *skb;
  60        struct sctp_ulpevent *event;
  61
  62        while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
  63                event = sctp_skb2event(skb);
  64                sctp_ulpevent_free(event);
  65        }
  66
  67        while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
  68                event = sctp_skb2event(skb);
  69                sctp_ulpevent_free(event);
  70        }
  71
  72        while ((skb = __skb_dequeue(&ulpq->reasm_uo)) != NULL) {
  73                event = sctp_skb2event(skb);
  74                sctp_ulpevent_free(event);
  75        }
  76}
  77
  78/* Dispose of a ulpqueue.  */
  79void sctp_ulpq_free(struct sctp_ulpq *ulpq)
  80{
  81        sctp_ulpq_flush(ulpq);
  82}
  83
   84/* Process an incoming DATA chunk.  Returns 1 if a complete message (MSG_EOR) was delivered to the ULP, 0 otherwise, or -ENOMEM on failure.  */
  85int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
  86                        gfp_t gfp)
  87{
  88        struct sk_buff_head temp;
  89        struct sctp_ulpevent *event;
  90        int event_eor = 0;
  91
  92        /* Create an event from the incoming chunk. */
  93        event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
  94        if (!event)
  95                return -ENOMEM;
  96
  97        event->ssn = ntohs(chunk->subh.data_hdr->ssn);
  98        event->ppid = chunk->subh.data_hdr->ppid;
  99
 100        /* Do reassembly if needed.  */
 101        event = sctp_ulpq_reasm(ulpq, event);
 102
 103        /* Do ordering if needed.  */
 104        if (event) {
 105                /* Create a temporary list to collect chunks on.  */
 106                skb_queue_head_init(&temp);
 107                __skb_queue_tail(&temp, sctp_event2skb(event));
 108
 109                if (event->msg_flags & MSG_EOR)
 110                        event = sctp_ulpq_order(ulpq, event);
 111        }
 112
  113        /* Send event to the ULP.  'event' is the sctp_ulpevent for the
  114         * very first SKB on the 'temp' list.
 115         */
 116        if (event) {
 117                event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
 118                sctp_ulpq_tail_event(ulpq, &temp);
 119        }
 120
 121        return event_eor;
 122}
 123
  124/* Clear the partial delivery mode for this socket.  If this was the last
  125 * association in partial delivery, splice the whole lobby onto the socket
  126 * receive queue and return 1; otherwise move only the events belonging to
  127 * the association that is exiting PD and return 0.  */
 128int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
 129{
 130        struct sctp_sock *sp = sctp_sk(sk);
 131
 132        if (atomic_dec_and_test(&sp->pd_mode)) {
 133                /* This means there are no other associations in PD, so
 134                 * we can go ahead and clear out the lobby in one shot
 135                 */
 136                if (!skb_queue_empty(&sp->pd_lobby)) {
 137                        skb_queue_splice_tail_init(&sp->pd_lobby,
 138                                                   &sk->sk_receive_queue);
 139                        return 1;
 140                }
 141        } else {
 142                /* There are other associations in PD, so we only need to
 143                 * pull stuff out of the lobby that belongs to the
  144                 * association that is exiting PD (all of its notifications
 145                 * are posted here).
 146                 */
 147                if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
 148                        struct sk_buff *skb, *tmp;
 149                        struct sctp_ulpevent *event;
 150
 151                        sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
 152                                event = sctp_skb2event(skb);
 153                                if (event->asoc == asoc) {
 154                                        __skb_unlink(skb, &sp->pd_lobby);
 155                                        __skb_queue_tail(&sk->sk_receive_queue,
 156                                                         skb);
 157                                }
 158                        }
 159                }
 160        }
 161
 162        return 0;
 163}
 164
 165/* Set the pd_mode on the socket and ulpq */
 166static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
 167{
 168        struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);
 169
 170        atomic_inc(&sp->pd_mode);
 171        ulpq->pd_mode = 1;
 172}
 173
 174/* Clear the pd_mode and restart any pending messages waiting for delivery. */
 175static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
 176{
 177        ulpq->pd_mode = 0;
 178        sctp_ulpq_reasm_drain(ulpq);
 179        return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
 180}
 181
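     /* Add a new event for propagation to the ULP.  'skb_list' holds one or
      * more events ready for the user; queue them on the socket receive queue,
      * or on the partial delivery lobby while partial delivery is in progress,
      * and wake the socket if needed.  Returns 1 if the events were queued,
      * 0 if they were freed.
      */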
 182int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sk_buff_head *skb_list)
 183{
 184        struct sock *sk = ulpq->asoc->base.sk;
 185        struct sctp_sock *sp = sctp_sk(sk);
 186        struct sctp_ulpevent *event;
 187        struct sk_buff_head *queue;
 188        struct sk_buff *skb;
 189        int clear_pd = 0;
 190
 191        skb = __skb_peek(skb_list);
 192        event = sctp_skb2event(skb);
 193
 194        /* If the socket is just going to throw this away, do not
 195         * even try to deliver it.
 196         */
 197        if (sk->sk_shutdown & RCV_SHUTDOWN &&
 198            (sk->sk_shutdown & SEND_SHUTDOWN ||
 199             !sctp_ulpevent_is_notification(event)))
 200                goto out_free;
 201
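             /* For data, note the NAPI ID and the receiving CPU on the socket
              * for busy polling and incoming-CPU accounting.
              */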
 202        if (!sctp_ulpevent_is_notification(event)) {
 203                sk_mark_napi_id(sk, skb);
 204                sk_incoming_cpu_update(sk);
 205        }
 206        /* Check if the user wishes to receive this event.  */
 207        if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
 208                goto out_free;
 209
 210        /* If we are in partial delivery mode, post to the lobby until
  211         * partial delivery is cleared, unless, of course, _this_ is
  212         * the association that caused the partial delivery.
 213         */
 214
 215        if (atomic_read(&sp->pd_mode) == 0) {
 216                queue = &sk->sk_receive_queue;
 217        } else {
 218                if (ulpq->pd_mode) {
 219                        /* If the association is in partial delivery, we
 220                         * need to finish delivering the partially processed
 221                         * packet before passing any other data.  This is
 222                         * because we don't truly support stream interleaving.
 223                         */
 224                        if ((event->msg_flags & MSG_NOTIFICATION) ||
 225                            (SCTP_DATA_NOT_FRAG ==
 226                                    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
 227                                queue = &sp->pd_lobby;
 228                        else {
 229                                clear_pd = event->msg_flags & MSG_EOR;
 230                                queue = &sk->sk_receive_queue;
 231                        }
 232                } else {
 233                        /*
 234                         * If fragment interleave is enabled, we
 235                         * can queue this to the receive queue instead
 236                         * of the lobby.
 237                         */
 238                        if (sp->frag_interleave)
 239                                queue = &sk->sk_receive_queue;
 240                        else
 241                                queue = &sp->pd_lobby;
 242                }
 243        }
 244
 245        skb_queue_splice_tail_init(skb_list, queue);
 246
 247        /* Did we just complete partial delivery and need to get
 248         * rolling again?  Move pending data to the receive
 249         * queue.
 250         */
 251        if (clear_pd)
 252                sctp_ulpq_clear_pd(ulpq);
 253
 254        if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
 255                if (!sock_owned_by_user(sk))
 256                        sp->data_ready_signalled = 1;
 257                sk->sk_data_ready(sk);
 258        }
 259        return 1;
 260
 261out_free:
 262        if (skb_list)
 263                sctp_queue_purge_ulpevents(skb_list);
 264        else
 265                sctp_ulpevent_free(event);
 266
 267        return 0;
 268}
 269
 270/* 2nd Level Abstractions */
 271
 272/* Helper function to store chunks that need to be reassembled.  */
 273static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
 274                                         struct sctp_ulpevent *event)
 275{
 276        struct sk_buff *pos;
 277        struct sctp_ulpevent *cevent;
 278        __u32 tsn, ctsn;
 279
 280        tsn = event->tsn;
 281
 282        /* See if it belongs at the end. */
 283        pos = skb_peek_tail(&ulpq->reasm);
 284        if (!pos) {
 285                __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
 286                return;
 287        }
 288
  289        /* Short-circuit: if it sorts after the current tail, just append it. */
 290        cevent = sctp_skb2event(pos);
 291        ctsn = cevent->tsn;
 292        if (TSN_lt(ctsn, tsn)) {
 293                __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
 294                return;
 295        }
 296
 297        /* Find the right place in this list. We store them by TSN.  */
 298        skb_queue_walk(&ulpq->reasm, pos) {
 299                cevent = sctp_skb2event(pos);
 300                ctsn = cevent->tsn;
 301
 302                if (TSN_lt(tsn, ctsn))
 303                        break;
 304        }
 305
 306        /* Insert before pos. */
 307        __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
 308
 309}
 310
 311/* Helper function to return an event corresponding to the reassembled
 312 * datagram.
  313 * This routine creates a reassembled skb given the first and last skbs
  314 * as stored in the reassembly queue.  The skbs may be non-linear if the SCTP
  315 * payload was fragmented on the way and IP had to reassemble them.
  316 * We add the rest of the skbs to the first skb's frag_list.
 317 */
 318struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
 319                                                  struct sk_buff_head *queue,
 320                                                  struct sk_buff *f_frag,
 321                                                  struct sk_buff *l_frag)
 322{
 323        struct sk_buff *pos;
 324        struct sk_buff *new = NULL;
 325        struct sctp_ulpevent *event;
 326        struct sk_buff *pnext, *last;
 327        struct sk_buff *list = skb_shinfo(f_frag)->frag_list;
 328
 329        /* Store the pointer to the 2nd skb */
 330        if (f_frag == l_frag)
 331                pos = NULL;
 332        else
 333                pos = f_frag->next;
 334
 335        /* Get the last skb in the f_frag's frag_list if present. */
 336        for (last = list; list; last = list, list = list->next)
 337                ;
 338
  339        /* Add the list of remaining fragments to the first fragment's
 340         * frag_list.
 341         */
 342        if (last)
 343                last->next = pos;
 344        else {
 345                if (skb_cloned(f_frag)) {
 346                        /* This is a cloned skb, we can't just modify
 347                         * the frag_list.  We need a new skb to do that.
 348                         * Instead of calling skb_unshare(), we'll do it
 349                         * ourselves since we need to delay the free.
 350                         */
 351                        new = skb_copy(f_frag, GFP_ATOMIC);
 352                        if (!new)
 353                                return NULL;    /* try again later */
 354
 355                        sctp_skb_set_owner_r(new, f_frag->sk);
 356
 357                        skb_shinfo(new)->frag_list = pos;
 358                } else
 359                        skb_shinfo(f_frag)->frag_list = pos;
 360        }
 361
 362        /* Remove the first fragment from the reassembly queue.  */
 363        __skb_unlink(f_frag, queue);
 364
 365        /* if we did unshare, then free the old skb and re-assign */
 366        if (new) {
 367                kfree_skb(f_frag);
 368                f_frag = new;
 369        }
 370
 371        while (pos) {
 372
 373                pnext = pos->next;
 374
 375                /* Update the len and data_len fields of the first fragment. */
 376                f_frag->len += pos->len;
 377                f_frag->data_len += pos->len;
 378
 379                /* Remove the fragment from the reassembly queue.  */
 380                __skb_unlink(pos, queue);
 381
 382                /* Break if we have reached the last fragment.  */
 383                if (pos == l_frag)
 384                        break;
 385                pos->next = pnext;
 386                pos = pnext;
 387        }
 388
 389        event = sctp_skb2event(f_frag);
 390        SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);
 391
 392        return event;
 393}
 394
 395
 396/* Helper function to check if an incoming chunk has filled up the last
  397 * missing fragment in an SCTP datagram and return the corresponding event.
 398 */
 399static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
 400{
 401        struct sk_buff *pos;
 402        struct sctp_ulpevent *cevent;
 403        struct sk_buff *first_frag = NULL;
 404        __u32 ctsn, next_tsn;
 405        struct sctp_ulpevent *retval = NULL;
 406        struct sk_buff *pd_first = NULL;
 407        struct sk_buff *pd_last = NULL;
 408        size_t pd_len = 0;
 409        struct sctp_association *asoc;
 410        u32 pd_point;
 411
  412        /* Initialized to 0 just to avoid a compiler warning.  Will
 413         * never be used with this value. It is referenced only after it
 414         * is set when we find the first fragment of a message.
 415         */
 416        next_tsn = 0;
 417
 418        /* The chunks are held in the reasm queue sorted by TSN.
 419         * Walk through the queue sequentially and look for a sequence of
 420         * fragmented chunks that complete a datagram.
 421         * 'first_frag' and next_tsn are reset when we find a chunk which
 422         * is the first fragment of a datagram. Once these 2 fields are set
 423         * we expect to find the remaining middle fragments and the last
 424         * fragment in order. If not, first_frag is reset to NULL and we
 425         * start the next pass when we find another first fragment.
 426         *
  427         * There is a potential to do partial delivery if the user sets the
  428         * SCTP_PARTIAL_DELIVERY_POINT option.  Let's count some things here
  429         * to see if we can do PD.
 430         */
 431        skb_queue_walk(&ulpq->reasm, pos) {
 432                cevent = sctp_skb2event(pos);
 433                ctsn = cevent->tsn;
 434
 435                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
 436                case SCTP_DATA_FIRST_FRAG:
 437                        /* If this "FIRST_FRAG" is the first
 438                         * element in the queue, then count it towards
 439                         * possible PD.
 440                         */
 441                        if (skb_queue_is_first(&ulpq->reasm, pos)) {
 442                            pd_first = pos;
 443                            pd_last = pos;
 444                            pd_len = pos->len;
 445                        } else {
 446                            pd_first = NULL;
 447                            pd_last = NULL;
 448                            pd_len = 0;
 449                        }
 450
 451                        first_frag = pos;
 452                        next_tsn = ctsn + 1;
 453                        break;
 454
 455                case SCTP_DATA_MIDDLE_FRAG:
 456                        if ((first_frag) && (ctsn == next_tsn)) {
 457                                next_tsn++;
 458                                if (pd_first) {
 459                                    pd_last = pos;
 460                                    pd_len += pos->len;
 461                                }
 462                        } else
 463                                first_frag = NULL;
 464                        break;
 465
 466                case SCTP_DATA_LAST_FRAG:
 467                        if (first_frag && (ctsn == next_tsn))
 468                                goto found;
 469                        else
 470                                first_frag = NULL;
 471                        break;
 472                }
 473        }
 474
 475        asoc = ulpq->asoc;
 476        if (pd_first) {
  477                /* Make sure we can enter partial delivery.
  478                 * We can trigger partial delivery only if fragment
  479                 * interleave is set, or the socket is not already
  480                 * in partial delivery.
 481                 */
 482                if (!sctp_sk(asoc->base.sk)->frag_interleave &&
 483                    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
 484                        goto done;
 485
 486                cevent = sctp_skb2event(pd_first);
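                     /* If the contiguous fragments at the head of the queue
                      * already reach the user's partial delivery point, hand
                      * them up now and put this association into PD mode.
                      */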
  487                pd_point = sctp_sk(asoc->base.sk)->pd_point;
  488                if (pd_point && pd_point <= pd_len) {
 489                        retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
 490                                                             &ulpq->reasm,
 491                                                             pd_first,
 492                                                             pd_last);
 493                        if (retval)
 494                                sctp_ulpq_set_pd(ulpq);
 495                }
 496        }
 497done:
 498        return retval;
 499found:
 500        retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
 501                                             &ulpq->reasm, first_frag, pos);
 502        if (retval)
 503                retval->msg_flags |= MSG_EOR;
 504        goto done;
 505}
 506
 507/* Retrieve the next set of fragments of a partial message. */
 508static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
 509{
 510        struct sk_buff *pos, *last_frag, *first_frag;
 511        struct sctp_ulpevent *cevent;
 512        __u32 ctsn, next_tsn;
 513        int is_last;
 514        struct sctp_ulpevent *retval;
 515
 516        /* The chunks are held in the reasm queue sorted by TSN.
 517         * Walk through the queue sequentially and look for the first
 518         * sequence of fragmented chunks.
 519         */
 520
 521        if (skb_queue_empty(&ulpq->reasm))
 522                return NULL;
 523
 524        last_frag = first_frag = NULL;
 525        retval = NULL;
 526        next_tsn = 0;
 527        is_last = 0;
 528
 529        skb_queue_walk(&ulpq->reasm, pos) {
 530                cevent = sctp_skb2event(pos);
 531                ctsn = cevent->tsn;
 532
 533                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
 534                case SCTP_DATA_FIRST_FRAG:
 535                        if (!first_frag)
 536                                return NULL;
 537                        goto done;
 538                case SCTP_DATA_MIDDLE_FRAG:
 539                        if (!first_frag) {
 540                                first_frag = pos;
 541                                next_tsn = ctsn + 1;
 542                                last_frag = pos;
 543                        } else if (next_tsn == ctsn) {
 544                                next_tsn++;
 545                                last_frag = pos;
 546                        } else
 547                                goto done;
 548                        break;
 549                case SCTP_DATA_LAST_FRAG:
 550                        if (!first_frag)
 551                                first_frag = pos;
 552                        else if (ctsn != next_tsn)
 553                                goto done;
 554                        last_frag = pos;
 555                        is_last = 1;
 556                        goto done;
 557                default:
 558                        return NULL;
 559                }
 560        }
 561
 562        /* We have the reassembled event. There is no need to look
 563         * further.
 564         */
 565done:
 566        retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
 567                                        &ulpq->reasm, first_frag, last_frag);
 568        if (retval && is_last)
 569                retval->msg_flags |= MSG_EOR;
 570
 571        return retval;
 572}
 573
 574
 575/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 576 * need reassembling.
 577 */
 578static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
 579                                                struct sctp_ulpevent *event)
 580{
 581        struct sctp_ulpevent *retval = NULL;
 582
 583        /* Check if this is part of a fragmented message.  */
 584        if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
 585                event->msg_flags |= MSG_EOR;
 586                return event;
 587        }
 588
 589        sctp_ulpq_store_reasm(ulpq, event);
 590        if (!ulpq->pd_mode)
 591                retval = sctp_ulpq_retrieve_reassembled(ulpq);
 592        else {
 593                __u32 ctsn, ctsnap;
 594
 595                /* Do not even bother unless this is the next tsn to
 596                 * be delivered.
 597                 */
 598                ctsn = event->tsn;
 599                ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
 600                if (TSN_lte(ctsn, ctsnap))
 601                        retval = sctp_ulpq_retrieve_partial(ulpq);
 602        }
 603
 604        return retval;
 605}
 606
 607/* Retrieve the first part (sequential fragments) for partial delivery.  */
 608static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
 609{
 610        struct sk_buff *pos, *last_frag, *first_frag;
 611        struct sctp_ulpevent *cevent;
 612        __u32 ctsn, next_tsn;
 613        struct sctp_ulpevent *retval;
 614
 615        /* The chunks are held in the reasm queue sorted by TSN.
 616         * Walk through the queue sequentially and look for a sequence of
 617         * fragmented chunks that start a datagram.
 618         */
 619
 620        if (skb_queue_empty(&ulpq->reasm))
 621                return NULL;
 622
 623        last_frag = first_frag = NULL;
 624        retval = NULL;
 625        next_tsn = 0;
 626
 627        skb_queue_walk(&ulpq->reasm, pos) {
 628                cevent = sctp_skb2event(pos);
 629                ctsn = cevent->tsn;
 630
 631                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
 632                case SCTP_DATA_FIRST_FRAG:
 633                        if (!first_frag) {
 634                                first_frag = pos;
 635                                next_tsn = ctsn + 1;
 636                                last_frag = pos;
 637                        } else
 638                                goto done;
 639                        break;
 640
 641                case SCTP_DATA_MIDDLE_FRAG:
 642                        if (!first_frag)
 643                                return NULL;
 644                        if (ctsn == next_tsn) {
 645                                next_tsn++;
 646                                last_frag = pos;
 647                        } else
 648                                goto done;
 649                        break;
 650
 651                case SCTP_DATA_LAST_FRAG:
 652                        if (!first_frag)
 653                                return NULL;
 654                        else
 655                                goto done;
 656                        break;
 657
 658                default:
 659                        return NULL;
 660                }
 661        }
 662
 663        /* We have the reassembled event. There is no need to look
 664         * further.
 665         */
 666done:
 667        retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
 668                                        &ulpq->reasm, first_frag, last_frag);
 669        return retval;
 670}
 671
 672/*
 673 * Flush out stale fragments from the reassembly queue when processing
 674 * a Forward TSN.
 675 *
 676 * RFC 3758, Section 3.6
 677 *
 678 * After receiving and processing a FORWARD TSN, the data receiver MUST
 679 * take cautions in updating its re-assembly queue.  The receiver MUST
 680 * remove any partially reassembled message, which is still missing one
 681 * or more TSNs earlier than or equal to the new cumulative TSN point.
 682 * In the event that the receiver has invoked the partial delivery API,
 683 * a notification SHOULD also be generated to inform the upper layer API
 684 * that the message being partially delivered will NOT be completed.
 685 */
 686void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
 687{
 688        struct sk_buff *pos, *tmp;
 689        struct sctp_ulpevent *event;
 690        __u32 tsn;
 691
 692        if (skb_queue_empty(&ulpq->reasm))
 693                return;
 694
 695        skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
 696                event = sctp_skb2event(pos);
 697                tsn = event->tsn;
 698
 699                /* Since the entire message must be abandoned by the
 700                 * sender (item A3 in Section 3.5, RFC 3758), we can
  701                 * free all fragments on the list that are less than
  702                 * or equal to the ctsn_point.
 703                 */
 704                if (TSN_lte(tsn, fwd_tsn)) {
 705                        __skb_unlink(pos, &ulpq->reasm);
 706                        sctp_ulpevent_free(event);
 707                } else
 708                        break;
 709        }
 710}
 711
 712/*
  713 * Drain the reassembly queue.  If we just cleared partial delivery, it
 714 * is possible that the reassembly queue will contain already reassembled
 715 * messages.  Retrieve any such messages and give them to the user.
 716 */
 717static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
 718{
 719        struct sctp_ulpevent *event = NULL;
 720
 721        if (skb_queue_empty(&ulpq->reasm))
 722                return;
 723
 724        while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
 725                struct sk_buff_head temp;
 726
 727                skb_queue_head_init(&temp);
 728                __skb_queue_tail(&temp, sctp_event2skb(event));
 729
 730                /* Do ordering if needed.  */
 731                if (event->msg_flags & MSG_EOR)
 732                        event = sctp_ulpq_order(ulpq, event);
 733
 734                /* Send event to the ULP.  'event' is the
  735                 * sctp_ulpevent for the very first SKB on the 'temp' list.
 736                 */
 737                if (event)
 738                        sctp_ulpq_tail_event(ulpq, &temp);
 739        }
 740}
 741
 742
 743/* Helper function to gather skbs that have possibly become
  744 * ordered by an incoming chunk.
 745 */
 746static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
 747                                              struct sctp_ulpevent *event)
 748{
 749        struct sk_buff_head *event_list;
 750        struct sk_buff *pos, *tmp;
 751        struct sctp_ulpevent *cevent;
 752        struct sctp_stream *stream;
 753        __u16 sid, csid, cssn;
 754
 755        sid = event->stream;
 756        stream  = &ulpq->asoc->stream;
 757
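             /* 'event' is the first skb on a temporary list built by the
              * caller, so its skb->prev still points at that list head.
              * Recover the list so newly ordered skbs can be appended to it.
              */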
 758        event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;
 759
 760        /* We are holding the chunks by stream, by SSN.  */
 761        sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
 762                cevent = (struct sctp_ulpevent *) pos->cb;
 763                csid = cevent->stream;
 764                cssn = cevent->ssn;
 765
 766                /* Have we gone too far?  */
 767                if (csid > sid)
 768                        break;
 769
 770                /* Have we not gone far enough?  */
 771                if (csid < sid)
 772                        continue;
 773
 774                if (cssn != sctp_ssn_peek(stream, in, sid))
 775                        break;
 776
 777                /* Found it, so mark in the stream. */
 778                sctp_ssn_next(stream, in, sid);
 779
 780                __skb_unlink(pos, &ulpq->lobby);
 781
 782                /* Attach all gathered skbs to the event.  */
 783                __skb_queue_tail(event_list, pos);
 784        }
 785}
 786
 787/* Helper function to store chunks needing ordering.  */
 788static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
 789                                           struct sctp_ulpevent *event)
 790{
 791        struct sk_buff *pos;
 792        struct sctp_ulpevent *cevent;
 793        __u16 sid, csid;
 794        __u16 ssn, cssn;
 795
 796        pos = skb_peek_tail(&ulpq->lobby);
 797        if (!pos) {
 798                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
 799                return;
 800        }
 801
 802        sid = event->stream;
 803        ssn = event->ssn;
 804
 805        cevent = (struct sctp_ulpevent *) pos->cb;
 806        csid = cevent->stream;
 807        cssn = cevent->ssn;
 808        if (sid > csid) {
 809                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
 810                return;
 811        }
 812
 813        if ((sid == csid) && SSN_lt(cssn, ssn)) {
 814                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
 815                return;
 816        }
 817
 818        /* Find the right place in this list.  We store them by
 819         * stream ID and then by SSN.
 820         */
 821        skb_queue_walk(&ulpq->lobby, pos) {
 822                cevent = (struct sctp_ulpevent *) pos->cb;
 823                csid = cevent->stream;
 824                cssn = cevent->ssn;
 825
 826                if (csid > sid)
 827                        break;
 828                if (csid == sid && SSN_lt(ssn, cssn))
 829                        break;
 830        }
 831
 832
 833        /* Insert before pos. */
 834        __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
 835}
 836
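     /* Deliver 'event' in stream order.  Unordered events pass straight
      * through.  Ordered events are returned only when they carry the next
      * expected SSN for their stream (with any newly unblocked events from
      * the lobby gathered behind them); otherwise they are held on the lobby
      * and NULL is returned.
      */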
 837static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
 838                                             struct sctp_ulpevent *event)
 839{
 840        __u16 sid, ssn;
 841        struct sctp_stream *stream;
 842
 843        /* Check if this message needs ordering.  */
 844        if (event->msg_flags & SCTP_DATA_UNORDERED)
 845                return event;
 846
 847        /* Note: The stream ID must be verified before this routine.  */
 848        sid = event->stream;
 849        ssn = event->ssn;
 850        stream  = &ulpq->asoc->stream;
 851
 852        /* Is this the expected SSN for this stream ID?  */
 853        if (ssn != sctp_ssn_peek(stream, in, sid)) {
 854                /* We've received something out of order, so find where it
 855                 * needs to be placed.  We order by stream and then by SSN.
 856                 */
 857                sctp_ulpq_store_ordered(ulpq, event);
 858                return NULL;
 859        }
 860
 861        /* Mark that the next chunk has been found.  */
 862        sctp_ssn_next(stream, in, sid);
 863
 864        /* Go find any other chunks that were waiting for
 865         * ordering.
 866         */
 867        sctp_ulpq_retrieve_ordered(ulpq, event);
 868
 869        return event;
 870}
 871
 872/* Helper function to gather skbs that have possibly become
  873 * ordered by a Forward TSN skipping their dependencies.
 874 */
 875static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
 876{
 877        struct sk_buff *pos, *tmp;
 878        struct sctp_ulpevent *cevent;
 879        struct sctp_ulpevent *event;
 880        struct sctp_stream *stream;
 881        struct sk_buff_head temp;
 882        struct sk_buff_head *lobby = &ulpq->lobby;
 883        __u16 csid, cssn;
 884
 885        stream = &ulpq->asoc->stream;
 886
 887        /* We are holding the chunks by stream, by SSN.  */
 888        skb_queue_head_init(&temp);
 889        event = NULL;
 890        sctp_skb_for_each(pos, lobby, tmp) {
 891                cevent = (struct sctp_ulpevent *) pos->cb;
 892                csid = cevent->stream;
 893                cssn = cevent->ssn;
 894
 895                /* Have we gone too far?  */
 896                if (csid > sid)
 897                        break;
 898
 899                /* Have we not gone far enough?  */
 900                if (csid < sid)
 901                        continue;
 902
 903                /* see if this ssn has been marked by skipping */
 904                if (!SSN_lt(cssn, sctp_ssn_peek(stream, in, csid)))
 905                        break;
 906
 907                __skb_unlink(pos, lobby);
 908                if (!event)
 909                        /* Create a temporary list to collect chunks on.  */
 910                        event = sctp_skb2event(pos);
 911
 912                /* Attach all gathered skbs to the event.  */
 913                __skb_queue_tail(&temp, pos);
 914        }
 915
 916        /* If we didn't reap any data, see if the next expected SSN
 917         * is next on the queue and if so, use that.
 918         */
 919        if (event == NULL && pos != (struct sk_buff *)lobby) {
 920                cevent = (struct sctp_ulpevent *) pos->cb;
 921                csid = cevent->stream;
 922                cssn = cevent->ssn;
 923
 924                if (csid == sid && cssn == sctp_ssn_peek(stream, in, csid)) {
 925                        sctp_ssn_next(stream, in, csid);
 926                        __skb_unlink(pos, lobby);
 927                        __skb_queue_tail(&temp, pos);
 928                        event = sctp_skb2event(pos);
 929                }
 930        }
 931
  932        /* Send event to the ULP.  'event' is the sctp_ulpevent for the
  933         * very first SKB on the 'temp' list.
 934         */
 935        if (event) {
  936                /* see if we have more ordered events that we can deliver */
 937                sctp_ulpq_retrieve_ordered(ulpq, event);
 938                sctp_ulpq_tail_event(ulpq, &temp);
 939        }
 940}
 941
  942/* Skip over an SSN.  This is used during the processing of a
  943 * Forward TSN chunk to skip over abandoned ordered data.
 944 */
 945void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 946{
 947        struct sctp_stream *stream;
 948
 949        /* Note: The stream ID must be verified before this routine.  */
 950        stream  = &ulpq->asoc->stream;
 951
 952        /* Is this an old SSN?  If so ignore. */
 953        if (SSN_lt(ssn, sctp_ssn_peek(stream, in, sid)))
 954                return;
 955
 956        /* Mark that we are no longer expecting this SSN or lower. */
 957        sctp_ssn_skip(stream, in, sid, ssn);
 958
 959        /* Go find any other chunks that were waiting for
 960         * ordering and deliver them if needed.
 961         */
 962        sctp_ulpq_reap_ordered(ulpq, sid);
 963}
 964
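     /* Renege events from the tail of 'list' until 'needed' bytes have been
      * freed, never reneging data at or below the Cumulative TSN ACK Point.
      * Returns the number of bytes actually freed.
      */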
 965__u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, struct sk_buff_head *list,
 966                            __u16 needed)
 967{
 968        __u16 freed = 0;
 969        __u32 tsn, last_tsn;
 970        struct sk_buff *skb, *flist, *last;
 971        struct sctp_ulpevent *event;
 972        struct sctp_tsnmap *tsnmap;
 973
 974        tsnmap = &ulpq->asoc->peer.tsn_map;
 975
 976        while ((skb = skb_peek_tail(list)) != NULL) {
 977                event = sctp_skb2event(skb);
 978                tsn = event->tsn;
 979
  980                /* Don't renege at or below the Cumulative TSN ACK Point. */
 981                if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
 982                        break;
 983
 984                /* Events in ordering queue may have multiple fragments
 985                 * corresponding to additional TSNs.  Sum the total
 986                 * freed space; find the last TSN.
 987                 */
 988                freed += skb_headlen(skb);
 989                flist = skb_shinfo(skb)->frag_list;
 990                for (last = flist; flist; flist = flist->next) {
 991                        last = flist;
 992                        freed += skb_headlen(last);
 993                }
 994                if (last)
 995                        last_tsn = sctp_skb2event(last)->tsn;
 996                else
 997                        last_tsn = tsn;
 998
 999                /* Unlink the event, then renege all applicable TSNs. */
1000                __skb_unlink(skb, list);
1001                sctp_ulpevent_free(event);
1002                while (TSN_lte(tsn, last_tsn)) {
1003                        sctp_tsnmap_renege(tsnmap, tsn);
1004                        tsn++;
1005                }
1006                if (freed >= needed)
1007                        return freed;
1008        }
1009
1010        return freed;
1011}
1012
1013/* Renege 'needed' bytes from the ordering queue. */
1014static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
1015{
1016        return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
1017}
1018
1019/* Renege 'needed' bytes from the reassembly queue. */
1020static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
1021{
1022        return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
1023}
1024
 1025/* Partially deliver the first message, as there is pressure on rwnd. */
1026void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
1027                                gfp_t gfp)
1028{
1029        struct sctp_ulpevent *event;
1030        struct sctp_association *asoc;
1031        struct sctp_sock *sp;
1032        __u32 ctsn;
1033        struct sk_buff *skb;
1034
1035        asoc = ulpq->asoc;
1036        sp = sctp_sk(asoc->base.sk);
1037
1038        /* If the association is already in Partial Delivery mode
1039         * we have nothing to do.
1040         */
1041        if (ulpq->pd_mode)
1042                return;
1043
1044        /* Data must be at or below the Cumulative TSN ACK Point to
1045         * start partial delivery.
1046         */
1047        skb = skb_peek(&asoc->ulpq.reasm);
1048        if (skb != NULL) {
1049                ctsn = sctp_skb2event(skb)->tsn;
1050                if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
1051                        return;
1052        }
1053
 1054        /* If the user enabled the fragment interleave socket option,
 1055         * multiple associations can enter partial delivery.
 1056         * Otherwise, we can only enter partial delivery if the
 1057         * socket is not in partial delivery mode.
1058         */
1059        if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
1060                /* Is partial delivery possible?  */
1061                event = sctp_ulpq_retrieve_first(ulpq);
1062                /* Send event to the ULP.   */
1063                if (event) {
1064                        struct sk_buff_head temp;
1065
1066                        skb_queue_head_init(&temp);
1067                        __skb_queue_tail(&temp, sctp_event2skb(event));
1068                        sctp_ulpq_tail_event(ulpq, &temp);
1069                        sctp_ulpq_set_pd(ulpq);
1070                        return;
1071                }
1072        }
1073}
1074
1075/* Renege some packets to make room for an incoming chunk.  */
1076void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
1077                      gfp_t gfp)
1078{
1079        struct sctp_association *asoc = ulpq->asoc;
1080        __u32 freed = 0;
1081        __u16 needed;
1082
1083        needed = ntohs(chunk->chunk_hdr->length) -
1084                 sizeof(struct sctp_data_chunk);
1085
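             /* Only renege if nothing is already queued for the user; free
              * room first from the ordering queue, then from the reassembly
              * queue.
              */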
1086        if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
1087                freed = sctp_ulpq_renege_order(ulpq, needed);
1088                if (freed < needed)
1089                        freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
1090        }
1091        /* If able to free enough room, accept this chunk. */
1092        if (sk_rmem_schedule(asoc->base.sk, chunk->skb, needed) &&
1093            freed >= needed) {
1094                int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
1095                /*
 1096                 * Enter partial delivery if the chunk has not been
1097                 * delivered; otherwise, drain the reassembly queue.
1098                 */
1099                if (retval <= 0)
1100                        sctp_ulpq_partial_delivery(ulpq, gfp);
1101                else if (retval == 1)
1102                        sctp_ulpq_reasm_drain(ulpq);
1103        }
1104
1105        sk_mem_reclaim(asoc->base.sk);
1106}
1107
1108
1109
1110/* Notify the application if an association is aborted and in
1111 * partial delivery mode.  Send up any pending received messages.
1112 */
1113void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
1114{
1115        struct sctp_ulpevent *ev = NULL;
1116        struct sctp_sock *sp;
1117        struct sock *sk;
1118
1119        if (!ulpq->pd_mode)
1120                return;
1121
1122        sk = ulpq->asoc->base.sk;
1123        sp = sctp_sk(sk);
1124        if (sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
1125                                       SCTP_PARTIAL_DELIVERY_EVENT))
1126                ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
1127                                              SCTP_PARTIAL_DELIVERY_ABORTED,
1128                                              0, 0, 0, gfp);
1129        if (ev)
1130                __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
1131
1132        /* If there is data waiting, send it up the socket now. */
1133        if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
1134                sp->data_ready_signalled = 1;
1135                sk->sk_data_ready(sk);
1136        }
1137}
1138