/* linux/net/sctp/outqueue.c */
   1/* SCTP kernel implementation
   2 * (C) Copyright IBM Corp. 2001, 2004
   3 * Copyright (c) 1999-2000 Cisco, Inc.
   4 * Copyright (c) 1999-2001 Motorola, Inc.
   5 * Copyright (c) 2001-2003 Intel Corp.
   6 *
   7 * This file is part of the SCTP kernel implementation
   8 *
   9 * These functions implement the sctp_outq class.   The outqueue handles
  10 * bundling and queueing of outgoing SCTP chunks.
  11 *
  12 * This SCTP implementation is free software;
  13 * you can redistribute it and/or modify it under the terms of
  14 * the GNU General Public License as published by
  15 * the Free Software Foundation; either version 2, or (at your option)
  16 * any later version.
  17 *
  18 * This SCTP implementation is distributed in the hope that it
  19 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
  20 *                 ************************
  21 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  22 * See the GNU General Public License for more details.
  23 *
  24 * You should have received a copy of the GNU General Public License
  25 * along with GNU CC; see the file COPYING.  If not, see
  26 * <http://www.gnu.org/licenses/>.
  27 *
  28 * Please send any bug reports or fixes you make to the
  29 * email address(es):
  30 *    lksctp developers <linux-sctp@vger.kernel.org>
  31 *
  32 * Written or modified by:
  33 *    La Monte H.P. Yarroll <piggy@acm.org>
  34 *    Karl Knutson          <karl@athena.chicago.il.us>
  35 *    Perry Melange         <pmelange@null.cc.uic.edu>
  36 *    Xingang Guo           <xingang.guo@intel.com>
  37 *    Hui Huang             <hui.huang@nokia.com>
  38 *    Sridhar Samudrala     <sri@us.ibm.com>
  39 *    Jon Grimm             <jgrimm@us.ibm.com>
  40 */
  41
  42#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  43
  44#include <linux/types.h>
  45#include <linux/list.h>   /* For struct list_head */
  46#include <linux/socket.h>
  47#include <linux/ip.h>
  48#include <linux/slab.h>
  49#include <net/sock.h>     /* For skb_set_owner_w */
  50
  51#include <net/sctp/sctp.h>
  52#include <net/sctp/sm.h>
  53#include <net/sctp/stream_sched.h>
  54
  55/* Declare internal functions here.  */
  56static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
  57static void sctp_check_transmitted(struct sctp_outq *q,
  58                                   struct list_head *transmitted_queue,
  59                                   struct sctp_transport *transport,
  60                                   union sctp_addr *saddr,
  61                                   struct sctp_sackhdr *sack,
  62                                   __u32 *highest_new_tsn);
  63
  64static void sctp_mark_missing(struct sctp_outq *q,
  65                              struct list_head *transmitted_queue,
  66                              struct sctp_transport *transport,
  67                              __u32 highest_new_tsn,
  68                              int count_of_newacks);
  69
  70static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp);
  71
  72/* Add data to the front of the queue. */
  73static inline void sctp_outq_head_data(struct sctp_outq *q,
  74                                       struct sctp_chunk *ch)
  75{
  76        struct sctp_stream_out_ext *oute;
  77        __u16 stream;
  78
  79        list_add(&ch->list, &q->out_chunk_list);
  80        q->out_qlen += ch->skb->len;
  81
  82        stream = sctp_chunk_stream_no(ch);
  83        oute = SCTP_SO(&q->asoc->stream, stream)->ext;
  84        list_add(&ch->stream_list, &oute->outq);
  85}
  86
  87/* Take data from the front of the queue. */
  88static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
  89{
  90        return q->sched->dequeue(q);
  91}
  92
  93/* Add data chunk to the end of the queue. */
  94static inline void sctp_outq_tail_data(struct sctp_outq *q,
  95                                       struct sctp_chunk *ch)
  96{
  97        struct sctp_stream_out_ext *oute;
  98        __u16 stream;
  99
 100        list_add_tail(&ch->list, &q->out_chunk_list);
 101        q->out_qlen += ch->skb->len;
 102
 103        stream = sctp_chunk_stream_no(ch);
 104        oute = SCTP_SO(&q->asoc->stream, stream)->ext;
 105        list_add_tail(&ch->stream_list, &oute->outq);
 106}
 107
 108/*
 109 * SFR-CACC algorithm:
 110 * D) If count_of_newacks is greater than or equal to 2
 111 * and t was not sent to the current primary then the
 112 * sender MUST NOT increment missing report count for t.
 113 */
 114static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
 115                                       struct sctp_transport *transport,
 116                                       int count_of_newacks)
 117{
 118        if (count_of_newacks >= 2 && transport != primary)
 119                return 1;
 120        return 0;
 121}
 122
 123/*
 124 * SFR-CACC algorithm:
 125 * F) If count_of_newacks is less than 2, let d be the
 126 * destination to which t was sent. If cacc_saw_newack
 127 * is 0 for destination d, then the sender MUST NOT
 128 * increment missing report count for t.
 129 */
 130static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport,
 131                                       int count_of_newacks)
 132{
 133        if (count_of_newacks < 2 &&
 134                        (transport && !transport->cacc.cacc_saw_newack))
 135                return 1;
 136        return 0;
 137}
 138
 139/*
 140 * SFR-CACC algorithm:
 141 * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD
 142 * execute steps C, D, F.
 143 *
 144 * C has been implemented in sctp_outq_sack
 145 */
 146static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary,
 147                                     struct sctp_transport *transport,
 148                                     int count_of_newacks)
 149{
 150        if (!primary->cacc.cycling_changeover) {
 151                if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks))
 152                        return 1;
 153                if (sctp_cacc_skip_3_1_f(transport, count_of_newacks))
 154                        return 1;
 155                return 0;
 156        }
 157        return 0;
 158}
 159
 160/*
 161 * SFR-CACC algorithm:
 162 * 3.2) Else if CYCLING_CHANGEOVER is 1, and t is less
 163 * than next_tsn_at_change of the current primary, then
 164 * the sender MUST NOT increment missing report count
 165 * for t.
 166 */
 167static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn)
 168{
 169        if (primary->cacc.cycling_changeover &&
 170            TSN_lt(tsn, primary->cacc.next_tsn_at_change))
 171                return 1;
 172        return 0;
 173}
 174
 175/*
 176 * SFR-CACC algorithm:
 177 * 3) If the missing report count for TSN t is to be
 178 * incremented according to [RFC2960] and
 179 * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set,
 180 * then the sender MUST further execute steps 3.1 and
 181 * 3.2 to determine if the missing report count for
 182 * TSN t SHOULD NOT be incremented.
 183 *
 184 * 3.3) If 3.1 and 3.2 do not dictate that the missing
 185 * report count for t should not be incremented, then
 186 * the sender SHOULD increment missing report count for
 187 * t (according to [RFC2960] and [SCTP_STEWART_2002]).
 188 */
 189static inline int sctp_cacc_skip(struct sctp_transport *primary,
 190                                 struct sctp_transport *transport,
 191                                 int count_of_newacks,
 192                                 __u32 tsn)
 193{
 194        if (primary->cacc.changeover_active &&
 195            (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) ||
 196             sctp_cacc_skip_3_2(primary, tsn)))
 197                return 1;
 198        return 0;
 199}
 200
 201/* Initialize an existing sctp_outq.  This does the boring stuff.
 202 * You still need to define handlers if you really want to DO
 203 * something with this structure...
 204 */
 205void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
 206{
 207        memset(q, 0, sizeof(struct sctp_outq));
 208
 209        q->asoc = asoc;
 210        INIT_LIST_HEAD(&q->out_chunk_list);
 211        INIT_LIST_HEAD(&q->control_chunk_list);
 212        INIT_LIST_HEAD(&q->retransmit);
 213        INIT_LIST_HEAD(&q->sacked);
 214        INIT_LIST_HEAD(&q->abandoned);
 215        sctp_sched_set_sched(asoc, sctp_sk(asoc->base.sk)->default_ss);
 216}
 217
 218/* Free the outqueue structure and any related pending chunks.
 219 */
 220static void __sctp_outq_teardown(struct sctp_outq *q)
 221{
 222        struct sctp_transport *transport;
 223        struct list_head *lchunk, *temp;
 224        struct sctp_chunk *chunk, *tmp;
 225
 226        /* Throw away unacknowledged chunks. */
 227        list_for_each_entry(transport, &q->asoc->peer.transport_addr_list,
 228                        transports) {
 229                while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) {
 230                        chunk = list_entry(lchunk, struct sctp_chunk,
 231                                           transmitted_list);
 232                        /* Mark as part of a failed message. */
 233                        sctp_chunk_fail(chunk, q->error);
 234                        sctp_chunk_free(chunk);
 235                }
 236        }
 237
 238        /* Throw away chunks that have been gap ACKed.  */
 239        list_for_each_safe(lchunk, temp, &q->sacked) {
 240                list_del_init(lchunk);
 241                chunk = list_entry(lchunk, struct sctp_chunk,
 242                                   transmitted_list);
 243                sctp_chunk_fail(chunk, q->error);
 244                sctp_chunk_free(chunk);
 245        }
 246
 247        /* Throw away any chunks in the retransmit queue. */
 248        list_for_each_safe(lchunk, temp, &q->retransmit) {
 249                list_del_init(lchunk);
 250                chunk = list_entry(lchunk, struct sctp_chunk,
 251                                   transmitted_list);
 252                sctp_chunk_fail(chunk, q->error);
 253                sctp_chunk_free(chunk);
 254        }
 255
 256        /* Throw away any chunks that are in the abandoned queue. */
 257        list_for_each_safe(lchunk, temp, &q->abandoned) {
 258                list_del_init(lchunk);
 259                chunk = list_entry(lchunk, struct sctp_chunk,
 260                                   transmitted_list);
 261                sctp_chunk_fail(chunk, q->error);
 262                sctp_chunk_free(chunk);
 263        }
 264
 265        /* Throw away any leftover data chunks. */
 266        while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
 267                sctp_sched_dequeue_done(q, chunk);
 268
 269                /* Mark as send failure. */
 270                sctp_chunk_fail(chunk, q->error);
 271                sctp_chunk_free(chunk);
 272        }
 273
 274        /* Throw away any leftover control chunks. */
 275        list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
 276                list_del_init(&chunk->list);
 277                sctp_chunk_free(chunk);
 278        }
 279}
 280
/* Reset an outqueue: discard all pending chunks, then re-initialize the
 * queue so the same association can keep using it.
 */
void sctp_outq_teardown(struct sctp_outq *q)
{
	__sctp_outq_teardown(q);
	sctp_outq_init(q->asoc, q);
}
 286
/* Free the outqueue structure and any related pending chunks.
 * Unlike sctp_outq_teardown(), the queue is not re-initialized
 * afterwards.
 */
void sctp_outq_free(struct sctp_outq *q)
{
	/* Throw away leftover chunks. */
	__sctp_outq_teardown(q);
}
 293
 294/* Put a new chunk in an sctp_outq.  */
 295void sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
 296{
 297        struct net *net = sock_net(q->asoc->base.sk);
 298
 299        pr_debug("%s: outq:%p, chunk:%p[%s]\n", __func__, q, chunk,
 300                 chunk && chunk->chunk_hdr ?
 301                 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
 302                 "illegal chunk");
 303
 304        /* If it is data, queue it up, otherwise, send it
 305         * immediately.
 306         */
 307        if (sctp_chunk_is_data(chunk)) {
 308                pr_debug("%s: outqueueing: outq:%p, chunk:%p[%s])\n",
 309                         __func__, q, chunk, chunk && chunk->chunk_hdr ?
 310                         sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
 311                         "illegal chunk");
 312
 313                sctp_outq_tail_data(q, chunk);
 314                if (chunk->asoc->peer.prsctp_capable &&
 315                    SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
 316                        chunk->asoc->sent_cnt_removable++;
 317                if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
 318                        SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
 319                else
 320                        SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
 321        } else {
 322                list_add_tail(&chunk->list, &q->control_chunk_list);
 323                SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
 324        }
 325
 326        if (!q->cork)
 327                sctp_outq_flush(q, 0, gfp);
 328}
 329
 330/* Insert a chunk into the sorted list based on the TSNs.  The retransmit list
 331 * and the abandoned list are in ascending order.
 332 */
 333static void sctp_insert_list(struct list_head *head, struct list_head *new)
 334{
 335        struct list_head *pos;
 336        struct sctp_chunk *nchunk, *lchunk;
 337        __u32 ntsn, ltsn;
 338        int done = 0;
 339
 340        nchunk = list_entry(new, struct sctp_chunk, transmitted_list);
 341        ntsn = ntohl(nchunk->subh.data_hdr->tsn);
 342
 343        list_for_each(pos, head) {
 344                lchunk = list_entry(pos, struct sctp_chunk, transmitted_list);
 345                ltsn = ntohl(lchunk->subh.data_hdr->tsn);
 346                if (TSN_lt(ntsn, ltsn)) {
 347                        list_add(new, pos->prev);
 348                        done = 1;
 349                        break;
 350                }
 351        }
 352        if (!done)
 353                list_add_tail(new, head);
 354}
 355
 356static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
 357                                  struct sctp_sndrcvinfo *sinfo,
 358                                  struct list_head *queue, int msg_len)
 359{
 360        struct sctp_chunk *chk, *temp;
 361
 362        list_for_each_entry_safe(chk, temp, queue, transmitted_list) {
 363                struct sctp_stream_out *streamout;
 364
 365                if (!chk->msg->abandoned &&
 366                    (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
 367                     chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive))
 368                        continue;
 369
 370                chk->msg->abandoned = 1;
 371                list_del_init(&chk->transmitted_list);
 372                sctp_insert_list(&asoc->outqueue.abandoned,
 373                                 &chk->transmitted_list);
 374
 375                streamout = SCTP_SO(&asoc->stream, chk->sinfo.sinfo_stream);
 376                asoc->sent_cnt_removable--;
 377                asoc->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
 378                streamout->ext->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
 379
 380                if (queue != &asoc->outqueue.retransmit &&
 381                    !chk->tsn_gap_acked) {
 382                        if (chk->transport)
 383                                chk->transport->flight_size -=
 384                                                sctp_data_size(chk);
 385                        asoc->outqueue.outstanding_bytes -= sctp_data_size(chk);
 386                }
 387
 388                msg_len -= chk->skb->truesize + sizeof(struct sctp_chunk);
 389                if (msg_len <= 0)
 390                        break;
 391        }
 392
 393        return msg_len;
 394}
 395
 396static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
 397                                    struct sctp_sndrcvinfo *sinfo, int msg_len)
 398{
 399        struct sctp_outq *q = &asoc->outqueue;
 400        struct sctp_chunk *chk, *temp;
 401
 402        q->sched->unsched_all(&asoc->stream);
 403
 404        list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) {
 405                if (!chk->msg->abandoned &&
 406                    (!(chk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG) ||
 407                     !SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
 408                     chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive))
 409                        continue;
 410
 411                chk->msg->abandoned = 1;
 412                sctp_sched_dequeue_common(q, chk);
 413                asoc->sent_cnt_removable--;
 414                asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
 415                if (chk->sinfo.sinfo_stream < asoc->stream.outcnt) {
 416                        struct sctp_stream_out *streamout =
 417                                SCTP_SO(&asoc->stream, chk->sinfo.sinfo_stream);
 418
 419                        streamout->ext->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
 420                }
 421
 422                msg_len -= chk->skb->truesize + sizeof(struct sctp_chunk);
 423                sctp_chunk_free(chk);
 424                if (msg_len <= 0)
 425                        break;
 426        }
 427
 428        q->sched->sched_all(&asoc->stream);
 429
 430        return msg_len;
 431}
 432
 433/* Abandon the chunks according their priorities */
 434void sctp_prsctp_prune(struct sctp_association *asoc,
 435                       struct sctp_sndrcvinfo *sinfo, int msg_len)
 436{
 437        struct sctp_transport *transport;
 438
 439        if (!asoc->peer.prsctp_capable || !asoc->sent_cnt_removable)
 440                return;
 441
 442        msg_len = sctp_prsctp_prune_sent(asoc, sinfo,
 443                                         &asoc->outqueue.retransmit,
 444                                         msg_len);
 445        if (msg_len <= 0)
 446                return;
 447
 448        list_for_each_entry(transport, &asoc->peer.transport_addr_list,
 449                            transports) {
 450                msg_len = sctp_prsctp_prune_sent(asoc, sinfo,
 451                                                 &transport->transmitted,
 452                                                 msg_len);
 453                if (msg_len <= 0)
 454                        return;
 455        }
 456
 457        sctp_prsctp_prune_unsent(asoc, sinfo, msg_len);
 458}
 459
/* Mark all the eligible packets on a transport for retransmission.
 *
 * Walks @transport's transmitted queue and, depending on @reason, moves
 * chunks either to the abandoned list or to the ordered retransmit
 * queue, while keeping rwnd, flight_size and outstanding_bytes
 * accounting in step.
 */
void sctp_retransmit_mark(struct sctp_outq *q,
			  struct sctp_transport *transport,
			  __u8 reason)
{
	struct list_head *lchunk, *ltemp;
	struct sctp_chunk *chunk;

	/* Walk through the specified transmitted queue.  */
	list_for_each_safe(lchunk, ltemp, &transport->transmitted) {
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);

		/* If the chunk is abandoned, move it to abandoned list. */
		if (sctp_chunk_abandoned(chunk)) {
			list_del_init(lchunk);
			sctp_insert_list(&q->abandoned, lchunk);

			/* If this chunk has not been previously acked,
			 * stop considering it 'outstanding'.  Our peer
			 * will most likely never see it since it will
			 * not be retransmitted
			 */
			if (!chunk->tsn_gap_acked) {
				if (chunk->transport)
					chunk->transport->flight_size -=
							sctp_data_size(chunk);
				q->outstanding_bytes -= sctp_data_size(chunk);
				q->asoc->peer.rwnd += sctp_data_size(chunk);
			}
			continue;
		}

		/* If we are doing retransmission due to a timeout or pmtu
		 * discovery, only the chunks that are not yet acked should
		 * be added to the retransmit queue.  For fast retransmit,
		 * only chunks explicitly flagged SCTP_NEED_FRTX qualify.
		 */
		if ((reason == SCTP_RTXR_FAST_RTX  &&
			    (chunk->fast_retransmit == SCTP_NEED_FRTX)) ||
		    (reason != SCTP_RTXR_FAST_RTX  && !chunk->tsn_gap_acked)) {
			/* RFC 2960 6.2.1 Processing a Received SACK
			 *
			 * C) Any time a DATA chunk is marked for
			 * retransmission (via either T3-rtx timer expiration
			 * (Section 6.3.3) or via fast retransmit
			 * (Section 7.2.4)), add the data size of those
			 * chunks to the rwnd.
			 */
			q->asoc->peer.rwnd += sctp_data_size(chunk);
			q->outstanding_bytes -= sctp_data_size(chunk);
			if (chunk->transport)
				transport->flight_size -= sctp_data_size(chunk);

			/* sctpimpguide-05 Section 2.8.2
			 * M5) If a T3-rtx timer expires, the
			 * 'TSN.Missing.Report' of all affected TSNs is set
			 * to 0.
			 */
			chunk->tsn_missing_report = 0;

			/* If a chunk that is being used for RTT measurement
			 * has to be retransmitted, we cannot use this chunk
			 * anymore for RTT measurements. Reset rto_pending so
			 * that a new RTT measurement is started when a new
			 * data chunk is sent.
			 */
			if (chunk->rtt_in_progress) {
				chunk->rtt_in_progress = 0;
				transport->rto_pending = 0;
			}

			/* Move the chunk to the retransmit queue. The chunks
			 * on the retransmit queue are always kept in order.
			 */
			list_del_init(lchunk);
			sctp_insert_list(&q->retransmit, lchunk);
		}
	}

	pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d, "
		 "flight_size:%d, pba:%d\n", __func__, transport, reason,
		 transport->cwnd, transport->ssthresh, transport->flight_size,
		 transport->partial_bytes_acked);
}
 544
/* Mark all the eligible packets on a transport for retransmission and force
 * one packet out.
 *
 * @q:         the association's outqueue
 * @transport: transport whose chunks are to be retransmitted
 * @reason:    what triggered the retransmission (T3-rtx expiry, fast
 *             retransmit, PMTU discovery, or T1 retransmit)
 */
void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
		     enum sctp_retransmit_reason reason)
{
	struct net *net = sock_net(q->asoc->base.sk);

	/* Per-reason bookkeeping: MIB counters, congestion window
	 * adjustments and retransmission statistics.
	 */
	switch (reason) {
	case SCTP_RTXR_T3_RTX:
		SCTP_INC_STATS(net, SCTP_MIB_T3_RETRANSMITS);
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
		/* Update the retran path if the T3-rtx timer has expired for
		 * the current retran path.
		 */
		if (transport == transport->asoc->peer.retran_path)
			sctp_assoc_update_retran_path(transport->asoc);
		transport->asoc->rtx_data_chunks +=
			transport->asoc->unack_data;
		break;
	case SCTP_RTXR_FAST_RTX:
		SCTP_INC_STATS(net, SCTP_MIB_FAST_RETRANSMITS);
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
		/* Record that a fast retransmit is pending; the flush is
		 * deferred to the end of SACK processing (see below).
		 */
		q->fast_rtx = 1;
		break;
	case SCTP_RTXR_PMTUD:
		SCTP_INC_STATS(net, SCTP_MIB_PMTUD_RETRANSMITS);
		break;
	case SCTP_RTXR_T1_RTX:
		SCTP_INC_STATS(net, SCTP_MIB_T1_RETRANSMITS);
		transport->asoc->init_retries++;
		break;
	default:
		/* All defined reasons are handled above; anything else is a
		 * programming error.
		 */
		BUG();
	}

	sctp_retransmit_mark(q, transport, reason);

	/* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
	 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
	 * following the procedures outlined in C1 - C5.
	 */
	if (reason == SCTP_RTXR_T3_RTX)
		q->asoc->stream.si->generate_ftsn(q, q->asoc->ctsn_ack_point);

	/* Flush the queues only on timeout, since fast_rtx is only
	 * triggered during sack processing and the queue
	 * will be flushed at the end.
	 */
	if (reason != SCTP_RTXR_FAST_RTX)
		sctp_outq_flush(q, /* rtx_timeout */ 1, GFP_ATOMIC);
}
 597
/*
 * Transmit DATA chunks on the retransmit queue.  Upon return from
 * __sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which
 * need to be transmitted by the caller.
 * We assume that pkt->transport has already been set.
 *
 * @q:           outqueue whose retransmit list is flushed
 * @pkt:         packet being assembled for pkt->transport
 * @rtx_timeout: non-zero when called due to T3-rtx timer expiration
 * @start_timer: out-parameter; set to 1 when the caller should (re)start
 *               the T3-rtx timer (i.e. at least one append succeeded)
 * @gfp:         allocation flags used for packet transmission
 *
 * The return value is a normal kernel error return value.
 */
static int __sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
				 int rtx_timeout, int *start_timer, gfp_t gfp)
{
	struct sctp_transport *transport = pkt->transport;
	struct sctp_chunk *chunk, *chunk1;
	struct list_head *lqueue;
	enum sctp_xmit status;
	int error = 0;
	int timer = 0;
	int done = 0;
	int fast_rtx;

	lqueue = &q->retransmit;
	fast_rtx = q->fast_rtx;

	/* This loop handles time-out retransmissions, fast retransmissions,
	 * and retransmissions due to opening of window.
	 *
	 * RFC 2960 6.3.3 Handle T3-rtx Expiration
	 *
	 * E3) Determine how many of the earliest (i.e., lowest TSN)
	 * outstanding DATA chunks for the address for which the
	 * T3-rtx has expired will fit into a single packet, subject
	 * to the MTU constraint for the path corresponding to the
	 * destination transport address to which the retransmission
	 * is being sent (this may be different from the address for
	 * which the timer expires [see Section 6.4]). Call this value
	 * K. Bundle and retransmit those K DATA chunks in a single
	 * packet to the destination endpoint.
	 *
	 * [Just to be painfully clear, if we are retransmitting
	 * because a timeout just happened, we should send only ONE
	 * packet of retransmitted data.]
	 *
	 * For fast retransmissions we also send only ONE packet.  However,
	 * if we are just flushing the queue due to open window, we'll
	 * try to send as much as possible.
	 */
	list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
		/* If the chunk is abandoned, move it to abandoned list. */
		if (sctp_chunk_abandoned(chunk)) {
			list_del_init(&chunk->transmitted_list);
			sctp_insert_list(&q->abandoned,
					 &chunk->transmitted_list);
			continue;
		}

		/* Make sure that Gap Acked TSNs are not retransmitted.  A
		 * simple approach is just to move such TSNs out of the
		 * way and into a 'transmitted' queue and skip to the
		 * next chunk.
		 */
		if (chunk->tsn_gap_acked) {
			list_move_tail(&chunk->transmitted_list,
				       &transport->transmitted);
			continue;
		}

		/* If we are doing fast retransmit, ignore non-fast-retransmit
		 * chunks
		 */
		if (fast_rtx && !chunk->fast_retransmit)
			continue;

redo:
		/* Attempt to append this chunk to the packet. */
		status = sctp_packet_append_chunk(pkt, chunk);

		switch (status) {
		case SCTP_XMIT_PMTU_FULL:
			if (!pkt->has_data && !pkt->has_cookie_echo) {
				/* If this packet did not contain DATA then
				 * retransmission did not happen, so do it
				 * again.  We'll ignore the error here since
				 * control chunks are already freed so there
				 * is nothing we can do.
				 */
				sctp_packet_transmit(pkt, gfp);
				goto redo;
			}

			/* Send this packet.  */
			error = sctp_packet_transmit(pkt, gfp);

			/* If we are retransmitting, we should only
			 * send a single packet.
			 * Otherwise, try appending this chunk again.
			 */
			if (rtx_timeout || fast_rtx)
				done = 1;
			else
				goto redo;

			/* Bundle next chunk in the next round.  */
			break;

		case SCTP_XMIT_RWND_FULL:
			/* Send this packet. */
			error = sctp_packet_transmit(pkt, gfp);

			/* Stop sending DATA as there is no more room
			 * at the receiver.
			 */
			done = 1;
			break;

		case SCTP_XMIT_DELAY:
			/* Send this packet. */
			error = sctp_packet_transmit(pkt, gfp);

			/* Stop sending DATA because of nagle delay. */
			done = 1;
			break;

		default:
			/* The append was successful, so add this chunk to
			 * the transmitted list.
			 */
			list_move_tail(&chunk->transmitted_list,
				       &transport->transmitted);

			/* Mark the chunk as ineligible for fast retransmit
			 * after it is retransmitted.
			 */
			if (chunk->fast_retransmit == SCTP_NEED_FRTX)
				chunk->fast_retransmit = SCTP_DONT_FRTX;

			q->asoc->stats.rtxchunks++;
			break;
		}

		/* Set the timer if there were no errors */
		if (!error && !timer)
			timer = 1;

		if (done)
			break;
	}

	/* If we are here due to a retransmit timeout or a fast
	 * retransmit and if there are any chunks left in the retransmit
	 * queue that could not fit in the PMTU sized packet, they need
	 * to be marked as ineligible for a subsequent fast retransmit.
	 */
	if (rtx_timeout || fast_rtx) {
		list_for_each_entry(chunk1, lqueue, transmitted_list) {
			if (chunk1->fast_retransmit == SCTP_NEED_FRTX)
				chunk1->fast_retransmit = SCTP_DONT_FRTX;
		}
	}

	*start_timer = timer;

	/* Clear fast retransmit hint */
	if (fast_rtx)
		q->fast_rtx = 0;

	return error;
}
 765
 766/* Cork the outqueue so queued chunks are really queued. */
 767void sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp)
 768{
 769        if (q->cork)
 770                q->cork = 0;
 771
 772        sctp_outq_flush(q, 0, gfp);
 773}
 774
 775static int sctp_packet_singleton(struct sctp_transport *transport,
 776                                 struct sctp_chunk *chunk, gfp_t gfp)
 777{
 778        const struct sctp_association *asoc = transport->asoc;
 779        const __u16 sport = asoc->base.bind_addr.port;
 780        const __u16 dport = asoc->peer.port;
 781        const __u32 vtag = asoc->peer.i.init_tag;
 782        struct sctp_packet singleton;
 783
 784        sctp_packet_init(&singleton, transport, sport, dport);
 785        sctp_packet_config(&singleton, vtag, 0);
 786        sctp_packet_append_chunk(&singleton, chunk);
 787        return sctp_packet_transmit(&singleton, gfp);
 788}
 789
/* Struct to hold the context during sctp outq flush.  Shared by the
 * sctp_outq_flush*() helpers: tracks which transport/packet chunks are
 * currently being bundled into, and which transports have packets
 * pending transmission.
 */
struct sctp_flush_ctx {
	/* The outqueue being flushed. */
	struct sctp_outq *q;
	/* Current transport being used. It's NOT the same as curr active one */
	struct sctp_transport *transport;
	/* These transports have chunks to send (linked via send_ready). */
	struct list_head transport_list;
	/* Association owning the outqueue (q->asoc). */
	struct sctp_association *asoc;
	/* Packet on the current transport above */
	struct sctp_packet *packet;
	/* Allocation flags to use for any memory needed during the flush. */
	gfp_t gfp;
};
 802
/* Pick the transport @chunk should be sent on and make it the context's
 * current transport, preparing that transport's packet for bundling.
 *
 * Rules visible below:
 *  - A chunk with no explicit transport: non-DATA chunks try the current
 *    transport (if the destination matches) or a lookup by destination
 *    address; anything still unresolved falls back to the active path.
 *  - A chunk bound to an INACTIVE/UNCONFIRMED/PF transport is redirected
 *    to the active path unless it is a HEARTBEAT, HEARTBEAT ACK or
 *    ASCONF ACK, which must go to their original destination.
 *
 * On a switch, the new transport is queued on ctx->transport_list for the
 * final transmit pass, its packet is (re)configured, and the burst limit
 * is applied.
 */
static void sctp_outq_select_transport(struct sctp_flush_ctx *ctx,
				       struct sctp_chunk *chunk)
{
	struct sctp_transport *new_transport = chunk->transport;

	if (!new_transport) {
		if (!sctp_chunk_is_data(chunk)) {
			/* If we have a prior transport pointer, see if
			 * the destination address of the chunk
			 * matches the destination address of the
			 * current transport.  If not a match, then
			 * try to look up the transport with a given
			 * destination address.  We do this because
			 * after processing ASCONFs, we may have new
			 * transports created.
			 */
			if (ctx->transport && sctp_cmp_addr_exact(&chunk->dest,
							&ctx->transport->ipaddr))
				new_transport = ctx->transport;
			else
				new_transport = sctp_assoc_lookup_paddr(ctx->asoc,
								  &chunk->dest);
		}

		/* if we still don't have a new transport, then
		 * use the current active path.
		 */
		if (!new_transport)
			new_transport = ctx->asoc->peer.active_path;
	} else {
		__u8 type;

		switch (new_transport->state) {
		case SCTP_INACTIVE:
		case SCTP_UNCONFIRMED:
		case SCTP_PF:
			/* If the chunk is Heartbeat or Heartbeat Ack,
			 * send it to chunk->transport, even if it's
			 * inactive.
			 *
			 * 3.3.6 Heartbeat Acknowledgement:
			 * ...
			 * A HEARTBEAT ACK is always sent to the source IP
			 * address of the IP datagram containing the
			 * HEARTBEAT chunk to which this ack is responding.
			 * ...
			 *
			 * ASCONF_ACKs also must be sent to the source.
			 */
			type = chunk->chunk_hdr->type;
			if (type != SCTP_CID_HEARTBEAT &&
			    type != SCTP_CID_HEARTBEAT_ACK &&
			    type != SCTP_CID_ASCONF_ACK)
				new_transport = ctx->asoc->peer.active_path;
			break;
		default:
			break;
		}
	}

	/* Are we switching transports? Take care of transport locks. */
	if (new_transport != ctx->transport) {
		ctx->transport = new_transport;
		ctx->packet = &ctx->transport->packet;

		/* Remember this transport so the final per-transport
		 * transmit pass flushes its packet.
		 */
		if (list_empty(&ctx->transport->send_ready))
			list_add_tail(&ctx->transport->send_ready,
				      &ctx->transport_list);

		sctp_packet_config(ctx->packet,
				   ctx->asoc->peer.i.init_tag,
				   ctx->asoc->peer.ecn_capable);
		/* We've switched transports, so apply the
		 * Burst limit to the new transport.
		 */
		sctp_transport_burst_limited(ctx->transport);
	}
}
 882
/* Flush the control chunk queue: walk ctx->q->control_chunk_list and
 * bundle/transmit each chunk on the appropriate transport.
 *
 * INIT, INIT ACK and SHUTDOWN COMPLETE are sent immediately as singleton
 * packets (6.10 forbids bundling them).  "Response" chunks are limited to
 * one packet; everything else may be bundled normally.  A chunk that
 * could not be appended is put back at the head of the control list.
 * A singleton transmit error is reported via sk->sk_err and aborts the
 * rest of the flush.
 */
static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
{
	struct sctp_chunk *chunk, *tmp;
	enum sctp_xmit status;
	int one_packet, error;

	list_for_each_entry_safe(chunk, tmp, &ctx->q->control_chunk_list, list) {
		one_packet = 0;

		/* RFC 5061, 5.3
		 * F1) This means that until such time as the ASCONF
		 * containing the add is acknowledged, the sender MUST
		 * NOT use the new IP address as a source for ANY SCTP
		 * packet except on carrying an ASCONF Chunk.
		 */
		if (ctx->asoc->src_out_of_asoc_ok &&
		    chunk->chunk_hdr->type != SCTP_CID_ASCONF)
			continue;

		list_del_init(&chunk->list);

		/* Pick the right transport to use. Should always be true for
		 * the first chunk as we don't have a transport by then.
		 */
		sctp_outq_select_transport(ctx, chunk);

		switch (chunk->chunk_hdr->type) {
		/* 6.10 Bundling
		 *   ...
		 *   An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN
		 *   COMPLETE with any other chunks.  [Send them immediately.]
		 */
		case SCTP_CID_INIT:
		case SCTP_CID_INIT_ACK:
		case SCTP_CID_SHUTDOWN_COMPLETE:
			error = sctp_packet_singleton(ctx->transport, chunk,
						      ctx->gfp);
			if (error < 0) {
				ctx->asoc->base.sk->sk_err = -error;
				return;
			}
			break;

		case SCTP_CID_ABORT:
			/* An ABORT sent with the T bit uses our own vtag
			 * (the peer's vtag is unknown/unusable).
			 */
			if (sctp_test_T_bit(chunk))
				ctx->packet->vtag = ctx->asoc->c.my_vtag;
			/* fallthru */

		/* The following chunks are "response" chunks, i.e.
		 * they are generated in response to something we
		 * received.  If we are sending these, then we can
		 * send only 1 packet containing these chunks.
		 */
		case SCTP_CID_HEARTBEAT_ACK:
		case SCTP_CID_SHUTDOWN_ACK:
		case SCTP_CID_COOKIE_ACK:
		case SCTP_CID_COOKIE_ECHO:
		case SCTP_CID_ERROR:
		case SCTP_CID_ECN_CWR:
		case SCTP_CID_ASCONF_ACK:
			one_packet = 1;
			/* Fall through */

		case SCTP_CID_SACK:
		case SCTP_CID_HEARTBEAT:
		case SCTP_CID_SHUTDOWN:
		case SCTP_CID_ECN_ECNE:
		case SCTP_CID_ASCONF:
		case SCTP_CID_FWD_TSN:
		case SCTP_CID_I_FWD_TSN:
		case SCTP_CID_RECONF:
			status = sctp_packet_transmit_chunk(ctx->packet, chunk,
							    one_packet, ctx->gfp);
			if (status != SCTP_XMIT_OK) {
				/* put the chunk back */
				list_add(&chunk->list, &ctx->q->control_chunk_list);
				break;
			}

			ctx->asoc->stats.octrlchunks++;
			/* PR-SCTP C5) If a FORWARD TSN is sent, the
			 * sender MUST assure that at least one T3-rtx
			 * timer is running.
			 */
			if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN ||
			    chunk->chunk_hdr->type == SCTP_CID_I_FWD_TSN) {
				sctp_transport_reset_t3_rtx(ctx->transport);
				ctx->transport->last_time_sent = jiffies;
			}

			if (chunk == ctx->asoc->strreset_chunk)
				sctp_transport_reset_reconf_timer(ctx->transport);

			break;

		default:
			/* We built a chunk with an illegal type! */
			BUG();
		}
	}
}
 984
/* Flush the retransmit queue onto the peer's retransmit path.
 *
 * Returns false if new data shouldn't be sent afterwards: when the
 * retransmit path is unconfirmed, when a COOKIE-ECHO is in the packet
 * (only one chunk may be bundled with it), or when chunks are still
 * waiting in the retransmit queue.
 */
static bool sctp_outq_flush_rtx(struct sctp_flush_ctx *ctx,
				int rtx_timeout)
{
	int error, start_timer = 0;

	if (ctx->asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
		return false;

	/* Retransmissions always go out on the retransmit path; this
	 * mirrors the transport-switch bookkeeping done in
	 * sctp_outq_select_transport().
	 */
	if (ctx->transport != ctx->asoc->peer.retran_path) {
		/* Switch transports & prepare the packet.  */
		ctx->transport = ctx->asoc->peer.retran_path;
		ctx->packet = &ctx->transport->packet;

		if (list_empty(&ctx->transport->send_ready))
			list_add_tail(&ctx->transport->send_ready,
				      &ctx->transport_list);

		sctp_packet_config(ctx->packet, ctx->asoc->peer.i.init_tag,
				   ctx->asoc->peer.ecn_capable);
	}

	error = __sctp_outq_flush_rtx(ctx->q, ctx->packet, rtx_timeout,
				      &start_timer, ctx->gfp);
	if (error < 0)
		ctx->asoc->base.sk->sk_err = -error;

	if (start_timer) {
		sctp_transport_reset_t3_rtx(ctx->transport);
		ctx->transport->last_time_sent = jiffies;
	}

	/* This can happen on COOKIE-ECHO resend.  Only
	 * one chunk can get bundled with a COOKIE-ECHO.
	 */
	if (ctx->packet->has_cookie_echo)
		return false;

	/* Don't send new data if there is still data
	 * waiting to retransmit.
	 */
	if (!list_empty(&ctx->q->retransmit))
		return false;

	return true;
}
1031
/* Flush queued DATA chunks.
 *
 * DATA may only be sent in ESTABLISHED, SHUTDOWN-PENDING and
 * SHUTDOWN-RECEIVED states, plus COOKIE-ECHOED when it can be bundled
 * into the packet carrying the COOKIE-ECHO.  Chunks marked for
 * retransmission go first (RFC 2960 6.1 C); new chunks are then dequeued
 * via the stream scheduler and appended to per-transport packets until
 * the queue drains, an append fails, or a closed stream is hit.
 */
static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
				 int rtx_timeout)
{
	struct sctp_chunk *chunk;
	enum sctp_xmit status;

	/* Is it OK to send data chunks?  */
	switch (ctx->asoc->state) {
	case SCTP_STATE_COOKIE_ECHOED:
		/* Only allow bundling when this packet has a COOKIE-ECHO
		 * chunk.
		 */
		if (!ctx->packet || !ctx->packet->has_cookie_echo)
			return;

		/* fall through */
	case SCTP_STATE_ESTABLISHED:
	case SCTP_STATE_SHUTDOWN_PENDING:
	case SCTP_STATE_SHUTDOWN_RECEIVED:
		break;

	default:
		/* Do nothing. */
		return;
	}

	/* RFC 2960 6.1  Transmission of DATA Chunks
	 *
	 * C) When the time comes for the sender to transmit,
	 * before sending new DATA chunks, the sender MUST
	 * first transmit any outstanding DATA chunks which
	 * are marked for retransmission (limited by the
	 * current cwnd).
	 */
	if (!list_empty(&ctx->q->retransmit) &&
	    !sctp_outq_flush_rtx(ctx, rtx_timeout))
		return;

	/* Apply Max.Burst limitation to the current transport in
	 * case it will be used for new data.  We are going to
	 * rest it before we return, but we want to apply the limit
	 * to the currently queued data.
	 */
	if (ctx->transport)
		sctp_transport_burst_limited(ctx->transport);

	/* Finally, transmit new packets.  */
	while ((chunk = sctp_outq_dequeue_data(ctx->q)) != NULL) {
		__u32 sid = ntohs(chunk->subh.data_hdr->stream);
		__u8 stream_state = SCTP_SO(&ctx->asoc->stream, sid)->state;

		/* Has this chunk expired? */
		if (sctp_chunk_abandoned(chunk)) {
			sctp_sched_dequeue_done(ctx->q, chunk);
			sctp_chunk_fail(chunk, 0);
			sctp_chunk_free(chunk);
			continue;
		}

		/* Streams that were reset/closed stop the flush; the
		 * chunk goes back to the head of the data queue.
		 */
		if (stream_state == SCTP_STREAM_CLOSED) {
			sctp_outq_head_data(ctx->q, chunk);
			break;
		}

		sctp_outq_select_transport(ctx, chunk);

		pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p skb->users:%d\n",
			 __func__, ctx->q, chunk, chunk && chunk->chunk_hdr ?
			 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
			 "illegal chunk", ntohl(chunk->subh.data_hdr->tsn),
			 chunk->skb ? chunk->skb->head : NULL, chunk->skb ?
			 refcount_read(&chunk->skb->users) : -1);

		/* Add the chunk to the packet.  */
		status = sctp_packet_transmit_chunk(ctx->packet, chunk, 0,
						    ctx->gfp);
		if (status != SCTP_XMIT_OK) {
			/* We could not append this chunk, so put
			 * the chunk back on the output queue.
			 */
			pr_debug("%s: could not transmit tsn:0x%x, status:%d\n",
				 __func__, ntohl(chunk->subh.data_hdr->tsn),
				 status);

			sctp_outq_head_data(ctx->q, chunk);
			break;
		}

		/* The sender is in the SHUTDOWN-PENDING state,
		 * The sender MAY set the I-bit in the DATA
		 * chunk header.
		 */
		if (ctx->asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
			chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;
		if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
			ctx->asoc->stats.ouodchunks++;
		else
			ctx->asoc->stats.oodchunks++;

		/* Only now it's safe to consider this
		 * chunk as sent, sched-wise.
		 */
		sctp_sched_dequeue_done(ctx->q, chunk);

		list_add_tail(&chunk->transmitted_list,
			      &ctx->transport->transmitted);

		sctp_transport_reset_t3_rtx(ctx->transport);
		ctx->transport->last_time_sent = jiffies;

		/* Only let one DATA chunk get bundled with a
		 * COOKIE-ECHO chunk.
		 */
		if (ctx->packet->has_cookie_echo)
			break;
	}
}
1149
/* Transmit the packet built on every transport queued on
 * ctx->transport_list during the flush, and clear each transport's
 * burst-limited state.  Before transmitting, the socket's cached route
 * is switched to the transport's dst if it differs.  Transmit errors
 * are reported via sk->sk_err.
 */
static void sctp_outq_flush_transports(struct sctp_flush_ctx *ctx)
{
	struct sock *sk = ctx->asoc->base.sk;
	struct list_head *ltransport;
	struct sctp_packet *packet;
	struct sctp_transport *t;
	int error = 0;

	while ((ltransport = sctp_list_dequeue(&ctx->transport_list)) != NULL) {
		t = list_entry(ltransport, struct sctp_transport, send_ready);
		packet = &t->packet;
		if (!sctp_packet_empty(packet)) {
			/* Point the socket at this transport's route so
			 * sk_setup_caps() reflects the path in use; the
			 * extra dst_hold() pairs with the socket taking
			 * ownership of the dst reference.
			 */
			rcu_read_lock();
			if (t->dst && __sk_dst_get(sk) != t->dst) {
				dst_hold(t->dst);
				sk_setup_caps(sk, t->dst);
			}
			rcu_read_unlock();
			error = sctp_packet_transmit(packet, ctx->gfp);
			if (error < 0)
				ctx->q->asoc->base.sk->sk_err = -error;
		}

		/* Clear the burst limited state, if any */
		sctp_transport_burst_reset(t);
	}
}
1177
1178/* Try to flush an outqueue.
1179 *
1180 * Description: Send everything in q which we legally can, subject to
1181 * congestion limitations.
1182 * * Note: This function can be called from multiple contexts so appropriate
1183 * locking concerns must be made.  Today we use the sock lock to protect
1184 * this function.
1185 */
1186
1187static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
1188{
1189        struct sctp_flush_ctx ctx = {
1190                .q = q,
1191                .transport = NULL,
1192                .transport_list = LIST_HEAD_INIT(ctx.transport_list),
1193                .asoc = q->asoc,
1194                .packet = NULL,
1195                .gfp = gfp,
1196        };
1197
1198        /* 6.10 Bundling
1199         *   ...
1200         *   When bundling control chunks with DATA chunks, an
1201         *   endpoint MUST place control chunks first in the outbound
1202         *   SCTP packet.  The transmitter MUST transmit DATA chunks
1203         *   within a SCTP packet in increasing order of TSN.
1204         *   ...
1205         */
1206
1207        sctp_outq_flush_ctrl(&ctx);
1208
1209        if (q->asoc->src_out_of_asoc_ok)
1210                goto sctp_flush_out;
1211
1212        sctp_outq_flush_data(&ctx, rtx_timeout);
1213
1214sctp_flush_out:
1215
1216        sctp_outq_flush_transports(&ctx);
1217}
1218
1219/* Update unack_data based on the incoming SACK chunk */
1220static void sctp_sack_update_unack_data(struct sctp_association *assoc,
1221                                        struct sctp_sackhdr *sack)
1222{
1223        union sctp_sack_variable *frags;
1224        __u16 unack_data;
1225        int i;
1226
1227        unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1;
1228
1229        frags = sack->variable;
1230        for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) {
1231                unack_data -= ((ntohs(frags[i].gab.end) -
1232                                ntohs(frags[i].gab.start) + 1));
1233        }
1234
1235        assoc->unack_data = unack_data;
1236}
1237
/* This is where we REALLY process a SACK.
 *
 * Process the SACK against the outqueue.  Mostly, this just frees
 * things off the transmitted queue: credits acked bytes, updates RTO
 * and the SFR-CACC state, recomputes unack_data and the peer rwnd, and
 * advances the cumulative ack point.
 *
 * Returns nonzero when the outqueue is empty after processing.
 */
int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = q->asoc;
	struct sctp_sackhdr *sack = chunk->subh.sack_hdr;
	struct sctp_transport *transport;
	struct sctp_chunk *tchunk = NULL;
	struct list_head *lchunk, *transport_list, *temp;
	union sctp_sack_variable *frags = sack->variable;
	__u32 sack_ctsn, ctsn, tsn;
	__u32 highest_tsn, highest_new_tsn;
	__u32 sack_a_rwnd;
	unsigned int outstanding;
	struct sctp_transport *primary = asoc->peer.primary_path;
	int count_of_newacks = 0;
	int gap_ack_blocks;
	u8 accum_moved = 0;

	/* Grab the association's destination address list. */
	transport_list = &asoc->peer.transport_addr_list;

	sack_ctsn = ntohl(sack->cum_tsn_ack);
	gap_ack_blocks = ntohs(sack->num_gap_ack_blocks);
	asoc->stats.gapcnt += gap_ack_blocks;
	/*
	 * SFR-CACC algorithm:
	 * On receipt of a SACK the sender SHOULD execute the
	 * following statements.
	 *
	 * 1) If the cumulative ack in the SACK passes next tsn_at_change
	 * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be
	 * cleared. The CYCLING_CHANGEOVER flag SHOULD also be cleared for
	 * all destinations.
	 * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE
	 * is set the receiver of the SACK MUST take the following actions:
	 *
	 * A) Initialize the cacc_saw_newack to 0 for all destination
	 * addresses.
	 *
	 * Only bother if changeover_active is set. Otherwise, this is
	 * totally suboptimal to do on every SACK.
	 */
	if (primary->cacc.changeover_active) {
		u8 clear_cycling = 0;

		if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) {
			primary->cacc.changeover_active = 0;
			clear_cycling = 1;
		}

		if (clear_cycling || gap_ack_blocks) {
			list_for_each_entry(transport, transport_list,
					transports) {
				if (clear_cycling)
					transport->cacc.cycling_changeover = 0;
				if (gap_ack_blocks)
					transport->cacc.cacc_saw_newack = 0;
			}
		}
	}

	/* Get the highest TSN in the sack.  Gap block offsets are
	 * relative to the cumulative TSN ack point.
	 */
	highest_tsn = sack_ctsn;
	if (gap_ack_blocks)
		highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end);

	if (TSN_lt(asoc->highest_sacked, highest_tsn))
		asoc->highest_sacked = highest_tsn;

	highest_new_tsn = sack_ctsn;

	/* Run through the retransmit queue.  Credit bytes received
	 * and free those chunks that we can.
	 */
	sctp_check_transmitted(q, &q->retransmit, NULL, NULL, sack, &highest_new_tsn);

	/* Run through the transmitted queue.
	 * Credit bytes received and free those chunks which we can.
	 *
	 * This is a MASSIVE candidate for optimization.
	 */
	list_for_each_entry(transport, transport_list, transports) {
		sctp_check_transmitted(q, &transport->transmitted,
				       transport, &chunk->source, sack,
				       &highest_new_tsn);
		/*
		 * SFR-CACC algorithm:
		 * C) Let count_of_newacks be the number of
		 * destinations for which cacc_saw_newack is set.
		 */
		if (transport->cacc.cacc_saw_newack)
			count_of_newacks++;
	}

	/* Move the Cumulative TSN Ack Point if appropriate.  */
	if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) {
		asoc->ctsn_ack_point = sack_ctsn;
		accum_moved = 1;
	}

	if (gap_ack_blocks) {

		if (asoc->fast_recovery && accum_moved)
			highest_new_tsn = highest_tsn;

		list_for_each_entry(transport, transport_list, transports)
			sctp_mark_missing(q, &transport->transmitted, transport,
					  highest_new_tsn, count_of_newacks);
	}

	/* Update unack_data field in the assoc. */
	sctp_sack_update_unack_data(asoc, sack);

	ctsn = asoc->ctsn_ack_point;

	/* Throw away stuff rotting on the sack queue.  */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		tchunk = list_entry(lchunk, struct sctp_chunk,
				    transmitted_list);
		tsn = ntohl(tchunk->subh.data_hdr->tsn);
		if (TSN_lte(tsn, ctsn)) {
			list_del_init(&tchunk->transmitted_list);
			/* NOTE(review): this tests the arriving SACK
			 * chunk's sinfo flags, not tchunk's; it looks
			 * like the freed data chunk's flags were
			 * intended — confirm before changing.
			 */
			if (asoc->peer.prsctp_capable &&
			    SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
				asoc->sent_cnt_removable--;
			sctp_chunk_free(tchunk);
		}
	}

	/* ii) Set rwnd equal to the newly received a_rwnd minus the
	 *     number of bytes still outstanding after processing the
	 *     Cumulative TSN Ack and the Gap Ack Blocks.
	 */

	sack_a_rwnd = ntohl(sack->a_rwnd);
	asoc->peer.zero_window_announced = !sack_a_rwnd;
	outstanding = q->outstanding_bytes;

	if (outstanding < sack_a_rwnd)
		sack_a_rwnd -= outstanding;
	else
		sack_a_rwnd = 0;

	asoc->peer.rwnd = sack_a_rwnd;

	asoc->stream.si->generate_ftsn(q, sack_ctsn);

	pr_debug("%s: sack cumulative tsn ack:0x%x\n", __func__, sack_ctsn);
	pr_debug("%s: cumulative tsn ack of assoc:%p is 0x%x, "
		 "advertised peer ack point:0x%x\n", __func__, asoc, ctsn,
		 asoc->adv_peer_ack_point);

	return sctp_outq_is_empty(q);
}
1396
1397/* Is the outqueue empty?
1398 * The queue is empty when we have not pending data, no in-flight data
1399 * and nothing pending retransmissions.
1400 */
1401int sctp_outq_is_empty(const struct sctp_outq *q)
1402{
1403        return q->out_qlen == 0 && q->outstanding_bytes == 0 &&
1404               list_empty(&q->retransmit);
1405}
1406
1407/********************************************************************
1408 * 2nd Level Abstractions
1409 ********************************************************************/
1410
1411/* Go through a transport's transmitted list or the association's retransmit
1412 * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked.
1413 * The retransmit list will not have an associated transport.
1414 *
1415 * I added coherent debug information output.   --xguo
1416 *
1417 * Instead of printing 'sacked' or 'kept' for each TSN on the
1418 * transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5.
1419 * KEPT TSN6-TSN7, etc.
1420 */
1421static void sctp_check_transmitted(struct sctp_outq *q,
1422                                   struct list_head *transmitted_queue,
1423                                   struct sctp_transport *transport,
1424                                   union sctp_addr *saddr,
1425                                   struct sctp_sackhdr *sack,
1426                                   __u32 *highest_new_tsn_in_sack)
1427{
1428        struct list_head *lchunk;
1429        struct sctp_chunk *tchunk;
1430        struct list_head tlist;
1431        __u32 tsn;
1432        __u32 sack_ctsn;
1433        __u32 rtt;
1434        __u8 restart_timer = 0;
1435        int bytes_acked = 0;
1436        int migrate_bytes = 0;
1437        bool forward_progress = false;
1438
1439        sack_ctsn = ntohl(sack->cum_tsn_ack);
1440
1441        INIT_LIST_HEAD(&tlist);
1442
1443        /* The while loop will skip empty transmitted queues. */
1444        while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) {
1445                tchunk = list_entry(lchunk, struct sctp_chunk,
1446                                    transmitted_list);
1447
1448                if (sctp_chunk_abandoned(tchunk)) {
1449                        /* Move the chunk to abandoned list. */
1450                        sctp_insert_list(&q->abandoned, lchunk);
1451
1452                        /* If this chunk has not been acked, stop
1453                         * considering it as 'outstanding'.
1454                         */
1455                        if (transmitted_queue != &q->retransmit &&
1456                            !tchunk->tsn_gap_acked) {
1457                                if (tchunk->transport)
1458                                        tchunk->transport->flight_size -=
1459                                                        sctp_data_size(tchunk);
1460                                q->outstanding_bytes -= sctp_data_size(tchunk);
1461                        }
1462                        continue;
1463                }
1464
1465                tsn = ntohl(tchunk->subh.data_hdr->tsn);
1466                if (sctp_acked(sack, tsn)) {
1467                        /* If this queue is the retransmit queue, the
1468                         * retransmit timer has already reclaimed
1469                         * the outstanding bytes for this chunk, so only
1470                         * count bytes associated with a transport.
1471                         */
1472                        if (transport && !tchunk->tsn_gap_acked) {
1473                                /* If this chunk is being used for RTT
1474                                 * measurement, calculate the RTT and update
1475                                 * the RTO using this value.
1476                                 *
1477                                 * 6.3.1 C5) Karn's algorithm: RTT measurements
1478                                 * MUST NOT be made using packets that were
1479                                 * retransmitted (and thus for which it is
1480                                 * ambiguous whether the reply was for the
1481                                 * first instance of the packet or a later
1482                                 * instance).
1483                                 */
1484                                if (!sctp_chunk_retransmitted(tchunk) &&
1485                                    tchunk->rtt_in_progress) {
1486                                        tchunk->rtt_in_progress = 0;
1487                                        rtt = jiffies - tchunk->sent_at;
1488                                        sctp_transport_update_rto(transport,
1489                                                                  rtt);
1490                                }
1491
1492                                if (TSN_lte(tsn, sack_ctsn)) {
1493                                        /*
1494                                         * SFR-CACC algorithm:
1495                                         * 2) If the SACK contains gap acks
1496                                         * and the flag CHANGEOVER_ACTIVE is
1497                                         * set the receiver of the SACK MUST
1498                                         * take the following action:
1499                                         *
1500                                         * B) For each TSN t being acked that
1501                                         * has not been acked in any SACK so
1502                                         * far, set cacc_saw_newack to 1 for
1503                                         * the destination that the TSN was
1504                                         * sent to.
1505                                         */
1506                                        if (sack->num_gap_ack_blocks &&
1507                                            q->asoc->peer.primary_path->cacc.
1508                                            changeover_active)
1509                                                transport->cacc.cacc_saw_newack
1510                                                        = 1;
1511                                }
1512                        }
1513
1514                        /* If the chunk hasn't been marked as ACKED,
1515                         * mark it and account bytes_acked if the
1516                         * chunk had a valid transport (it will not
1517                         * have a transport if ASCONF had deleted it
1518                         * while DATA was outstanding).
1519                         */
1520                        if (!tchunk->tsn_gap_acked) {
1521                                tchunk->tsn_gap_acked = 1;
1522                                if (TSN_lt(*highest_new_tsn_in_sack, tsn))
1523                                        *highest_new_tsn_in_sack = tsn;
1524                                bytes_acked += sctp_data_size(tchunk);
1525                                if (!tchunk->transport)
1526                                        migrate_bytes += sctp_data_size(tchunk);
1527                                forward_progress = true;
1528                        }
1529
1530                        if (TSN_lte(tsn, sack_ctsn)) {
1531                                /* RFC 2960  6.3.2 Retransmission Timer Rules
1532                                 *
1533                                 * R3) Whenever a SACK is received
1534                                 * that acknowledges the DATA chunk
1535                                 * with the earliest outstanding TSN
1536                                 * for that address, restart T3-rtx
1537                                 * timer for that address with its
1538                                 * current RTO.
1539                                 */
1540                                restart_timer = 1;
1541                                forward_progress = true;
1542
1543                                list_add_tail(&tchunk->transmitted_list,
1544                                              &q->sacked);
1545                        } else {
1546                                /* RFC2960 7.2.4, sctpimpguide-05 2.8.2
1547                                 * M2) Each time a SACK arrives reporting
1548                                 * 'Stray DATA chunk(s)' record the highest TSN
1549                                 * reported as newly acknowledged, call this
1550                                 * value 'HighestTSNinSack'. A newly
1551                                 * acknowledged DATA chunk is one not
1552                                 * previously acknowledged in a SACK.
1553                                 *
1554                                 * When the SCTP sender of data receives a SACK
1555                                 * chunk that acknowledges, for the first time,
1556                                 * the receipt of a DATA chunk, all the still
1557                                 * unacknowledged DATA chunks whose TSN is
1558                                 * older than that newly acknowledged DATA
1559                                 * chunk, are qualified as 'Stray DATA chunks'.
1560                                 */
1561                                list_add_tail(lchunk, &tlist);
1562                        }
1563                } else {
1564                        if (tchunk->tsn_gap_acked) {
1565                                pr_debug("%s: receiver reneged on data TSN:0x%x\n",
1566                                         __func__, tsn);
1567
1568                                tchunk->tsn_gap_acked = 0;
1569
1570                                if (tchunk->transport)
1571                                        bytes_acked -= sctp_data_size(tchunk);
1572
1573                                /* RFC 2960 6.3.2 Retransmission Timer Rules
1574                                 *
1575                                 * R4) Whenever a SACK is received missing a
1576                                 * TSN that was previously acknowledged via a
1577                                 * Gap Ack Block, start T3-rtx for the
1578                                 * destination address to which the DATA
1579                                 * chunk was originally
1580                                 * transmitted if it is not already running.
1581                                 */
1582                                restart_timer = 1;
1583                        }
1584
1585                        list_add_tail(lchunk, &tlist);
1586                }
1587        }
1588
1589        if (transport) {
1590                if (bytes_acked) {
1591                        struct sctp_association *asoc = transport->asoc;
1592
1593                        /* We may have counted DATA that was migrated
1594                         * to this transport due to DEL-IP operation.
1595                         * Subtract those bytes, since the were never
1596                         * send on this transport and shouldn't be
1597                         * credited to this transport.
1598                         */
1599                        bytes_acked -= migrate_bytes;
1600
1601                        /* 8.2. When an outstanding TSN is acknowledged,
1602                         * the endpoint shall clear the error counter of
1603                         * the destination transport address to which the
1604                         * DATA chunk was last sent.
1605                         * The association's overall error counter is
1606                         * also cleared.
1607                         */
1608                        transport->error_count = 0;
1609                        transport->asoc->overall_error_count = 0;
1610                        forward_progress = true;
1611
1612                        /*
1613                         * While in SHUTDOWN PENDING, we may have started
1614                         * the T5 shutdown guard timer after reaching the
1615                         * retransmission limit. Stop that timer as soon
1616                         * as the receiver acknowledged any data.
1617                         */
1618                        if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
1619                            del_timer(&asoc->timers
1620                                [SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
1621                                        sctp_association_put(asoc);
1622
1623                        /* Mark the destination transport address as
1624                         * active if it is not so marked.
1625                         */
1626                        if ((transport->state == SCTP_INACTIVE ||
1627                             transport->state == SCTP_UNCONFIRMED) &&
1628                            sctp_cmp_addr_exact(&transport->ipaddr, saddr)) {
1629                                sctp_assoc_control_transport(
1630                                        transport->asoc,
1631                                        transport,
1632                                        SCTP_TRANSPORT_UP,
1633                                        SCTP_RECEIVED_SACK);
1634                        }
1635
1636                        sctp_transport_raise_cwnd(transport, sack_ctsn,
1637                                                  bytes_acked);
1638
1639                        transport->flight_size -= bytes_acked;
1640                        if (transport->flight_size == 0)
1641                                transport->partial_bytes_acked = 0;
1642                        q->outstanding_bytes -= bytes_acked + migrate_bytes;
1643                } else {
1644                        /* RFC 2960 6.1, sctpimpguide-06 2.15.2
1645                         * When a sender is doing zero window probing, it
1646                         * should not timeout the association if it continues
1647                         * to receive new packets from the receiver. The
1648                         * reason is that the receiver MAY keep its window
1649                         * closed for an indefinite time.
1650                         * A sender is doing zero window probing when the
1651                         * receiver's advertised window is zero, and there is
1652                         * only one data chunk in flight to the receiver.
1653                         *
1654                         * Allow the association to timeout while in SHUTDOWN
1655                         * PENDING or SHUTDOWN RECEIVED in case the receiver
1656                         * stays in zero window mode forever.
1657                         */
1658                        if (!q->asoc->peer.rwnd &&
1659                            !list_empty(&tlist) &&
1660                            (sack_ctsn+2 == q->asoc->next_tsn) &&
1661                            q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
1662                                pr_debug("%s: sack received for zero window "
1663                                         "probe:%u\n", __func__, sack_ctsn);
1664
1665                                q->asoc->overall_error_count = 0;
1666                                transport->error_count = 0;
1667                        }
1668                }
1669
1670                /* RFC 2960 6.3.2 Retransmission Timer Rules
1671                 *
1672                 * R2) Whenever all outstanding data sent to an address have
1673                 * been acknowledged, turn off the T3-rtx timer of that
1674                 * address.
1675                 */
1676                if (!transport->flight_size) {
1677                        if (del_timer(&transport->T3_rtx_timer))
1678                                sctp_transport_put(transport);
1679                } else if (restart_timer) {
1680                        if (!mod_timer(&transport->T3_rtx_timer,
1681                                       jiffies + transport->rto))
1682                                sctp_transport_hold(transport);
1683                }
1684
1685                if (forward_progress) {
1686                        if (transport->dst)
1687                                sctp_transport_dst_confirm(transport);
1688                }
1689        }
1690
1691        list_splice(&tlist, transmitted_queue);
1692}
1693
1694/* Mark chunks as missing and consequently may get retransmitted. */
1695static void sctp_mark_missing(struct sctp_outq *q,
1696                              struct list_head *transmitted_queue,
1697                              struct sctp_transport *transport,
1698                              __u32 highest_new_tsn_in_sack,
1699                              int count_of_newacks)
1700{
1701        struct sctp_chunk *chunk;
1702        __u32 tsn;
1703        char do_fast_retransmit = 0;
1704        struct sctp_association *asoc = q->asoc;
1705        struct sctp_transport *primary = asoc->peer.primary_path;
1706
1707        list_for_each_entry(chunk, transmitted_queue, transmitted_list) {
1708
1709                tsn = ntohl(chunk->subh.data_hdr->tsn);
1710
1711                /* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all
1712                 * 'Unacknowledged TSN's', if the TSN number of an
1713                 * 'Unacknowledged TSN' is smaller than the 'HighestTSNinSack'
1714                 * value, increment the 'TSN.Missing.Report' count on that
1715                 * chunk if it has NOT been fast retransmitted or marked for
1716                 * fast retransmit already.
1717                 */
1718                if (chunk->fast_retransmit == SCTP_CAN_FRTX &&
1719                    !chunk->tsn_gap_acked &&
1720                    TSN_lt(tsn, highest_new_tsn_in_sack)) {
1721
1722                        /* SFR-CACC may require us to skip marking
1723                         * this chunk as missing.
1724                         */
1725                        if (!transport || !sctp_cacc_skip(primary,
1726                                                chunk->transport,
1727                                                count_of_newacks, tsn)) {
1728                                chunk->tsn_missing_report++;
1729
1730                                pr_debug("%s: tsn:0x%x missing counter:%d\n",
1731                                         __func__, tsn, chunk->tsn_missing_report);
1732                        }
1733                }
1734                /*
1735                 * M4) If any DATA chunk is found to have a
1736                 * 'TSN.Missing.Report'
1737                 * value larger than or equal to 3, mark that chunk for
1738                 * retransmission and start the fast retransmit procedure.
1739                 */
1740
1741                if (chunk->tsn_missing_report >= 3) {
1742                        chunk->fast_retransmit = SCTP_NEED_FRTX;
1743                        do_fast_retransmit = 1;
1744                }
1745        }
1746
1747        if (transport) {
1748                if (do_fast_retransmit)
1749                        sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);
1750
1751                pr_debug("%s: transport:%p, cwnd:%d, ssthresh:%d, "
1752                         "flight_size:%d, pba:%d\n",  __func__, transport,
1753                         transport->cwnd, transport->ssthresh,
1754                         transport->flight_size, transport->partial_bytes_acked);
1755        }
1756}
1757
1758/* Is the given TSN acked by this packet?  */
1759static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
1760{
1761        __u32 ctsn = ntohl(sack->cum_tsn_ack);
1762        union sctp_sack_variable *frags;
1763        __u16 tsn_offset, blocks;
1764        int i;
1765
1766        if (TSN_lte(tsn, ctsn))
1767                goto pass;
1768
1769        /* 3.3.4 Selective Acknowledgment (SACK) (3):
1770         *
1771         * Gap Ack Blocks:
1772         *  These fields contain the Gap Ack Blocks. They are repeated
1773         *  for each Gap Ack Block up to the number of Gap Ack Blocks
1774         *  defined in the Number of Gap Ack Blocks field. All DATA
1775         *  chunks with TSNs greater than or equal to (Cumulative TSN
1776         *  Ack + Gap Ack Block Start) and less than or equal to
1777         *  (Cumulative TSN Ack + Gap Ack Block End) of each Gap Ack
1778         *  Block are assumed to have been received correctly.
1779         */
1780
1781        frags = sack->variable;
1782        blocks = ntohs(sack->num_gap_ack_blocks);
1783        tsn_offset = tsn - ctsn;
1784        for (i = 0; i < blocks; ++i) {
1785                if (tsn_offset >= ntohs(frags[i].gab.start) &&
1786                    tsn_offset <= ntohs(frags[i].gab.end))
1787                        goto pass;
1788        }
1789
1790        return 0;
1791pass:
1792        return 1;
1793}
1794
1795static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
1796                                    int nskips, __be16 stream)
1797{
1798        int i;
1799
1800        for (i = 0; i < nskips; i++) {
1801                if (skiplist[i].stream == stream)
1802                        return i;
1803        }
1804        return i;
1805}
1806
1807/* Create and add a fwdtsn chunk to the outq's control queue if needed. */
1808void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
1809{
1810        struct sctp_association *asoc = q->asoc;
1811        struct sctp_chunk *ftsn_chunk = NULL;
1812        struct sctp_fwdtsn_skip ftsn_skip_arr[10];
1813        int nskips = 0;
1814        int skip_pos = 0;
1815        __u32 tsn;
1816        struct sctp_chunk *chunk;
1817        struct list_head *lchunk, *temp;
1818
1819        if (!asoc->peer.prsctp_capable)
1820                return;
1821
1822        /* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the
1823         * received SACK.
1824         *
1825         * If (Advanced.Peer.Ack.Point < SackCumAck), then update
1826         * Advanced.Peer.Ack.Point to be equal to SackCumAck.
1827         */
1828        if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
1829                asoc->adv_peer_ack_point = ctsn;
1830
1831        /* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point"
1832         * locally, that is, to move "Advanced.Peer.Ack.Point" up as long as
1833         * the chunk next in the out-queue space is marked as "abandoned" as
1834         * shown in the following example:
1835         *
1836         * Assuming that a SACK arrived with the Cumulative TSN ACK 102
1837         * and the Advanced.Peer.Ack.Point is updated to this value:
1838         *
1839         *   out-queue at the end of  ==>   out-queue after Adv.Ack.Point
1840         *   normal SACK processing           local advancement
1841         *                ...                           ...
1842         *   Adv.Ack.Pt-> 102 acked                     102 acked
1843         *                103 abandoned                 103 abandoned
1844         *                104 abandoned     Adv.Ack.P-> 104 abandoned
1845         *                105                           105
1846         *                106 acked                     106 acked
1847         *                ...                           ...
1848         *
1849         * In this example, the data sender successfully advanced the
1850         * "Advanced.Peer.Ack.Point" from 102 to 104 locally.
1851         */
1852        list_for_each_safe(lchunk, temp, &q->abandoned) {
1853                chunk = list_entry(lchunk, struct sctp_chunk,
1854                                        transmitted_list);
1855                tsn = ntohl(chunk->subh.data_hdr->tsn);
1856
1857                /* Remove any chunks in the abandoned queue that are acked by
1858                 * the ctsn.
1859                 */
1860                if (TSN_lte(tsn, ctsn)) {
1861                        list_del_init(lchunk);
1862                        sctp_chunk_free(chunk);
1863                } else {
1864                        if (TSN_lte(tsn, asoc->adv_peer_ack_point+1)) {
1865                                asoc->adv_peer_ack_point = tsn;
1866                                if (chunk->chunk_hdr->flags &
1867                                         SCTP_DATA_UNORDERED)
1868                                        continue;
1869                                skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0],
1870                                                nskips,
1871                                                chunk->subh.data_hdr->stream);
1872                                ftsn_skip_arr[skip_pos].stream =
1873                                        chunk->subh.data_hdr->stream;
1874                                ftsn_skip_arr[skip_pos].ssn =
1875                                         chunk->subh.data_hdr->ssn;
1876                                if (skip_pos == nskips)
1877                                        nskips++;
1878                                if (nskips == 10)
1879                                        break;
1880                        } else
1881                                break;
1882                }
1883        }
1884
1885        /* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point"
1886         * is greater than the Cumulative TSN ACK carried in the received
1887         * SACK, the data sender MUST send the data receiver a FORWARD TSN
1888         * chunk containing the latest value of the
1889         * "Advanced.Peer.Ack.Point".
1890         *
1891         * C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD
1892         * list each stream and sequence number in the forwarded TSN. This
1893         * information will enable the receiver to easily find any
1894         * stranded TSN's waiting on stream reorder queues. Each stream
1895         * SHOULD only be reported once; this means that if multiple
1896         * abandoned messages occur in the same stream then only the
1897         * highest abandoned stream sequence number is reported. If the
1898         * total size of the FORWARD TSN does NOT fit in a single MTU then
1899         * the sender of the FORWARD TSN SHOULD lower the
1900         * Advanced.Peer.Ack.Point to the last TSN that will fit in a
1901         * single MTU.
1902         */
1903        if (asoc->adv_peer_ack_point > ctsn)
1904                ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point,
1905                                              nskips, &ftsn_skip_arr[0]);
1906
1907        if (ftsn_chunk) {
1908                list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
1909                SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
1910        }
1911}
1912