1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
43
44#include <linux/types.h>
45#include <linux/list.h>
46#include <linux/socket.h>
47#include <linux/ip.h>
48#include <linux/slab.h>
49#include <net/sock.h>
50
51#include <net/sctp/sctp.h>
52#include <net/sctp/sm.h>
53#include <net/sctp/stream_sched.h>
54
55
56static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
57static void sctp_check_transmitted(struct sctp_outq *q,
58 struct list_head *transmitted_queue,
59 struct sctp_transport *transport,
60 union sctp_addr *saddr,
61 struct sctp_sackhdr *sack,
62 __u32 *highest_new_tsn);
63
64static void sctp_mark_missing(struct sctp_outq *q,
65 struct list_head *transmitted_queue,
66 struct sctp_transport *transport,
67 __u32 highest_new_tsn,
68 int count_of_newacks);
69
70static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp);
71
72
73static inline void sctp_outq_head_data(struct sctp_outq *q,
74 struct sctp_chunk *ch)
75{
76 struct sctp_stream_out_ext *oute;
77 __u16 stream;
78
79 list_add(&ch->list, &q->out_chunk_list);
80 q->out_qlen += ch->skb->len;
81
82 stream = sctp_chunk_stream_no(ch);
83 oute = SCTP_SO(&q->asoc->stream, stream)->ext;
84 list_add(&ch->stream_list, &oute->outq);
85}
86
87
88static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
89{
90 return q->sched->dequeue(q);
91}
92
93
94static inline void sctp_outq_tail_data(struct sctp_outq *q,
95 struct sctp_chunk *ch)
96{
97 struct sctp_stream_out_ext *oute;
98 __u16 stream;
99
100 list_add_tail(&ch->list, &q->out_chunk_list);
101 q->out_qlen += ch->skb->len;
102
103 stream = sctp_chunk_stream_no(ch);
104 oute = SCTP_SO(&q->asoc->stream, stream)->ext;
105 list_add_tail(&ch->stream_list, &oute->outq);
106}
107
108
109
110
111
112
113
/* SFR-CACC 3.1-D: skip marking when at least two destinations saw a
 * new ack and this transport is not the primary.  Returns 1 to skip.
 */
static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
				       struct sctp_transport *transport,
				       int count_of_newacks)
{
	return (transport != primary && count_of_newacks >= 2) ? 1 : 0;
}
122
123
124
125
126
127
128
129
130static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport,
131 int count_of_newacks)
132{
133 if (count_of_newacks < 2 &&
134 (transport && !transport->cacc.cacc_saw_newack))
135 return 1;
136 return 0;
137}
138
139
140
141
142
143
144
145
146static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary,
147 struct sctp_transport *transport,
148 int count_of_newacks)
149{
150 if (!primary->cacc.cycling_changeover) {
151 if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks))
152 return 1;
153 if (sctp_cacc_skip_3_1_f(transport, count_of_newacks))
154 return 1;
155 return 0;
156 }
157 return 0;
158}
159
160
161
162
163
164
165
166
167static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn)
168{
169 if (primary->cacc.cycling_changeover &&
170 TSN_lt(tsn, primary->cacc.next_tsn_at_change))
171 return 1;
172 return 0;
173}
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189static inline int sctp_cacc_skip(struct sctp_transport *primary,
190 struct sctp_transport *transport,
191 int count_of_newacks,
192 __u32 tsn)
193{
194 if (primary->cacc.changeover_active &&
195 (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) ||
196 sctp_cacc_skip_3_2(primary, tsn)))
197 return 1;
198 return 0;
199}
200
201
202
203
204
205void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
206{
207 memset(q, 0, sizeof(struct sctp_outq));
208
209 q->asoc = asoc;
210 INIT_LIST_HEAD(&q->out_chunk_list);
211 INIT_LIST_HEAD(&q->control_chunk_list);
212 INIT_LIST_HEAD(&q->retransmit);
213 INIT_LIST_HEAD(&q->sacked);
214 INIT_LIST_HEAD(&q->abandoned);
215 sctp_sched_set_sched(asoc, sctp_sk(asoc->base.sk)->default_ss);
216}
217
218
219
/* Free the outqueue's pending chunks.
 *
 * Walks every queue hanging off @q — the per-transport transmitted
 * lists, the sacked, retransmit and abandoned queues, the
 * scheduler-managed data queue and the control chunk list — reporting
 * each pending DATA chunk as failed with the queue's error code and
 * freeing it.  Leaves the lists themselves initialized but empty.
 */
static void __sctp_outq_teardown(struct sctp_outq *q)
{
	struct sctp_transport *transport;
	struct list_head *lchunk, *temp;
	struct sctp_chunk *chunk, *tmp;

	/* Throw away unacknowledged chunks on every transport. */
	list_for_each_entry(transport, &q->asoc->peer.transport_addr_list,
			    transports) {
		while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) {
			chunk = list_entry(lchunk, struct sctp_chunk,
					   transmitted_list);
			/* Mark as part of a failed message. */
			sctp_chunk_fail(chunk, q->error);
			sctp_chunk_free(chunk);
		}
	}

	/* Throw away chunks that have been gap ACKed. */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any chunks in the retransmit queue. */
	list_for_each_safe(lchunk, temp, &q->retransmit) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any chunks on the abandoned queue. */
	list_for_each_safe(lchunk, temp, &q->abandoned) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any leftover data chunks, draining them through the
	 * scheduler so its per-stream state stays consistent.
	 */
	while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
		sctp_sched_dequeue_done(q, chunk);

		/* Mark as send failure. */
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any leftover control chunks. */
	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
		list_del_init(&chunk->list);
		sctp_chunk_free(chunk);
	}
}
280
/* Drop everything pending on the outqueue, then reinitialize it so it
 * can be reused (e.g. across an association restart).
 */
void sctp_outq_teardown(struct sctp_outq *q)
{
	__sctp_outq_teardown(q);
	sctp_outq_init(q->asoc, q);
}
286
287
/* Free the outqueue's contents for good (no reinitialization; the
 * structure itself is owned by the association and freed with it).
 */
void sctp_outq_free(struct sctp_outq *q)
{
	/* Throw away leftover chunks. */
	__sctp_outq_teardown(q);
}
293
294
/* Put a new chunk in an sctp_outq.
 *
 * DATA chunks are queued through the stream scheduler; everything else
 * goes on the control chunk list.  Unless the queue is corked, this
 * also triggers an immediate flush.
 */
void sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp)
{
	struct net *net = sock_net(q->asoc->base.sk);

	pr_debug("%s: outq:%p, chunk:%p[%s]\n", __func__, q, chunk,
		 chunk && chunk->chunk_hdr ?
		 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
		 "illegal chunk");

	/* If it is data, queue it up; otherwise it is a control chunk
	 * and goes straight onto the control list.
	 */
	if (sctp_chunk_is_data(chunk)) {
		pr_debug("%s: outqueueing: outq:%p, chunk:%p[%s])\n",
			 __func__, q, chunk, chunk && chunk->chunk_hdr ?
			 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
			 "illegal chunk");

		sctp_outq_tail_data(q, chunk);
		/* PR-SCTP: chunks sent under the PRIO policy count toward
		 * the "removable if pruning is needed" total.
		 */
		if (chunk->asoc->peer.prsctp_capable &&
		    SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
			chunk->asoc->sent_cnt_removable++;
		if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
			SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
		else
			SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
	} else {
		list_add_tail(&chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
	}

	if (!q->cork)
		sctp_outq_flush(q, 0, gfp);
}
329
330
331
332
333static void sctp_insert_list(struct list_head *head, struct list_head *new)
334{
335 struct list_head *pos;
336 struct sctp_chunk *nchunk, *lchunk;
337 __u32 ntsn, ltsn;
338 int done = 0;
339
340 nchunk = list_entry(new, struct sctp_chunk, transmitted_list);
341 ntsn = ntohl(nchunk->subh.data_hdr->tsn);
342
343 list_for_each(pos, head) {
344 lchunk = list_entry(pos, struct sctp_chunk, transmitted_list);
345 ltsn = ntohl(lchunk->subh.data_hdr->tsn);
346 if (TSN_lt(ntsn, ltsn)) {
347 list_add(new, pos->prev);
348 done = 1;
349 break;
350 }
351 }
352 if (!done)
353 list_add_tail(new, head);
354}
355
/* Abandon already-sent PR-SCTP (PRIO policy) chunks on @queue to make
 * room for a new message, until at least @msg_len bytes are reclaimed
 * or the queue is exhausted.  Returns the remaining byte count still
 * to be reclaimed (<= 0 means enough was freed).
 */
static int sctp_prsctp_prune_sent(struct sctp_association *asoc,
				  struct sctp_sndrcvinfo *sinfo,
				  struct list_head *queue, int msg_len)
{
	struct sctp_chunk *chk, *temp;

	list_for_each_entry_safe(chk, temp, queue, transmitted_list) {
		struct sctp_stream_out *streamout;

		/* Only chunks whose message is not already abandoned, that
		 * use the PRIO policy, and whose priority (carried in
		 * sinfo_timetolive) is lower than the new message's.
		 */
		if (!chk->msg->abandoned &&
		    (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
		     chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive))
			continue;

		chk->msg->abandoned = 1;
		list_del_init(&chk->transmitted_list);
		sctp_insert_list(&asoc->outqueue.abandoned,
				 &chk->transmitted_list);

		streamout = SCTP_SO(&asoc->stream, chk->sinfo.sinfo_stream);
		asoc->sent_cnt_removable--;
		asoc->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;
		streamout->ext->abandoned_sent[SCTP_PR_INDEX(PRIO)]++;

		/* If the chunk was in flight (not on the retransmit queue
		 * and not gap-acked), give its size back to the flight and
		 * outstanding-bytes accounting.
		 */
		if (queue != &asoc->outqueue.retransmit &&
		    !chk->tsn_gap_acked) {
			if (chk->transport)
				chk->transport->flight_size -=
						sctp_data_size(chk);
			asoc->outqueue.outstanding_bytes -= sctp_data_size(chk);
		}

		msg_len -= chk->skb->truesize + sizeof(struct sctp_chunk);
		if (msg_len <= 0)
			break;
	}

	return msg_len;
}
395
/* Abandon not-yet-sent PR-SCTP (PRIO policy) chunks to make room for a
 * new message.  The stream scheduler is paused around the walk so the
 * out_chunk_list can be edited safely.  Returns the remaining byte
 * count still to be reclaimed.
 */
static int sctp_prsctp_prune_unsent(struct sctp_association *asoc,
				    struct sctp_sndrcvinfo *sinfo, int msg_len)
{
	struct sctp_outq *q = &asoc->outqueue;
	struct sctp_chunk *chk, *temp;

	q->sched->unsched_all(&asoc->stream);

	list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) {
		/* Only unsent messages (keyed off their first fragment)
		 * with the PRIO policy and a lower priority are pruned.
		 */
		if (!chk->msg->abandoned &&
		    (!(chk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG) ||
		     !SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) ||
		     chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive))
			continue;

		chk->msg->abandoned = 1;
		sctp_sched_dequeue_common(q, chk);
		asoc->sent_cnt_removable--;
		asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
		/* The outgoing stream count may have been shrunk below this
		 * chunk's stream id; only touch per-stream stats when the
		 * stream still exists.
		 */
		if (chk->sinfo.sinfo_stream < asoc->stream.outcnt) {
			struct sctp_stream_out *streamout =
				SCTP_SO(&asoc->stream, chk->sinfo.sinfo_stream);

			streamout->ext->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++;
		}

		msg_len -= chk->skb->truesize + sizeof(struct sctp_chunk);
		sctp_chunk_free(chk);
		if (msg_len <= 0)
			break;
	}

	q->sched->sched_all(&asoc->stream);

	return msg_len;
}
432
433
/* Abandon lower-priority PR-SCTP chunks to make room for a new message
 * of @msg_len bytes: first the retransmit queue, then each transport's
 * transmitted queue, and finally the unsent queue.  No-op unless the
 * peer is PR-SCTP capable and there is something removable.
 */
void sctp_prsctp_prune(struct sctp_association *asoc,
		       struct sctp_sndrcvinfo *sinfo, int msg_len)
{
	struct sctp_transport *transport;

	if (!asoc->peer.prsctp_capable || !asoc->sent_cnt_removable)
		return;

	msg_len = sctp_prsctp_prune_sent(asoc, sinfo,
					 &asoc->outqueue.retransmit,
					 msg_len);
	if (msg_len <= 0)
		return;

	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
			    transports) {
		msg_len = sctp_prsctp_prune_sent(asoc, sinfo,
						 &transport->transmitted,
						 msg_len);
		if (msg_len <= 0)
			return;
	}

	sctp_prsctp_prune_unsent(asoc, sinfo, msg_len);
}
459
460
/* Mark all the eligible packets on @transport for retransmission and
 * move them to the retransmit queue, fixing up rwnd/flight/outstanding
 * accounting as chunks leave "in flight" status.
 */
void sctp_retransmit_mark(struct sctp_outq *q,
			  struct sctp_transport *transport,
			  __u8 reason)
{
	struct list_head *lchunk, *ltemp;
	struct sctp_chunk *chunk;

	/* Walk through the specified transmitted queue. */
	list_for_each_safe(lchunk, ltemp, &transport->transmitted) {
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);

		/* If the chunk is abandoned, move it to the abandoned list. */
		if (sctp_chunk_abandoned(chunk)) {
			list_del_init(lchunk);
			sctp_insert_list(&q->abandoned, lchunk);

			/* If this chunk has not been previously acked, stop
			 * considering it "outstanding": credit its size back
			 * to flight_size, outstanding_bytes and the peer's
			 * receive window.
			 */
			if (!chunk->tsn_gap_acked) {
				if (chunk->transport)
					chunk->transport->flight_size -=
							sctp_data_size(chunk);
				q->outstanding_bytes -= sctp_data_size(chunk);
				q->asoc->peer.rwnd += sctp_data_size(chunk);
			}
			continue;
		}

		/* Fast retransmit marks only chunks flagged SCTP_NEED_FRTX;
		 * any other reason retransmits everything not yet gap-acked.
		 */
		if ((reason == SCTP_RTXR_FAST_RTX &&
		    (chunk->fast_retransmit == SCTP_NEED_FRTX)) ||
		    (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) {
			/* The chunk is no longer considered in flight, so
			 * adjust rwnd, outstanding and flight accounting
			 * before queueing it for retransmission.
			 */
			q->asoc->peer.rwnd += sctp_data_size(chunk);
			q->outstanding_bytes -= sctp_data_size(chunk);
			if (chunk->transport)
				transport->flight_size -= sctp_data_size(chunk);

			/* Restart the missing-report counter for this TSN
			 * (RFC 4960 7.2.4 bookkeeping).
			 */
			chunk->tsn_missing_report = 0;

			/* A retransmitted TSN must not be used for RTT
			 * measurement (Karn's rule), so cancel any pending
			 * measurement based on this chunk.
			 */
			if (chunk->rtt_in_progress) {
				chunk->rtt_in_progress = 0;
				transport->rto_pending = 0;
			}

			/* Move the chunk to the retransmit queue, which is
			 * kept sorted by TSN.
			 */
			list_del_init(lchunk);
			sctp_insert_list(&q->retransmit, lchunk);
		}
	}

	pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d, "
		 "flight_size:%d, pba:%d\n", __func__, transport, reason,
		 transport->cwnd, transport->ssthresh, transport->flight_size,
		 transport->partial_bytes_acked);
}
544
545
546
547
/* Retransmission entry point: update stats and congestion state for the
 * given @reason, mark the eligible chunks on @transport, and — except in
 * the fast-retransmit case, which flushes at the end of SACK processing
 * — flush the queue immediately.
 */
void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
		     enum sctp_retransmit_reason reason)
{
	struct net *net = sock_net(q->asoc->base.sk);

	switch (reason) {
	case SCTP_RTXR_T3_RTX:
		SCTP_INC_STATS(net, SCTP_MIB_T3_RETRANSMITS);
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
		/* Update the retran path if the T3-rtx timer has expired for
		 * the current retran path.
		 */
		if (transport == transport->asoc->peer.retran_path)
			sctp_assoc_update_retran_path(transport->asoc);
		transport->asoc->rtx_data_chunks +=
			transport->asoc->unack_data;
		break;
	case SCTP_RTXR_FAST_RTX:
		SCTP_INC_STATS(net, SCTP_MIB_FAST_RETRANSMITS);
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
		q->fast_rtx = 1;
		break;
	case SCTP_RTXR_PMTUD:
		SCTP_INC_STATS(net, SCTP_MIB_PMTUD_RETRANSMITS);
		break;
	case SCTP_RTXR_T1_RTX:
		SCTP_INC_STATS(net, SCTP_MIB_T1_RETRANSMITS);
		transport->asoc->init_retries++;
		break;
	default:
		BUG();
	}

	sctp_retransmit_mark(q, transport, reason);

	/* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
	 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point".
	 */
	if (reason == SCTP_RTXR_T3_RTX)
		q->asoc->stream.si->generate_ftsn(q, q->asoc->ctsn_ack_point);

	/* Flush the queues only on timeout; fast_rtx is only triggered
	 * during SACK processing and the queue will be flushed at the end
	 * of that.
	 */
	if (reason != SCTP_RTXR_FAST_RTX)
		sctp_outq_flush(q, 1, GFP_ATOMIC);
}
597
598
599
600
601
602
603
604
605
/* Transmit chunks from the retransmit queue.
 *
 * @q: the outqueue being serviced
 * @pkt: the packet being filled (bound to pkt->transport)
 * @rtx_timeout: non-zero when called from T3-rtx expiry
 * @start_timer: out-param, set when the T3-rtx timer should be restarted
 *
 * Bundles as many eligible retransmit chunks as fit; whatever does not
 * fit stays on the queue.  Returns 0 on success or the last error from
 * sctp_packet_transmit().
 */
static int __sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
				 int rtx_timeout, int *start_timer, gfp_t gfp)
{
	struct sctp_transport *transport = pkt->transport;
	struct sctp_chunk *chunk, *chunk1;
	struct list_head *lqueue;
	enum sctp_xmit status;
	int error = 0;
	int timer = 0;
	int done = 0;
	int fast_rtx;

	lqueue = &q->retransmit;
	fast_rtx = q->fast_rtx;

	/* This loop handles time-out retransmissions, fast retransmissions,
	 * and retransmissions due to opening of the receive window.
	 */
	list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
		/* Abandoned chunks move to the abandoned queue instead of
		 * being retransmitted.
		 */
		if (sctp_chunk_abandoned(chunk)) {
			list_del_init(&chunk->transmitted_list);
			sctp_insert_list(&q->abandoned,
					 &chunk->transmitted_list);
			continue;
		}

		/* Make sure that Gap Acked TSNs are not retransmitted: move
		 * them back onto the transmitted queue and skip them.
		 */
		if (chunk->tsn_gap_acked) {
			list_move_tail(&chunk->transmitted_list,
				       &transport->transmitted);
			continue;
		}

		/* If we are doing fast retransmit, ignore chunks not marked
		 * for it.
		 */
		if (fast_rtx && !chunk->fast_retransmit)
			continue;

redo:
		/* Attempt to append this chunk to the packet. */
		status = sctp_packet_append_chunk(pkt, chunk);

		switch (status) {
		case SCTP_XMIT_PMTU_FULL:
			if (!pkt->has_data && !pkt->has_cookie_echo) {
				/* If this packet did not contain DATA then
				 * retransmission did not happen, so do it
				 * again.  We'll ignore the error here since
				 * control chunks are already freed so there
				 * is nothing we can do.
				 */
				sctp_packet_transmit(pkt, gfp);
				goto redo;
			}

			/* Send this packet. */
			error = sctp_packet_transmit(pkt, gfp);

			/* If we are retransmitting, we should only
			 * send a single packet.  Otherwise, try appending
			 * this chunk again after flushing.
			 */
			if (rtx_timeout || fast_rtx)
				done = 1;
			else
				goto redo;

			/* Bundle next chunk in the next round. */
			break;

		case SCTP_XMIT_RWND_FULL:
			/* Send this packet. */
			error = sctp_packet_transmit(pkt, gfp);

			/* Stop sending DATA as there is no more room at the
			 * receiver.
			 */
			done = 1;
			break;

		case SCTP_XMIT_DELAY:
			/* Send this packet. */
			error = sctp_packet_transmit(pkt, gfp);

			/* Stop sending DATA because of Nagle-style delay. */
			done = 1;
			break;

		default:
			/* The append was successful, so add this chunk to
			 * the transmitted list.
			 */
			list_move_tail(&chunk->transmitted_list,
				       &transport->transmitted);

			/* Mark the chunk as ineligible for fast retransmit
			 * after it is retransmitted.
			 */
			if (chunk->fast_retransmit == SCTP_NEED_FRTX)
				chunk->fast_retransmit = SCTP_DONT_FRTX;

			q->asoc->stats.rtxchunks++;
			break;
		}

		/* Set the timer if there were no errors. */
		if (!error && !timer)
			timer = 1;

		if (done)
			break;
	}

	/* If we are here due to a retransmit timeout or a fast retransmit
	 * and there are chunks left that could not fit in the PMTU-sized
	 * packet, they must be marked as ineligible for a subsequent fast
	 * retransmit.
	 */
	if (rtx_timeout || fast_rtx) {
		list_for_each_entry(chunk1, lqueue, transmitted_list) {
			if (chunk1->fast_retransmit == SCTP_NEED_FRTX)
				chunk1->fast_retransmit = SCTP_DONT_FRTX;
		}
	}

	*start_timer = timer;

	/* Clear the fast-retransmit hint; it is consumed by one pass. */
	if (fast_rtx)
		q->fast_rtx = 0;

	return error;
}
765
766
767void sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp)
768{
769 if (q->cork)
770 q->cork = 0;
771
772 sctp_outq_flush(q, 0, gfp);
773}
774
775static int sctp_packet_singleton(struct sctp_transport *transport,
776 struct sctp_chunk *chunk, gfp_t gfp)
777{
778 const struct sctp_association *asoc = transport->asoc;
779 const __u16 sport = asoc->base.bind_addr.port;
780 const __u16 dport = asoc->peer.port;
781 const __u32 vtag = asoc->peer.i.init_tag;
782 struct sctp_packet singleton;
783
784 sctp_packet_init(&singleton, transport, sport, dport);
785 sctp_packet_config(&singleton, vtag, 0);
786 sctp_packet_append_chunk(&singleton, chunk);
787 return sctp_packet_transmit(&singleton, gfp);
788}
789
790
/* Context bundled together for one pass of sctp_outq_flush(). */
struct sctp_flush_ctx {
	struct sctp_outq *q;
	/* Current transport being used; not necessarily the active path. */
	struct sctp_transport *transport;
	/* Transports that have a packet pending for this flush. */
	struct list_head transport_list;
	struct sctp_association *asoc;
	/* Packet being filled on the current transport above. */
	struct sctp_packet *packet;
	gfp_t gfp;
};
802
803
/* Choose the transport @chunk should go out on, configure its packet,
 * and make sure the transport is on the send-ready list for this flush.
 * Updates ctx->transport/ctx->packet when the transport changes.
 */
static void sctp_outq_select_transport(struct sctp_flush_ctx *ctx,
				       struct sctp_chunk *chunk)
{
	struct sctp_transport *new_transport = chunk->transport;

	if (!new_transport) {
		if (!sctp_chunk_is_data(chunk)) {
			/* If we have a prior transport pointer, see if the
			 * destination address of the chunk matches the
			 * destination address of the current transport.  If
			 * not, look up the transport by the chunk's
			 * destination address — after processing ASCONFs we
			 * may have new transports.
			 */
			if (ctx->transport && sctp_cmp_addr_exact(&chunk->dest,
							&ctx->transport->ipaddr))
				new_transport = ctx->transport;
			else
				new_transport = sctp_assoc_lookup_paddr(ctx->asoc,
								  &chunk->dest);
		}

		/* If we still don't have a transport, fall back to the
		 * association's current active path.
		 */
		if (!new_transport)
			new_transport = ctx->asoc->peer.active_path;
	} else {
		__u8 type;

		switch (new_transport->state) {
		case SCTP_INACTIVE:
		case SCTP_UNCONFIRMED:
		case SCTP_PF:
			/* The chunk explicitly chose a transport that is not
			 * currently usable.  Only HEARTBEAT, HEARTBEAT-ACK
			 * and ASCONF-ACK are allowed out on such a transport
			 * (e.g. to probe it); everything else is redirected
			 * to the active path.
			 */
			type = chunk->chunk_hdr->type;
			if (type != SCTP_CID_HEARTBEAT &&
			    type != SCTP_CID_HEARTBEAT_ACK &&
			    type != SCTP_CID_ASCONF_ACK)
				new_transport = ctx->asoc->peer.active_path;
			break;
		default:
			break;
		}
	}

	/* Are we switching transports?  Set up the new packet. */
	if (new_transport != ctx->transport) {
		ctx->transport = new_transport;
		ctx->packet = &ctx->transport->packet;

		if (list_empty(&ctx->transport->send_ready))
			list_add_tail(&ctx->transport->send_ready,
				      &ctx->transport_list);

		sctp_packet_config(ctx->packet,
				   ctx->asoc->peer.i.init_tag,
				   ctx->asoc->peer.ecn_capable);
		/* We've switched transports, so apply the Burst limit to the
		 * new transport.
		 */
		sctp_transport_burst_limited(ctx->transport);
	}
}
882
/* Transmit everything pending on the control chunk list, choosing the
 * appropriate transport per chunk and honoring the bundling rules.
 */
static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
{
	struct sctp_chunk *chunk, *tmp;
	enum sctp_xmit status;
	int one_packet, error;

	list_for_each_entry_safe(chunk, tmp, &ctx->q->control_chunk_list, list) {
		one_packet = 0;

		/* RFC 5061, 5.3: until the ASCONF containing the address
		 * add is acknowledged, the sender MUST NOT use the new IP
		 * address as a source for ANY SCTP packet except one
		 * carrying an ASCONF chunk.
		 */
		if (ctx->asoc->src_out_of_asoc_ok &&
		    chunk->chunk_hdr->type != SCTP_CID_ASCONF)
			continue;

		list_del_init(&chunk->list);

		/* Pick the right transport to use. */
		sctp_outq_select_transport(ctx, chunk);

		switch (chunk->chunk_hdr->type) {
		/* RFC 4960 6.10: an endpoint MUST NOT bundle INIT, INIT ACK
		 * or SHUTDOWN COMPLETE with any other chunks — send each as
		 * its own packet.
		 */
		case SCTP_CID_INIT:
		case SCTP_CID_INIT_ACK:
		case SCTP_CID_SHUTDOWN_COMPLETE:
			error = sctp_packet_singleton(ctx->transport, chunk,
						      ctx->gfp);
			if (error < 0) {
				ctx->asoc->base.sk->sk_err = -error;
				return;
			}
			break;

		case SCTP_CID_ABORT:
			if (sctp_test_T_bit(chunk))
				ctx->packet->vtag = ctx->asoc->c.my_vtag;
			/* fall through */

		/* The following chunks are "response" chunks, i.e. they are
		 * generated in response to something we received.  When
		 * sending these, we can send only one packet containing
		 * them.
		 */
		case SCTP_CID_HEARTBEAT_ACK:
		case SCTP_CID_SHUTDOWN_ACK:
		case SCTP_CID_COOKIE_ACK:
		case SCTP_CID_COOKIE_ECHO:
		case SCTP_CID_ERROR:
		case SCTP_CID_ECN_CWR:
		case SCTP_CID_ASCONF_ACK:
			one_packet = 1;
			/* fall through */

		case SCTP_CID_SACK:
		case SCTP_CID_HEARTBEAT:
		case SCTP_CID_SHUTDOWN:
		case SCTP_CID_ECN_ECNE:
		case SCTP_CID_ASCONF:
		case SCTP_CID_FWD_TSN:
		case SCTP_CID_I_FWD_TSN:
		case SCTP_CID_RECONF:
			status = sctp_packet_transmit_chunk(ctx->packet, chunk,
							    one_packet, ctx->gfp);
			if (status != SCTP_XMIT_OK) {
				/* Put the chunk back so we retry later. */
				list_add(&chunk->list, &ctx->q->control_chunk_list);
				break;
			}

			ctx->asoc->stats.octrlchunks++;

			/* PR-SCTP C5) If a FORWARD TSN is sent, the sender
			 * MUST assure that at least one T3-rtx timer is
			 * running.
			 */
			if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN ||
			    chunk->chunk_hdr->type == SCTP_CID_I_FWD_TSN) {
				sctp_transport_reset_t3_rtx(ctx->transport);
				ctx->transport->last_time_sent = jiffies;
			}

			if (chunk == ctx->asoc->strreset_chunk)
				sctp_transport_reset_reconf_timer(ctx->transport);

			break;

		default:
			/* We built a chunk with an illegal type! */
			BUG();
		}
	}
}
984
985
/* Service the retransmit queue on the retransmission path.
 *
 * Returns false when no new data should be sent afterwards (retran
 * path unconfirmed, a COOKIE-ECHO is in the packet, or retransmit data
 * is still pending); true when normal data flushing may continue.
 */
static bool sctp_outq_flush_rtx(struct sctp_flush_ctx *ctx,
				int rtx_timeout)
{
	int error, start_timer = 0;

	if (ctx->asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
		return false;

	if (ctx->transport != ctx->asoc->peer.retran_path) {
		/* Switch transports & prepare the packet. */
		ctx->transport = ctx->asoc->peer.retran_path;
		ctx->packet = &ctx->transport->packet;

		if (list_empty(&ctx->transport->send_ready))
			list_add_tail(&ctx->transport->send_ready,
				      &ctx->transport_list);

		sctp_packet_config(ctx->packet, ctx->asoc->peer.i.init_tag,
				   ctx->asoc->peer.ecn_capable);
	}

	error = __sctp_outq_flush_rtx(ctx->q, ctx->packet, rtx_timeout,
				      &start_timer, ctx->gfp);
	if (error < 0)
		ctx->asoc->base.sk->sk_err = -error;

	if (start_timer) {
		sctp_transport_reset_t3_rtx(ctx->transport);
		ctx->transport->last_time_sent = jiffies;
	}

	/* This can happen on COOKIE-ECHO resend.  Only one chunk can get
	 * bundled with a COOKIE-ECHO.
	 */
	if (ctx->packet->has_cookie_echo)
		return false;

	/* Don't send new data while there is still data waiting to be
	 * retransmitted.
	 */
	if (!list_empty(&ctx->q->retransmit))
		return false;

	return true;
}
1031
/* Send pending DATA chunks: first drain the retransmit queue, then new
 * data from the stream scheduler, as association state and transmit
 * limits allow.
 */
static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
				 int rtx_timeout)
{
	struct sctp_chunk *chunk;
	enum sctp_xmit status;

	/* Is it OK to send data chunks in this state? */
	switch (ctx->asoc->state) {
	case SCTP_STATE_COOKIE_ECHOED:
		/* Only allow bundling when this packet carries a
		 * COOKIE-ECHO chunk.
		 */
		if (!ctx->packet || !ctx->packet->has_cookie_echo)
			return;

		/* fall through */
	case SCTP_STATE_ESTABLISHED:
	case SCTP_STATE_SHUTDOWN_PENDING:
	case SCTP_STATE_SHUTDOWN_RECEIVED:
		break;

	default:
		/* Do nothing. */
		return;
	}

	/* Service the retransmit queue before sending any new data; bail
	 * out if the retransmit pass says new data must not follow.
	 */
	if (!list_empty(&ctx->q->retransmit) &&
	    !sctp_outq_flush_rtx(ctx, rtx_timeout))
		return;

	/* Apply the Max.Burst limit to the current transport before
	 * queueing new data on it.
	 */
	if (ctx->transport)
		sctp_transport_burst_limited(ctx->transport);

	/* Finally, transmit new chunks. */
	while ((chunk = sctp_outq_dequeue_data(ctx->q)) != NULL) {
		__u32 sid = ntohs(chunk->subh.data_hdr->stream);
		__u8 stream_state = SCTP_SO(&ctx->asoc->stream, sid)->state;

		/* Has this chunk expired? */
		if (sctp_chunk_abandoned(chunk)) {
			sctp_sched_dequeue_done(ctx->q, chunk);
			sctp_chunk_fail(chunk, 0);
			sctp_chunk_free(chunk);
			continue;
		}

		/* Streams being reset/closed block new data: push the chunk
		 * back and stop.
		 */
		if (stream_state == SCTP_STREAM_CLOSED) {
			sctp_outq_head_data(ctx->q, chunk);
			break;
		}

		sctp_outq_select_transport(ctx, chunk);

		pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p skb->users:%d\n",
			 __func__, ctx->q, chunk, chunk && chunk->chunk_hdr ?
			 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
			 "illegal chunk", ntohl(chunk->subh.data_hdr->tsn),
			 chunk->skb ? chunk->skb->head : NULL, chunk->skb ?
			 refcount_read(&chunk->skb->users) : -1);

		/* Add the chunk to the packet. */
		status = sctp_packet_transmit_chunk(ctx->packet, chunk, 0,
						    ctx->gfp);
		if (status != SCTP_XMIT_OK) {
			/* We could not append this chunk, so put the chunk
			 * back on the output queue.
			 */
			pr_debug("%s: could not transmit tsn:0x%x, status:%d\n",
				 __func__, ntohl(chunk->subh.data_hdr->tsn),
				 status);

			sctp_outq_head_data(ctx->q, chunk);
			break;
		}

		/* The sender can force immediate SACKs by setting the I-bit
		 * once shutdown is pending.
		 */
		if (ctx->asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
			chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;
		if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
			ctx->asoc->stats.ouodchunks++;
		else
			ctx->asoc->stats.oodchunks++;

		/* Only now it's safe to consider this chunk as sent, as the
		 * scheduler may use it in its computations.
		 */
		sctp_sched_dequeue_done(ctx->q, chunk);

		list_add_tail(&chunk->transmitted_list,
			      &ctx->transport->transmitted);

		sctp_transport_reset_t3_rtx(ctx->transport);
		ctx->transport->last_time_sent = jiffies;

		/* Only one chunk may be bundled with a COOKIE-ECHO; stop
		 * after it was added.
		 */
		if (ctx->packet->has_cookie_echo)
			break;
	}
}
1149
/* Transmit the packet accumulated on every transport that was marked
 * send-ready during this flush pass, then clear each transport's burst
 * limited state.
 */
static void sctp_outq_flush_transports(struct sctp_flush_ctx *ctx)
{
	struct sock *sk = ctx->asoc->base.sk;
	struct list_head *ltransport;
	struct sctp_packet *packet;
	struct sctp_transport *t;
	int error = 0;

	while ((ltransport = sctp_list_dequeue(&ctx->transport_list)) != NULL) {
		t = list_entry(ltransport, struct sctp_transport, send_ready);
		packet = &t->packet;
		if (!sctp_packet_empty(packet)) {
			rcu_read_lock();
			/* Keep the socket's cached route in sync with the
			 * transport actually being used.
			 */
			if (t->dst && __sk_dst_get(sk) != t->dst) {
				dst_hold(t->dst);
				sk_setup_caps(sk, t->dst);
			}
			rcu_read_unlock();
			error = sctp_packet_transmit(packet, ctx->gfp);
			if (error < 0)
				ctx->q->asoc->base.sk->sk_err = -error;
		}

		/* Clear the burst limited state, if any. */
		sctp_transport_burst_reset(t);
	}
}
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
1188{
1189 struct sctp_flush_ctx ctx = {
1190 .q = q,
1191 .transport = NULL,
1192 .transport_list = LIST_HEAD_INIT(ctx.transport_list),
1193 .asoc = q->asoc,
1194 .packet = NULL,
1195 .gfp = gfp,
1196 };
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207 sctp_outq_flush_ctrl(&ctx);
1208
1209 if (q->asoc->src_out_of_asoc_ok)
1210 goto sctp_flush_out;
1211
1212 sctp_outq_flush_data(&ctx, rtx_timeout);
1213
1214sctp_flush_out:
1215
1216 sctp_outq_flush_transports(&ctx);
1217}
1218
1219
1220static void sctp_sack_update_unack_data(struct sctp_association *assoc,
1221 struct sctp_sackhdr *sack)
1222{
1223 union sctp_sack_variable *frags;
1224 __u16 unack_data;
1225 int i;
1226
1227 unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1;
1228
1229 frags = sack->variable;
1230 for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) {
1231 unack_data -= ((ntohs(frags[i].gab.end) -
1232 ntohs(frags[i].gab.start) + 1));
1233 }
1234
1235 assoc->unack_data = unack_data;
1236}
1237
1238
1239
1240
1241
1242
/* Process the incoming SACK carried in @chunk against outqueue @q.
 *
 * Runs the SFR-CACC bookkeeping, credits newly acked chunks on the
 * retransmit and per-transport transmitted queues, advances the
 * cumulative TSN ack point, updates unack_data and the peer's receive
 * window, and tries to advance the Advanced Peer Ack Point (FWD-TSN).
 * Returns non-zero if the outqueue is now completely empty.
 */
int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
{
	struct sctp_association *asoc = q->asoc;
	struct sctp_sackhdr *sack = chunk->subh.sack_hdr;
	struct sctp_transport *transport;
	struct sctp_chunk *tchunk = NULL;
	struct list_head *lchunk, *transport_list, *temp;
	union sctp_sack_variable *frags = sack->variable;
	__u32 sack_ctsn, ctsn, tsn;
	__u32 highest_tsn, highest_new_tsn;
	__u32 sack_a_rwnd;
	unsigned int outstanding;
	struct sctp_transport *primary = asoc->peer.primary_path;
	int count_of_newacks = 0;
	int gap_ack_blocks;
	u8 accum_moved = 0;

	/* Grab the association's destination address list. */
	transport_list = &asoc->peer.transport_addr_list;

	sack_ctsn = ntohl(sack->cum_tsn_ack);
	gap_ack_blocks = ntohs(sack->num_gap_ack_blocks);
	asoc->stats.gapcnt += gap_ack_blocks;
	/* SFR-CACC: if the cumulative ack passes next_tsn_at_change on the
	 * primary, clear CHANGEOVER_ACTIVE and CYCLING_CHANGEOVER for all
	 * destinations; if the SACK has gap acks while a changeover is
	 * active, reset cacc_saw_newack on all destinations.
	 */
	if (primary->cacc.changeover_active) {
		u8 clear_cycling = 0;

		if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) {
			primary->cacc.changeover_active = 0;
			clear_cycling = 1;
		}

		if (clear_cycling || gap_ack_blocks) {
			list_for_each_entry(transport, transport_list,
					    transports) {
				if (clear_cycling)
					transport->cacc.cycling_changeover = 0;
				if (gap_ack_blocks)
					transport->cacc.cacc_saw_newack = 0;
			}
		}
	}

	/* Get the highest TSN in the sack. */
	highest_tsn = sack_ctsn;
	if (gap_ack_blocks)
		highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end);

	if (TSN_lt(asoc->highest_sacked, highest_tsn))
		asoc->highest_sacked = highest_tsn;

	highest_new_tsn = sack_ctsn;

	/* Run through the retransmit queue.  Credit bytes received and
	 * free those chunks that we can.
	 */
	sctp_check_transmitted(q, &q->retransmit, NULL, NULL, sack, &highest_new_tsn);

	/* Run through each transport's transmitted queue, crediting bytes
	 * received; also count destinations that saw a new ack
	 * (SFR-CACC step C).
	 */
	list_for_each_entry(transport, transport_list, transports) {
		sctp_check_transmitted(q, &transport->transmitted,
				       transport, &chunk->source, sack,
				       &highest_new_tsn);

		if (transport->cacc.cacc_saw_newack)
			count_of_newacks++;
	}

	/* Move the Cumulative TSN Ack Point if appropriate. */
	if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) {
		asoc->ctsn_ack_point = sack_ctsn;
		accum_moved = 1;
	}

	if (gap_ack_blocks) {
		/* In fast recovery with an advancing cumulative ack, treat
		 * everything up to the highest sacked TSN as newly acked
		 * for missing-report purposes.
		 */
		if (asoc->fast_recovery && accum_moved)
			highest_new_tsn = highest_tsn;

		list_for_each_entry(transport, transport_list, transports)
			sctp_mark_missing(q, &transport->transmitted, transport,
					  highest_new_tsn, count_of_newacks);
	}

	/* Update unack_data field in the assoc. */
	sctp_sack_update_unack_data(asoc, sack);

	ctsn = asoc->ctsn_ack_point;

	/* Throw away chunks rotting on the sacked queue once they are
	 * covered by the cumulative ack.
	 */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		tchunk = list_entry(lchunk, struct sctp_chunk,
				    transmitted_list);
		tsn = ntohl(tchunk->subh.data_hdr->tsn);
		if (TSN_lte(tsn, ctsn)) {
			list_del_init(&tchunk->transmitted_list);
			/* NOTE(review): this tests the SACK chunk's sinfo
			 * flags (chunk->sinfo), not the freed data chunk's
			 * (tchunk->sinfo) — looks suspicious; confirm which
			 * is intended before changing.
			 */
			if (asoc->peer.prsctp_capable &&
			    SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags))
				asoc->sent_cnt_removable--;
			sctp_chunk_free(tchunk);
		}
	}

	/* RFC 4960 6.2.1 ii) Set rwnd equal to the newly received a_rwnd
	 * minus the number of bytes still outstanding after processing
	 * the Cumulative TSN Ack and the Gap Ack Blocks.
	 */
	sack_a_rwnd = ntohl(sack->a_rwnd);
	asoc->peer.zero_window_announced = !sack_a_rwnd;
	outstanding = q->outstanding_bytes;

	if (outstanding < sack_a_rwnd)
		sack_a_rwnd -= outstanding;
	else
		sack_a_rwnd = 0;

	asoc->peer.rwnd = sack_a_rwnd;

	asoc->stream.si->generate_ftsn(q, sack_ctsn);

	pr_debug("%s: sack cumulative tsn ack:0x%x\n", __func__, sack_ctsn);
	pr_debug("%s: cumulative tsn ack of assoc:%p is 0x%x, "
		 "advertised peer ack point:0x%x\n", __func__, asoc, ctsn,
		 asoc->adv_peer_ack_point);

	return sctp_outq_is_empty(q);
}
1396
1397
1398
1399
1400
1401int sctp_outq_is_empty(const struct sctp_outq *q)
1402{
1403 return q->out_qlen == 0 && q->outstanding_bytes == 0 &&
1404 list_empty(&q->retransmit);
1405}
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421static void sctp_check_transmitted(struct sctp_outq *q,
1422 struct list_head *transmitted_queue,
1423 struct sctp_transport *transport,
1424 union sctp_addr *saddr,
1425 struct sctp_sackhdr *sack,
1426 __u32 *highest_new_tsn_in_sack)
1427{
1428 struct list_head *lchunk;
1429 struct sctp_chunk *tchunk;
1430 struct list_head tlist;
1431 __u32 tsn;
1432 __u32 sack_ctsn;
1433 __u32 rtt;
1434 __u8 restart_timer = 0;
1435 int bytes_acked = 0;
1436 int migrate_bytes = 0;
1437 bool forward_progress = false;
1438
1439 sack_ctsn = ntohl(sack->cum_tsn_ack);
1440
1441 INIT_LIST_HEAD(&tlist);
1442
1443
1444 while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) {
1445 tchunk = list_entry(lchunk, struct sctp_chunk,
1446 transmitted_list);
1447
1448 if (sctp_chunk_abandoned(tchunk)) {
1449
1450 sctp_insert_list(&q->abandoned, lchunk);
1451
1452
1453
1454
1455 if (transmitted_queue != &q->retransmit &&
1456 !tchunk->tsn_gap_acked) {
1457 if (tchunk->transport)
1458 tchunk->transport->flight_size -=
1459 sctp_data_size(tchunk);
1460 q->outstanding_bytes -= sctp_data_size(tchunk);
1461 }
1462 continue;
1463 }
1464
1465 tsn = ntohl(tchunk->subh.data_hdr->tsn);
1466 if (sctp_acked(sack, tsn)) {
1467
1468
1469
1470
1471
1472 if (transport && !tchunk->tsn_gap_acked) {
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484 if (!sctp_chunk_retransmitted(tchunk) &&
1485 tchunk->rtt_in_progress) {
1486 tchunk->rtt_in_progress = 0;
1487 rtt = jiffies - tchunk->sent_at;
1488 sctp_transport_update_rto(transport,
1489 rtt);
1490 }
1491
1492 if (TSN_lte(tsn, sack_ctsn)) {
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506 if (sack->num_gap_ack_blocks &&
1507 q->asoc->peer.primary_path->cacc.
1508 changeover_active)
1509 transport->cacc.cacc_saw_newack
1510 = 1;
1511 }
1512 }
1513
1514
1515
1516
1517
1518
1519
1520 if (!tchunk->tsn_gap_acked) {
1521 tchunk->tsn_gap_acked = 1;
1522 if (TSN_lt(*highest_new_tsn_in_sack, tsn))
1523 *highest_new_tsn_in_sack = tsn;
1524 bytes_acked += sctp_data_size(tchunk);
1525 if (!tchunk->transport)
1526 migrate_bytes += sctp_data_size(tchunk);
1527 forward_progress = true;
1528 }
1529
1530 if (TSN_lte(tsn, sack_ctsn)) {
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540 restart_timer = 1;
1541 forward_progress = true;
1542
1543 list_add_tail(&tchunk->transmitted_list,
1544 &q->sacked);
1545 } else {
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561 list_add_tail(lchunk, &tlist);
1562 }
1563 } else {
1564 if (tchunk->tsn_gap_acked) {
1565 pr_debug("%s: receiver reneged on data TSN:0x%x\n",
1566 __func__, tsn);
1567
1568 tchunk->tsn_gap_acked = 0;
1569
1570 if (tchunk->transport)
1571 bytes_acked -= sctp_data_size(tchunk);
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582 restart_timer = 1;
1583 }
1584
1585 list_add_tail(lchunk, &tlist);
1586 }
1587 }
1588
1589 if (transport) {
1590 if (bytes_acked) {
1591 struct sctp_association *asoc = transport->asoc;
1592
1593
1594
1595
1596
1597
1598
1599 bytes_acked -= migrate_bytes;
1600
1601
1602
1603
1604
1605
1606
1607
1608 transport->error_count = 0;
1609 transport->asoc->overall_error_count = 0;
1610 forward_progress = true;
1611
1612
1613
1614
1615
1616
1617
1618 if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
1619 del_timer(&asoc->timers
1620 [SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
1621 sctp_association_put(asoc);
1622
1623
1624
1625
1626 if ((transport->state == SCTP_INACTIVE ||
1627 transport->state == SCTP_UNCONFIRMED) &&
1628 sctp_cmp_addr_exact(&transport->ipaddr, saddr)) {
1629 sctp_assoc_control_transport(
1630 transport->asoc,
1631 transport,
1632 SCTP_TRANSPORT_UP,
1633 SCTP_RECEIVED_SACK);
1634 }
1635
1636 sctp_transport_raise_cwnd(transport, sack_ctsn,
1637 bytes_acked);
1638
1639 transport->flight_size -= bytes_acked;
1640 if (transport->flight_size == 0)
1641 transport->partial_bytes_acked = 0;
1642 q->outstanding_bytes -= bytes_acked + migrate_bytes;
1643 } else {
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658 if (!q->asoc->peer.rwnd &&
1659 !list_empty(&tlist) &&
1660 (sack_ctsn+2 == q->asoc->next_tsn) &&
1661 q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
1662 pr_debug("%s: sack received for zero window "
1663 "probe:%u\n", __func__, sack_ctsn);
1664
1665 q->asoc->overall_error_count = 0;
1666 transport->error_count = 0;
1667 }
1668 }
1669
1670
1671
1672
1673
1674
1675
1676 if (!transport->flight_size) {
1677 if (del_timer(&transport->T3_rtx_timer))
1678 sctp_transport_put(transport);
1679 } else if (restart_timer) {
1680 if (!mod_timer(&transport->T3_rtx_timer,
1681 jiffies + transport->rto))
1682 sctp_transport_hold(transport);
1683 }
1684
1685 if (forward_progress) {
1686 if (transport->dst)
1687 sctp_transport_dst_confirm(transport);
1688 }
1689 }
1690
1691 list_splice(&tlist, transmitted_queue);
1692}
1693
1694
1695static void sctp_mark_missing(struct sctp_outq *q,
1696 struct list_head *transmitted_queue,
1697 struct sctp_transport *transport,
1698 __u32 highest_new_tsn_in_sack,
1699 int count_of_newacks)
1700{
1701 struct sctp_chunk *chunk;
1702 __u32 tsn;
1703 char do_fast_retransmit = 0;
1704 struct sctp_association *asoc = q->asoc;
1705 struct sctp_transport *primary = asoc->peer.primary_path;
1706
1707 list_for_each_entry(chunk, transmitted_queue, transmitted_list) {
1708
1709 tsn = ntohl(chunk->subh.data_hdr->tsn);
1710
1711
1712
1713
1714
1715
1716
1717
1718 if (chunk->fast_retransmit == SCTP_CAN_FRTX &&
1719 !chunk->tsn_gap_acked &&
1720 TSN_lt(tsn, highest_new_tsn_in_sack)) {
1721
1722
1723
1724
1725 if (!transport || !sctp_cacc_skip(primary,
1726 chunk->transport,
1727 count_of_newacks, tsn)) {
1728 chunk->tsn_missing_report++;
1729
1730 pr_debug("%s: tsn:0x%x missing counter:%d\n",
1731 __func__, tsn, chunk->tsn_missing_report);
1732 }
1733 }
1734
1735
1736
1737
1738
1739
1740
1741 if (chunk->tsn_missing_report >= 3) {
1742 chunk->fast_retransmit = SCTP_NEED_FRTX;
1743 do_fast_retransmit = 1;
1744 }
1745 }
1746
1747 if (transport) {
1748 if (do_fast_retransmit)
1749 sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);
1750
1751 pr_debug("%s: transport:%p, cwnd:%d, ssthresh:%d, "
1752 "flight_size:%d, pba:%d\n", __func__, transport,
1753 transport->cwnd, transport->ssthresh,
1754 transport->flight_size, transport->partial_bytes_acked);
1755 }
1756}
1757
1758
1759static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
1760{
1761 __u32 ctsn = ntohl(sack->cum_tsn_ack);
1762 union sctp_sack_variable *frags;
1763 __u16 tsn_offset, blocks;
1764 int i;
1765
1766 if (TSN_lte(tsn, ctsn))
1767 goto pass;
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781 frags = sack->variable;
1782 blocks = ntohs(sack->num_gap_ack_blocks);
1783 tsn_offset = tsn - ctsn;
1784 for (i = 0; i < blocks; ++i) {
1785 if (tsn_offset >= ntohs(frags[i].gab.start) &&
1786 tsn_offset <= ntohs(frags[i].gab.end))
1787 goto pass;
1788 }
1789
1790 return 0;
1791pass:
1792 return 1;
1793}
1794
1795static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
1796 int nskips, __be16 stream)
1797{
1798 int i;
1799
1800 for (i = 0; i < nskips; i++) {
1801 if (skiplist[i].stream == stream)
1802 return i;
1803 }
1804 return i;
1805}
1806
1807
/* Advance the association's "advanced peer ack point" over abandoned
 * (PR-SCTP) chunks and, if it moves past the peer's cumulative TSN ack
 * (ctsn), queue a FORWARD TSN chunk telling the peer to skip them.
 * No-op unless the peer is PR-SCTP capable.
 */
void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
{
	struct sctp_association *asoc = q->asoc;
	struct sctp_chunk *ftsn_chunk = NULL;
	/* At most 10 per-stream skip entries are reported in one chunk. */
	struct sctp_fwdtsn_skip ftsn_skip_arr[10];
	int nskips = 0;
	int skip_pos = 0;
	__u32 tsn;
	struct sctp_chunk *chunk;
	struct list_head *lchunk, *temp;

	if (!asoc->peer.prsctp_capable)
		return;

	/* The advanced ack point can never lag the cumulative ack point:
	 * pull it forward if the peer has already acked beyond it.
	 */
	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
		asoc->adv_peer_ack_point = ctsn;

	/* Walk the abandoned queue (assumed kept in TSN order — the early
	 * 'break' below depends on that):
	 *   - chunks at or below ctsn are already covered by the peer's
	 *     cumulative ack and can simply be freed;
	 *   - a chunk whose TSN is the immediate successor of the current
	 *     advanced ack point extends it; for ordered chunks a
	 *     stream/SSN skip entry is recorded (deduplicated per stream
	 *     via sctp_get_skip_pos), unordered chunks need no entry;
	 *   - the first gap in TSNs stops the advance.
	 */
	list_for_each_safe(lchunk, temp, &q->abandoned) {
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		tsn = ntohl(chunk->subh.data_hdr->tsn);

		if (TSN_lte(tsn, ctsn)) {
			list_del_init(lchunk);
			sctp_chunk_free(chunk);
		} else {
			if (TSN_lte(tsn, asoc->adv_peer_ack_point+1)) {
				asoc->adv_peer_ack_point = tsn;
				if (chunk->chunk_hdr->flags &
					 SCTP_DATA_UNORDERED)
					continue;
				skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0],
						nskips,
						chunk->subh.data_hdr->stream);
				ftsn_skip_arr[skip_pos].stream =
					chunk->subh.data_hdr->stream;
				ftsn_skip_arr[skip_pos].ssn =
					 chunk->subh.data_hdr->ssn;
				/* skip_pos == nskips means a new stream
				 * entry was appended rather than updated.
				 */
				if (skip_pos == nskips)
					nskips++;
				if (nskips == 10)
					break;
			} else
				break;
		}
	}

	/* Only send a FORWARD TSN if the advanced point actually moved
	 * beyond what the peer has already cumulatively acked.
	 * NOTE(review): plain '>' rather than TSN_lt — presumably fine in
	 * practice but does not use serial-number arithmetic; confirm
	 * against wraparound handling elsewhere.
	 */
	if (asoc->adv_peer_ack_point > ctsn)
		ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point,
					      nskips, &ftsn_skip_arr[0]);

	if (ftsn_chunk) {
		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
	}
}
1912