/* SCTP kernel implementation
 *
 * These functions implement the sctp_outq class.  The outqueue handles
 * bundling and queueing of outgoing SCTP chunks.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/list.h>
#include <linux/socket.h>
#include <linux/ip.h>
#include <linux/slab.h>
#include <net/sock.h>

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
static void sctp_check_transmitted(struct sctp_outq *q,
                                   struct list_head *transmitted_queue,
                                   struct sctp_transport *transport,
                                   union sctp_addr *saddr,
                                   struct sctp_sackhdr *sack,
                                   __u32 *highest_new_tsn);

static void sctp_mark_missing(struct sctp_outq *q,
                              struct list_head *transmitted_queue,
                              struct sctp_transport *transport,
                              __u32 highest_new_tsn,
                              int count_of_newacks);

static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);

static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout);

/* Add data to the front of the queue. */
static inline void sctp_outq_head_data(struct sctp_outq *q,
                                       struct sctp_chunk *ch)
{
        list_add(&ch->list, &q->out_chunk_list);
        q->out_qlen += ch->skb->len;
}

/* Take data from the front of the queue. */
static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
{
        struct sctp_chunk *ch = NULL;

        if (!list_empty(&q->out_chunk_list)) {
                struct list_head *entry = q->out_chunk_list.next;

                ch = list_entry(entry, struct sctp_chunk, list);
                list_del_init(entry);
                q->out_qlen -= ch->skb->len;
        }
        return ch;
}

/* Add data chunk to the end of the queue. */
static inline void sctp_outq_tail_data(struct sctp_outq *q,
                                       struct sctp_chunk *ch)
{
        list_add_tail(&ch->list, &q->out_chunk_list);
        q->out_qlen += ch->skb->len;
}

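/*
 * SFR-CACC algorithm:
 * D) If count_of_newacks is greater than or equal to 2
 * and t was not sent to the current primary then the
 * sender MUST NOT increment missing report count for t.
 */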
static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
                                       struct sctp_transport *transport,
                                       int count_of_newacks)
{
        if (count_of_newacks >= 2 && transport != primary)
                return 1;
        return 0;
}

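/*
 * SFR-CACC algorithm:
 * F) If count_of_newacks is less than 2, let d be the
 * destination to which t was sent.  If cacc_saw_newack is 0
 * for destination d, then the sender MUST NOT increment the
 * missing report count for t.
 */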
static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport,
                                       int count_of_newacks)
{
        if (count_of_newacks < 2 &&
            (transport && !transport->cacc.cacc_saw_newack))
                return 1;
        return 0;
}

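/*
 * SFR-CACC algorithm:
 * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD
 * execute steps C, D, F.
 *
 * C has been implemented in sctp_outq_sack.
 */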
static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary,
                                     struct sctp_transport *transport,
                                     int count_of_newacks)
{
        if (!primary->cacc.cycling_changeover) {
                if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks))
                        return 1;
                if (sctp_cacc_skip_3_1_f(transport, count_of_newacks))
                        return 1;
                return 0;
        }
        return 0;
}

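/*
 * SFR-CACC algorithm:
 * 3.2) Else, if CYCLING_CHANGEOVER is 1, and t is less
 * than next_tsn_at_change of the current primary, then
 * the sender MUST NOT increment missing report count
 * for t.
 */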
static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn)
{
        if (primary->cacc.cycling_changeover &&
            TSN_lt(tsn, primary->cacc.next_tsn_at_change))
                return 1;
        return 0;
}

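/*
 * SFR-CACC algorithm:
 * 3) If the missing report count for TSN t is to be
 * incremented and CHANGEOVER_ACTIVE is set, then the sender
 * MUST further execute steps 3.1 and 3.2 to determine if the
 * missing report count for TSN t SHOULD NOT be incremented.
 *
 * 3.3) If 3.1 and 3.2 do not dictate that the missing report
 * count for t should not be incremented, then the sender
 * SHOULD increment the missing report count for t.
 */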
static inline int sctp_cacc_skip(struct sctp_transport *primary,
                                 struct sctp_transport *transport,
                                 int count_of_newacks,
                                 __u32 tsn)
{
        if (primary->cacc.changeover_active &&
            (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) ||
             sctp_cacc_skip_3_2(primary, tsn)))
                return 1;
        return 0;
}

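/* Initialize an existing sctp_outq structure. */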
void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
{
        memset(q, 0, sizeof(struct sctp_outq));

        q->asoc = asoc;
        INIT_LIST_HEAD(&q->out_chunk_list);
        INIT_LIST_HEAD(&q->control_chunk_list);
        INIT_LIST_HEAD(&q->retransmit);
        INIT_LIST_HEAD(&q->sacked);
        INIT_LIST_HEAD(&q->abandoned);

        q->empty = 1;
}

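/* Free the outqueue structure and any related pending chunks. */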
static void __sctp_outq_teardown(struct sctp_outq *q)
{
        struct sctp_transport *transport;
        struct list_head *lchunk, *temp;
        struct sctp_chunk *chunk, *tmp;

        /* Throw away unacknowledged chunks. */
        list_for_each_entry(transport, &q->asoc->peer.transport_addr_list,
                            transports) {
                while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) {
                        chunk = list_entry(lchunk, struct sctp_chunk,
                                           transmitted_list);
                        /* Mark as part of a failed message. */
                        sctp_chunk_fail(chunk, q->error);
                        sctp_chunk_free(chunk);
                }
        }

        /* Throw away chunks that have been gap ACKed but are above the
         * CTSN.
         */
        list_for_each_safe(lchunk, temp, &q->sacked) {
                list_del_init(lchunk);
                chunk = list_entry(lchunk, struct sctp_chunk,
                                   transmitted_list);
                sctp_chunk_fail(chunk, q->error);
                sctp_chunk_free(chunk);
        }

        /* Throw away any chunks in the retransmit queue. */
        list_for_each_safe(lchunk, temp, &q->retransmit) {
                list_del_init(lchunk);
                chunk = list_entry(lchunk, struct sctp_chunk,
                                   transmitted_list);
                sctp_chunk_fail(chunk, q->error);
                sctp_chunk_free(chunk);
        }

        /* Throw away any chunks that are in the abandoned queue. */
        list_for_each_safe(lchunk, temp, &q->abandoned) {
                list_del_init(lchunk);
                chunk = list_entry(lchunk, struct sctp_chunk,
                                   transmitted_list);
                sctp_chunk_fail(chunk, q->error);
                sctp_chunk_free(chunk);
        }

        /* Throw away any leftover data chunks. */
        while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
                /* Mark as send failure. */
                sctp_chunk_fail(chunk, q->error);
                sctp_chunk_free(chunk);
        }

        /* Throw away any leftover control chunks. */
        list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
                list_del_init(&chunk->list);
                sctp_chunk_free(chunk);
        }
}

void sctp_outq_teardown(struct sctp_outq *q)
{
        __sctp_outq_teardown(q);
        sctp_outq_init(q->asoc, q);
}

/* Free the outqueue structure and any related pending chunks. */
void sctp_outq_free(struct sctp_outq *q)
{
        /* Throw away leftover chunks. */
        __sctp_outq_teardown(q);
}

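/* Put a new chunk in an sctp_outq. */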
int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
{
        struct net *net = sock_net(q->asoc->base.sk);
        int error = 0;

        pr_debug("%s: outq:%p, chunk:%p[%s]\n", __func__, q, chunk,
                 chunk && chunk->chunk_hdr ?
                 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
                 "illegal chunk");

        /* If it is data, queue it up, otherwise, send it
         * immediately.
         */
        if (sctp_chunk_is_data(chunk)) {
                /* Is it OK to queue data chunks?  Once the association
                 * has entered a SHUTDOWN sequence it stops accepting new
                 * data from its user (RFC 2960, Section 9).
                 */
                switch (q->asoc->state) {
                case SCTP_STATE_CLOSED:
                case SCTP_STATE_SHUTDOWN_PENDING:
                case SCTP_STATE_SHUTDOWN_SENT:
                case SCTP_STATE_SHUTDOWN_RECEIVED:
                case SCTP_STATE_SHUTDOWN_ACK_SENT:
                        /* Cannot send after transport endpoint shutdown */
                        error = -ESHUTDOWN;
                        break;

                default:
                        pr_debug("%s: outqueueing: outq:%p, chunk:%p[%s]\n",
                                 __func__, q, chunk, chunk && chunk->chunk_hdr ?
                                 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
                                 "illegal chunk");

                        sctp_outq_tail_data(q, chunk);
                        if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
                                SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
                        else
                                SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
                        q->empty = 0;
                        break;
                }
        } else {
                list_add_tail(&chunk->list, &q->control_chunk_list);
                SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
        }

        if (error < 0)
                return error;

        if (!q->cork)
                error = sctp_outq_flush(q, 0);

        return error;
}

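/* Insert a chunk into the sorted list based on the TSNs.  The retransmit list
 * is in ascending order.
 */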
static void sctp_insert_list(struct list_head *head, struct list_head *new)
{
        struct list_head *pos;
        struct sctp_chunk *nchunk, *lchunk;
        __u32 ntsn, ltsn;
        int done = 0;

        nchunk = list_entry(new, struct sctp_chunk, transmitted_list);
        ntsn = ntohl(nchunk->subh.data_hdr->tsn);

        list_for_each(pos, head) {
                lchunk = list_entry(pos, struct sctp_chunk, transmitted_list);
                ltsn = ntohl(lchunk->subh.data_hdr->tsn);
                if (TSN_lt(ntsn, ltsn)) {
                        list_add(new, pos->prev);
                        done = 1;
                        break;
                }
        }
        if (!done)
                list_add_tail(new, head);
}

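/* Mark all the eligible packets on a transport for retransmission. */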
void sctp_retransmit_mark(struct sctp_outq *q,
                          struct sctp_transport *transport,
                          __u8 reason)
{
        struct list_head *lchunk, *ltemp;
        struct sctp_chunk *chunk;

        /* Walk through the specified transmitted queue. */
        list_for_each_safe(lchunk, ltemp, &transport->transmitted) {
                chunk = list_entry(lchunk, struct sctp_chunk,
                                   transmitted_list);

                /* If the chunk is abandoned, move it to the abandoned list. */
                if (sctp_chunk_abandoned(chunk)) {
                        list_del_init(lchunk);
                        sctp_insert_list(&q->abandoned, lchunk);

                        /* If this chunk has not been previously acked,
                         * stop considering it 'outstanding': remove it
                         * from the flight size and outstanding byte
                         * accounting and return its size to the peer's
                         * receive window.
                         */
                        if (!chunk->tsn_gap_acked) {
                                if (chunk->transport)
                                        chunk->transport->flight_size -=
                                                        sctp_data_size(chunk);
                                q->outstanding_bytes -= sctp_data_size(chunk);
                                q->asoc->peer.rwnd += sctp_data_size(chunk);
                        }
                        continue;
                }

                /* If we are doing retransmission due to a timeout or pmtu
                 * discovery, only the chunks that are not yet acked should
                 * be added to the retransmit queue.
                 */
                if ((reason == SCTP_RTXR_FAST_RTX &&
                     (chunk->fast_retransmit == SCTP_NEED_FRTX)) ||
                    (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) {
                        /* Any time a DATA chunk is marked for retransmission,
                         * return its size to the peer's rwnd and remove it
                         * from the flight and outstanding byte accounting;
                         * it is counted again when actually retransmitted.
                         */
                        q->asoc->peer.rwnd += sctp_data_size(chunk);
                        q->outstanding_bytes -= sctp_data_size(chunk);
                        if (chunk->transport)
                                transport->flight_size -= sctp_data_size(chunk);

                        /* Reset the missing-report counter for a chunk that
                         * is being marked for retransmission.
                         */
                        chunk->tsn_missing_report = 0;

                        /* If a chunk that is being used for RTT measurement
                         * has to be retransmitted, we cannot use this chunk
                         * any more for the measurement, so discard the
                         * pending RTT measurement.
                         */
                        if (chunk->rtt_in_progress) {
                                chunk->rtt_in_progress = 0;
                                transport->rto_pending = 0;
                        }

                        /* Move the chunk to the retransmit queue.  The chunks
                         * on the retransmit queue are always kept in order.
                         */
                        list_del_init(lchunk);
                        sctp_insert_list(&q->retransmit, lchunk);
                }
        }

        pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d, "
                 "flight_size:%d, pba:%d\n", __func__, transport, reason,
                 transport->cwnd, transport->ssthresh, transport->flight_size,
                 transport->partial_bytes_acked);
}

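/* Mark all the eligible packets on a transport for retransmission and force
 * one packet out.
 */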
void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
                     sctp_retransmit_reason_t reason)
{
        struct net *net = sock_net(q->asoc->base.sk);
        int error = 0;

        switch (reason) {
        case SCTP_RTXR_T3_RTX:
                SCTP_INC_STATS(net, SCTP_MIB_T3_RETRANSMITS);
                sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
                /* Update the retran path if the T3-rtx timer has expired for
                 * the current retran path.
                 */
                if (transport == transport->asoc->peer.retran_path)
                        sctp_assoc_update_retran_path(transport->asoc);
                transport->asoc->rtx_data_chunks +=
                        transport->asoc->unack_data;
                break;
        case SCTP_RTXR_FAST_RTX:
                SCTP_INC_STATS(net, SCTP_MIB_FAST_RETRANSMITS);
                sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
                q->fast_rtx = 1;
                break;
        case SCTP_RTXR_PMTUD:
                SCTP_INC_STATS(net, SCTP_MIB_PMTUD_RETRANSMITS);
                break;
        case SCTP_RTXR_T1_RTX:
                SCTP_INC_STATS(net, SCTP_MIB_T1_RETRANSMITS);
                transport->asoc->init_retries++;
                break;
        default:
                BUG();
        }

        sctp_retransmit_mark(q, transport, reason);

        /* When a T3-rtx timer expires, also try to advance the
         * "Advanced.Peer.Ack.Point" (PR-SCTP).
         */
        if (reason == SCTP_RTXR_T3_RTX)
                sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);

        /* Flush the queues only on timeout, since fast_rtx is only
         * triggered during sack processing and the queue
         * will be flushed at the end.
         */
        if (reason != SCTP_RTXR_FAST_RTX)
                error = sctp_outq_flush(q, 1);

        if (error)
                q->asoc->base.sk->sk_err = -error;
}

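/*
 * Transmit DATA chunks on the retransmit queue.  Upon return from
 * sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which
 * need to be transmitted by the caller.
 * We assume that pkt->transport has already been set.
 *
 * The return value is a normal kernel error return value.
 */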
static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
                               int rtx_timeout, int *start_timer)
{
        struct list_head *lqueue;
        struct sctp_transport *transport = pkt->transport;
        sctp_xmit_t status;
        struct sctp_chunk *chunk, *chunk1;
        int fast_rtx;
        int error = 0;
        int timer = 0;
        int done = 0;

        lqueue = &q->retransmit;
        fast_rtx = q->fast_rtx;

        /* This loop handles time-out retransmissions, fast retransmissions,
         * and retransmissions due to opening of the receive window.
         *
         * When retransmitting because a timeout or a fast retransmit
         * occurred, only ONE packet of retransmitted DATA is sent.  If we
         * are just flushing the queue due to an open window, we try to send
         * as much as possible.
         */
        list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
                /* If the chunk is abandoned, move it to the abandoned list. */
                if (sctp_chunk_abandoned(chunk)) {
                        list_del_init(&chunk->transmitted_list);
                        sctp_insert_list(&q->abandoned,
                                         &chunk->transmitted_list);
                        continue;
                }

                /* Make sure that Gap Acked TSNs are not retransmitted.  A
                 * simple approach is just to move such TSNs out of the
                 * way and into a 'transmitted' queue and skip to the
                 * next chunk.
                 */
                if (chunk->tsn_gap_acked) {
                        list_move_tail(&chunk->transmitted_list,
                                       &transport->transmitted);
                        continue;
                }

                /* If we are doing fast retransmit, ignore non-fast_retransmit
                 * chunks.
                 */
                if (fast_rtx && !chunk->fast_retransmit)
                        continue;

redo:
                /* Attempt to append this chunk to the packet. */
                status = sctp_packet_append_chunk(pkt, chunk);

                switch (status) {
                case SCTP_XMIT_PMTU_FULL:
                        if (!pkt->has_data && !pkt->has_cookie_echo) {
                                /* If this packet did not contain DATA then
                                 * retransmission did not happen, so do it
                                 * again.  We'll ignore the error here since
                                 * control chunks are already freed so there
                                 * is nothing we can do.
                                 */
                                sctp_packet_transmit(pkt);
                                goto redo;
                        }

                        /* Send this packet. */
                        error = sctp_packet_transmit(pkt);

                        /* If we are retransmitting, we should only want a
                         * single packet, so stop here.  Otherwise try to
                         * bundle this chunk into the next packet.
                         */
                        if (rtx_timeout || fast_rtx)
                                done = 1;
                        else
                                goto redo;

                        /* Bundle next chunk in the next round. */
                        break;

                case SCTP_XMIT_RWND_FULL:
                        /* Send this packet. */
                        error = sctp_packet_transmit(pkt);

                        /* Stop sending DATA as there is no more room
                         * at the receiver.
                         */
                        done = 1;
                        break;

                case SCTP_XMIT_NAGLE_DELAY:
                        /* Send this packet. */
                        error = sctp_packet_transmit(pkt);

                        /* Stop sending DATA because of the nagle delay. */
                        done = 1;
                        break;

                default:
                        /* The append was successful, so add this chunk to
                         * the transmitted list.
                         */
                        list_move_tail(&chunk->transmitted_list,
                                       &transport->transmitted);

                        /* Mark the chunk as ineligible for fast retransmit
                         * after it is retransmitted.
                         */
                        if (chunk->fast_retransmit == SCTP_NEED_FRTX)
                                chunk->fast_retransmit = SCTP_DONT_FRTX;

                        q->empty = 0;
                        q->asoc->stats.rtxchunks++;
                        break;
                }

                /* Set the timer if there were no errors. */
                if (!error && !timer)
                        timer = 1;

                if (done)
                        break;
        }

        /* If we are here due to a retransmit timeout or a fast
         * retransmit and if there are any chunks left in the retransmit
         * queue that could not fit in the PMTU sized packet, they need
         * to be marked as ineligible for a subsequent fast retransmit.
         */
        if (rtx_timeout || fast_rtx) {
                list_for_each_entry(chunk1, lqueue, transmitted_list) {
                        if (chunk1->fast_retransmit == SCTP_NEED_FRTX)
                                chunk1->fast_retransmit = SCTP_DONT_FRTX;
                }
        }

        *start_timer = timer;

        /* Clear fast retransmit hint. */
        if (fast_rtx)
                q->fast_rtx = 0;

        return error;
}

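/* Uncork the outqueue and flush any chunks that were queued while it was
 * corked.
 */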
int sctp_outq_uncork(struct sctp_outq *q)
{
        if (q->cork)
                q->cork = 0;

        return sctp_outq_flush(q, 0);
}

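/*
 * Try to flush an outqueue.
 *
 * Description: Send everything in q which we legally can, subject to
 * congestion and other constraints.  Control chunks are sent first,
 * followed by any DATA on the retransmit queue, followed by new DATA.
 *
 * Note: This function can be called from multiple contexts, so appropriate
 * locking concerns must be addressed by the caller.
 */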
static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
{
        struct sctp_packet *packet;
        struct sctp_packet singleton;
        struct sctp_association *asoc = q->asoc;
        __u16 sport = asoc->base.bind_addr.port;
        __u16 dport = asoc->peer.port;
        __u32 vtag = asoc->peer.i.init_tag;
        struct sctp_transport *transport = NULL;
        struct sctp_transport *new_transport;
        struct sctp_chunk *chunk, *tmp;
        sctp_xmit_t status;
        int error = 0;
        int start_timer = 0;
        int one_packet = 0;

        /* These transports will be the ones we send to. */
        struct list_head transport_list;
        struct list_head *ltransport;

        INIT_LIST_HEAD(&transport_list);
        packet = NULL;

        /* RFC 2960 6.10 Bundling: control chunks are placed first in the
         * outbound packet and DATA chunks are transmitted in increasing
         * order of TSN, so flush the control chunk list first.
         */
        list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
                /* RFC 5061: until the ASCONF adding a new address has been
                 * acknowledged, that address may only be used as a source
                 * for packets carrying an ASCONF chunk.
                 */
                if (asoc->src_out_of_asoc_ok &&
                    chunk->chunk_hdr->type != SCTP_CID_ASCONF)
                        continue;

                list_del_init(&chunk->list);

                /* Pick the right transport to use. */
                new_transport = chunk->transport;

                if (!new_transport) {
                        /* If we have a prior transport pointer, see if
                         * the destination address of the chunk
                         * matches the destination address of the
                         * current transport.  If not a match, then
                         * try to look up the transport with a given
                         * destination address.  We do this because
                         * after processing ASCONFs, we may have new
                         * transports created.
                         */
                        if (transport &&
                            sctp_cmp_addr_exact(&chunk->dest,
                                                &transport->ipaddr))
                                new_transport = transport;
                        else
                                new_transport = sctp_assoc_lookup_paddr(asoc,
                                                                &chunk->dest);

                        /* If we still don't have a new transport, then
                         * use the current active path.
                         */
                        if (!new_transport)
                                new_transport = asoc->peer.active_path;
                } else if ((new_transport->state == SCTP_INACTIVE) ||
                           (new_transport->state == SCTP_UNCONFIRMED) ||
                           (new_transport->state == SCTP_PF)) {
                        /* If the chunk is a Heartbeat, Heartbeat Ack or an
                         * ASCONF Ack, keep sending it to chunk->transport
                         * even though that transport is not usable: those
                         * replies must go back to the address the
                         * corresponding request came from.  Everything else
                         * is redirected to the active path.
                         */
                        if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT &&
                            chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK &&
                            chunk->chunk_hdr->type != SCTP_CID_ASCONF_ACK)
                                new_transport = asoc->peer.active_path;
                }

                /* Are we switching transports?  Schedule the new transport
                 * to be flushed and (re)configure its packet.
                 */
                if (new_transport != transport) {
                        transport = new_transport;
                        if (list_empty(&transport->send_ready)) {
                                list_add_tail(&transport->send_ready,
                                              &transport_list);
                        }
                        packet = &transport->packet;
                        sctp_packet_config(packet, vtag,
                                           asoc->peer.ecn_capable);
                }

                switch (chunk->chunk_hdr->type) {
                /* RFC 2960 6.10 Bundling: an endpoint MUST NOT bundle INIT,
                 * INIT ACK or SHUTDOWN COMPLETE with any other chunks, so
                 * send these as singletons.
                 */
                case SCTP_CID_INIT:
                case SCTP_CID_INIT_ACK:
                case SCTP_CID_SHUTDOWN_COMPLETE:
                        sctp_packet_init(&singleton, transport, sport, dport);
                        sctp_packet_config(&singleton, vtag, 0);
                        sctp_packet_append_chunk(&singleton, chunk);
                        error = sctp_packet_transmit(&singleton);
                        if (error < 0)
                                return error;
                        break;

                case SCTP_CID_ABORT:
                        if (sctp_test_T_bit(chunk))
                                packet->vtag = asoc->c.my_vtag;
                        /* fall through */

                        /* The following chunks are "response" chunks, i.e.
                         * they are generated in response to something we
                         * received.  If we are sending these, then we can
                         * send only 1 packet containing these chunks.
                         */
                case SCTP_CID_HEARTBEAT_ACK:
                case SCTP_CID_SHUTDOWN_ACK:
                case SCTP_CID_COOKIE_ACK:
                case SCTP_CID_COOKIE_ECHO:
                case SCTP_CID_ERROR:
                case SCTP_CID_ECN_CWR:
                case SCTP_CID_ASCONF_ACK:
                        one_packet = 1;
                        /* fall through */

                case SCTP_CID_SACK:
                case SCTP_CID_HEARTBEAT:
                case SCTP_CID_SHUTDOWN:
                case SCTP_CID_ECN_ECNE:
                case SCTP_CID_ASCONF:
                case SCTP_CID_FWD_TSN:
                        status = sctp_packet_transmit_chunk(packet, chunk,
                                                            one_packet);
                        if (status != SCTP_XMIT_OK) {
                                /* Put the chunk back so it can be sent the
                                 * next time the queue is flushed.
                                 */
                                list_add(&chunk->list, &q->control_chunk_list);
                        } else {
                                asoc->stats.octrlchunks++;
                                /* PR-SCTP C5) If a FORWARD TSN is sent, the
                                 * sender MUST assure that at least one T3-rtx
                                 * timer is running.
                                 */
                                if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN)
                                        sctp_transport_reset_timers(transport);
                        }
                        break;

                default:
                        /* We built a chunk with an illegal type! */
                        BUG();
                }
        }

        if (q->asoc->src_out_of_asoc_ok)
                goto sctp_flush_out;

        /* Is it OK to send data chunks? */
        switch (asoc->state) {
        case SCTP_STATE_COOKIE_ECHOED:
                /* Only allow bundling when this packet has a COOKIE-ECHO
                 * chunk.
                 */
                if (!packet || !packet->has_cookie_echo)
                        break;

                /* fall through */
        case SCTP_STATE_ESTABLISHED:
        case SCTP_STATE_SHUTDOWN_PENDING:
        case SCTP_STATE_SHUTDOWN_RECEIVED:
                /* RFC 2960 6.1 Transmission of DATA Chunks
                 *
                 * C) When the time comes for the sender to transmit,
                 * before sending new DATA chunks, the sender MUST
                 * first transmit any outstanding DATA chunks which
                 * are marked for retransmission (limited by the
                 * current cwnd).
                 */
                if (!list_empty(&q->retransmit)) {
                        if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
                                goto sctp_flush_out;
                        if (transport == asoc->peer.retran_path)
                                goto retran;

                        /* Switch transports & prepare the packet. */
                        transport = asoc->peer.retran_path;

                        if (list_empty(&transport->send_ready)) {
                                list_add_tail(&transport->send_ready,
                                              &transport_list);
                        }

                        packet = &transport->packet;
                        sctp_packet_config(packet, vtag,
                                           asoc->peer.ecn_capable);
                retran:
                        error = sctp_outq_flush_rtx(q, packet,
                                                    rtx_timeout, &start_timer);

                        if (start_timer)
                                sctp_transport_reset_timers(transport);

                        /* This can happen on COOKIE-ECHO resend.  Only
                         * one chunk can get bundled with a COOKIE-ECHO.
                         */
                        if (packet->has_cookie_echo)
                                goto sctp_flush_out;

                        /* Don't send new data if there is still data
                         * waiting to retransmit.
                         */
                        if (!list_empty(&q->retransmit))
                                goto sctp_flush_out;
                }

                /* Apply the Max.Burst limit to the current transport since
                 * fresh data is about to be sent on it; the limit is reset
                 * for every scheduled transport before returning.
                 */
                if (transport)
                        sctp_transport_burst_limited(transport);

                /* Finally, transmit new packets. */
                while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
                        /* RFC 2960 6.5 Every DATA chunk MUST carry a valid
                         * stream identifier.
                         */
                        if (chunk->sinfo.sinfo_stream >=
                            asoc->c.sinit_num_ostreams) {

                                /* Mark as failed send. */
                                sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
                                sctp_chunk_free(chunk);
                                continue;
                        }

                        /* Has this chunk expired? */
                        if (sctp_chunk_abandoned(chunk)) {
                                sctp_chunk_fail(chunk, 0);
                                sctp_chunk_free(chunk);
                                continue;
                        }

                        /* If there is a specified transport, use it.
                         * Otherwise, we want to use the active path.
                         */
                        new_transport = chunk->transport;
                        if (!new_transport ||
                            ((new_transport->state == SCTP_INACTIVE) ||
                             (new_transport->state == SCTP_UNCONFIRMED) ||
                             (new_transport->state == SCTP_PF)))
                                new_transport = asoc->peer.active_path;
                        if (new_transport->state == SCTP_UNCONFIRMED)
                                continue;

                        /* Change packets if necessary. */
                        if (new_transport != transport) {
                                transport = new_transport;

                                /* Schedule to have this transport's
                                 * packet flushed.
                                 */
                                if (list_empty(&transport->send_ready)) {
                                        list_add_tail(&transport->send_ready,
                                                      &transport_list);
                                }

                                packet = &transport->packet;
                                sctp_packet_config(packet, vtag,
                                                   asoc->peer.ecn_capable);
                                /* We've switched transports, so apply the
                                 * Burst limit to the new transport.
                                 */
                                sctp_transport_burst_limited(transport);
                        }

                        pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p "
                                 "skb->users:%d\n",
                                 __func__, q, chunk, chunk && chunk->chunk_hdr ?
                                 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
                                 "illegal chunk", ntohl(chunk->subh.data_hdr->tsn),
                                 chunk->skb ? chunk->skb->head : NULL, chunk->skb ?
                                 atomic_read(&chunk->skb->users) : -1);

                        /* Add the chunk to the packet. */
                        status = sctp_packet_transmit_chunk(packet, chunk, 0);

                        switch (status) {
                        case SCTP_XMIT_PMTU_FULL:
                        case SCTP_XMIT_RWND_FULL:
                        case SCTP_XMIT_NAGLE_DELAY:
                                /* We could not append this chunk, so put
                                 * the chunk back on the output queue.
                                 */
                                pr_debug("%s: could not transmit tsn:0x%x, status:%d\n",
                                         __func__, ntohl(chunk->subh.data_hdr->tsn),
                                         status);

                                sctp_outq_head_data(q, chunk);
                                goto sctp_flush_out;

                        case SCTP_XMIT_OK:
                                /* The sender is in the SHUTDOWN-PENDING
                                 * state, so request an immediate SACK by
                                 * setting the I-bit.
                                 */
                                if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
                                        chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;

                                if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
                                        asoc->stats.ouodchunks++;
                                else
                                        asoc->stats.oodchunks++;

                                break;

                        default:
                                BUG();
                        }

                        /* Add the chunk to the transport's transmitted list
                         * and restart its timers.  We optimistically assume
                         * the eventual packet transmission will succeed; if
                         * it fails (e.g. under memory pressure) the chunk
                         * simply behaves like a lost TSN.
                         */
                        list_add_tail(&chunk->transmitted_list,
                                      &transport->transmitted);

                        sctp_transport_reset_timers(transport);

                        q->empty = 0;

                        /* Only let one DATA chunk get bundled with a
                         * COOKIE-ECHO chunk.
                         */
                        if (packet->has_cookie_echo)
                                goto sctp_flush_out;
                }
                break;

        default:
                /* Do nothing. */
                break;
        }

sctp_flush_out:

        /* Before returning, examine all the transports touched in
         * this call.  Right now, we bluntly force clear all the
         * transports.  Someday, we may consider doing this in a
         * better way.
         */
        while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL) {
                struct sctp_transport *t = list_entry(ltransport,
                                                      struct sctp_transport,
                                                      send_ready);
                packet = &t->packet;
                if (!sctp_packet_empty(packet))
                        error = sctp_packet_transmit(packet);

                /* Clear the burst limited state, if any */
                sctp_transport_burst_reset(t);
        }

        return error;
}

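/* Update unack_data based on the incoming SACK chunk. */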
static void sctp_sack_update_unack_data(struct sctp_association *assoc,
                                        struct sctp_sackhdr *sack)
{
        sctp_sack_variable_t *frags;
        __u16 unack_data;
        int i;

        unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1;

        frags = sack->variable;
        for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) {
                unack_data -= ((ntohs(frags[i].gab.end) -
                                ntohs(frags[i].gab.start) + 1));
        }

        assoc->unack_data = unack_data;
}

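/* This is where we REALLY process a SACK.
 *
 * Process the SACK against the outqueue.  Mostly, this just frees
 * things on the transmitted and retransmit queues.
 */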
int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
{
        struct sctp_association *asoc = q->asoc;
        struct sctp_sackhdr *sack = chunk->subh.sack_hdr;
        struct sctp_transport *transport;
        struct sctp_chunk *tchunk = NULL;
        struct list_head *lchunk, *transport_list, *temp;
        sctp_sack_variable_t *frags = sack->variable;
        __u32 sack_ctsn, ctsn, tsn;
        __u32 highest_tsn, highest_new_tsn;
        __u32 sack_a_rwnd;
        unsigned int outstanding;
        struct sctp_transport *primary = asoc->peer.primary_path;
        int count_of_newacks = 0;
        int gap_ack_blocks;
        u8 accum_moved = 0;

        /* Grab the association's destination address list. */
        transport_list = &asoc->peer.transport_addr_list;

        sack_ctsn = ntohl(sack->cum_tsn_ack);
        gap_ack_blocks = ntohs(sack->num_gap_ack_blocks);
        asoc->stats.gapcnt += gap_ack_blocks;

        /* SFR-CACC algorithm:
         * On receipt of a SACK the sender SHOULD execute the
         * following statements.
         *
         * 1) If the cumulative ack in the SACK passes next tsn_at_change
         * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be
         * cleared.  The CYCLING_CHANGEOVER flag SHOULD also be cleared
         * for all destinations.
         * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE
         * is set, initialize cacc_saw_newack to 0 for all destination
         * addresses.
         *
         * Only bother if changeover_active is set.  Otherwise, this is
         * totally suboptimal to do on every SACK.
         */
        if (primary->cacc.changeover_active) {
                u8 clear_cycling = 0;

                if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) {
                        primary->cacc.changeover_active = 0;
                        clear_cycling = 1;
                }

                if (clear_cycling || gap_ack_blocks) {
                        list_for_each_entry(transport, transport_list,
                                            transports) {
                                if (clear_cycling)
                                        transport->cacc.cycling_changeover = 0;
                                if (gap_ack_blocks)
                                        transport->cacc.cacc_saw_newack = 0;
                        }
                }
        }

        /* Get the highest TSN in the sack. */
        highest_tsn = sack_ctsn;
        if (gap_ack_blocks)
                highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end);

        if (TSN_lt(asoc->highest_sacked, highest_tsn))
                asoc->highest_sacked = highest_tsn;

        highest_new_tsn = sack_ctsn;

        /* Run through the retransmit queue.  Credit bytes received
         * and free those chunks that we can.
         */
        sctp_check_transmitted(q, &q->retransmit, NULL, NULL, sack,
                               &highest_new_tsn);

        /* Run through the transmitted queue.
         * Credit bytes received and free those chunks which we can.
         *
         * This is a MASSIVE candidate for optimization.
         */
        list_for_each_entry(transport, transport_list, transports) {
                sctp_check_transmitted(q, &transport->transmitted,
                                       transport, &chunk->source, sack,
                                       &highest_new_tsn);
                /* SFR-CACC algorithm:
                 * C) Let count_of_newacks be the number of
                 * destinations for which cacc_saw_newack is set.
                 */
                if (transport->cacc.cacc_saw_newack)
                        count_of_newacks++;
        }

        /* Move the Cumulative TSN Ack Point if appropriate. */
        if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) {
                asoc->ctsn_ack_point = sack_ctsn;
                accum_moved = 1;
        }

        if (gap_ack_blocks) {

                if (asoc->fast_recovery && accum_moved)
                        highest_new_tsn = highest_tsn;

                list_for_each_entry(transport, transport_list, transports)
                        sctp_mark_missing(q, &transport->transmitted, transport,
                                          highest_new_tsn, count_of_newacks);
        }

        /* Update unack_data field in the assoc. */
        sctp_sack_update_unack_data(asoc, sack);

        ctsn = asoc->ctsn_ack_point;

        /* Throw away stuff rotting on the sack queue. */
        list_for_each_safe(lchunk, temp, &q->sacked) {
                tchunk = list_entry(lchunk, struct sctp_chunk,
                                    transmitted_list);
                tsn = ntohl(tchunk->subh.data_hdr->tsn);
                if (TSN_lte(tsn, ctsn)) {
                        list_del_init(&tchunk->transmitted_list);
                        sctp_chunk_free(tchunk);
                }
        }

        /* ii) Set rwnd equal to the newly received a_rwnd minus the
         *     number of bytes still outstanding after processing the
         *     Cumulative TSN Ack and the Gap Ack Blocks.
         */
        sack_a_rwnd = ntohl(sack->a_rwnd);
        outstanding = q->outstanding_bytes;

        if (outstanding < sack_a_rwnd)
                sack_a_rwnd -= outstanding;
        else
                sack_a_rwnd = 0;

        asoc->peer.rwnd = sack_a_rwnd;

        sctp_generate_fwdtsn(q, sack_ctsn);

        pr_debug("%s: sack cumulative tsn ack:0x%x\n", __func__, sack_ctsn);
        pr_debug("%s: cumulative tsn ack of assoc:%p is 0x%x, "
                 "advertised peer ack point:0x%x\n", __func__, asoc, ctsn,
                 asoc->adv_peer_ack_point);

        /* See if all chunks are acked.  The outqueue is empty only when the
         * pending queue, the retransmit queue and every transport's
         * transmitted queue are all empty.
         */
        q->empty = (list_empty(&q->out_chunk_list) &&
                    list_empty(&q->retransmit));
        if (!q->empty)
                goto finish;

        list_for_each_entry(transport, transport_list, transports) {
                q->empty = q->empty && list_empty(&transport->transmitted);
                if (!q->empty)
                        goto finish;
        }

        pr_debug("%s: sack queue is empty\n", __func__);
finish:
        return q->empty;
}

/* Is the outqueue empty? */
int sctp_outq_is_empty(const struct sctp_outq *q)
{
        return q->empty;
}

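/* Go through a transport's transmitted list or the association's retransmit
 * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked.
 * The retransmit list will not have an associated transport.
 *
 * Chunks acked only by Gap Ack Blocks stay on the transmitted list but are
 * marked tsn_gap_acked.
 */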
static void sctp_check_transmitted(struct sctp_outq *q,
                                   struct list_head *transmitted_queue,
                                   struct sctp_transport *transport,
                                   union sctp_addr *saddr,
                                   struct sctp_sackhdr *sack,
                                   __u32 *highest_new_tsn_in_sack)
{
        struct list_head *lchunk;
        struct sctp_chunk *tchunk;
        struct list_head tlist;
        __u32 tsn;
        __u32 sack_ctsn;
        __u32 rtt;
        __u8 restart_timer = 0;
        int bytes_acked = 0;
        int migrate_bytes = 0;
        bool forward_progress = false;

        sack_ctsn = ntohl(sack->cum_tsn_ack);

        INIT_LIST_HEAD(&tlist);

        /* The while loop will skip empty transmitted queues. */
        while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) {
                tchunk = list_entry(lchunk, struct sctp_chunk,
                                    transmitted_list);

                if (sctp_chunk_abandoned(tchunk)) {
                        /* Move the chunk to the abandoned list. */
                        sctp_insert_list(&q->abandoned, lchunk);

                        /* If this chunk has not been acked, stop
                         * considering it as 'outstanding'.
                         */
                        if (!tchunk->tsn_gap_acked) {
                                if (tchunk->transport)
                                        tchunk->transport->flight_size -=
                                                        sctp_data_size(tchunk);
                                q->outstanding_bytes -= sctp_data_size(tchunk);
                        }
                        continue;
                }

                tsn = ntohl(tchunk->subh.data_hdr->tsn);
                if (sctp_acked(sack, tsn)) {
                        /* If this queue is the retransmit queue, the
                         * retransmit timer has already reclaimed the
                         * outstanding bytes for this chunk, so only
                         * count bytes associated with a transport.
                         */
                        if (transport) {
                                /* If this chunk is being used for RTT
                                 * measurement, calculate the RTT and update
                                 * the RTO using this value.
                                 *
                                 * 6.3.1 C5) Karn's algorithm: RTT measurements
                                 * MUST NOT be made using packets that were
                                 * retransmitted (and thus for which it is
                                 * ambiguous whether the reply was for the
                                 * first instance of the packet or a later
                                 * instance).
                                 */
                                if (!tchunk->tsn_gap_acked &&
                                    tchunk->rtt_in_progress) {
                                        tchunk->rtt_in_progress = 0;
                                        rtt = jiffies - tchunk->sent_at;
                                        sctp_transport_update_rto(transport,
                                                                  rtt);
                                }
                        }

                        /* If the chunk hasn't been marked as ACKED,
                         * mark it and account bytes_acked if the
                         * chunk had a valid transport (it will not
                         * have a transport if ASCONF had deleted it
                         * while DATA was outstanding).
                         */
                        if (!tchunk->tsn_gap_acked) {
                                tchunk->tsn_gap_acked = 1;
                                *highest_new_tsn_in_sack = tsn;
                                bytes_acked += sctp_data_size(tchunk);
                                if (!tchunk->transport)
                                        migrate_bytes += sctp_data_size(tchunk);
                                forward_progress = true;
                        }

                        if (TSN_lte(tsn, sack_ctsn)) {
                                /* RFC 2960  6.3.2 Retransmission Timer Rules
                                 *
                                 * R3) Whenever a SACK is received
                                 * that acknowledges the DATA chunk
                                 * with the earliest outstanding TSN
                                 * for that address, restart T3-rtx
                                 * timer for that address with its
                                 * current RTO.
                                 */
                                restart_timer = 1;
                                forward_progress = true;

                                if (!tchunk->tsn_gap_acked) {
                                        /*
                                         * SFR-CACC algorithm:
                                         * 2) If the SACK contains gap acks
                                         * and the flag CHANGEOVER_ACTIVE is
                                         * set, the receiver of the SACK MUST
                                         * take the following action:
                                         *
                                         * B) For each TSN t being acked that
                                         * has not been acked in any SACK so
                                         * far, set cacc_saw_newack to 1 for
                                         * the destination that the TSN was
                                         * sent to.
                                         */
                                        if (transport &&
                                            sack->num_gap_ack_blocks &&
                                            q->asoc->peer.primary_path->cacc.
                                            changeover_active)
                                                transport->cacc.cacc_saw_newack
                                                        = 1;
                                }

                                list_add_tail(&tchunk->transmitted_list,
                                              &q->sacked);
                        } else {
                                /* The chunk is acknowledged only by a Gap Ack
                                 * Block and sits above the Cumulative TSN Ack
                                 * Point, so keep it on the transmitted list;
                                 * the receiver may still renege on it.
                                 */
                                list_add_tail(lchunk, &tlist);
                        }
                } else {
                        if (tchunk->tsn_gap_acked) {
                                pr_debug("%s: receiver reneged on data TSN:0x%x\n",
                                         __func__, tsn);

                                tchunk->tsn_gap_acked = 0;

                                if (tchunk->transport)
                                        bytes_acked -= sctp_data_size(tchunk);

                                /* RFC 2960 6.3.2 Retransmission Timer Rules
                                 *
                                 * R4) Whenever a SACK is received missing a
                                 * TSN that was previously acknowledged via a
                                 * Gap Ack Block, start T3-rtx for the
                                 * destination address to which the DATA
                                 * chunk was originally transmitted if it is
                                 * not already running.
                                 */
                                restart_timer = 1;
                        }

                        list_add_tail(lchunk, &tlist);
                }
        }

        if (transport) {
                if (bytes_acked) {
                        struct sctp_association *asoc = transport->asoc;

                        /* We may have counted DATA that was migrated
                         * to this transport due to a DEL-IP operation.
                         * Subtract those bytes, since the migrated chunks
                         * are not accounted for on this transport.
                         */
                        bytes_acked -= migrate_bytes;

                        /* When an outstanding TSN is acknowledged, clear the
                         * error counter of the destination transport address
                         * to which the DATA chunk was last sent.  The
                         * association's overall error counter is also
                         * cleared.
                         */
                        transport->error_count = 0;
                        transport->asoc->overall_error_count = 0;
                        forward_progress = true;

                        /* While in SHUTDOWN PENDING, we may have started
                         * the T5 shutdown guard timer after reaching the
                         * retransmission limit.  Stop that timer as soon
                         * as the receiver acknowledged any data.
                         */
                        if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
                            del_timer(&asoc->timers
                                      [SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
                                sctp_association_put(asoc);

                        /* Mark the destination transport address as
                         * active if it is not so marked.
                         */
                        if ((transport->state == SCTP_INACTIVE ||
                             transport->state == SCTP_UNCONFIRMED) &&
                            sctp_cmp_addr_exact(&transport->ipaddr, saddr)) {
                                sctp_assoc_control_transport(
                                        transport->asoc,
                                        transport,
                                        SCTP_TRANSPORT_UP,
                                        SCTP_RECEIVED_SACK);
                        }

                        sctp_transport_raise_cwnd(transport, sack_ctsn,
                                                  bytes_acked);

                        transport->flight_size -= bytes_acked;
                        if (transport->flight_size == 0)
                                transport->partial_bytes_acked = 0;
                        q->outstanding_bytes -= bytes_acked + migrate_bytes;
                } else {
                        /* When doing zero window probing, the sender should
                         * not time out the association as long as the
                         * receiver keeps responding, because the receiver
                         * MAY keep its window closed for an indefinite time.
                         * So clear the error counters when a SACK for a zero
                         * window probe arrives.
                         */
                        if (!q->asoc->peer.rwnd &&
                            !list_empty(&tlist) &&
                            (sack_ctsn+2 == q->asoc->next_tsn) &&
                            q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
                                pr_debug("%s: sack received for zero window "
                                         "probe:%u\n", __func__, sack_ctsn);

                                q->asoc->overall_error_count = 0;
                                transport->error_count = 0;
                        }
                }

                /* RFC 2960 6.3.2 Retransmission Timer Rules
                 *
                 * R2) Whenever all outstanding data sent to an address have
                 * been acknowledged, turn off the T3-rtx timer of that
                 * address.
                 */
                if (!transport->flight_size) {
                        if (del_timer(&transport->T3_rtx_timer))
                                sctp_transport_put(transport);
                } else if (restart_timer) {
                        if (!mod_timer(&transport->T3_rtx_timer,
                                       jiffies + transport->rto))
                                sctp_transport_hold(transport);
                }

                if (forward_progress) {
                        if (transport->dst)
                                dst_confirm(transport->dst);
                }
        }

        list_splice(&tlist, transmitted_queue);
}

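/* Mark chunks as missing and consequently may get retransmitted. */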
static void sctp_mark_missing(struct sctp_outq *q,
                              struct list_head *transmitted_queue,
                              struct sctp_transport *transport,
                              __u32 highest_new_tsn_in_sack,
                              int count_of_newacks)
{
        struct sctp_chunk *chunk;
        __u32 tsn;
        char do_fast_retransmit = 0;
        struct sctp_association *asoc = q->asoc;
        struct sctp_transport *primary = asoc->peer.primary_path;

        list_for_each_entry(chunk, transmitted_queue, transmitted_list) {

                tsn = ntohl(chunk->subh.data_hdr->tsn);

                /* RFC 2960 7.2.4: if the TSN of an unacknowledged chunk is
                 * smaller than the highest newly acknowledged TSN, increment
                 * its 'TSN.Missing.Report' counter, provided the chunk is
                 * still eligible for fast retransmit.
                 */
                if (chunk->fast_retransmit == SCTP_CAN_FRTX &&
                    !chunk->tsn_gap_acked &&
                    TSN_lt(tsn, highest_new_tsn_in_sack)) {

                        /* SFR-CACC may require us to skip marking
                         * this chunk as missing.
                         */
                        if (!transport || !sctp_cacc_skip(primary,
                                                          chunk->transport,
                                                          count_of_newacks, tsn)) {
                                chunk->tsn_missing_report++;

                                pr_debug("%s: tsn:0x%x missing counter:%d\n",
                                         __func__, tsn, chunk->tsn_missing_report);
                        }
                }

                /* If any DATA chunk is found to have a 'TSN.Missing.Report'
                 * value larger than or equal to 3, mark that chunk for
                 * fast retransmission.
                 */
                if (chunk->tsn_missing_report >= 3) {
                        chunk->fast_retransmit = SCTP_NEED_FRTX;
                        do_fast_retransmit = 1;
                }
        }

        if (transport) {
                if (do_fast_retransmit)
                        sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);

                pr_debug("%s: transport:%p, cwnd:%d, ssthresh:%d, "
                         "flight_size:%d, pba:%d\n", __func__, transport,
                         transport->cwnd, transport->ssthresh,
                         transport->flight_size, transport->partial_bytes_acked);
        }
}

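/* Is the given TSN acked by this packet? */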
static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
{
        int i;
        sctp_sack_variable_t *frags;
        __u16 gap;
        __u32 ctsn = ntohl(sack->cum_tsn_ack);

        if (TSN_lte(tsn, ctsn))
                goto pass;

        /* RFC 2960 3.3.4 Selective Acknowledgement (SACK):
         *
         * Gap Ack Blocks:
         *  All DATA chunks with TSNs greater than or equal to
         *  (Cumulative TSN Ack + Gap Ack Block Start) and less than or
         *  equal to (Cumulative TSN Ack + Gap Ack Block End) of each Gap
         *  Ack Block are assumed to have been received correctly.
         */
        frags = sack->variable;
        gap = tsn - ctsn;
        for (i = 0; i < ntohs(sack->num_gap_ack_blocks); ++i) {
                if (TSN_lte(ntohs(frags[i].gab.start), gap) &&
                    TSN_lte(gap, ntohs(frags[i].gab.end)))
                        goto pass;
        }

        return 0;
pass:
        return 1;
}

static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
                                    int nskips, __be16 stream)
{
        int i;

        for (i = 0; i < nskips; i++) {
                if (skiplist[i].stream == stream)
                        return i;
        }
        return i;
}

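/* Create and add a FWD TSN chunk to the outq's control queue if needed. */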
static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
{
        struct sctp_association *asoc = q->asoc;
        struct sctp_chunk *ftsn_chunk = NULL;
        struct sctp_fwdtsn_skip ftsn_skip_arr[10];
        int nskips = 0;
        int skip_pos = 0;
        __u32 tsn;
        struct sctp_chunk *chunk;
        struct list_head *lchunk, *temp;

        if (!asoc->peer.prsctp_capable)
                return;

        /* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in
         * the received SACK.
         *
         * If (Advanced.Peer.Ack.Point < SackCumAck), then update
         * Advanced.Peer.Ack.Point to be equal to SackCumAck.
         */
        if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
                asoc->adv_peer_ack_point = ctsn;

        /* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point"
         * locally, that is, to move "Advanced.Peer.Ack.Point" up as long as
         * the chunk next in the out-queue space is marked as "abandoned".
         * While doing so, record the stream and sequence number of each
         * skipped ordered chunk for the FORWARD TSN being built.
         */
        list_for_each_safe(lchunk, temp, &q->abandoned) {
                chunk = list_entry(lchunk, struct sctp_chunk,
                                   transmitted_list);
                tsn = ntohl(chunk->subh.data_hdr->tsn);

                /* Remove any chunks in the abandoned queue that are acked by
                 * the ctsn.
                 */
                if (TSN_lte(tsn, ctsn)) {
                        list_del_init(lchunk);
                        sctp_chunk_free(chunk);
                } else {
                        if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) {
                                asoc->adv_peer_ack_point = tsn;
                                if (chunk->chunk_hdr->flags &
                                    SCTP_DATA_UNORDERED)
                                        continue;
                                skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0],
                                                             nskips,
                                                             chunk->subh.data_hdr->stream);
                                ftsn_skip_arr[skip_pos].stream =
                                        chunk->subh.data_hdr->stream;
                                ftsn_skip_arr[skip_pos].ssn =
                                        chunk->subh.data_hdr->ssn;
                                if (skip_pos == nskips)
                                        nskips++;
                                if (nskips == 10)
                                        break;
                        } else
                                break;
                }
        }

        /* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point"
         * is greater than the Cumulative TSN ACK carried in the received
         * SACK, the data sender MUST send the data receiver a FORWARD TSN
         * chunk containing the latest value of the
         * "Advanced.Peer.Ack.Point".
         *
         * C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD
         * determine if the chunk has a valid stream and sequence number
         * and, if so, include the stream/sequence pair in the FORWARD TSN
         * chunk.
         */
        if (asoc->adv_peer_ack_point > ctsn)
                ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point,
                                              nskips, &ftsn_skip_arr[0]);

        if (ftsn_chunk) {
                list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
                SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
        }
}