/* SCTP kernel implementation
 *
 * This file implements the Upper Layer Protocol (ULP) queue for an SCTP
 * association.  Incoming DATA is reassembled here (by TSN) and ordered
 * (by stream id and SSN) before the resulting events are delivered to the
 * owning socket's receive queue, with support for partial delivery when
 * receive buffer space runs low.
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers. */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
					     struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);

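/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory.  */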
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
				 struct sctp_association *asoc)
{
	memset(ulpq, 0, sizeof(struct sctp_ulpq));

	ulpq->asoc = asoc;
	skb_queue_head_init(&ulpq->reasm);
	skb_queue_head_init(&ulpq->lobby);
	ulpq->pd_mode = 0;

	return ulpq;
}

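/* Flush the reassembly and ordering queues.  */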
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
	struct sk_buff *skb;
	struct sctp_ulpevent *event;

	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}
}

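/* Dispose of a ulpqueue.  */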
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
	sctp_ulpq_flush(ulpq);
}

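/* Process an incoming DATA chunk.  Returns 1 if the delivered event ends
 * a message (MSG_EOR), 0 if nothing was delivered or the event covers
 * only part of a message, and -ENOMEM if the event could not be created.
 */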
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			gfp_t gfp)
{
	struct sk_buff_head temp;
	struct sctp_ulpevent *event;
	int event_eor = 0;

	/* Create an event from the incoming chunk. */
	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	/* Do reassembly if needed.  */
	event = sctp_ulpq_reasm(ulpq, event);

	/* Do ordering if needed.  */
	if ((event) && (event->msg_flags & MSG_EOR)) {
		/* Create a temporary list to collect chunks on.  */
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		event = sctp_ulpq_order(ulpq, event);
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for
	 * the very first SKB on the 'temp' list.
	 */
	if (event) {
		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
		sctp_ulpq_tail_event(ulpq, event);
	}

	return event_eor;
}

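/* Clear the partial delivery mode on the socket.  When the last
 * association leaves partial delivery, move everything held in the
 * socket's pd_lobby to the receive queue.  Returns 1 if the lobby was
 * flushed to the receive queue, 0 otherwise.
 */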
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
	struct sctp_sock *sp = sctp_sk(sk);

	if (atomic_dec_and_test(&sp->pd_mode)) {
		/* This means there are no other associations in PD, so
		 * we can go ahead and clear out the lobby in one shot.
		 */
		if (!skb_queue_empty(&sp->pd_lobby)) {
			struct list_head *list;
			sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
			list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
			INIT_LIST_HEAD(list);
			return 1;
		}
	} else {
		/* There are other associations in PD, so we only need to
		 * pull stuff out of the lobby that belongs to the
		 * association that is exiting PD (all of its notifications
		 * are posted here).
		 */
		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
			struct sk_buff *skb, *tmp;
			struct sctp_ulpevent *event;

			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
				event = sctp_skb2event(skb);
				if (event->asoc == asoc) {
					__skb_unlink(skb, &sp->pd_lobby);
					__skb_queue_tail(&sk->sk_receive_queue,
							 skb);
				}
			}
		}
	}

	return 0;
}

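/* Set the pd_mode on the socket and ulpq. */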
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

	atomic_inc(&sp->pd_mode);
	ulpq->pd_mode = 1;
}

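/* Clear the pd_mode and restart any pending messages waiting for delivery. */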
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
	ulpq->pd_mode = 0;
	sctp_ulpq_reasm_drain(ulpq);
	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}

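/* If the SKB of 'event' is on a list, it is the first such member of
 * that list.  Deliver the event (and anything collected behind it on
 * the same list) to the upper layer, honouring partial delivery mode.
 * Returns 1 if the event was queued, 0 if it was freed.
 */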
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sk_buff_head *queue, *skb_list;
	struct sk_buff *skb = sctp_event2skb(event);
	int clear_pd = 0;

	skb_list = (struct sk_buff_head *) skb->prev;

	/* If the socket is just going to throw this away, do not
	 * even try to deliver it.
	 */
	if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
		goto out_free;

	/* Check if the user wishes to receive this event.  */
	if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
		goto out_free;

	/* If we are in partial delivery mode, post to the lobby until
	 * partial delivery is cleared, unless, of course _this_ is
	 * the association the buffer is associated with.
	 */
	if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
		queue = &sk->sk_receive_queue;
	} else {
		if (ulpq->pd_mode) {
			/* If the association is in partial delivery, we
			 * need to finish delivering the partially processed
			 * packet before passing any other data.  This is
			 * because we don't truly support stream interleaving.
			 */
			if ((event->msg_flags & MSG_NOTIFICATION) ||
			    (SCTP_DATA_NOT_FRAG ==
				    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
				queue = &sctp_sk(sk)->pd_lobby;
			else {
				clear_pd = event->msg_flags & MSG_EOR;
				queue = &sk->sk_receive_queue;
			}
		} else {
			/* If fragment interleave is enabled, we
			 * can queue this to the receive queue instead
			 * of the lobby.
			 */
			if (sctp_sk(sk)->frag_interleave)
				queue = &sk->sk_receive_queue;
			else
				queue = &sctp_sk(sk)->pd_lobby;
		}
	}

	/* If we are harvesting multiple skbs they will be
	 * collected on a list.
	 */
	if (skb_list)
		sctp_skb_list_tail(skb_list, queue);
	else
		__skb_queue_tail(queue, skb);

	/* Did we just complete partial delivery and need to get
	 * rolling again?  Move pending skbs to the receive queue.
	 */
	if (clear_pd)
		sctp_ulpq_clear_pd(ulpq);

	if (queue == &sk->sk_receive_queue)
		sk->sk_data_ready(sk, 0);
	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}

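/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled.
 * The reassembly queue is kept sorted by TSN.
 */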
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
				  struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u32 tsn, ctsn;

	tsn = event->tsn;

	/* See if it belongs at the end. */
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Short circuit just dropping it at the end. */
	cevent = sctp_skb2event(pos);
	ctsn = cevent->tsn;
	if (TSN_lt(ctsn, tsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by TSN.  */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		if (TSN_lt(tsn, ctsn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}

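/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last
 * skb's as stored in the reassembly queue.  The skb's may be non-linear
 * if the sctp skb fragmentation is in use.
 */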
static struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
	struct sk_buff_head *queue, struct sk_buff *f_frag,
	struct sk_buff *l_frag)
{
	struct sk_buff *pos;
	struct sk_buff *new = NULL;
	struct sctp_ulpevent *event;
	struct sk_buff *pnext, *last;
	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

	/* Store the pointer to the 2nd skb */
	if (f_frag == l_frag)
		pos = NULL;
	else
		pos = f_frag->next;

	/* Get the last skb in the f_frag's frag_list if present. */
	for (last = list; list; last = list, list = list->next);

	/* Add the list of remaining fragments to the first fragment's
	 * frag_list.
	 */
	if (last)
		last->next = pos;
	else {
		if (skb_cloned(f_frag)) {
			/* This is a cloned skb, we can't just modify
			 * the frag_list.  We need a new skb to do that.
			 * Instead of calling skb_unshare(), we'll do it
			 * ourselves since we need to delay the free.
			 */
			new = skb_copy(f_frag, GFP_ATOMIC);
			if (!new)
				return NULL;

			sctp_skb_set_owner_r(new, f_frag->sk);

			skb_shinfo(new)->frag_list = pos;
		} else
			skb_shinfo(f_frag)->frag_list = pos;
	}

	/* Remove the first fragment from the reassembly queue.  */
	__skb_unlink(f_frag, queue);

	/* If we did copy an unshared skb, free the old one and re-assign. */
	if (new) {
		kfree_skb(f_frag);
		f_frag = new;
	}

	while (pos) {

		pnext = pos->next;

		/* Update the len and data_len fields of the first fragment. */
		f_frag->len += pos->len;
		f_frag->data_len += pos->len;

		/* Remove the fragment from the reassembly queue.  */
		__skb_unlink(pos, queue);

		/* Break if we have reached the last fragment.  */
		if (pos == l_frag)
			break;
		pos->next = pnext;
		pos = pnext;
	}

	event = sctp_skb2event(f_frag);
	SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);

	return event;
}

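/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in an SCTP datagram and return the corresponding event.
 */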
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	struct sk_buff *first_frag = NULL;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	size_t pd_len = 0;
	struct sctp_association *asoc;
	u32 pd_point;

	/* Initialized to 0 just to avoid a compiler warning.  Will never
	 * be used with this value; it is referenced only after it is set
	 * when we find the first fragment of a message.
	 */
	next_tsn = 0;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that complete a datagram.
	 * 'first_frag' and next_tsn are reset when we find a chunk which
	 * is the first fragment of a datagram; once they are set we expect
	 * to find the remaining middle fragments and the last fragment.
	 * pd_first, pd_last and pd_len track the run of consecutive
	 * fragments at the head of the queue so that a partial delivery
	 * can be made once the partial delivery point is exceeded.
	 */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* If this "FIRST_FRAG" is the first element in
			 * the queue, then count it towards possible
			 * partial delivery.
			 */
			if (pos == ulpq->reasm.next) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			} else {
				pd_first = NULL;
				pd_last = NULL;
				pd_len = 0;
			}

			first_frag = pos;
			next_tsn = ctsn + 1;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if ((first_frag) && (ctsn == next_tsn)) {
				next_tsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else
				first_frag = NULL;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && (ctsn == next_tsn))
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	asoc = ulpq->asoc;
	if (pd_first) {
		/* Make sure we can enter partial delivery.  We can
		 * trigger partial delivery only if fragment interleave
		 * is set, or the socket is not already in partial
		 * delivery.
		 */
		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
			goto done;

		cevent = sctp_skb2event(pd_first);
		pd_point = sctp_sk(asoc->base.sk)->pd_point;
		if (pd_point && pd_point <= pd_len) {
			retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
							     &ulpq->reasm,
							     pd_first,
							     pd_last);
			if (retval)
				sctp_ulpq_set_pd(ulpq);
		}
	}
done:
	return retval;
found:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;
	goto done;
}

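/* Retrieve the next set of fragments of a partially delivered message.  */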
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	int is_last;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for the sequence
	 * of fragmented chunks that continue the message being partially
	 * delivered.
	 */
	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;
	is_last = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag)
				return NULL;
			goto done;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else if (next_tsn == ctsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				first_frag = pos;
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
			return NULL;
		}
	}

	/* We have the fragments we need.  There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag, last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}

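/* Helper function to reassemble chunks.  Hold chunks on the reasm queue
 * that need reassembling.
 */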
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message.  */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;

		/* Do not even bother unless this is the next tsn to
		 * be delivered.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}

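/* Retrieve the first (sequential) set of fragments for partial delivery.  */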
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */
	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				return NULL;
			else
				goto done;
			break;

		default:
			return NULL;
		}
	}

	/* We have the fragments we need.  There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag, last_frag);
	return retval;
}

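/* Flush out stale fragments from the reassembly queue when processing
 * a FORWARD TSN.
 *
 * RFC 3758, Section 3.6: after receiving and processing a FORWARD TSN,
 * the data receiver must remove any partially reassembled message that
 * is still missing one or more TSNs earlier than or equal to the new
 * cumulative TSN point.
 */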
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *event;
	__u32 tsn;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		event = sctp_skb2event(pos);
		tsn = event->tsn;

		/* Since the entire message must have been abandoned by
		 * the sender, we can free every fragment on the list
		 * with a TSN less than or equal to the new cumulative
		 * TSN point.
		 */
		if (TSN_lte(tsn, fwd_tsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		} else
			break;
	}
}

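/* Drain the reassembly queue.  If we just cleared partial delivery, it
 * is possible that the reassembly queue will contain already reassembled
 * messages.  Retrieve any such messages and give them to the user.
 */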
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
	struct sctp_ulpevent *event = NULL;
	struct sk_buff_head temp;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
		/* Do ordering if needed.  */
		if ((event) && (event->msg_flags & MSG_EOR)) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));

			event = sctp_ulpq_order(ulpq, event);
		}

		/* Send event to the ULP.  'event' is the sctp_ulpevent
		 * for the very first SKB on the 'temp' list.
		 */
		if (event)
			sctp_ulpq_tail_event(ulpq, event);
	}
}

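/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */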
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
				       struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_stream *in;
	__u16 sid, csid, cssn;

	sid = event->stream;
	in = &ulpq->asoc->ssnmap->in;

	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

	/* We are holding the chunks by stream, by SSN.  */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		if (cssn != sctp_ssn_peek(in, sid))
			break;

		/* Found it, so mark in the ssnmap. */
		sctp_ssn_next(in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(event_list, pos);
	}
}

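/* Helper function to store chunks needing ordering.  The ordering lobby
 * is kept sorted by stream id and then by SSN.
 */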
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
				    struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u16 sid, csid;
	__u16 ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->stream;
	ssn = event->ssn;

	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by
	 * stream ID and then by SSN.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid > sid)
			break;
		if (csid == sid && SSN_lt(ssn, cssn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}

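/* Order the incoming event against the expected SSN of its stream.
 * Return the event if it can be delivered now, or NULL if it has been
 * held back on the ordering lobby.
 */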
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *in;

	/* Check if this message needs ordering.  */
	if (SCTP_DATA_UNORDERED & event->msg_flags)
		return event;

	/* Note: The stream ID must be verified before this routine.  */
	sid = event->stream;
	ssn = event->ssn;
	in = &ulpq->asoc->ssnmap->in;

	/* Is this the expected SSN for this stream ID?  */
	if (ssn != sctp_ssn_peek(in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next ssn has been received. */
	sctp_ssn_next(in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}

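/* Helper function to gather skbs that have possibly become
 * ordered by forward tsn skipping their dependencies.
 */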
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event;
	struct sctp_stream *in;
	struct sk_buff_head temp;
	struct sk_buff_head *lobby = &ulpq->lobby;
	__u16 csid, cssn;

	in = &ulpq->asoc->ssnmap->in;

	/* We are holding the chunks by stream, by SSN.  */
	skb_queue_head_init(&temp);
	event = NULL;
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		/* See if this ssn has been marked by skipping.  */
		if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			/* Use the first gathered skb as the event.  */
			event = sctp_skb2event(pos);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(&temp, pos);
	}

	/* If we didn't reap any data, see if the next expected SSN
	 * is next on the queue and if so, use that.
	 */
	if (event == NULL && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid == sid && cssn == sctp_ssn_peek(in, csid)) {
			sctp_ssn_next(in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for
	 * the very first SKB on the 'temp' list.
	 */
	if (event) {
		/* See if we have more ordered data that we can deliver.  */
		sctp_ulpq_retrieve_ordered(ulpq, event);
		sctp_ulpq_tail_event(ulpq, event);
	}
}

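/* Skip over an SSN.  This is used during the processing of a
 * Forward-TSN chunk to skip over abandoned ordered data.
 */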
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *in;

	/* Note: The stream ID must be verified before this routine.  */
	in = &ulpq->asoc->ssnmap->in;

	/* Is this an old SSN?  If so ignore. */
	if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
		return;

	/* Mark that we have skipped past this SSN.  */
	sctp_ssn_skip(in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq, sid);
}

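/* Renege events from the tail of the given queue until 'needed' bytes
 * have been freed, without reneging anything at or below the cumulative
 * TSN ACK point.  Returns the number of bytes actually freed.
 */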
static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
				   struct sk_buff_head *list, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn, last_tsn;
	struct sk_buff *skb, *flist, *last;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	while ((skb = skb_peek_tail(list)) != NULL) {
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		/* Don't renege below the Cumulative TSN ACK Point. */
		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
			break;

		/* Events held here may carry additional fragments on
		 * their frag_list, each covering its own TSN; account
		 * for every skb we free.
		 */
		freed += skb_headlen(skb);
		flist = skb_shinfo(skb)->frag_list;
		for (last = flist; flist; flist = flist->next) {
			last = flist;
			freed += skb_headlen(last);
		}
		if (last)
			last_tsn = sctp_skb2event(last)->tsn;
		else
			last_tsn = tsn;

		/* Unlink the event, then renege all applicable TSNs. */
		__skb_unlink(skb, list);
		sctp_ulpevent_free(event);
		while (TSN_lte(tsn, last_tsn)) {
			sctp_tsnmap_renege(tsnmap, tsn);
			tsn++;
		}
		if (freed >= needed)
			return freed;
	}

	return freed;
}

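/* Renege 'needed' bytes from the ordering queue. */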
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}

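/* Renege 'needed' bytes from the reassembly queue. */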
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}

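/* Partial deliver the first message as there is pressure on rwnd. */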
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
				gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;
	struct sctp_sock *sp;
	__u32 ctsn;
	struct sk_buff *skb;

	asoc = ulpq->asoc;
	sp = sctp_sk(asoc->base.sk);

	/* If the association is already in Partial Delivery mode
	 * we have nothing to do.
	 */
	if (ulpq->pd_mode)
		return;

	/* Data must be at or below the Cumulative TSN ACK Point to
	 * start partial delivery.
	 */
	skb = skb_peek(&asoc->ulpq.reasm);
	if (skb != NULL) {
		ctsn = sctp_skb2event(skb)->tsn;
		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
			return;
	}

	/* If the user enabled the fragment interleave socket option,
	 * multiple associations can enter partial delivery.
	 * Otherwise, we can only enter partial delivery if the
	 * socket is not already in partial delivery mode.
	 */
	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
		/* Is partial delivery possible?  */
		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP.  */
		if (event) {
			sctp_ulpq_tail_event(ulpq, event);
			sctp_ulpq_set_pd(ulpq);
			return;
		}
	}
}

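/* Renege some packets to make room for an incoming chunk.  */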
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      gfp_t gfp)
{
	struct sctp_association *asoc;
	__u16 needed, freed;

	asoc = ulpq->asoc;

	if (chunk) {
		needed = ntohs(chunk->chunk_hdr->length);
		needed -= sizeof(sctp_data_chunk_t);
	} else
		needed = SCTP_DEFAULT_MAXWINDOW;

	freed = 0;

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
	}

	/* If we were able to free enough room, accept this chunk. */
	if (chunk && (freed >= needed)) {
		int retval;
		retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
		/* Enter partial delivery if the chunk has not been
		 * delivered; otherwise, drain the reassembly queue.
		 */
		if (retval <= 0)
			sctp_ulpq_partial_delivery(ulpq, gfp);
		else if (retval == 1)
			sctp_ulpq_reasm_drain(ulpq);
	}

	sk_mem_reclaim(asoc->base.sk);
}

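/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */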
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sock *sk;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
				       &sctp_sk(sk)->subscribe))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      gfp);
	if (ev)
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now. */
	if (sctp_ulpq_clear_pd(ulpq) || ev)
		sk->sk_data_ready(sk, 0);
}