1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51#include <linux/types.h>
52#include <linux/list.h>
53#include <linux/socket.h>
54#include <linux/ip.h>
55#include <linux/time.h>
56#include <linux/slab.h>
57#include <net/ip.h>
58#include <net/icmp.h>
59#include <net/snmp.h>
60#include <net/sock.h>
61#include <net/xfrm.h>
62#include <net/sctp/sctp.h>
63#include <net/sctp/sm.h>
64#include <net/sctp/checksum.h>
65#include <net/net_namespace.h>
66
67
/* Forward declarations for internal helpers defined later in this file. */
static int sctp_rcv_ootb(struct sk_buff *);
static struct sctp_association *__sctp_rcv_lookup(struct net *net,
				      struct sk_buff *skb,
				      const union sctp_addr *paddr,
				      const union sctp_addr *laddr,
				      struct sctp_transport **transportp);
static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net,
				      const union sctp_addr *laddr);
static struct sctp_association *__sctp_lookup_association(
					struct net *net,
					const union sctp_addr *local,
					const union sctp_addr *peer,
					struct sctp_transport **pt);

static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
83
84
85
86static inline int sctp_rcv_checksum(struct net *net, struct sk_buff *skb)
87{
88 struct sctphdr *sh = sctp_hdr(skb);
89 __le32 cmp = sh->checksum;
90 struct sk_buff *list;
91 __le32 val;
92 __u32 tmp = sctp_start_cksum((__u8 *)sh, skb_headlen(skb));
93
94 skb_walk_frags(skb, list)
95 tmp = sctp_update_cksum((__u8 *)list->data, skb_headlen(list),
96 tmp);
97
98 val = sctp_end_cksum(tmp);
99
100 if (val != cmp) {
101
102 SCTP_INC_STATS_BH(net, SCTP_MIB_CHECKSUMERRORS);
103 return -1;
104 }
105 return 0;
106}
107
/* Per-skb control-block state for the SCTP input path.  It overlays the
 * generic inet control block so the IP layer's parm area is preserved,
 * and additionally records the chunk built for this skb.
 */
struct sctp_input_cb {
	union {
		struct inet_skb_parm h4;	/* IPv4 control block */
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm h6;	/* IPv6 control block */
#endif
	} header;
	struct sctp_chunk *chunk;	/* chunk parsed from this skb */
};
/* Accessor for the SCTP view of skb->cb. */
#define SCTP_INPUT_CB(__skb) ((struct sctp_input_cb *)&((__skb)->cb[0]))
118
119
120
121
/* sctp_rcv() - the main entry point for SCTP packet reception.
 * Validates the packet, looks up the owning association (or endpoint),
 * builds a chunk and either pushes it on the inqueue or queues it on
 * the socket backlog if the socket is owned by a user context.
 * Always returns 0; the skb is consumed on every path.
 */
int sctp_rcv(struct sk_buff *skb)
{
	struct sock *sk;
	struct sctp_association *asoc;
	struct sctp_endpoint *ep = NULL;
	struct sctp_ep_common *rcvr;
	struct sctp_transport *transport = NULL;
	struct sctp_chunk *chunk;
	struct sctphdr *sh;
	union sctp_addr src;
	union sctp_addr dest;
	int family;
	struct sctp_af *af;
	struct net *net = dev_net(skb->dev);

	/* Only process packets addressed to this host. */
	if (skb->pkt_type!=PACKET_HOST)
		goto discard_it;

	SCTP_INC_STATS_BH(net, SCTP_MIB_INSCTPPACKS);

	if (skb_linearize(skb))
		goto discard_it;

	sh = sctp_hdr(skb);

	/* Pull up the IP header so skb->data points at the SCTP header. */
	__skb_pull(skb, skb_transport_offset(skb));
	if (skb->len < sizeof(struct sctphdr))
		goto discard_it;
	/* Verify the CRC unless hardware already vouched for it. */
	if (!sctp_checksum_disable && !skb_csum_unnecessary(skb) &&
		  sctp_rcv_checksum(net, skb) < 0)
		goto discard_it;

	skb_pull(skb, sizeof(struct sctphdr));

	/* Make sure we at least have chunk header's worth of data. */
	if (skb->len < sizeof(struct sctp_chunkhdr))
		goto discard_it;

	family = ipver2af(ip_hdr(skb)->version);
	af = sctp_get_af_specific(family);
	if (unlikely(!af))
		goto discard_it;

	/* Initialize local addresses for lookups. */
	af->from_skb(&src, skb, 1);
	af->from_skb(&dest, skb, 0);

	/* If the packet is to or from a non-unicast address,
	 * silently discard it: addr_valid() rejects addresses
	 * that may not appear in a valid SCTP packet.
	 */
	if (!af->addr_valid(&src, NULL, skb) ||
	    !af->addr_valid(&dest, NULL, skb))
		goto discard_it;

	/* Both lookups below take a reference on what they return. */
	asoc = __sctp_rcv_lookup(net, skb, &src, &dest, &transport);

	if (!asoc)
		ep = __sctp_rcv_lookup_endpoint(net, &dest);

	/* Retrieve the common input handling substructure. */
	rcvr = asoc ? &asoc->base : &ep->base;
	sk = rcvr->sk;

	/* If a frame arrives on an interface and the receiving socket is
	 * bound to another interface (SO_BINDTODEVICE), redirect the
	 * packet to the control socket so it is handled as out-of-the-blue.
	 */
	if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb)))
	{
		/* Drop the ref taken by the lookup above... */
		if (asoc) {
			sctp_association_put(asoc);
			asoc = NULL;
		} else {
			sctp_endpoint_put(ep);
			ep = NULL;
		}
		/* ...and fall back to the control socket's endpoint. */
		sk = net->sctp.ctl_sock;
		ep = sctp_sk(sk)->ep;
		sctp_endpoint_hold(ep);
		rcvr = &ep->base;
	}

	/* RFC 2960, 8.4 - Handle "Out of the blue" Packets.
	 * A packet with no matching association is OOTB;
	 * sctp_rcv_ootb() returns nonzero when the packet
	 * must be silently discarded.
	 */
	if (!asoc) {
		if (sctp_rcv_ootb(skb)) {
			SCTP_INC_STATS_BH(net, SCTP_MIB_OUTOFBLUES);
			goto discard_release;
		}
	}

	if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family))
		goto discard_release;
	nf_reset(skb);

	if (sk_filter(sk, skb))
		goto discard_release;

	/* Create an SCTP packet structure. */
	chunk = sctp_chunkify(skb, asoc, sk);
	if (!chunk)
		goto discard_release;
	SCTP_INPUT_CB(skb)->chunk = chunk;

	/* Remember what endpoint is to handle this packet. */
	chunk->rcvr = rcvr;

	/* Remember the SCTP header. */
	chunk->sctp_hdr = sh;

	/* Set the source and destination addresses of the incoming chunk. */
	sctp_init_addrs(chunk, &src, &dest);

	/* Remember where we came from. */
	chunk->transport = transport;

	/* Acquire access to the sock lock.  Note: we are safe from other
	 * bottom halves on this lock, but a user may be in the lock too,
	 * so check if it is busy.
	 */
	sctp_bh_lock_sock(sk);

	if (sk != rcvr->sk) {
		/* Our cached sk is different from rcvr->sk.  This happens
		 * when migrate()/accept() moves the association to a new
		 * socket while we were waiting on the old socket's lock.
		 * Switch our view to the current socket and re-lock.
		 */
		sctp_bh_unlock_sock(sk);
		sk = rcvr->sk;
		sctp_bh_lock_sock(sk);
	}

	if (sock_owned_by_user(sk)) {
		if (sctp_add_backlog(sk, skb)) {
			sctp_bh_unlock_sock(sk);
			sctp_chunk_free(chunk);
			/* chunk free already released the skb */
			skb = NULL;
			goto discard_release;
		}
		SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_BACKLOG);
	} else {
		SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_SOFTIRQ);
		sctp_inq_push(&chunk->rcvr->inqueue, chunk);
	}

	sctp_bh_unlock_sock(sk);

	/* Release the asoc/ep ref we took in the lookup calls. */
	if (asoc)
		sctp_association_put(asoc);
	else
		sctp_endpoint_put(ep);

	return 0;

discard_it:
	SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_DISCARDS);
	kfree_skb(skb);	/* no-op when skb was already consumed (NULL) */
	return 0;

discard_release:
	/* Release the asoc/ep ref we took in the lookup calls. */
	if (asoc)
		sctp_association_put(asoc);
	else
		sctp_endpoint_put(ep);

	goto discard_it;
}
309
310
311
312
313
314
/* Process a chunk from the socket backlog (runs in process context
 * via release_sock).  Each backlogged skb carries a reference on its
 * receiver (association or endpoint), taken in sctp_add_backlog();
 * that reference is dropped here unless the skb is re-backlogged.
 */
int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
	struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
	struct sctp_ep_common *rcvr = NULL;
	int backloged = 0;

	rcvr = chunk->rcvr;

	/* If the receiver is dead, the association or endpoint has been
	 * deleted; drop the chunk and the reference we are holding.
	 */
	if (rcvr->dead) {
		sctp_chunk_free(chunk);
		goto done;
	}

	if (unlikely(rcvr->sk != sk)) {
		/* The association migrated to another socket while this
		 * chunk sat on the old socket's backlog.  Lock the new
		 * socket; if a user owns it, re-queue the chunk on the
		 * new socket's backlog WITHOUT dropping our receiver ref
		 * (the new backlog inherits it), otherwise push the chunk
		 * straight onto the inqueue.
		 */
		sk = rcvr->sk;
		sctp_bh_lock_sock(sk);

		if (sock_owned_by_user(sk)) {
			if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
				sctp_chunk_free(chunk);
			else
				backloged = 1;
		} else
			sctp_inq_push(inqueue, chunk);

		sctp_bh_unlock_sock(sk);

		/* If the chunk was backloged again, don't drop the refs. */
		if (backloged)
			return 0;
	} else {
		sctp_inq_push(inqueue, chunk);
	}

done:
	/* Release the reference taken in sctp_add_backlog(). */
	if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
		sctp_association_put(sctp_assoc(rcvr));
	else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
		sctp_endpoint_put(sctp_ep(rcvr));
	else
		BUG();

	return 0;
}
376
377static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
378{
379 struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
380 struct sctp_ep_common *rcvr = chunk->rcvr;
381 int ret;
382
383 ret = sk_add_backlog(sk, skb, sk->sk_rcvbuf);
384 if (!ret) {
385
386
387
388
389 if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
390 sctp_association_hold(sctp_assoc(rcvr));
391 else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
392 sctp_endpoint_hold(sctp_ep(rcvr));
393 else
394 BUG();
395 }
396 return ret;
397
398}
399
400
/* Handle ICMP "Fragmentation Needed": shrink the transport's path MTU
 * and retransmit any queued data with the new setting.
 */
void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
			   struct sctp_transport *t, __u32 pmtu)
{
	/* Ignore if there is no transport or the reported MTU is not
	 * actually smaller than what we already use.
	 */
	if (!t || (t->pathmtu <= pmtu))
		return;

	if (sock_owned_by_user(sk)) {
		/* Socket is busy in user context; defer the update. */
		asoc->pmtu_pending = 1;
		t->pmtu_pending = 1;
		return;
	}

	if (t->param_flags & SPP_PMTUD_ENABLE) {
		/* Update transport's view of the MTU. */
		sctp_transport_update_pmtu(sk, t, pmtu);

		/* Update association pmtu. */
		sctp_assoc_sync_pmtu(sk, asoc);
	}

	/* Retransmit with the new pmtu setting.  Even when PMTU discovery
	 * is disabled, a message sent before it was disabled may have been
	 * larger than the path MTU and must be retransmitted fragmented.
	 */
	sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
}
429
430void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t,
431 struct sk_buff *skb)
432{
433 struct dst_entry *dst;
434
435 if (!t)
436 return;
437 dst = sctp_transport_dst_check(t);
438 if (dst)
439 dst->ops->redirect(dst, sk, skb);
440}
441
442
443
444
445
446
447
448
449
450
451
452
/* Handle ICMP "Protocol Unreachable": the peer does not speak SCTP.
 * If the socket is user-owned we cannot run the state machine now, so
 * arm a short timer (holding an association ref for it); otherwise
 * cancel any pending timer and deliver the event immediately.
 */
void sctp_icmp_proto_unreachable(struct sock *sk,
			   struct sctp_association *asoc,
			   struct sctp_transport *t)
{
	SCTP_DEBUG_PRINTK("%s\n",  __func__);

	if (sock_owned_by_user(sk)) {
		if (timer_pending(&t->proto_unreach_timer))
			return;
		else {
			/* Take a ref for the timer; released when it fires
			 * or is cancelled below.
			 */
			if (!mod_timer(&t->proto_unreach_timer,
						jiffies + (HZ/20)))
				sctp_association_hold(asoc);
		}

	} else {
		struct net *net = sock_net(sk);

		/* A pending timer holds a ref; drop it on cancellation. */
		if (del_timer(&t->proto_unreach_timer))
			sctp_association_put(asoc);

		sctp_do_sm(net, SCTP_EVENT_T_OTHER,
			   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
			   asoc->state, asoc->ep, asoc, t,
			   GFP_ATOMIC);
	}
}
480
481
/* Common lookup code for ICMP/ICMPv6 error handlers.  Find the
 * association matching the offending packet embedded in the ICMP
 * message.  On success, returns the (bh-locked) socket with *app and
 * *tpp set and a reference held on the association; the caller must
 * finish with sctp_err_finish().  Returns NULL on no match or when
 * the verification tag check fails.
 */
struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
			     struct sctphdr *sctphdr,
			     struct sctp_association **app,
			     struct sctp_transport **tpp)
{
	union sctp_addr saddr;
	union sctp_addr daddr;
	struct sctp_af *af;
	struct sock *sk = NULL;
	struct sctp_association *asoc;
	struct sctp_transport *transport = NULL;
	struct sctp_init_chunk *chunkhdr;
	__u32 vtag = ntohl(sctphdr->vtag);
	/* Bytes of the original packet available after the SCTP header. */
	int len = skb->len - ((void *)sctphdr - (void *)skb->data);

	*app = NULL; *tpp = NULL;

	af = sctp_get_af_specific(family);
	if (unlikely(!af)) {
		return NULL;
	}

	/* Initialize local addresses for lookups. */
	af->from_skb(&saddr, skb, 1);
	af->from_skb(&daddr, skb, 0);

	/* Look for an association that matches the addresses carried in
	 * the ICMP error packet.
	 */
	asoc = __sctp_lookup_association(net, &saddr, &daddr, &transport);
	if (!asoc)
		return NULL;

	sk = asoc->base.sk;

	/* RFC 4960, Appendix C. ICMP Handling
	 *
	 * The Verification Tag in the ICMP payload MUST match the peer's
	 * tag.  If the tag is 0, the payload must contain enough bytes to
	 * verify it is an INIT chunk whose Initiate Tag matches our own
	 * vtag; otherwise the ICMP message is discarded.
	 */
	if (vtag == 0) {
		chunkhdr = (void *)sctphdr + sizeof(struct sctphdr);
		if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t)
			  + sizeof(__be32) ||
		    chunkhdr->chunk_hdr.type != SCTP_CID_INIT ||
		    ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) {
			goto out;
		}
	} else if (vtag != asoc->c.peer_vtag) {
		goto out;
	}

	sctp_bh_lock_sock(sk);

	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	*app = asoc;
	*tpp = transport;
	return sk;

out:
	/* Drop the ref taken by the association lookup. */
	if (asoc)
		sctp_association_put(asoc);
	return NULL;
}
558
559
560void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
561{
562 sctp_bh_unlock_sock(sk);
563 if (asoc)
564 sctp_association_put(asoc);
565}
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
/* ICMP error handler for SCTP over IPv4.  skb->data points at the IP
 * header of the offending packet returned inside the ICMP message;
 * 'info' carries type-dependent data (e.g. the next-hop MTU for
 * ICMP_FRAG_NEEDED).
 */
void sctp_v4_err(struct sk_buff *skb, __u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	const int ihlen = iph->ihl * 4;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	struct sctp_association *asoc = NULL;
	struct sctp_transport *transport;
	struct inet_sock *inet;
	sk_buff_data_t saveip, savesctp;
	int err;
	struct net *net = dev_net(skb->dev);

	/* Need at least the IP header plus 8 bytes of the original packet. */
	if (skb->len < ihlen + 8) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	/* Temporarily re-point the header offsets at the embedded packet
	 * so sctp_hdr()/lookup helpers work, restoring them afterwards.
	 */
	saveip = skb->network_header;
	savesctp = skb->transport_header;
	skb_reset_network_header(skb);
	skb_set_transport_header(skb, ihlen);
	sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &transport);
	/* Put back, the original values. */
	skb->network_header = saveip;
	skb->transport_header = savesctp;
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}
	/* Warning:  The sock lock is held.  Remember to call
	 * sctp_err_finish!
	 */
	switch (type) {
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out_unlock;

		/* PMTU discovery (RFC1191) */
		if (ICMP_FRAG_NEEDED == code) {
			sctp_icmp_frag_needed(sk, asoc, transport, info);
			goto out_unlock;
		}
		else {
			if (ICMP_PROT_UNREACH == code) {
				sctp_icmp_proto_unreachable(sk, asoc,
							    transport);
				goto out_unlock;
			}
		}
		err = icmp_err_convert[code].errno;
		break;
	case ICMP_TIME_EXCEEDED:
		/* Ignore any time exceeded errors due to fragment
		 * reassembly timeouts.
		 */
		if (ICMP_EXC_FRAGTIME == code)
			goto out_unlock;

		err = EHOSTUNREACH;
		break;
	case ICMP_REDIRECT:
		sctp_icmp_redirect(sk, transport, skb);
		err = 0;
		break;
	default:
		goto out_unlock;
	}

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else {	/* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out_unlock:
	sctp_err_finish(sk, asoc);
}
668
669
670
671
672
673
674
675
676
677
678
679
680
/* Out-of-the-blue packet screening (RFC 4960, section 8.4).
 * Walk every chunk in the packet; return 1 when the packet must be
 * silently discarded, 0 when normal OOTB processing may respond.
 */
static int sctp_rcv_ootb(struct sk_buff *skb)
{
	sctp_chunkhdr_t *ch;
	__u8 *ch_end;

	ch = (sctp_chunkhdr_t *) skb->data;

	/* Scan through all the chunks in the packet.  */
	do {
		/* Break out if chunk length is less then minimal. */
		if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
			break;

		/* Chunks are padded to 4-byte boundaries; stop at a chunk
		 * that would run past the end of the skb.
		 */
		ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
		if (ch_end > skb_tail_pointer(skb))
			break;

		/* RFC 8.4, 2) If the OOTB packet contains an ABORT chunk, the
		 * receiver MUST silently discard it.
		 */
		if (SCTP_CID_ABORT == ch->type)
			goto discard;

		/* RFC 8.4, 6) If the packet contains a SHUTDOWN COMPLETE
		 * chunk, the receiver should silently discard the packet
		 * and take no further action.
		 */
		if (SCTP_CID_SHUTDOWN_COMPLETE == ch->type)
			goto discard;

		/* RFC 4460, 2.11.2
		 * Discard packets with an INIT chunk bundled after other
		 * chunks; an INIT in first position is handled by normal
		 * INIT processing instead.
		 */
		if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data)
			goto discard;

		ch = (sctp_chunkhdr_t *) ch_end;
	} while (ch_end < skb_tail_pointer(skb));

	return 0;

discard:
	return 1;
}
728
729
730static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
731{
732 struct net *net = sock_net(ep->base.sk);
733 struct sctp_ep_common *epb;
734 struct sctp_hashbucket *head;
735
736 epb = &ep->base;
737
738 epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
739 head = &sctp_ep_hashtable[epb->hashent];
740
741 sctp_write_lock(&head->lock);
742 hlist_add_head(&epb->node, &head->chain);
743 sctp_write_unlock(&head->lock);
744}
745
746
/* Add an endpoint to the hash.  Local BH-safe wrapper. */
void sctp_hash_endpoint(struct sctp_endpoint *ep)
{
	sctp_local_bh_disable();
	__sctp_hash_endpoint(ep);
	sctp_local_bh_enable();
}
753
754
755static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
756{
757 struct net *net = sock_net(ep->base.sk);
758 struct sctp_hashbucket *head;
759 struct sctp_ep_common *epb;
760
761 epb = &ep->base;
762
763 epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
764
765 head = &sctp_ep_hashtable[epb->hashent];
766
767 sctp_write_lock(&head->lock);
768 hlist_del_init(&epb->node);
769 sctp_write_unlock(&head->lock);
770}
771
772
/* Remove an endpoint from the hash.  Local BH-safe wrapper. */
void sctp_unhash_endpoint(struct sctp_endpoint *ep)
{
	sctp_local_bh_disable();
	__sctp_unhash_endpoint(ep);
	sctp_local_bh_enable();
}
779
780
781static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net,
782 const union sctp_addr *laddr)
783{
784 struct sctp_hashbucket *head;
785 struct sctp_ep_common *epb;
786 struct sctp_endpoint *ep;
787 int hash;
788
789 hash = sctp_ep_hashfn(net, ntohs(laddr->v4.sin_port));
790 head = &sctp_ep_hashtable[hash];
791 read_lock(&head->lock);
792 sctp_for_each_hentry(epb, &head->chain) {
793 ep = sctp_ep(epb);
794 if (sctp_endpoint_is_match(ep, net, laddr))
795 goto hit;
796 }
797
798 ep = sctp_sk(net->sctp.ctl_sock)->ep;
799
800hit:
801 sctp_endpoint_hold(ep);
802 read_unlock(&head->lock);
803 return ep;
804}
805
806
807static void __sctp_hash_established(struct sctp_association *asoc)
808{
809 struct net *net = sock_net(asoc->base.sk);
810 struct sctp_ep_common *epb;
811 struct sctp_hashbucket *head;
812
813 epb = &asoc->base;
814
815
816 epb->hashent = sctp_assoc_hashfn(net, epb->bind_addr.port,
817 asoc->peer.port);
818
819 head = &sctp_assoc_hashtable[epb->hashent];
820
821 sctp_write_lock(&head->lock);
822 hlist_add_head(&epb->node, &head->chain);
823 sctp_write_unlock(&head->lock);
824}
825
826
/* Add an association to the hash.  BH-safe wrapper; temporary
 * associations are never hashed.
 */
void sctp_hash_established(struct sctp_association *asoc)
{
	if (asoc->temp)
		return;

	sctp_local_bh_disable();
	__sctp_hash_established(asoc);
	sctp_local_bh_enable();
}
836
837
838static void __sctp_unhash_established(struct sctp_association *asoc)
839{
840 struct net *net = sock_net(asoc->base.sk);
841 struct sctp_hashbucket *head;
842 struct sctp_ep_common *epb;
843
844 epb = &asoc->base;
845
846 epb->hashent = sctp_assoc_hashfn(net, epb->bind_addr.port,
847 asoc->peer.port);
848
849 head = &sctp_assoc_hashtable[epb->hashent];
850
851 sctp_write_lock(&head->lock);
852 hlist_del_init(&epb->node);
853 sctp_write_unlock(&head->lock);
854}
855
856
/* Remove an association from the hash.  BH-safe wrapper; temporary
 * associations were never hashed, so skip them.
 */
void sctp_unhash_established(struct sctp_association *asoc)
{
	if (asoc->temp)
		return;

	sctp_local_bh_disable();
	__sctp_unhash_established(asoc);
	sctp_local_bh_enable();
}
866
867
/* Look up an association by local/peer address pair.  On a hit,
 * a reference is taken on the association (while still under the
 * bucket lock) and *pt is set to the matching transport.  Caller is
 * responsible for BH protection and for putting the reference.
 */
static struct sctp_association *__sctp_lookup_association(
					struct net *net,
					const union sctp_addr *local,
					const union sctp_addr *peer,
					struct sctp_transport **pt)
{
	struct sctp_hashbucket *head;
	struct sctp_ep_common *epb;
	struct sctp_association *asoc;
	struct sctp_transport *transport;
	int hash;

	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyways.
	 */
	hash = sctp_assoc_hashfn(net, ntohs(local->v4.sin_port),
				 ntohs(peer->v4.sin_port));
	head = &sctp_assoc_hashtable[hash];
	read_lock(&head->lock);
	sctp_for_each_hentry(epb, &head->chain) {
		asoc = sctp_assoc(epb);
		transport = sctp_assoc_is_match(asoc, net, local, peer);
		if (transport)
			goto hit;
	}

	read_unlock(&head->lock);

	return NULL;

hit:
	*pt = transport;
	sctp_association_hold(asoc);
	read_unlock(&head->lock);
	return asoc;
}
904
905
/* Look up an association.  BH-safe wrapper around
 * __sctp_lookup_association(); returns a held association or NULL.
 */
SCTP_STATIC
struct sctp_association *sctp_lookup_association(struct net *net,
						 const union sctp_addr *laddr,
						 const union sctp_addr *paddr,
					    struct sctp_transport **transportp)
{
	struct sctp_association *asoc;

	sctp_local_bh_disable();
	asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
	sctp_local_bh_enable();

	return asoc;
}
920
921
/* Is there an association matching the given local and peer addresses?
 * Returns 1 if so, 0 otherwise; any reference taken by the lookup is
 * dropped before returning.
 */
int sctp_has_association(struct net *net,
			 const union sctp_addr *laddr,
			 const union sctp_addr *paddr)
{
	struct sctp_association *asoc;
	struct sctp_transport *transport;

	asoc = sctp_lookup_association(net, laddr, paddr, &transport);
	if (!asoc)
		return 0;

	sctp_association_put(asoc);
	return 1;
}
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
/* Look up an association for a packet whose first chunk is an
 * INIT/INIT-ACK: try each address parameter carried in the chunk as a
 * candidate peer address until one matches an existing association.
 * This handles the case where an INIT is retransmitted from a
 * different source address than the one the association was created
 * with.  Returns a held association or NULL.
 */
static struct sctp_association *__sctp_rcv_init_lookup(struct net *net,
	struct sk_buff *skb,
	const union sctp_addr *laddr, struct sctp_transport **transportp)
{
	struct sctp_association *asoc;
	union sctp_addr addr;
	union sctp_addr *paddr = &addr;
	struct sctphdr *sh = sctp_hdr(skb);
	union sctp_params params;
	sctp_init_chunk_t *init;
	struct sctp_transport *transport;
	struct sctp_af *af;

	/* NOTE(review): caller (__sctp_rcv_lookup_harder) verifies the
	 * chunk length fits in skb->len before calling in here; confirm
	 * sctp_walk_params bounds all parameter accesses to that length.
	 */

	/* The code below will attempt to walk the INIT chunk's address
	 * parameters.  skb->data points at the chunk header here.
	 */
	init = (sctp_init_chunk_t *)skb->data;

	/* Walk the parameters looking for embedded addresses. */
	sctp_walk_params(params, init, init_hdr.params) {

		/* Note: Ignoring hostname addresses. */
		af = sctp_get_af_specific(param_type2af(params.p->type));
		if (!af)
			continue;

		af->from_addr_param(paddr, params.addr, sh->source, 0);

		asoc = __sctp_lookup_association(net, laddr, paddr, &transport);
		if (asoc)
			return asoc;
	}

	return NULL;
}
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
/* Look up an association using the address carried in an ASCONF
 * chunk's address parameter (which immediately follows the ASCONF
 * header) as the candidate peer address.  Returns a held association
 * or NULL.
 */
static struct sctp_association *__sctp_rcv_asconf_lookup(
					struct net *net,
					sctp_chunkhdr_t *ch,
					const union sctp_addr *laddr,
					__be16 peer_port,
					struct sctp_transport **transportp)
{
	sctp_addip_chunk_t *asconf = (struct sctp_addip_chunk *)ch;
	struct sctp_af *af;
	union sctp_addr_param *param;
	union sctp_addr paddr;

	/* Skip over the ADDIP header and find the Address parameter.
	 * NOTE(review): the parameter is read without an explicit check
	 * that ch->length covers it — confirm the caller's chunk-walk
	 * bounds guarantee this.
	 */
	param = (union sctp_addr_param *)(asconf + 1);

	af = sctp_get_af_specific(param_type2af(param->p.type));
	if (unlikely(!af))
		return NULL;

	af->from_addr_param(&paddr, param, peer_port, 0);

	return __sctp_lookup_association(net, laddr, &paddr, transportp);
}
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
/* Walk the chunks in a packet looking for one (currently only ASCONF)
 * that can identify the association.  An ASCONF is only trusted when
 * it is authenticated (preceded by an AUTH chunk) or when
 * addip_noauth is enabled; a COOKIE-ECHO immediately after AUTH ends
 * the search.  Returns a held association or NULL.
 */
static struct sctp_association *__sctp_rcv_walk_lookup(struct net *net,
				      struct sk_buff *skb,
				      const union sctp_addr *laddr,
				      struct sctp_transport **transportp)
{
	struct sctp_association *asoc = NULL;
	sctp_chunkhdr_t *ch;
	int have_auth = 0;		/* chunk number of the AUTH chunk */
	unsigned int chunk_num = 1;	/* 1-based position in the packet */
	__u8 *ch_end;

	/* Walk through the chunks looking for
	 * an association.
	 */
	ch = (sctp_chunkhdr_t *) skb->data;
	do {
		/* Break out if chunk length is less then minimal. */
		if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
			break;

		ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
		if (ch_end > skb_tail_pointer(skb))
			break;

		switch(ch->type) {
		    case SCTP_CID_AUTH:
			    have_auth = chunk_num;
			    break;

		    case SCTP_CID_COOKIE_ECHO:
			    /* If a packet arrives containing an AUTH chunk as
			     * a first chunk, a COOKIE-ECHO chunk as the second
			     * chunk, and possibly more chunks after them, and
			     * the receiver does not have an STCB for that
			     * packet, then authentication is based on the
			     * contents of the COOKIE-ECHO chunk, so stop
			     * searching here.
			     */
			    if (have_auth == 1 && chunk_num == 2)
				    return NULL;
			    break;

		    case SCTP_CID_ASCONF:
			    if (have_auth || net->sctp.addip_noauth)
				    asoc = __sctp_rcv_asconf_lookup(
							net, ch, laddr,
						    sctp_hdr(skb)->source,
						    transportp);
			    /* fall through */
		    default:
			    break;
		}

		if (asoc)
			break;

		ch = (sctp_chunkhdr_t *) ch_end;
		chunk_num++;
	} while (ch_end < skb_tail_pointer(skb));

	return asoc;
}
1112
1113
1114
1115
1116
1117
1118
1119static struct sctp_association *__sctp_rcv_lookup_harder(struct net *net,
1120 struct sk_buff *skb,
1121 const union sctp_addr *laddr,
1122 struct sctp_transport **transportp)
1123{
1124 sctp_chunkhdr_t *ch;
1125
1126 ch = (sctp_chunkhdr_t *) skb->data;
1127
1128
1129
1130
1131
1132
1133 if (WORD_ROUND(ntohs(ch->length)) > skb->len)
1134 return NULL;
1135
1136
1137 switch (ch->type) {
1138 case SCTP_CID_INIT:
1139 case SCTP_CID_INIT_ACK:
1140 return __sctp_rcv_init_lookup(net, skb, laddr, transportp);
1141 break;
1142
1143 default:
1144 return __sctp_rcv_walk_lookup(net, skb, laddr, transportp);
1145 break;
1146 }
1147
1148
1149 return NULL;
1150}
1151
1152
/* Top-level association lookup for sctp_rcv(): first try the direct
 * transport-address lookup, then fall back to inspecting the packet
 * contents (INIT address parameters, authenticated ASCONF).  Returns
 * a held association or NULL.
 */
static struct sctp_association *__sctp_rcv_lookup(struct net *net,
				      struct sk_buff *skb,
				      const union sctp_addr *paddr,
				      const union sctp_addr *laddr,
				      struct sctp_transport **transportp)
{
	struct sctp_association *found;

	found = __sctp_lookup_association(net, laddr, paddr, transportp);
	if (found)
		return found;

	/* No direct hit: the packet may still identify the association
	 * itself (e.g. an INIT arriving from an unexpected address).
	 */
	return __sctp_rcv_lookup_harder(net, skb, laddr, transportp);
}
1172