1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
45
46#include <linux/types.h>
47#include <linux/fcntl.h>
48#include <linux/poll.h>
49#include <linux/init.h>
50
51#include <linux/slab.h>
52#include <linux/in.h>
53#include <net/ipv6.h>
54#include <net/sctp/sctp.h>
55#include <net/sctp/sm.h>
56
57
58static void sctp_select_active_and_retran_path(struct sctp_association *asoc);
59static void sctp_assoc_bh_rcv(struct work_struct *work);
60static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
61static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);
62
63
64
65
/* Initialize a new association from provided memory.
 *
 * Takes a reference on @ep and @sk, copies defaults from the owning
 * socket, sets up timers, queues and AUTH state.  Returns @asoc on
 * success, or NULL on failure (after dropping the references taken).
 */
static struct sctp_association *sctp_association_init(struct sctp_association *asoc,
					  const struct sctp_endpoint *ep,
					  const struct sock *sk,
					  sctp_scope_t scope,
					  gfp_t gfp)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	int i;
	sctp_paramhdr_t *p;
	int err;

	/* Retrieve the SCTP per-socket area.  */
	sp = sctp_sk((struct sock *)sk);

	/* Discarding const is appropriate here.  */
	asoc->ep = (struct sctp_endpoint *)ep;
	asoc->base.sk = (struct sock *)sk;

	/* Hold the endpoint and the socket for the association's lifetime. */
	sctp_endpoint_hold(asoc->ep);
	sock_hold(asoc->base.sk);

	/* Initialize the common base substructure.  */
	asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;

	/* Initialize the object handling fields.  */
	atomic_set(&asoc->base.refcnt, 1);

	/* Initialize the bind addr area.  */
	sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);

	asoc->state = SCTP_STATE_CLOSED;
	asoc->cookie_life = ms_to_ktime(sp->assocparams.sasoc_cookie_life);
	asoc->user_frag = sp->user_frag;

	/* Set the association max_retrans and RTO values from the
	 * socket values.
	 */
	asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
	asoc->pf_retrans = net->sctp.pf_retrans;

	asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
	asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
	asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);

	/* Initialize the association's heartbeat interval based on the
	 * sock configured value.
	 */
	asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);

	/* Initialize path max retrans value. */
	asoc->pathmaxrxt = sp->pathmaxrxt;

	/* Initialize default path MTU. */
	asoc->pathmtu = sp->pathmtu;

	/* Set association default SACK delay and frequency. */
	asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
	asoc->sackfreq = sp->sackfreq;

	/* Set the association default flags controlling
	 * Heartbeat, SACK delay, and Path MTU Discovery.
	 */
	asoc->param_flags = sp->param_flags;

	/* Initialize the maximum number of new data packets that can be sent
	 * in a burst.
	 */
	asoc->max_burst = sp->max_burst;

	/* initialize association timers */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;

	/* sctpimpguide: "The 'T5-shutdown-guard' timer, if used, SHOULD be
	 * set to 5 times the RTO.Max" -- protection against an endpoint
	 * that never sends a SHUTDOWN-COMPLETE.
	 */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
		= 5 * asoc->rto_max;

	asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;

	/* Initializes the timers */
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
		setup_timer(&asoc->timers[i], sctp_timer_events[i],
				(unsigned long)asoc);

	/* Pull default initialization values from the sock options.
	 * Note: This assumes that the values have already been
	 * validated in the sock.
	 */
	asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
	asoc->c.sinit_num_ostreams = sp->initmsg.sinit_num_ostreams;
	asoc->max_init_attempts	= sp->initmsg.sinit_max_attempts;

	asoc->max_init_timeo =
		 msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);

	/* Set the local window size for receive.
	 * This is also the rcvbuf space per association.
	 * RFC 6 - A SCTP receiver MUST be able to receive a minimum of
	 * 1500 bytes in one SCTP packet.
	 */
	if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
		asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
	else
		asoc->rwnd = sk->sk_rcvbuf/2;

	asoc->a_rwnd = asoc->rwnd;

	/* Use my own max window until I learn something better.  */
	asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;

	/* Initialize the receive memory counter */
	atomic_set(&asoc->rmem_alloc, 0);

	init_waitqueue_head(&asoc->wait);

	asoc->c.my_vtag = sctp_generate_tag(ep);
	asoc->c.my_port = ep->base.bind_addr.port;

	asoc->c.initial_tsn = sctp_generate_tsn(ep);

	asoc->next_tsn = asoc->c.initial_tsn;

	asoc->ctsn_ack_point = asoc->next_tsn - 1;
	asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
	asoc->highest_sacked = asoc->ctsn_ack_point;
	asoc->last_cwr_tsn = asoc->ctsn_ack_point;

	/* ADDIP Section 4.1 ASCONF Chunk Procedures
	 *
	 * When an endpoint has an ASCONF signaled change to be sent to the
	 * remote endpoint it should do the following:
	 * ...
	 * A2) a serial number should be assigned to the chunk. The serial
	 * number SHOULD be a monotonically increasing number. The serial
	 * numbers SHOULD be initialized at the start of the
	 * association to the same value as the initial TSN.
	 */
	asoc->addip_serial = asoc->c.initial_tsn;

	INIT_LIST_HEAD(&asoc->addip_chunk_list);
	INIT_LIST_HEAD(&asoc->asconf_ack_list);

	/* Make an empty list of remote transport addresses.  */
	INIT_LIST_HEAD(&asoc->peer.transport_addr_list);

	/* RFC 2960 5.1 Normal Establishment of an Association
	 *
	 * After the reception of the first data chunk in an
	 * association the endpoint must immediately respond with a
	 * sack to acknowledge the data chunk.  Subsequent
	 * acknowledgements should be done as described in Section
	 * 6.2.
	 *
	 * [We implement this by telling a new association that it
	 * already received one packet.]
	 */
	asoc->peer.sack_needed = 1;
	asoc->peer.sack_generation = 1;

	/* Assume that the peer will tell us if he recognizes ASCONF
	 * as part of INIT exchange.
	 * The sctp_addip_noauth option is there for backward compatibility
	 * and will revert old behavior.
	 */
	if (net->sctp.addip_noauth)
		asoc->peer.asconf_capable = 1;

	/* Create an input queue.  */
	sctp_inq_init(&asoc->base.inqueue);
	sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);

	/* Create an output queue.  */
	sctp_outq_init(asoc, &asoc->outqueue);

	if (!sctp_ulpq_init(&asoc->ulpq, asoc))
		goto fail_init;

	/* Assume that peer would support both address types unless we are
	 * told otherwise.
	 */
	asoc->peer.ipv4_address = 1;
	if (asoc->base.sk->sk_family == PF_INET6)
		asoc->peer.ipv6_address = 1;
	INIT_LIST_HEAD(&asoc->asocs);

	asoc->default_stream = sp->default_stream;
	asoc->default_ppid = sp->default_ppid;
	asoc->default_flags = sp->default_flags;
	asoc->default_context = sp->default_context;
	asoc->default_timetolive = sp->default_timetolive;
	asoc->default_rcv_context = sp->default_rcv_context;

	/* AUTH related initializations */
	INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
	err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp);
	if (err)
		goto fail_init;

	asoc->active_key_id = ep->active_key_id;
	asoc->prsctp_enable = ep->prsctp_enable;

	/* Save the hmacs and chunks list into this association */
	if (ep->auth_hmacs_list)
		memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
			ntohs(ep->auth_hmacs_list->param_hdr.length));
	if (ep->auth_chunk_list)
		memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
			ntohs(ep->auth_chunk_list->param_hdr.length));

	/* Get the AUTH random number for this association */
	p = (sctp_paramhdr_t *)asoc->c.auth_random;
	p->type = SCTP_PARAM_RANDOM;
	p->length = htons(sizeof(sctp_paramhdr_t) + SCTP_AUTH_RANDOM_LENGTH);
	get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);

	return asoc;

fail_init:
	/* Drop the references taken at the top before reporting failure. */
	sock_put(asoc->base.sk);
	sctp_endpoint_put(asoc->ep);
	return NULL;
}
294
295
296struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
297 const struct sock *sk,
298 sctp_scope_t scope,
299 gfp_t gfp)
300{
301 struct sctp_association *asoc;
302
303 asoc = kzalloc(sizeof(*asoc), gfp);
304 if (!asoc)
305 goto fail;
306
307 if (!sctp_association_init(asoc, ep, sk, scope, gfp))
308 goto fail_init;
309
310 SCTP_DBG_OBJCNT_INC(assoc);
311
312 pr_debug("Created asoc %p\n", asoc);
313
314 return asoc;
315
316fail_init:
317 kfree(asoc);
318fail:
319 return NULL;
320}
321
322
323
324
/* Free the resources of an association and drop the initial reference.
 * The association is marked dead first; final memory release happens in
 * sctp_association_destroy() once the refcount reaches zero.
 */
void sctp_association_free(struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;
	struct sctp_transport *transport;
	struct list_head *pos, *temp;
	int i;

	/* Only real associations count against the endpoint, so
	 * don't bother for if this is a temporary association.
	 */
	if (!list_empty(&asoc->asocs)) {
		list_del(&asoc->asocs);

		/* Decrement the backlog value for a TCP-style listening
		 * socket.
		 */
		if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
			sk->sk_ack_backlog--;
	}

	/* Mark as dead, so other users can know this structure is
	 * going away.
	 */
	asoc->base.dead = true;

	/* Dispose of any data lying around in the outqueue. */
	sctp_outq_free(&asoc->outqueue);

	/* Dispose of any pending messages for the upper layer. */
	sctp_ulpq_free(&asoc->ulpq);

	/* Dispose of any pending chunks on the inqueue. */
	sctp_inq_free(&asoc->base.inqueue);

	sctp_tsnmap_free(&asoc->peer.tsn_map);

	/* Free ssnmap storage. */
	sctp_ssnmap_free(asoc->ssnmap);

	/* Clean up the bound address list. */
	sctp_bind_addr_free(&asoc->base.bind_addr);

	/* Do we need to go through all of our timers and
	 * delete them?   To be safe we will try to delete all, but we
	 * should be able to go through and make a guess based
	 * on our state.
	 */
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
		if (del_timer(&asoc->timers[i]))
			sctp_association_put(asoc);
	}

	/* Free peer's cached cookie. */
	kfree(asoc->peer.cookie);
	kfree(asoc->peer.peer_random);
	kfree(asoc->peer.peer_chunks);
	kfree(asoc->peer.peer_hmacs);

	/* Release the transport structures. */
	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		transport = list_entry(pos, struct sctp_transport, transports);
		list_del_rcu(pos);
		sctp_unhash_transport(transport);
		sctp_transport_free(transport);
	}

	asoc->peer.transport_count = 0;

	sctp_asconf_queue_teardown(asoc);

	/* Free pending address space being deleted */
	kfree(asoc->asconf_addr_del_pending);

	/* AUTH - Free the endpoint shared keys */
	sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);

	/* AUTH - Free the association shared key */
	sctp_auth_key_put(asoc->asoc_shared_key);

	sctp_association_put(asoc);
}
406
407
408static void sctp_association_destroy(struct sctp_association *asoc)
409{
410 if (unlikely(!asoc->base.dead)) {
411 WARN(1, "Attempt to destroy undead association %p!\n", asoc);
412 return;
413 }
414
415 sctp_endpoint_put(asoc->ep);
416 sock_put(asoc->base.sk);
417
418 if (asoc->assoc_id != 0) {
419 spin_lock_bh(&sctp_assocs_id_lock);
420 idr_remove(&sctp_assocs_id, asoc->assoc_id);
421 spin_unlock_bh(&sctp_assocs_id_lock);
422 }
423
424 WARN_ON(atomic_read(&asoc->rmem_alloc));
425
426 kfree(asoc);
427 SCTP_DBG_OBJCNT_DEC(assoc);
428}
429
430
/* Change the primary destination address for the peer.  Also updates
 * the CACC (Changeover Aware Congestion Control, RFC 4960 / SFR-CACC)
 * bookkeeping on the new primary transport when data is outstanding.
 */
void sctp_assoc_set_primary(struct sctp_association *asoc,
			    struct sctp_transport *transport)
{
	int changeover = 0;

	/* It would be better to find out whether an actual
	 * changeover has occurred: only then is this a changeover
	 * for CACC purposes.
	 */
	if (asoc->peer.primary_path != NULL &&
	    asoc->peer.primary_path != transport)
		changeover = 1 ;

	asoc->peer.primary_path = transport;

	/* Set a default msg_name for events. */
	memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
	       sizeof(union sctp_addr));

	/* If the primary path is changing, assume that the
	 * user wants to use this new path.
	 */
	if ((transport->state == SCTP_ACTIVE) ||
	    (transport->state == SCTP_UNKNOWN))
		asoc->peer.active_path = transport;

	/*
	 * SFR-CACC algorithm:
	 * Upon the receipt of a request to change the primary
	 * destination address, on the data structure for the new
	 * primary destination, the sender MUST do the following:
	 *
	 * 1) If CHANGEOVER_ACTIVE is set, then there was a switch
	 * to this destination address earlier. The sender MUST set
	 * CYCLING_CHANGEOVER to indicate that this switch is a
	 * double switch to the same destination address.
	 *
	 * Really, only bother with the changeover bookkeeping if we have
	 * transmitted or queued data.
	 */
	if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
		return;

	if (transport->cacc.changeover_active)
		transport->cacc.cycling_changeover = changeover;

	/* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
	 * a changeover has occurred.
	 */
	transport->cacc.changeover_active = changeover;

	/* 3) The sender MUST store the next TSN to be sent in
	 * next_tsn_at_change.
	 */
	transport->cacc.next_tsn_at_change = asoc->next_tsn;
}
486
487
/* Remove a transport from an association, transferring its queued
 * chunks to the active path and repairing every cached pointer that
 * may still reference it.
 */
void sctp_assoc_rm_peer(struct sctp_association *asoc,
			struct sctp_transport *peer)
{
	struct list_head	*pos;
	struct sctp_transport	*transport;

	pr_debug("%s: association:%p addr:%pISpc\n",
		 __func__, asoc, &peer->ipaddr.sa);

	/* If we are to remove the current retran_path, update it
	 * to the next peer before removing this peer from the list.
	 */
	if (asoc->peer.retran_path == peer)
		sctp_assoc_update_retran_path(asoc);

	/* Remove this peer from the list. */
	list_del_rcu(&peer->transports);
	/* Remove this peer from the transport hashtable */
	sctp_unhash_transport(peer);

	/* Get the first transport of asoc.
	 * NOTE(review): assumes the list is non-empty after removal —
	 * callers appear to guarantee at least one remaining transport.
	 */
	pos = asoc->peer.transport_addr_list.next;
	transport = list_entry(pos, struct sctp_transport, transports);

	/* Update any entries that match the peer to be deleted. */
	if (asoc->peer.primary_path == peer)
		sctp_assoc_set_primary(asoc, transport);
	if (asoc->peer.active_path == peer)
		asoc->peer.active_path = transport;
	if (asoc->peer.retran_path == peer)
		asoc->peer.retran_path = transport;
	if (asoc->peer.last_data_from == peer)
		asoc->peer.last_data_from = transport;

	/* If we remove the transport an INIT was last sent to, set it to
	 * NULL. Combined with the update of the retran path above, this
	 * will cause the next INIT to be sent to the next available
	 * transport, maintaining the cycle.
	 */
	if (asoc->init_last_sent_to == peer)
		asoc->init_last_sent_to = NULL;

	/* If we remove the transport a SHUTDOWN was last sent to, set it
	 * to NULL. Combined with the update of the retran path above, this
	 * will cause the next SHUTDOWN to be sent to the next available
	 * transport, maintaining the cycle.
	 */
	if (asoc->shutdown_last_sent_to == peer)
		asoc->shutdown_last_sent_to = NULL;

	/* If we remove the transport an ASCONF was last sent to, set it to
	 * NULL.
	 */
	if (asoc->addip_last_asconf &&
	    asoc->addip_last_asconf->transport == peer)
		asoc->addip_last_asconf->transport = NULL;

	/* If we have something on the transmitted list, we have to
	 * save it off.  The best place is the active path.
	 */
	if (!list_empty(&peer->transmitted)) {
		struct sctp_transport *active = asoc->peer.active_path;
		struct sctp_chunk *ch;

		/* Reset the transport of each chunk on this list */
		list_for_each_entry(ch, &peer->transmitted,
					transmitted_list) {
			ch->transport = NULL;
			ch->rtt_in_progress = 0;
		}

		list_splice_tail_init(&peer->transmitted,
					&active->transmitted);

		/* Start a T3 timer here in case it wasn't running so
		 * that these migrated packets have a chance to get
		 * retransmitted.
		 */
		if (!timer_pending(&active->T3_rtx_timer))
			if (!mod_timer(&active->T3_rtx_timer,
					jiffies + active->rto))
				sctp_transport_hold(active);
	}

	asoc->peer.transport_count--;

	sctp_transport_free(peer);
}
576
577
/* Add a transport address to an association.
 * Returns the (new or pre-existing) transport, or NULL on allocation
 * failure.  The new transport inherits the association's per-path
 * defaults and may become the primary/retransmit path.
 */
struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
					   const union sctp_addr *addr,
					   const gfp_t gfp,
					   const int peer_state)
{
	struct net *net = sock_net(asoc->base.sk);
	struct sctp_transport *peer;
	struct sctp_sock *sp;
	unsigned short port;

	sp = sctp_sk(asoc->base.sk);

	/* AF_INET and AF_INET6 share common port field. */
	port = ntohs(addr->v4.sin_port);

	pr_debug("%s: association:%p addr:%pISpc state:%d\n", __func__,
		 asoc, &addr->sa, peer_state);

	/* Set the port if it has not been set yet.  */
	if (0 == asoc->peer.port)
		asoc->peer.port = port;

	/* Check to see if this is a duplicate. */
	peer = sctp_assoc_lookup_paddr(asoc, addr);
	if (peer) {
		/* An UNKNOWN state is only set on transports added by
		 * user in sctp_connectx() call.  Such transports should be
		 * considered CONFIRMED per RFC 4960, Section 5.4.
		 */
		if (peer->state == SCTP_UNKNOWN) {
			peer->state = SCTP_ACTIVE;
		}
		return peer;
	}

	peer = sctp_transport_new(net, addr, gfp);
	if (!peer)
		return NULL;

	sctp_transport_set_owner(peer, asoc);

	/* Initialize the peer's heartbeat interval based on the
	 * association configured value.
	 */
	peer->hbinterval = asoc->hbinterval;

	/* Set the path max_retrans.  */
	peer->pathmaxrxt = asoc->pathmaxrxt;

	/* And the partial failure retrans threshold */
	peer->pf_retrans = asoc->pf_retrans;

	/* Initialize the peer's SACK delay timeout based on the
	 * association configured value.
	 */
	peer->sackdelay = asoc->sackdelay;
	peer->sackfreq = asoc->sackfreq;

	/* Enable/disable heartbeat, SACK delay, and path MTU discovery
	 * based on association setting.
	 */
	peer->param_flags = asoc->param_flags;

	sctp_transport_route(peer, NULL, sp);

	/* Initialize the pmtu of the transport. */
	if (peer->param_flags & SPP_PMTUD_DISABLE) {
		if (asoc->pathmtu)
			peer->pathmtu = asoc->pathmtu;
		else
			peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
	}

	/* If this is the first transport addr on this association,
	 * initialize the association PMTU to the peer's PMTU.
	 * If not and the current association PMTU is higher than the new
	 * peer's PMTU, reset the association PMTU to the new peer's PMTU.
	 */
	if (asoc->pathmtu)
		asoc->pathmtu = min_t(int, peer->pathmtu, asoc->pathmtu);
	else
		asoc->pathmtu = peer->pathmtu;

	pr_debug("%s: association:%p PMTU set to %d\n", __func__, asoc,
		 asoc->pathmtu);

	peer->pmtu_pending = 0;

	asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);

	/* The asoc->peer.port might not be meaningful yet, but
	 * initialize the packet structure anyway.
	 */
	sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
			 asoc->peer.port);

	/* 7.2.1 Slow-Start
	 *
	 * o The initial cwnd before DATA transmission or after a sufficiently
	 *   long idle period MUST be set to
	 *      min(4*MTU, max(2*MTU, 4380 bytes))
	 *
	 * o The initial value of ssthresh MAY be arbitrarily high
	 *   (for example, implementations MAY use the size of the
	 *   receiver advertised window).
	 */
	peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));

	/* At this point, we may not have the receiver's advertised window,
	 * so initialize ssthresh to the default value and it will be set
	 * later when we process the INIT.
	 */
	peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;

	peer->partial_bytes_acked = 0;
	peer->flight_size = 0;
	peer->burst_limited = 0;

	/* Set the transport's RTO.initial value */
	peer->rto = asoc->rto_initial;
	sctp_max_rto(asoc, peer);

	/* Set the peer's active state. */
	peer->state = peer_state;

	/* Attach the remote transport to our asoc.  */
	list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
	asoc->peer.transport_count++;
	/* Add this peer into the transport hashtable */
	sctp_hash_transport(peer);

	/* If we do not yet have a primary path, set one.  */
	if (!asoc->peer.primary_path) {
		sctp_assoc_set_primary(asoc, peer);
		asoc->peer.retran_path = peer;
	}

	if (asoc->peer.active_path == asoc->peer.retran_path &&
	    peer->state != SCTP_UNCONFIRMED) {
		asoc->peer.retran_path = peer;
	}

	return peer;
}
722
723
724void sctp_assoc_del_peer(struct sctp_association *asoc,
725 const union sctp_addr *addr)
726{
727 struct list_head *pos;
728 struct list_head *temp;
729 struct sctp_transport *transport;
730
731 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
732 transport = list_entry(pos, struct sctp_transport, transports);
733 if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
734
735 sctp_assoc_rm_peer(asoc, transport);
736 break;
737 }
738 }
739}
740
741
742struct sctp_transport *sctp_assoc_lookup_paddr(
743 const struct sctp_association *asoc,
744 const union sctp_addr *address)
745{
746 struct sctp_transport *t;
747
748
749
750 list_for_each_entry(t, &asoc->peer.transport_addr_list,
751 transports) {
752 if (sctp_cmp_addr_exact(address, &t->ipaddr))
753 return t;
754 }
755
756 return NULL;
757}
758
759
760void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
761 struct sctp_transport *primary)
762{
763 struct sctp_transport *temp;
764 struct sctp_transport *t;
765
766 list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
767 transports) {
768
769 if (t != primary)
770 sctp_assoc_rm_peer(asoc, t);
771 }
772}
773
774
775
776
777
/* Engage in transport control operations.
 * Mark the transport up or down and, if appropriate, send a
 * SCTP_PEER_ADDR_CHANGE notification to the user.
 * Select and update the new active and retran paths.
 */
void sctp_assoc_control_transport(struct sctp_association *asoc,
				  struct sctp_transport *transport,
				  sctp_transport_cmd_t command,
				  sctp_sn_error_t error)
{
	struct sctp_ulpevent *event;
	struct sockaddr_storage addr;
	int spc_state = 0;
	bool ulp_notify = true;

	/* Record the transition on the transport.  */
	switch (command) {
	case SCTP_TRANSPORT_UP:
		/* If we are moving from UNCONFIRMED state due
		 * to heartbeat success, report the SCTP_ADDR_CONFIRMED
		 * state to the user, otherwise report SCTP_ADDR_AVAILABLE.
		 */
		if (SCTP_UNCONFIRMED == transport->state &&
		    SCTP_HEARTBEAT_SUCCESS == error)
			spc_state = SCTP_ADDR_CONFIRMED;
		else
			spc_state = SCTP_ADDR_AVAILABLE;
		/* Don't inform ULP about transition from PF to
		 * active state and set cwnd to 1 MTU, see SCTP
		 * Quick failover draft section 5.1, point 5
		 */
		if (transport->state == SCTP_PF) {
			ulp_notify = false;
			transport->cwnd = asoc->pathmtu;
		}
		transport->state = SCTP_ACTIVE;
		break;

	case SCTP_TRANSPORT_DOWN:
		/* If the transport was never confirmed, do not transition it
		 * to inactive state.  Also, release the cached route since
		 * there may be a better route next time.
		 */
		if (transport->state != SCTP_UNCONFIRMED)
			transport->state = SCTP_INACTIVE;
		else {
			dst_release(transport->dst);
			transport->dst = NULL;
			ulp_notify = false;
		}

		spc_state = SCTP_ADDR_UNREACHABLE;
		break;

	case SCTP_TRANSPORT_PF:
		transport->state = SCTP_PF;
		ulp_notify = false;
		break;

	default:
		return;
	}

	/* Generate and send a SCTP_PEER_ADDR_CHANGE notification
	 * to the user.
	 */
	if (ulp_notify) {
		memset(&addr, 0, sizeof(struct sockaddr_storage));
		memcpy(&addr, &transport->ipaddr,
		       transport->af_specific->sockaddr_len);

		event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
					0, spc_state, error, GFP_ATOMIC);
		if (event)
			sctp_ulpq_tail_event(&asoc->ulpq, event);
	}

	/* Select new active and retran paths. */
	sctp_select_active_and_retran_path(asoc);
}
853
854
/* Hold a reference to an association.  No destructor is needed since
 * the reference only prevents destruction.
 */
void sctp_association_hold(struct sctp_association *asoc)
{
	atomic_inc(&asoc->base.refcnt);
}
859
860
861
862
/* Release a reference to an association and clean up if this is the
 * last reference.
 */
void sctp_association_put(struct sctp_association *asoc)
{
	if (atomic_dec_and_test(&asoc->base.refcnt))
		sctp_association_destroy(asoc);
}
868
869
870
871
872__u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
873{
874
875
876
877
878
879 __u32 retval = asoc->next_tsn;
880 asoc->next_tsn++;
881 asoc->unack_data++;
882
883 return retval;
884}
885
886
887
888
889int sctp_cmp_addr_exact(const union sctp_addr *ss1,
890 const union sctp_addr *ss2)
891{
892 struct sctp_af *af;
893
894 af = sctp_get_af_specific(ss1->sa.sa_family);
895 if (unlikely(!af))
896 return 0;
897
898 return af->cmp_addr(ss1, ss2);
899}
900
901
902
903
904
905struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
906{
907 if (!asoc->need_ecne)
908 return NULL;
909
910
911
912
913 return sctp_make_ecne(asoc, asoc->last_ecne_tsn);
914}
915
916
917
918
919struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
920 __u32 tsn)
921{
922 struct sctp_transport *active;
923 struct sctp_transport *match;
924 struct sctp_transport *transport;
925 struct sctp_chunk *chunk;
926 __be32 key = htonl(tsn);
927
928 match = NULL;
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945 active = asoc->peer.active_path;
946
947 list_for_each_entry(chunk, &active->transmitted,
948 transmitted_list) {
949
950 if (key == chunk->subh.data_hdr->tsn) {
951 match = active;
952 goto out;
953 }
954 }
955
956
957 list_for_each_entry(transport, &asoc->peer.transport_addr_list,
958 transports) {
959
960 if (transport == active)
961 continue;
962 list_for_each_entry(chunk, &transport->transmitted,
963 transmitted_list) {
964 if (key == chunk->subh.data_hdr->tsn) {
965 match = transport;
966 goto out;
967 }
968 }
969 }
970out:
971 return match;
972}
973
974
975struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
976 struct net *net,
977 const union sctp_addr *laddr,
978 const union sctp_addr *paddr)
979{
980 struct sctp_transport *transport;
981
982 if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
983 (htons(asoc->peer.port) == paddr->v4.sin_port) &&
984 net_eq(sock_net(asoc->base.sk), net)) {
985 transport = sctp_assoc_lookup_paddr(asoc, paddr);
986 if (!transport)
987 goto out;
988
989 if (sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
990 sctp_sk(asoc->base.sk)))
991 goto out;
992 }
993 transport = NULL;
994
995out:
996 return transport;
997}
998
999
/* Do delayed input processing.  This is scheduled by sctp_rcv().
 * Pops chunks off the association inqueue and runs each through the
 * SCTP state machine, holding a reference on the association for the
 * duration so it cannot be destroyed underneath us.
 */
static void sctp_assoc_bh_rcv(struct work_struct *work)
{
	struct sctp_association *asoc =
		container_of(work, struct sctp_association,
			     base.inqueue.immediate);
	struct net *net = sock_net(asoc->base.sk);
	struct sctp_endpoint *ep;
	struct sctp_chunk *chunk;
	struct sctp_inq *inqueue;
	int state;
	sctp_subtype_t subtype;
	int error = 0;

	/* The association should be held so we should be safe. */
	ep = asoc->ep;

	inqueue = &asoc->base.inqueue;
	sctp_association_hold(asoc);
	while (NULL != (chunk = sctp_inq_pop(inqueue))) {
		state = asoc->state;
		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);

		/* SCTP-AUTH, Section 6.3:
		 *    The receiver has a list of chunk types which it expects
		 *    to be received only after an AUTH-chunk.  This list has
		 *    been sent to the peer during the association setup.  It
		 *    MUST silently discard these chunks if they are not
		 *    placed after an AUTH chunk in the packet.
		 */
		if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
			continue;

		/* Remember where the last DATA chunk came from so we
		 * know where to send the SACK.
		 */
		if (sctp_chunk_is_data(chunk))
			asoc->peer.last_data_from = chunk->transport;
		else {
			SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
			asoc->stats.ictrlchunks++;
			if (chunk->chunk_hdr->type == SCTP_CID_SACK)
				asoc->stats.isacks++;
		}

		if (chunk->transport)
			chunk->transport->last_time_heard = ktime_get();

		/* Run through the state machine. */
		error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
				   state, ep, asoc, chunk, GFP_ATOMIC);

		/* Check to see if the association is freed in response to
		 * the incoming chunk.  If so, get out of the while loop.
		 */
		if (asoc->base.dead)
			break;

		/* If there is an error on chunk, discard this packet. */
		if (error && chunk)
			chunk->pdiscard = 1;
	}
	sctp_association_put(asoc);
}
1063
1064
/* This routine moves an association from its old sk to a new sk.
 * Used when an association is peeled off / accepted onto a new socket.
 * The endpoint and socket references are swapped from old to new.
 */
void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
{
	struct sctp_sock *newsp = sctp_sk(newsk);
	struct sock *oldsk = assoc->base.sk;

	/* Delete the association from the old endpoint's list of
	 * associations.
	 */
	list_del_init(&assoc->asocs);

	/* Decrement the backlog value for a TCP-style socket. */
	if (sctp_style(oldsk, TCP))
		oldsk->sk_ack_backlog--;

	/* Release references to the old endpoint and the sock.  */
	sctp_endpoint_put(assoc->ep);
	sock_put(assoc->base.sk);

	/* Get a reference to the new endpoint.  */
	assoc->ep = newsp->ep;
	sctp_endpoint_hold(assoc->ep);

	/* Get a reference to the new sock.  */
	assoc->base.sk = newsk;
	sock_hold(assoc->base.sk);

	/* Add the association to the new endpoint's list of associations. */
	sctp_endpoint_add_asoc(newsp->ep, assoc);
}
1094
1095
/* Update an association based on a newly established one (a restart
 * or a duplicate INIT collision).  State is copied from @new into
 * @asoc; @new is expected to be discarded by the caller afterwards
 * (pointers it owned, like ssnmap and the AUTH parameter copies, are
 * stolen and NULLed here).
 */
void sctp_assoc_update(struct sctp_association *asoc,
		       struct sctp_association *new)
{
	struct sctp_transport *trans;
	struct list_head *pos, *temp;

	/* Copy in new parameters of peer. */
	asoc->c = new->c;
	asoc->peer.rwnd = new->peer.rwnd;
	asoc->peer.sack_needed = new->peer.sack_needed;
	asoc->peer.auth_capable = new->peer.auth_capable;
	asoc->peer.i = new->peer.i;
	sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
			 asoc->peer.i.initial_tsn, GFP_ATOMIC);

	/* Remove any peer addresses not present in the new association. */
	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		trans = list_entry(pos, struct sctp_transport, transports);
		if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
			sctp_assoc_rm_peer(asoc, trans);
			continue;
		}

		if (asoc->state >= SCTP_STATE_ESTABLISHED)
			sctp_transport_reset(trans);
	}

	/* If the case is A (association restart), use
	 * initial_tsn as next_tsn. If the case is B, use
	 * current next_tsn in case data sent to peer
	 * has been discarded and needs retransmission.
	 */
	if (asoc->state >= SCTP_STATE_ESTABLISHED) {
		asoc->next_tsn = new->next_tsn;
		asoc->ctsn_ack_point = new->ctsn_ack_point;
		asoc->adv_peer_ack_point = new->adv_peer_ack_point;

		/* Reinitialize SSN for both local streams
		 * and peer's streams.
		 */
		sctp_ssnmap_clear(asoc->ssnmap);

		/* Flush the ULP reassembly and ordered queue.
		 * Any data there will now be stale and will
		 * cause problems.
		 */
		sctp_ulpq_flush(&asoc->ulpq);

		/* reset the overall association error count so
		 * that the restarted association doesn't get torn
		 * down immediately.
		 */
		asoc->overall_error_count = 0;

	} else {
		/* Add any peer addresses from the new association. */
		list_for_each_entry(trans, &new->peer.transport_addr_list,
				transports) {
			if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))
				sctp_assoc_add_peer(asoc, &trans->ipaddr,
						    GFP_ATOMIC, trans->state);
		}

		asoc->ctsn_ack_point = asoc->next_tsn - 1;
		asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
		if (!asoc->ssnmap) {
			/* Move the ssnmap. */
			asoc->ssnmap = new->ssnmap;
			new->ssnmap = NULL;
		}

		if (!asoc->assoc_id) {
			/* get a new association id since we don't have one
			 * yet.
			 */
			sctp_assoc_set_id(asoc, GFP_ATOMIC);
		}
	}

	/* SCTP-AUTH: Save the peer parameters from the new associations
	 * and also move the association shared keys over
	 */
	kfree(asoc->peer.peer_random);
	asoc->peer.peer_random = new->peer.peer_random;
	new->peer.peer_random = NULL;

	kfree(asoc->peer.peer_chunks);
	asoc->peer.peer_chunks = new->peer.peer_chunks;
	new->peer.peer_chunks = NULL;

	kfree(asoc->peer.peer_hmacs);
	asoc->peer.peer_hmacs = new->peer.peer_hmacs;
	new->peer.peer_hmacs = NULL;

	sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
}
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221static u8 sctp_trans_score(const struct sctp_transport *trans)
1222{
1223 switch (trans->state) {
1224 case SCTP_ACTIVE:
1225 return 3;
1226 case SCTP_UNKNOWN:
1227 return 2;
1228 case SCTP_PF:
1229 return 1;
1230 default:
1231 return 0;
1232 }
1233}
1234
1235static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
1236 struct sctp_transport *trans2)
1237{
1238 if (trans1->error_count > trans2->error_count) {
1239 return trans2;
1240 } else if (trans1->error_count == trans2->error_count &&
1241 ktime_after(trans2->last_time_heard,
1242 trans1->last_time_heard)) {
1243 return trans2;
1244 } else {
1245 return trans1;
1246 }
1247}
1248
1249static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
1250 struct sctp_transport *best)
1251{
1252 u8 score_curr, score_best;
1253
1254 if (best == NULL || curr == best)
1255 return curr;
1256
1257 score_curr = sctp_trans_score(curr);
1258 score_best = sctp_trans_score(best);
1259
1260
1261
1262
1263
1264 if (score_curr > score_best)
1265 return curr;
1266 else if (score_curr == score_best)
1267 return sctp_trans_elect_tie(best, curr);
1268 else
1269 return best;
1270}
1271
/* Choose a new retransmission path by walking the transport list
 * circularly, starting just after the current retran_path, and
 * electing the best usable candidate.
 */
void sctp_assoc_update_retran_path(struct sctp_association *asoc)
{
	struct sctp_transport *trans = asoc->peer.retran_path;
	struct sctp_transport *trans_next = NULL;

	/* We're done as we only have the one and only path. */
	if (asoc->peer.transport_count == 1)
		return;
	/* If active_path and retran_path are the same and active,
	 * then this is the only active path. Use it.
	 */
	if (asoc->peer.active_path == asoc->peer.retran_path &&
	    asoc->peer.active_path->state == SCTP_ACTIVE)
		return;

	/* Iterate from retran_path's successor back to retran_path. */
	for (trans = list_next_entry(trans, transports); 1;
	     trans = list_next_entry(trans, transports)) {
		/* Manually skip the head element. */
		if (&trans->transports == &asoc->peer.transport_addr_list)
			continue;
		if (trans->state == SCTP_UNCONFIRMED)
			continue;
		trans_next = sctp_trans_elect_best(trans, trans_next);
		/* Active is good enough for immediate return. */
		if (trans_next->state == SCTP_ACTIVE)
			break;
		/* We've reached the end, time to update path. */
		if (trans == asoc->peer.retran_path)
			break;
	}

	asoc->peer.retran_path = trans_next;

	pr_debug("%s: association:%p updated new path to addr:%pISpc\n",
		 __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
}
1309
/* Re-elect the association's active and retransmission paths after a
 * transport state change.  The two most-recently-heard usable
 * transports become candidates, with the peer's designated primary
 * given preference when it is usable.
 */
static void sctp_select_active_and_retran_path(struct sctp_association *asoc)
{
	struct sctp_transport *trans, *trans_pri = NULL, *trans_sec = NULL;
	struct sctp_transport *trans_pf = NULL;

	/* Look for the two most recently used active transports. */
	list_for_each_entry(trans, &asoc->peer.transport_addr_list,
			    transports) {
		/* Skip uninteresting transports. */
		if (trans->state == SCTP_INACTIVE ||
		    trans->state == SCTP_UNCONFIRMED)
			continue;
		/* Keep track of the best PF transport from our
		 * list in case we don't find an active one.
		 */
		if (trans->state == SCTP_PF) {
			trans_pf = sctp_trans_elect_best(trans, trans_pf);
			continue;
		}
		/* For active transports, pick the most recent ones. */
		if (trans_pri == NULL ||
		    ktime_after(trans->last_time_heard,
				trans_pri->last_time_heard)) {
			trans_sec = trans_pri;
			trans_pri = trans;
		} else if (trans_sec == NULL ||
			   ktime_after(trans->last_time_heard,
				       trans_sec->last_time_heard)) {
			trans_sec = trans;
		}
	}

	/* RFC 4960 6.4 Multi-Homed SCTP Endpoints
	 *
	 * By default, an endpoint should always transmit to the primary
	 * path, unless the SCTP user explicitly specifies the
	 * destination transport address (and possibly source transport
	 * address) to use.
	 *
	 * [If the primary is active but not most recent, bump the most
	 * recently used transport.]
	 */
	if ((asoc->peer.primary_path->state == SCTP_ACTIVE ||
	     asoc->peer.primary_path->state == SCTP_UNKNOWN) &&
	    asoc->peer.primary_path != trans_pri) {
		trans_sec = trans_pri;
		trans_pri = asoc->peer.primary_path;
	}

	/* We did not find anything useful for a possible retransmission
	 * path; either primary path that we found is the the same as
	 * the current one, or we didn't generally find an active one.
	 */
	if (trans_sec == NULL)
		trans_sec = trans_pri;

	/* If we failed to find a usable transport, just camp on the
	 * active or pick a PF iff it's the better choice.
	 */
	if (trans_pri == NULL) {
		trans_pri = sctp_trans_elect_best(asoc->peer.active_path, trans_pf);
		trans_sec = trans_pri;
	}

	/* Set the active and retran transports. */
	asoc->peer.active_path = trans_pri;
	asoc->peer.retran_path = trans_sec;
}
1376
1377struct sctp_transport *
1378sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
1379 struct sctp_transport *last_sent_to)
1380{
1381
1382
1383
1384
1385 if (last_sent_to == NULL) {
1386 return asoc->peer.active_path;
1387 } else {
1388 if (last_sent_to == asoc->peer.retran_path)
1389 sctp_assoc_update_retran_path(asoc);
1390
1391 return asoc->peer.retran_path;
1392 }
1393}
1394
1395
1396
1397
1398void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
1399{
1400 struct sctp_transport *t;
1401 __u32 pmtu = 0;
1402
1403 if (!asoc)
1404 return;
1405
1406
1407 list_for_each_entry(t, &asoc->peer.transport_addr_list,
1408 transports) {
1409 if (t->pmtu_pending && t->dst) {
1410 sctp_transport_update_pmtu(sk, t,
1411 SCTP_TRUNC4(dst_mtu(t->dst)));
1412 t->pmtu_pending = 0;
1413 }
1414 if (!pmtu || (t->pathmtu < pmtu))
1415 pmtu = t->pathmtu;
1416 }
1417
1418 if (pmtu) {
1419 asoc->pathmtu = pmtu;
1420 asoc->frag_point = sctp_frag_point(asoc, pmtu);
1421 }
1422
1423 pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc,
1424 asoc->pathmtu, asoc->frag_point);
1425}
1426
1427
1428static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
1429{
1430 struct net *net = sock_net(asoc->base.sk);
1431 switch (asoc->state) {
1432 case SCTP_STATE_ESTABLISHED:
1433 case SCTP_STATE_SHUTDOWN_PENDING:
1434 case SCTP_STATE_SHUTDOWN_RECEIVED:
1435 case SCTP_STATE_SHUTDOWN_SENT:
1436 if ((asoc->rwnd > asoc->a_rwnd) &&
1437 ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
1438 (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
1439 asoc->pathmtu)))
1440 return true;
1441 break;
1442 default:
1443 break;
1444 }
1445 return false;
1446}
1447
1448
/* Increase asoc's rwnd by len and, if rwnd has grown enough, send a
 * window-update SACK to the peer immediately (cancelling any pending
 * delayed-SACK timer).
 */
void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
{
	struct sctp_chunk *sack;
	struct timer_list *timer;

	/* First pay back any rwnd_over debt before growing the window. */
	if (asoc->rwnd_over) {
		if (asoc->rwnd_over >= len) {
			asoc->rwnd_over -= len;
		} else {
			asoc->rwnd += (len - asoc->rwnd_over);
			asoc->rwnd_over = 0;
		}
	} else {
		asoc->rwnd += len;
	}

	/* If we had window pressure, start recovering it
	 * once our rwnd had reached the accumulated pressure
	 * threshold.  The idea is to recover slowly, but up
	 * to the initial advertised window.
	 */
	if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
		int change = min(asoc->pathmtu, asoc->rwnd_press);
		asoc->rwnd += change;
		asoc->rwnd_press -= change;
	}

	pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
		 asoc->a_rwnd);

	/* Send a window update SACK if the rwnd has increased by at least the
	 * minimum of the association's PMTU and half of the receive buffer.
	 * The algorithm used is similar to the one described in
	 * Section 4.2.3.3 of RFC 1122.
	 */
	if (sctp_peer_needs_update(asoc)) {
		asoc->a_rwnd = asoc->rwnd;

		pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
			 "a_rwnd:%u\n", __func__, asoc, asoc->rwnd,
			 asoc->a_rwnd);

		sack = sctp_make_sack(asoc);
		if (!sack)
			return;

		asoc->peer.sack_needed = 0;

		sctp_outq_tail(&asoc->outqueue, sack, GFP_ATOMIC);

		/* Stop the SACK timer.  Each del_timer success drops the
		 * timer's reference on the association.
		 */
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
		if (del_timer(timer))
			sctp_association_put(asoc);
	}
}
1506
1507
1508void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
1509{
1510 int rx_count;
1511 int over = 0;
1512
1513 if (unlikely(!asoc->rwnd || asoc->rwnd_over))
1514 pr_debug("%s: association:%p has asoc->rwnd:%u, "
1515 "asoc->rwnd_over:%u!\n", __func__, asoc,
1516 asoc->rwnd, asoc->rwnd_over);
1517
1518 if (asoc->ep->rcvbuf_policy)
1519 rx_count = atomic_read(&asoc->rmem_alloc);
1520 else
1521 rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
1522
1523
1524
1525
1526
1527
1528 if (rx_count >= asoc->base.sk->sk_rcvbuf)
1529 over = 1;
1530
1531 if (asoc->rwnd >= len) {
1532 asoc->rwnd -= len;
1533 if (over) {
1534 asoc->rwnd_press += asoc->rwnd;
1535 asoc->rwnd = 0;
1536 }
1537 } else {
1538 asoc->rwnd_over = len - asoc->rwnd;
1539 asoc->rwnd = 0;
1540 }
1541
1542 pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
1543 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
1544 asoc->rwnd_press);
1545}
1546
1547
1548
1549
/* Build the bind address list for the association based on info from
 * the local endpoint and the remote peer.
 */
int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
				     sctp_scope_t scope, gfp_t gfp)
{
	int flags;

	/* Use scoping rules to determine the subset of addresses from
	 * the endpoint.
	 */
	flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
	if (asoc->peer.ipv4_address)
		flags |= SCTP_ADDR4_PEERSUPP;
	if (asoc->peer.ipv6_address)
		flags |= SCTP_ADDR6_PEERSUPP;

	return sctp_bind_addr_copy(sock_net(asoc->base.sk),
				   &asoc->base.bind_addr,
				   &asoc->ep->base.bind_addr,
				   scope, gfp, flags);
}
1569
1570
1571int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
1572 struct sctp_cookie *cookie,
1573 gfp_t gfp)
1574{
1575 int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
1576 int var_size3 = cookie->raw_addr_list_len;
1577 __u8 *raw = (__u8 *)cookie->peer_init + var_size2;
1578
1579 return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
1580 asoc->ep->base.bind_addr.port, gfp);
1581}
1582
1583
1584int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
1585 const union sctp_addr *laddr)
1586{
1587 int found = 0;
1588
1589 if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
1590 sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
1591 sctp_sk(asoc->base.sk)))
1592 found = 1;
1593
1594 return found;
1595}
1596
1597
/* Set an association id for a given association.  Ids start at 1 (0 is
 * reserved as "no id assigned").  Returns 0 on success or a negative
 * errno from the idr allocator.
 */
int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
{
	bool preload = gfpflags_allow_blocking(gfp);
	int ret;

	/* If the id is already assigned, keep it. */
	if (asoc->assoc_id)
		return 0;

	/* Preload outside the spinlock when the allocation may sleep;
	 * the idr insertion itself must be atomic (GFP_NOWAIT).
	 */
	if (preload)
		idr_preload(gfp);
	spin_lock_bh(&sctp_assocs_id_lock);
	/* 0 is not a valid assoc_id, idr_low is always >= 1 */
	ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, 1, 0, GFP_NOWAIT);
	spin_unlock_bh(&sctp_assocs_id_lock);
	if (preload)
		idr_preload_end();
	if (ret < 0)
		return ret;

	asoc->assoc_id = (sctp_assoc_t)ret;
	return 0;
}
1621
1622
1623static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
1624{
1625 struct sctp_chunk *asconf;
1626 struct sctp_chunk *tmp;
1627
1628 list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
1629 list_del_init(&asconf->list);
1630 sctp_chunk_free(asconf);
1631 }
1632}
1633
1634
1635static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
1636{
1637 struct sctp_chunk *ack;
1638 struct sctp_chunk *tmp;
1639
1640 list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
1641 transmitted_list) {
1642 list_del_init(&ack->transmitted_list);
1643 sctp_chunk_free(ack);
1644 }
1645}
1646
1647
1648void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
1649{
1650 struct sctp_chunk *ack;
1651 struct sctp_chunk *tmp;
1652
1653
1654
1655
1656 list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
1657 transmitted_list) {
1658 if (ack->subh.addip_hdr->serial ==
1659 htonl(asoc->peer.addip_serial))
1660 break;
1661
1662 list_del_init(&ack->transmitted_list);
1663 sctp_chunk_free(ack);
1664 }
1665}
1666
1667
1668struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
1669 const struct sctp_association *asoc,
1670 __be32 serial)
1671{
1672 struct sctp_chunk *ack;
1673
1674
1675
1676
1677 list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
1678 if (sctp_chunk_pending(ack))
1679 continue;
1680 if (ack->subh.addip_hdr->serial == serial) {
1681 sctp_chunk_hold(ack);
1682 return ack;
1683 }
1684 }
1685
1686 return NULL;
1687}
1688
1689void sctp_asconf_queue_teardown(struct sctp_association *asoc)
1690{
1691
1692 sctp_assoc_free_asconf_acks(asoc);
1693
1694
1695 sctp_assoc_free_asconf_queue(asoc);
1696
1697
1698 if (asoc->addip_last_asconf)
1699 sctp_chunk_free(asoc->addip_last_asconf);
1700}
1701