1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
45
46#include <linux/types.h>
47#include <linux/fcntl.h>
48#include <linux/poll.h>
49#include <linux/init.h>
50
51#include <linux/slab.h>
52#include <linux/in.h>
53#include <net/ipv6.h>
54#include <net/sctp/sctp.h>
55#include <net/sctp/sm.h>
56
57
58static void sctp_select_active_and_retran_path(struct sctp_association *asoc);
59static void sctp_assoc_bh_rcv(struct work_struct *work);
60static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
61static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);
62
63
64
65
/* Initialize a new association from provided memory.
 *
 * Takes references on @ep and @sk, seeds timers, queues, windows and
 * AUTH state from the socket/endpoint defaults.  Returns @asoc on
 * success or NULL on failure (references dropped on the error path).
 */
static struct sctp_association *sctp_association_init(struct sctp_association *asoc,
					  const struct sctp_endpoint *ep,
					  const struct sock *sk,
					  sctp_scope_t scope,
					  gfp_t gfp)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	int i;
	sctp_paramhdr_t *p;
	int err;

	/* Retrieve the SCTP per-socket area. */
	sp = sctp_sk((struct sock *)sk);

	/* Hold references to the endpoint and the owning socket for the
	 * lifetime of the association.
	 */
	asoc->ep = (struct sctp_endpoint *)ep;
	asoc->base.sk = (struct sock *)sk;

	sctp_endpoint_hold(asoc->ep);
	sock_hold(asoc->base.sk);

	/* Mark the base structure as an association. */
	asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;

	/* Initial reference, dropped by sctp_association_free(). */
	atomic_set(&asoc->base.refcnt, 1);

	/* Bind-address list starts empty, on the endpoint's port. */
	sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);

	asoc->state = SCTP_STATE_CLOSED;
	asoc->cookie_life = ms_to_ktime(sp->assocparams.sasoc_cookie_life);
	asoc->user_frag = sp->user_frag;

	/* Retransmission limits and RTO bounds come from the socket's
	 * per-association defaults (values in ms, stored in jiffies).
	 */
	asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
	asoc->pf_retrans = net->sctp.pf_retrans;

	asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
	asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
	asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);

	/* Heartbeat interval for new transports of this association. */
	asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);

	/* Per-path maximum retransmission default. */
	asoc->pathmaxrxt = sp->pathmaxrxt;

	/* Path MTU; 0 means discover per transport. */
	asoc->pathmtu = sp->pathmtu;

	/* Delayed-SACK timing and frequency defaults. */
	asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
	asoc->sackfreq = sp->sackfreq;

	/* SPP_* flags controlling heartbeat/PMTUD/SACK-delay behavior. */
	asoc->param_flags = sp->param_flags;

	/* Max burst of packets per flush. */
	asoc->max_burst = sp->max_burst;

	/* Initialize the timeouts used by the state machine; the T1/T2
	 * timers start at the initial RTO.
	 */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;

	/* Shutdown guard is set to 5 * RTO.max here; NOTE(review): RFC 4960
	 * recommends 5 * RTO.Max for T5 -- confirm against current spec.
	 */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
		= 5 * asoc->rto_max;

	asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;

	/* One timer per timeout type, all carrying the association. */
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
		setup_timer(&asoc->timers[i], sctp_timer_events[i],
				(unsigned long)asoc);

	/* INIT stream counts and retry policy from the socket's
	 * sinit defaults.
	 */
	asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
	asoc->c.sinit_num_ostreams = sp->initmsg.sinit_num_ostreams;
	asoc->max_init_attempts	= sp->initmsg.sinit_max_attempts;

	asoc->max_init_timeo =
		 msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);

	/* Our receive window is half the receive buffer, but never below
	 * the protocol minimum window.
	 */
	if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
		asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
	else
		asoc->rwnd = sk->sk_rcvbuf/2;

	asoc->a_rwnd = asoc->rwnd;

	/* Assume the peer has the maximum window until told otherwise. */
	asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;

	atomic_set(&asoc->rmem_alloc, 0);

	init_waitqueue_head(&asoc->wait);

	asoc->c.my_vtag = sctp_generate_tag(ep);
	asoc->c.my_port = ep->base.bind_addr.port;

	asoc->c.initial_tsn = sctp_generate_tsn(ep);

	asoc->next_tsn = asoc->c.initial_tsn;

	/* All TSN tracking points start just behind the initial TSN. */
	asoc->ctsn_ack_point = asoc->next_tsn - 1;
	asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
	asoc->highest_sacked = asoc->ctsn_ack_point;
	asoc->last_cwr_tsn = asoc->ctsn_ack_point;

	/* ADDIP serial numbers start from the initial TSN. */
	asoc->addip_serial = asoc->c.initial_tsn;

	INIT_LIST_HEAD(&asoc->addip_chunk_list);
	INIT_LIST_HEAD(&asoc->asconf_ack_list);

	/* No peer transports yet. */
	INIT_LIST_HEAD(&asoc->peer.transport_addr_list);

	/* Start wanting to SACK; generation 1 for the tsn map. */
	asoc->peer.sack_needed = 1;
	asoc->peer.sack_generation = 1;

	/* If ASCONF without AUTH is allowed by sysctl, assume the peer
	 * is ASCONF capable until negotiated otherwise.
	 */
	if (net->sctp.addip_noauth)
		asoc->peer.asconf_capable = 1;

	/* Inbound chunks are delivered to sctp_assoc_bh_rcv(). */
	sctp_inq_init(&asoc->base.inqueue);
	sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);

	sctp_outq_init(asoc, &asoc->outqueue);

	if (!sctp_ulpq_init(&asoc->ulpq, asoc))
		goto fail_init;

	/* IPv4 is always supported; IPv6 only on an IPv6 socket. */
	asoc->peer.ipv4_address = 1;
	if (asoc->base.sk->sk_family == PF_INET6)
		asoc->peer.ipv6_address = 1;
	INIT_LIST_HEAD(&asoc->asocs);

	asoc->default_stream = sp->default_stream;
	asoc->default_ppid = sp->default_ppid;
	asoc->default_flags = sp->default_flags;
	asoc->default_context = sp->default_context;
	asoc->default_timetolive = sp->default_timetolive;
	asoc->default_rcv_context = sp->default_rcv_context;

	/* AUTH: copy the endpoint's shared keys into this association. */
	INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
	err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp);
	if (err)
		goto fail_init;

	asoc->active_key_id = ep->active_key_id;

	/* Snapshot the endpoint's HMAC and chunk lists for the INIT. */
	if (ep->auth_hmacs_list)
		memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
			ntohs(ep->auth_hmacs_list->param_hdr.length));
	if (ep->auth_chunk_list)
		memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
			ntohs(ep->auth_chunk_list->param_hdr.length));

	/* Build the local RANDOM parameter used for AUTH key derivation. */
	p = (sctp_paramhdr_t *)asoc->c.auth_random;
	p->type = SCTP_PARAM_RANDOM;
	p->length = htons(sizeof(sctp_paramhdr_t) + SCTP_AUTH_RANDOM_LENGTH);
	get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);

	return asoc;

fail_init:
	/* Drop the references taken at the top. */
	sock_put(asoc->base.sk);
	sctp_endpoint_put(asoc->ep);
	return NULL;
}
293
294
295struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
296 const struct sock *sk,
297 sctp_scope_t scope,
298 gfp_t gfp)
299{
300 struct sctp_association *asoc;
301
302 asoc = kzalloc(sizeof(*asoc), gfp);
303 if (!asoc)
304 goto fail;
305
306 if (!sctp_association_init(asoc, ep, sk, scope, gfp))
307 goto fail_init;
308
309 SCTP_DBG_OBJCNT_INC(assoc);
310
311 pr_debug("Created asoc %p\n", asoc);
312
313 return asoc;
314
315fail_init:
316 kfree(asoc);
317fail:
318 return NULL;
319}
320
321
322
323
324void sctp_association_free(struct sctp_association *asoc)
325{
326 struct sock *sk = asoc->base.sk;
327 struct sctp_transport *transport;
328 struct list_head *pos, *temp;
329 int i;
330
331
332
333
334 if (!list_empty(&asoc->asocs)) {
335 list_del(&asoc->asocs);
336
337
338
339
340 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
341 sk->sk_ack_backlog--;
342 }
343
344
345
346
347 asoc->base.dead = true;
348
349
350 sctp_outq_free(&asoc->outqueue);
351
352
353 sctp_ulpq_free(&asoc->ulpq);
354
355
356 sctp_inq_free(&asoc->base.inqueue);
357
358 sctp_tsnmap_free(&asoc->peer.tsn_map);
359
360
361 sctp_ssnmap_free(asoc->ssnmap);
362
363
364 sctp_bind_addr_free(&asoc->base.bind_addr);
365
366
367
368
369
370
371 for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
372 if (del_timer(&asoc->timers[i]))
373 sctp_association_put(asoc);
374 }
375
376
377 kfree(asoc->peer.cookie);
378 kfree(asoc->peer.peer_random);
379 kfree(asoc->peer.peer_chunks);
380 kfree(asoc->peer.peer_hmacs);
381
382
383 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
384 transport = list_entry(pos, struct sctp_transport, transports);
385 list_del_rcu(pos);
386 sctp_transport_free(transport);
387 }
388
389 asoc->peer.transport_count = 0;
390
391 sctp_asconf_queue_teardown(asoc);
392
393
394 if (asoc->asconf_addr_del_pending != NULL)
395 kfree(asoc->asconf_addr_del_pending);
396
397
398 sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
399
400
401 sctp_auth_key_put(asoc->asoc_shared_key);
402
403 sctp_association_put(asoc);
404}
405
406
/* Final cleanup when the last reference is dropped: release the
 * endpoint and socket references, remove the assoc id from the IDR,
 * and free the memory.  Must only run on an association already marked
 * dead by sctp_association_free().
 */
static void sctp_association_destroy(struct sctp_association *asoc)
{
	if (unlikely(!asoc->base.dead)) {
		WARN(1, "Attempt to destroy undead association %p!\n", asoc);
		return;
	}

	sctp_endpoint_put(asoc->ep);
	sock_put(asoc->base.sk);

	/* An id of 0 was never allocated from the IDR. */
	if (asoc->assoc_id != 0) {
		spin_lock_bh(&sctp_assocs_id_lock);
		idr_remove(&sctp_assocs_id, asoc->assoc_id);
		spin_unlock_bh(&sctp_assocs_id_lock);
	}

	/* All receive memory should have been returned by now. */
	WARN_ON(atomic_read(&asoc->rmem_alloc));

	kfree(asoc);
	SCTP_DBG_OBJCNT_DEC(assoc);
}
428
429
430void sctp_assoc_set_primary(struct sctp_association *asoc,
431 struct sctp_transport *transport)
432{
433 int changeover = 0;
434
435
436
437
438 if (asoc->peer.primary_path != NULL &&
439 asoc->peer.primary_path != transport)
440 changeover = 1 ;
441
442 asoc->peer.primary_path = transport;
443
444
445 memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
446 sizeof(union sctp_addr));
447
448
449
450
451 if ((transport->state == SCTP_ACTIVE) ||
452 (transport->state == SCTP_UNKNOWN))
453 asoc->peer.active_path = transport;
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469 if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
470 return;
471
472 if (transport->cacc.changeover_active)
473 transport->cacc.cycling_changeover = changeover;
474
475
476
477
478 transport->cacc.changeover_active = changeover;
479
480
481
482
483 transport->cacc.next_tsn_at_change = asoc->next_tsn;
484}
485
486
/* Remove transport @peer from the association, repointing any path
 * roles (primary/active/retran/last_data_from) that referenced it and
 * migrating its outstanding chunks to the active path.
 *
 * NOTE(review): after list_del_rcu() the code takes the first remaining
 * list entry without checking for an empty list -- this assumes at
 * least one other transport remains; presumably callers guarantee that.
 */
void sctp_assoc_rm_peer(struct sctp_association *asoc,
			struct sctp_transport *peer)
{
	struct list_head	*pos;
	struct sctp_transport	*transport;

	pr_debug("%s: association:%p addr:%pISpc\n",
		 __func__, asoc, &peer->ipaddr.sa);

	/* If we are removing the current retransmit path, pick a new one
	 * while @peer is still on the list.
	 */
	if (asoc->peer.retran_path == peer)
		sctp_assoc_update_retran_path(asoc);

	/* Remove this peer from the transport list (RCU-safe). */
	list_del_rcu(&peer->transports);

	/* Get the first transport of asoc. */
	pos = asoc->peer.transport_addr_list.next;
	transport = list_entry(pos, struct sctp_transport, transports);

	/* Repoint any path role that referenced the removed peer. */
	if (asoc->peer.primary_path == peer)
		sctp_assoc_set_primary(asoc, transport);
	if (asoc->peer.active_path == peer)
		asoc->peer.active_path = transport;
	if (asoc->peer.retran_path == peer)
		asoc->peer.retran_path = transport;
	if (asoc->peer.last_data_from == peer)
		asoc->peer.last_data_from = transport;

	/* INIT retransmissions will simply pick a path afresh. */
	if (asoc->init_last_sent_to == peer)
		asoc->init_last_sent_to = NULL;

	/* Likewise for SHUTDOWN retransmissions. */
	if (asoc->shutdown_last_sent_to == peer)
		asoc->shutdown_last_sent_to = NULL;

	/* An outstanding ASCONF loses its transport binding. */
	if (asoc->addip_last_asconf &&
	    asoc->addip_last_asconf->transport == peer)
		asoc->addip_last_asconf->transport = NULL;

	/* Migrate chunks still attributed to the removed peer onto the
	 * active path so they are retransmitted there.
	 */
	if (!list_empty(&peer->transmitted)) {
		struct sctp_transport *active = asoc->peer.active_path;
		struct sctp_chunk *ch;

		/* Detach the chunks from the dying transport. */
		list_for_each_entry(ch, &peer->transmitted,
				    transmitted_list) {
			ch->transport = NULL;
			ch->rtt_in_progress = 0;
		}

		list_splice_tail_init(&peer->transmitted,
					&active->transmitted);

		/* Ensure a T3-rtx timer covers the migrated chunks; a
		 * newly armed timer takes a reference on the transport.
		 */
		if (!timer_pending(&active->T3_rtx_timer))
			if (!mod_timer(&active->T3_rtx_timer,
					jiffies + active->rto))
				sctp_transport_hold(active);
	}

	asoc->peer.transport_count--;

	sctp_transport_free(peer);
}
573
574
/* Add a peer transport address to the association, inheriting the
 * association's path defaults (heartbeat, path-max-rxt, SACK delay,
 * PMTU policy) and initializing its congestion state per RFC 4960.
 * Returns the new (or already-existing) transport, or NULL on
 * allocation failure.
 */
struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
					   const union sctp_addr *addr,
					   const gfp_t gfp,
					   const int peer_state)
{
	struct net *net = sock_net(asoc->base.sk);
	struct sctp_transport *peer;
	struct sctp_sock *sp;
	unsigned short port;

	sp = sctp_sk(asoc->base.sk);

	/* AF_INET and AF_INET6 share common port field. */
	port = ntohs(addr->v4.sin_port);

	pr_debug("%s: association:%p addr:%pISpc state:%d\n", __func__,
		 asoc, &addr->sa, peer_state);

	/* The first transport added sets the association's peer port. */
	if (0 == asoc->peer.port)
		asoc->peer.port = port;

	/* If this address already exists, only possibly promote it from
	 * UNKNOWN to ACTIVE; do not add a duplicate transport.
	 */
	peer = sctp_assoc_lookup_paddr(asoc, addr);
	if (peer) {
		if (peer->state == SCTP_UNKNOWN) {
			peer->state = SCTP_ACTIVE;
		}
		return peer;
	}

	peer = sctp_transport_new(net, addr, gfp);
	if (!peer)
		return NULL;

	sctp_transport_set_owner(peer, asoc);

	/* Inherit the association's heartbeat interval. */
	peer->hbinterval = asoc->hbinterval;

	/* Set the path max_retrans. */
	peer->pathmaxrxt = asoc->pathmaxrxt;

	/* And the partial-failure retrans threshold. */
	peer->pf_retrans = asoc->pf_retrans;

	/* Inherit SACK delay and frequency. */
	peer->sackdelay = asoc->sackdelay;
	peer->sackfreq = asoc->sackfreq;

	/* Inherit SPP_* flags controlling heartbeat/PMTUD/SACK-delay. */
	peer->param_flags = asoc->param_flags;

	/* Compute a route (which also discovers the path MTU). */
	sctp_transport_route(peer, NULL, sp);

	/* With PMTU discovery disabled, use the association's fixed MTU
	 * (or the protocol default when none is set).
	 */
	if (peer->param_flags & SPP_PMTUD_DISABLE) {
		if (asoc->pathmtu)
			peer->pathmtu = asoc->pathmtu;
		else
			peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
	}

	/* The association MTU is the minimum over all paths. */
	if (asoc->pathmtu)
		asoc->pathmtu = min_t(int, peer->pathmtu, asoc->pathmtu);
	else
		asoc->pathmtu = peer->pathmtu;

	pr_debug("%s: association:%p PMTU set to %d\n", __func__, asoc,
		 asoc->pathmtu);

	peer->pmtu_pending = 0;

	asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);

	/* The asoc->peer.port might not be meaningful yet, but
	 * initialize the packet structure anyway.
	 */
	sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
			 asoc->peer.port);

	/* Initial cwnd per RFC 4960 7.2.1:
	 * min(4*MTU, max(2*MTU, 4380 bytes)).
	 */
	peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));

	/* ssthresh starts at the peer's (assumed maximal) window. */
	peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;

	peer->partial_bytes_acked = 0;
	peer->flight_size = 0;
	peer->burst_limited = 0;

	/* RTO starts at the association's initial RTO, clamped. */
	peer->rto = asoc->rto_initial;
	sctp_max_rto(asoc, peer);

	/* Set the transport's RTO.initial value */
	peer->state = peer_state;

	/* Attach the remote transport to our asoc. */
	list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
	asoc->peer.transport_count++;

	/* The very first path becomes primary and retransmit path. */
	if (!asoc->peer.primary_path) {
		sctp_assoc_set_primary(asoc, peer);
		asoc->peer.retran_path = peer;
	}

	/* When active and retransmit path coincide, prefer the new
	 * (confirmed) path for retransmissions.
	 */
	if (asoc->peer.active_path == asoc->peer.retran_path &&
	    peer->state != SCTP_UNCONFIRMED) {
		asoc->peer.retran_path = peer;
	}

	return peer;
}
717
718
719void sctp_assoc_del_peer(struct sctp_association *asoc,
720 const union sctp_addr *addr)
721{
722 struct list_head *pos;
723 struct list_head *temp;
724 struct sctp_transport *transport;
725
726 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
727 transport = list_entry(pos, struct sctp_transport, transports);
728 if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
729
730 sctp_assoc_rm_peer(asoc, transport);
731 break;
732 }
733 }
734}
735
736
737struct sctp_transport *sctp_assoc_lookup_paddr(
738 const struct sctp_association *asoc,
739 const union sctp_addr *address)
740{
741 struct sctp_transport *t;
742
743
744
745 list_for_each_entry(t, &asoc->peer.transport_addr_list,
746 transports) {
747 if (sctp_cmp_addr_exact(address, &t->ipaddr))
748 return t;
749 }
750
751 return NULL;
752}
753
754
755void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
756 struct sctp_transport *primary)
757{
758 struct sctp_transport *temp;
759 struct sctp_transport *t;
760
761 list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
762 transports) {
763
764 if (t != primary)
765 sctp_assoc_rm_peer(asoc, t);
766 }
767}
768
769
770
771
772
/* Engage in transport control operations: apply an UP/DOWN/PF command
 * to @transport, optionally notify the ULP of the address state change,
 * and re-elect the association's active and retransmit paths.
 */
void sctp_assoc_control_transport(struct sctp_association *asoc,
				  struct sctp_transport *transport,
				  sctp_transport_cmd_t command,
				  sctp_sn_error_t error)
{
	struct sctp_ulpevent *event;
	struct sockaddr_storage addr;
	int spc_state = 0;
	bool ulp_notify = true;

	/* Record the transition. */
	switch (command) {
	case SCTP_TRANSPORT_UP:
		/* A heartbeat success on an unconfirmed address confirms
		 * it; any other UP just makes the address available.
		 */
		if (SCTP_UNCONFIRMED == transport->state &&
		    SCTP_HEARTBEAT_SUCCESS == error)
			spc_state = SCTP_ADDR_CONFIRMED;
		else
			spc_state = SCTP_ADDR_AVAILABLE;

		/* Recovering from partial failure: reset cwnd to one
		 * MTU and skip the ULP notification, since the ULP was
		 * never told the path went down.
		 */
		if (transport->state == SCTP_PF) {
			ulp_notify = false;
			transport->cwnd = asoc->pathmtu;
		}
		transport->state = SCTP_ACTIVE;
		break;

	case SCTP_TRANSPORT_DOWN:
		/* An unconfirmed address stays unconfirmed (and silently
		 * drops its cached route); confirmed ones go inactive.
		 */
		if (transport->state != SCTP_UNCONFIRMED)
			transport->state = SCTP_INACTIVE;
		else {
			dst_release(transport->dst);
			transport->dst = NULL;
			ulp_notify = false;
		}

		spc_state = SCTP_ADDR_UNREACHABLE;
		break;

	case SCTP_TRANSPORT_PF:
		/* Partial failure is internal; the ULP is not told. */
		transport->state = SCTP_PF;
		ulp_notify = false;
		break;

	default:
		return;
	}

	/* Generate and enqueue a SCTP_PEER_ADDR_CHANGE notification for
	 * the ULP when warranted.
	 */
	if (ulp_notify) {
		memset(&addr, 0, sizeof(struct sockaddr_storage));
		memcpy(&addr, &transport->ipaddr,
		       transport->af_specific->sockaddr_len);

		event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
					0, spc_state, error, GFP_ATOMIC);
		if (event)
			sctp_ulpq_tail_event(&asoc->ulpq, event);
	}

	/* Select new active and retran paths. */
	sctp_select_active_and_retran_path(asoc);
}
848
849
/* Take a reference on the association. */
void sctp_association_hold(struct sctp_association *asoc)
{
	atomic_inc(&asoc->base.refcnt);
}
854
855
856
857
/* Drop a reference; the last put destroys the association. */
void sctp_association_put(struct sctp_association *asoc)
{
	if (atomic_dec_and_test(&asoc->base.refcnt))
		sctp_association_destroy(asoc);
}
863
864
865
866
867__u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
868{
869
870
871
872
873
874 __u32 retval = asoc->next_tsn;
875 asoc->next_tsn++;
876 asoc->unack_data++;
877
878 return retval;
879}
880
881
882
883
884int sctp_cmp_addr_exact(const union sctp_addr *ss1,
885 const union sctp_addr *ss2)
886{
887 struct sctp_af *af;
888
889 af = sctp_get_af_specific(ss1->sa.sa_family);
890 if (unlikely(!af))
891 return 0;
892
893 return af->cmp_addr(ss1, ss2);
894}
895
896
897
898
899
900struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
901{
902 if (!asoc->need_ecne)
903 return NULL;
904
905
906
907
908 return sctp_make_ecne(asoc, asoc->last_ecne_tsn);
909}
910
911
912
913
/* Find which transport a given TSN was sent on by scanning transmitted
 * chunk lists.  The active path is searched first as the most likely
 * hit, then all other transports.  Returns NULL when no transport has
 * a chunk with that TSN.
 */
struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
					     __u32 tsn)
{
	struct sctp_transport *active;
	struct sctp_transport *match;
	struct sctp_transport *transport;
	struct sctp_chunk *chunk;
	__be32 key = htonl(tsn);

	match = NULL;

	/* Check the active path first. */
	active = asoc->peer.active_path;

	list_for_each_entry(chunk, &active->transmitted,
			    transmitted_list) {

		if (key == chunk->subh.data_hdr->tsn) {
			match = active;
			goto out;
		}
	}

	/* Fall back to scanning every other transport's transmitted list. */
	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
			    transports) {

		if (transport == active)
			continue;
		list_for_each_entry(chunk, &transport->transmitted,
				    transmitted_list) {
			if (key == chunk->subh.data_hdr->tsn) {
				match = transport;
				goto out;
			}
		}
	}
out:
	return match;
}
968
969
/* Check whether this association matches the given local/peer address
 * pair within @net.  Returns the peer transport on a full match
 * (ports, netns, peer transport exists, and local address bound),
 * otherwise NULL.
 */
struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
					   struct net *net,
					   const union sctp_addr *laddr,
					   const union sctp_addr *paddr)
{
	struct sctp_transport *transport;

	if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
	    (htons(asoc->peer.port) == paddr->v4.sin_port) &&
	    net_eq(sock_net(asoc->base.sk), net)) {
		transport = sctp_assoc_lookup_paddr(asoc, paddr);
		if (!transport)
			goto out;

		/* laddr bound to this association: full match, return
		 * the transport found above.
		 */
		if (sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
					 sctp_sk(asoc->base.sk)))
			goto out;
	}
	/* Any fall-through means no match. */
	transport = NULL;

out:
	return transport;
}
993
994
/* Bottom-half handler for the association's inqueue: pop each chunk,
 * run it through the SCTP state machine, and stop early if the state
 * machine killed the association.
 */
static void sctp_assoc_bh_rcv(struct work_struct *work)
{
	struct sctp_association *asoc =
		container_of(work, struct sctp_association,
			     base.inqueue.immediate);
	struct net *net = sock_net(asoc->base.sk);
	struct sctp_endpoint *ep;
	struct sctp_chunk *chunk;
	struct sctp_inq *inqueue;
	int state;
	sctp_subtype_t subtype;
	int error = 0;

	/* The association should be held so we should be safe. */
	ep = asoc->ep;

	inqueue = &asoc->base.inqueue;
	/* Extra hold so the association survives the whole loop even if
	 * the state machine drops references.
	 */
	sctp_association_hold(asoc);
	while (NULL != (chunk = sctp_inq_pop(inqueue))) {
		state = asoc->state;
		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);

		/* Chunk types that the AUTH negotiation requires to be
		 * authenticated are silently dropped when they arrive
		 * unauthenticated.
		 */
		if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
			continue;

		/* Remember where the last DATA chunk came from; count
		 * control chunks (and SACKs specifically) for stats.
		 */
		if (sctp_chunk_is_data(chunk))
			asoc->peer.last_data_from = chunk->transport;
		else {
			SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
			asoc->stats.ictrlchunks++;
			if (chunk->chunk_hdr->type == SCTP_CID_SACK)
				asoc->stats.isacks++;
		}

		if (chunk->transport)
			chunk->transport->last_time_heard = ktime_get();

		/* Run through the state machine. */
		error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
				   state, ep, asoc, chunk, GFP_ATOMIC);

		/* If the state machine freed the association (ABORT
		 * etc.), stop processing further chunks.
		 */
		if (asoc->base.dead)
			break;

		/* On error, mark the chunk so partial delivery discards it. */
		if (error && chunk)
			chunk->pdiscard = 1;
	}
	sctp_association_put(asoc);
}
1058
1059
/* Migrate an association from its current socket to @newsk (used for
 * TCP-style accept/peeloff).  Swaps the endpoint and socket references
 * and re-links the association under the new endpoint.
 */
void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
{
	struct sctp_sock *newsp = sctp_sk(newsk);
	struct sock *oldsk = assoc->base.sk;

	/* Delete the association from the old endpoint's list of
	 * associations.
	 */
	list_del_init(&assoc->asocs);

	/* Decrement the backlog value for a TCP-style socket. */
	if (sctp_style(oldsk, TCP))
		oldsk->sk_ack_backlog--;

	/* Release references to the old endpoint and the sock. */
	sctp_endpoint_put(assoc->ep);
	sock_put(assoc->base.sk);

	/* Get a reference to the new endpoint. */
	assoc->ep = newsp->ep;
	sctp_endpoint_hold(assoc->ep);

	/* Get a reference to the new sock. */
	assoc->base.sk = newsk;
	sock_hold(assoc->base.sk);

	/* Add the association to the new endpoint's list of associations. */
	sctp_endpoint_add_asoc(newsp->ep, assoc);
}
1089
1090
/* Update an old association (@asoc) with state from a freshly
 * negotiated one (@new), e.g. after a restart or duplicate-INIT
 * collision.  Ownership of several peer parameter buffers and possibly
 * the ssnmap is transferred from @new to @asoc (their pointers in @new
 * are NULLed so @new's teardown will not free them).
 */
void sctp_assoc_update(struct sctp_association *asoc,
		       struct sctp_association *new)
{
	struct sctp_transport *trans;
	struct list_head *pos, *temp;

	/* Copy in new parameters of peer. */
	asoc->c = new->c;
	asoc->peer.rwnd = new->peer.rwnd;
	asoc->peer.sack_needed = new->peer.sack_needed;
	asoc->peer.auth_capable = new->peer.auth_capable;
	asoc->peer.i = new->peer.i;
	sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
			 asoc->peer.i.initial_tsn, GFP_ATOMIC);

	/* Remove any peer addresses not present in the new association;
	 * reset surviving transports when already established.
	 */
	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		trans = list_entry(pos, struct sctp_transport, transports);
		if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
			sctp_assoc_rm_peer(asoc, trans);
			continue;
		}

		if (asoc->state >= SCTP_STATE_ESTABLISHED)
			sctp_transport_reset(trans);
	}

	/* If the case is A (association restart), use initial_tsn as
	 * next_tsn; otherwise (collision case B/C/D) the endpoints keep
	 * their own TSN space.
	 */
	if (asoc->state >= SCTP_STATE_ESTABLISHED) {
		asoc->next_tsn = new->next_tsn;
		asoc->ctsn_ack_point = new->ctsn_ack_point;
		asoc->adv_peer_ack_point = new->adv_peer_ack_point;

		/* Restart: reset stream sequence numbers and flush any
		 * pending reassembly/ordering state.
		 */
		sctp_ssnmap_clear(asoc->ssnmap);

		/* Flush the ULP reassembly and ordered queue.  Any data
		 * there was queued with the old TSN/SSN space.
		 */
		sctp_ulpq_flush(&asoc->ulpq);

		/* A restarted association starts with a clean error
		 * counter.
		 */
		asoc->overall_error_count = 0;

	} else {
		/* Add any peer addresses from the new association. */
		list_for_each_entry(trans, &new->peer.transport_addr_list,
				transports) {
			if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))
				sctp_assoc_add_peer(asoc, &trans->ipaddr,
						    GFP_ATOMIC, trans->state);
		}

		asoc->ctsn_ack_point = asoc->next_tsn - 1;
		asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
		if (!asoc->ssnmap) {
			/* Steal the new association's ssnmap. */
			asoc->ssnmap = new->ssnmap;
			new->ssnmap = NULL;
		}

		if (!asoc->assoc_id) {
			/* get a new association id since we don't have one
			 * yet.
			 */
			sctp_assoc_set_id(asoc, GFP_ATOMIC);
		}
	}

	/* Transfer ownership of the peer's AUTH parameter buffers and
	 * re-derive the association shared key from them.
	 */
	kfree(asoc->peer.peer_random);
	asoc->peer.peer_random = new->peer.peer_random;
	new->peer.peer_random = NULL;

	kfree(asoc->peer.peer_chunks);
	asoc->peer.peer_chunks = new->peer.peer_chunks;
	new->peer.peer_chunks = NULL;

	kfree(asoc->peer.peer_hmacs);
	asoc->peer.peer_hmacs = new->peer.peer_hmacs;
	new->peer.peer_hmacs = NULL;

	sctp_auth_key_put(asoc->asoc_shared_key);
	sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
}
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
/* Priority of a transport state when electing paths: higher is more
 * preferred.  NOTE(review): states not listed here (e.g. UNCONFIRMED)
 * have no designated slot -- callers are expected to filter them out
 * before indexing; confirm no other state value can reach the map.
 */
static const u8 sctp_trans_state_to_prio_map[] = {
	[SCTP_ACTIVE]	= 3,	/* best case */
	[SCTP_UNKNOWN]	= 2,
	[SCTP_PF]	= 1,
	[SCTP_INACTIVE] = 0,	/* worst case */
};
1223
/* Map a transport's state to its election priority (see the map above;
 * the state must be one the map covers).
 */
static u8 sctp_trans_score(const struct sctp_transport *trans)
{
	return sctp_trans_state_to_prio_map[trans->state];
}
1228
1229static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
1230 struct sctp_transport *trans2)
1231{
1232 if (trans1->error_count > trans2->error_count) {
1233 return trans2;
1234 } else if (trans1->error_count == trans2->error_count &&
1235 ktime_after(trans2->last_time_heard,
1236 trans1->last_time_heard)) {
1237 return trans2;
1238 } else {
1239 return trans1;
1240 }
1241}
1242
1243static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
1244 struct sctp_transport *best)
1245{
1246 u8 score_curr, score_best;
1247
1248 if (best == NULL || curr == best)
1249 return curr;
1250
1251 score_curr = sctp_trans_score(curr);
1252 score_best = sctp_trans_score(best);
1253
1254
1255
1256
1257
1258 if (score_curr > score_best)
1259 return curr;
1260 else if (score_curr == score_best)
1261 return sctp_trans_elect_tie(curr, best);
1262 else
1263 return best;
1264}
1265
/* Rotate the retransmission path: walk the transport list circularly
 * starting after the current retran path, electing the best candidate,
 * stopping early on an ACTIVE one.  Unconfirmed transports are skipped.
 */
void sctp_assoc_update_retran_path(struct sctp_association *asoc)
{
	struct sctp_transport *trans = asoc->peer.retran_path;
	struct sctp_transport *trans_next = NULL;

	/* With a single transport there is nothing to rotate to. */
	if (asoc->peer.transport_count == 1)
		return;

	/* If active and retran paths coincide and the active path is
	 * healthy, leave things as they are.
	 */
	if (asoc->peer.active_path == asoc->peer.retran_path &&
	    asoc->peer.active_path->state == SCTP_ACTIVE)
		return;

	/* Circular walk; the list head itself is skipped, and the walk
	 * terminates either at an ACTIVE candidate or after one full
	 * cycle back to the current retran path.
	 */
	for (trans = list_next_entry(trans, transports); 1;
	     trans = list_next_entry(trans, transports)) {
		/* Manually skip the head element. */
		if (&trans->transports == &asoc->peer.transport_addr_list)
			continue;
		if (trans->state == SCTP_UNCONFIRMED)
			continue;
		trans_next = sctp_trans_elect_best(trans, trans_next);
		/* Active is good enough for immediate return. */
		if (trans_next->state == SCTP_ACTIVE)
			break;
		/* We've reached the end, time to update path. */
		if (trans == asoc->peer.retran_path)
			break;
	}

	asoc->peer.retran_path = trans_next;

	pr_debug("%s: association:%p updated new path to addr:%pISpc\n",
		 __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
}
1303
/* Re-elect the association's active and retransmission paths after a
 * transport state change: prefer the two most recently heard usable
 * transports, biased towards the user-set primary, falling back to a
 * partially-failed (PF) path when nothing better exists.
 */
static void sctp_select_active_and_retran_path(struct sctp_association *asoc)
{
	struct sctp_transport *trans, *trans_pri = NULL, *trans_sec = NULL;
	struct sctp_transport *trans_pf = NULL;

	/* Look for the two most recently used active transports. */
	list_for_each_entry(trans, &asoc->peer.transport_addr_list,
			    transports) {
		/* Skip uninteresting transports. */
		if (trans->state == SCTP_INACTIVE ||
		    trans->state == SCTP_UNCONFIRMED)
			continue;

		/* Keep track of the best PF transport separately, as a
		 * fallback in case no usable path remains.
		 */
		if (trans->state == SCTP_PF) {
			trans_pf = sctp_trans_elect_best(trans, trans_pf);
			continue;
		}

		/* Rank by recency: trans_pri is most recently heard,
		 * trans_sec the runner-up.
		 */
		if (trans_pri == NULL ||
		    ktime_after(trans->last_time_heard,
				trans_pri->last_time_heard)) {
			trans_sec = trans_pri;
			trans_pri = trans;
		} else if (trans_sec == NULL ||
			   ktime_after(trans->last_time_heard,
				       trans_sec->last_time_heard)) {
			trans_sec = trans;
		}
	}

	/* A usable user-configured primary overrides the recency pick;
	 * the previous pick becomes the secondary.
	 */
	if ((asoc->peer.primary_path->state == SCTP_ACTIVE ||
	     asoc->peer.primary_path->state == SCTP_UNKNOWN) &&
	    asoc->peer.primary_path != trans_pri) {
		trans_sec = trans_pri;
		trans_pri = asoc->peer.primary_path;
	}

	/* With only one usable path, retransmissions share it. */
	if (trans_sec == NULL)
		trans_sec = trans_pri;

	/* If we failed to find a usable transport, just camp on the
	 * active or pick a PF iff it's the better choice.
	 */
	if (trans_pri == NULL) {
		trans_pri = sctp_trans_elect_best(asoc->peer.active_path, trans_pf);
		trans_sec = trans_pri;
	}

	/* Set the active and retran transports. */
	asoc->peer.active_path = trans_pri;
	asoc->peer.retran_path = trans_sec;
}
1370
1371struct sctp_transport *
1372sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
1373 struct sctp_transport *last_sent_to)
1374{
1375
1376
1377
1378
1379 if (last_sent_to == NULL) {
1380 return asoc->peer.active_path;
1381 } else {
1382 if (last_sent_to == asoc->peer.retran_path)
1383 sctp_assoc_update_retran_path(asoc);
1384
1385 return asoc->peer.retran_path;
1386 }
1387}
1388
1389
1390
1391
1392void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
1393{
1394 struct sctp_transport *t;
1395 __u32 pmtu = 0;
1396
1397 if (!asoc)
1398 return;
1399
1400
1401 list_for_each_entry(t, &asoc->peer.transport_addr_list,
1402 transports) {
1403 if (t->pmtu_pending && t->dst) {
1404 sctp_transport_update_pmtu(sk, t, dst_mtu(t->dst));
1405 t->pmtu_pending = 0;
1406 }
1407 if (!pmtu || (t->pathmtu < pmtu))
1408 pmtu = t->pathmtu;
1409 }
1410
1411 if (pmtu) {
1412 asoc->pathmtu = pmtu;
1413 asoc->frag_point = sctp_frag_point(asoc, pmtu);
1414 }
1415
1416 pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc,
1417 asoc->pathmtu, asoc->frag_point);
1418}
1419
1420
/* Should we send a window-update SACK?  Only in states where data can
 * still arrive, and only when the window has grown enough (by at least
 * the PMTU, or a sysctl-controlled fraction of the receive buffer).
 */
static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
{
	struct net *net = sock_net(asoc->base.sk);
	switch (asoc->state) {
	case SCTP_STATE_ESTABLISHED:
	case SCTP_STATE_SHUTDOWN_PENDING:
	case SCTP_STATE_SHUTDOWN_RECEIVED:
	case SCTP_STATE_SHUTDOWN_SENT:
		if ((asoc->rwnd > asoc->a_rwnd) &&
		    ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
			   (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
			   asoc->pathmtu)))
			return true;
		break;
	default:
		break;
	}
	return false;
}
1440
1441
/* Increase asoc's rwnd by len, first paying off any rwnd_over debt,
 * then decaying memory-pressure reduction (rwnd_press), and finally
 * sending a window-update SACK if the advertised window lags enough.
 */
void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
{
	struct sctp_chunk *sack;
	struct timer_list *timer;

	/* Repay overdrawn window first; only the remainder grows rwnd. */
	if (asoc->rwnd_over) {
		if (asoc->rwnd_over >= len) {
			asoc->rwnd_over -= len;
		} else {
			asoc->rwnd += (len - asoc->rwnd_over);
			asoc->rwnd_over = 0;
		}
	} else {
		asoc->rwnd += len;
	}

	/* If we had window pressure, start recovering it once our rwnd
	 * exceeds the pressure threshold, but recover at most one MTU
	 * at a time.
	 */
	if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
		int change = min(asoc->pathmtu, asoc->rwnd_press);
		asoc->rwnd += change;
		asoc->rwnd_press -= change;
	}

	pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
		 asoc->a_rwnd);

	/* Send a window update SACK if the peer's view of our window is
	 * stale enough (see sctp_peer_needs_update()), and cancel the
	 * delayed-SACK timer since this SACK covers it.
	 */
	if (sctp_peer_needs_update(asoc)) {
		asoc->a_rwnd = asoc->rwnd;

		pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
			 "a_rwnd:%u\n", __func__, asoc, asoc->rwnd,
			 asoc->a_rwnd);

		sack = sctp_make_sack(asoc);
		if (!sack)
			return;

		asoc->peer.sack_needed = 0;

		sctp_outq_tail(&asoc->outqueue, sack);

		/* Stop the SACK timer.  A pending timer held an assoc
		 * reference which must be dropped on cancellation.
		 */
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
		if (del_timer(timer))
			sctp_association_put(asoc);
	}
}
1499
1500
/* Decrease asoc's rwnd by len; under receive-buffer exhaustion the
 * remaining window is moved into rwnd_press (pressure), and any
 * shortfall is tracked in rwnd_over.
 */
void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
{
	int rx_count;
	int over = 0;

	if (unlikely(!asoc->rwnd || asoc->rwnd_over))
		pr_debug("%s: association:%p has asoc->rwnd:%u, "
			 "asoc->rwnd_over:%u!\n", __func__, asoc,
			 asoc->rwnd, asoc->rwnd_over);

	/* Measure receive memory per-association or per-socket,
	 * depending on the endpoint's rcvbuf policy.
	 */
	if (asoc->ep->rcvbuf_policy)
		rx_count = atomic_read(&asoc->rmem_alloc);
	else
		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);

	/* If we've reached or overflowed our receive buffer, announce a
	 * 0 rwnd if rwnd would still be positive.  Store the
	 * the potential pressure overflow so that the window can be restored
	 * back to original value.
	 */
	if (rx_count >= asoc->base.sk->sk_rcvbuf)
		over = 1;

	if (asoc->rwnd >= len) {
		asoc->rwnd -= len;
		if (over) {
			/* Park the remaining window as pressure. */
			asoc->rwnd_press += asoc->rwnd;
			asoc->rwnd = 0;
		}
	} else {
		/* Overdrawn: record the shortfall. */
		asoc->rwnd_over = len - asoc->rwnd;
		asoc->rwnd = 0;
	}

	pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
		 asoc->rwnd_press);
}
1539
1540
1541
1542
1543int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
1544 sctp_scope_t scope, gfp_t gfp)
1545{
1546 int flags;
1547
1548
1549
1550
1551 flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
1552 if (asoc->peer.ipv4_address)
1553 flags |= SCTP_ADDR4_PEERSUPP;
1554 if (asoc->peer.ipv6_address)
1555 flags |= SCTP_ADDR6_PEERSUPP;
1556
1557 return sctp_bind_addr_copy(sock_net(asoc->base.sk),
1558 &asoc->base.bind_addr,
1559 &asoc->ep->base.bind_addr,
1560 scope, gfp, flags);
1561}
1562
1563
1564int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
1565 struct sctp_cookie *cookie,
1566 gfp_t gfp)
1567{
1568 int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
1569 int var_size3 = cookie->raw_addr_list_len;
1570 __u8 *raw = (__u8 *)cookie->peer_init + var_size2;
1571
1572 return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
1573 asoc->ep->base.bind_addr.port, gfp);
1574}
1575
1576
1577int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
1578 const union sctp_addr *laddr)
1579{
1580 int found = 0;
1581
1582 if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
1583 sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
1584 sctp_sk(asoc->base.sk)))
1585 found = 1;
1586
1587 return found;
1588}
1589
1590
/* Allocate an association id (for the assoc_id socket API) from the
 * global IDR.  A sleeping gfp preloads the IDR so the allocation under
 * the BH spinlock can use GFP_NOWAIT.  Returns 0 or a negative errno.
 */
int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
{
	bool preload = !!(gfp & __GFP_WAIT);
	int ret;

	/* Already have an id; nothing to do. */
	if (asoc->assoc_id)
		return 0;

	if (preload)
		idr_preload(gfp);
	spin_lock_bh(&sctp_assocs_id_lock);
	/* 0 is reserved as "no id", so allocate starting at 1. */
	ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, 1, 0, GFP_NOWAIT);
	spin_unlock_bh(&sctp_assocs_id_lock);
	if (preload)
		idr_preload_end();
	if (ret < 0)
		return ret;

	asoc->assoc_id = (sctp_assoc_t)ret;
	return 0;
}
1614
1615
1616static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
1617{
1618 struct sctp_chunk *asconf;
1619 struct sctp_chunk *tmp;
1620
1621 list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
1622 list_del_init(&asconf->list);
1623 sctp_chunk_free(asconf);
1624 }
1625}
1626
1627
1628static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
1629{
1630 struct sctp_chunk *ack;
1631 struct sctp_chunk *tmp;
1632
1633 list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
1634 transmitted_list) {
1635 list_del_init(&ack->transmitted_list);
1636 sctp_chunk_free(ack);
1637 }
1638}
1639
1640
/* Clean up the ASCONF-ACK cache: drop cached ACKs older than the
 * peer's current ASCONF serial; ACKs at or beyond that serial are kept
 * so they can be retransmitted if the peer retransmits the ASCONF.
 */
void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
{
	struct sctp_chunk *ack;
	struct sctp_chunk *tmp;

	/* The list is sorted by serial (ascending), so we can stop at
	 * the first entry that is still current.
	 */
	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
				 transmitted_list) {
		if (ack->subh.addip_hdr->serial ==
				htonl(asoc->peer.addip_serial))
			break;

		list_del_init(&ack->transmitted_list);
		sctp_chunk_free(ack);
	}
}
1659
1660
1661struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
1662 const struct sctp_association *asoc,
1663 __be32 serial)
1664{
1665 struct sctp_chunk *ack;
1666
1667
1668
1669
1670 list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
1671 if (ack->subh.addip_hdr->serial == serial) {
1672 sctp_chunk_hold(ack);
1673 return ack;
1674 }
1675 }
1676
1677 return NULL;
1678}
1679
1680void sctp_asconf_queue_teardown(struct sctp_association *asoc)
1681{
1682
1683 sctp_assoc_free_asconf_acks(asoc);
1684
1685
1686 sctp_assoc_free_asconf_queue(asoc);
1687
1688
1689 if (asoc->addip_last_asconf)
1690 sctp_chunk_free(asoc->addip_last_asconf);
1691}
1692