1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
46
47#include <linux/types.h>
48#include <linux/fcntl.h>
49#include <linux/poll.h>
50#include <linux/init.h>
51
52#include <linux/slab.h>
53#include <linux/in.h>
54#include <net/ipv6.h>
55#include <net/sctp/sctp.h>
56#include <net/sctp/sm.h>
57
58
59static void sctp_assoc_bh_rcv(struct work_struct *work);
60static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
61static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);
62
63
64
65
/* Initialize a new association from provided memory.
 *
 * Takes an extra reference on both @ep and @sk (dropped on the failure
 * path or later in sctp_association_destroy()).  Returns @asoc on
 * success, NULL on failure.
 */
static struct sctp_association *sctp_association_init(struct sctp_association *asoc,
					  const struct sctp_endpoint *ep,
					  const struct sock *sk,
					  sctp_scope_t scope,
					  gfp_t gfp)
{
	struct net *net = sock_net(sk);
	struct sctp_sock *sp;
	int i;
	sctp_paramhdr_t *p;
	int err;

	/* Retrieve the SCTP per socket area.  */
	sp = sctp_sk((struct sock *)sk);

	/* Discarding const is appropriate here.  */
	asoc->ep = (struct sctp_endpoint *)ep;
	asoc->base.sk = (struct sock *)sk;

	sctp_endpoint_hold(asoc->ep);
	sock_hold(asoc->base.sk);

	/* Initialize the common base substructure.  */
	asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;

	/* Initialize the object handling fields.  */
	atomic_set(&asoc->base.refcnt, 1);
	asoc->base.dead = false;

	/* Initialize the bind addr area.  */
	sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);

	asoc->state = SCTP_STATE_CLOSED;
	asoc->cookie_life = ms_to_ktime(sp->assocparams.sasoc_cookie_life);
	asoc->frag_point = 0;
	asoc->user_frag = sp->user_frag;

	/* Set the association max_retrans and RTO values from the
	 * socket values.
	 */
	asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
	asoc->pf_retrans = net->sctp.pf_retrans;

	asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
	asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
	asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);

	asoc->overall_error_count = 0;

	/* Initialize the association's heartbeat interval based on the
	 * sock configured value.
	 */
	asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);

	/* Initialize path max retrans value.  */
	asoc->pathmaxrxt = sp->pathmaxrxt;

	/* Initialize default path MTU.  */
	asoc->pathmtu = sp->pathmtu;

	/* Set association default SACK delay and frequency.  */
	asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
	asoc->sackfreq = sp->sackfreq;

	/* Set the association default flags controlling
	 * Heartbeat, SACK delay, and Path MTU Discovery.
	 */
	asoc->param_flags = sp->param_flags;

	/* Initialize the maximum number of new data packets that can be sent
	 * in a burst.
	 */
	asoc->max_burst = sp->max_burst;

	/* Initialize the timers.  T1 and T2 start from rto_initial.  */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_NONE] = 0;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] = 0;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = 0;

	/* sctpimpguide Section 2.12.2:
	 * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
	 * recommended value of 5 times 'RTO.Max'.
	 */
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
		= 5 * asoc->rto_max;

	asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
		min_t(unsigned long, sp->autoclose, net->sctp.max_autoclose) * HZ;

	/* Initializes the timers.  */
	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
		setup_timer(&asoc->timers[i], sctp_timer_events[i],
				(unsigned long)asoc);

	/* Pull default initialization values from the sock options.
	 * Note: This assumes that the initialization values from the
	 * sock are always set to sane defaults.
	 */
	asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
	asoc->c.sinit_num_ostreams = sp->initmsg.sinit_num_ostreams;
	asoc->max_init_attempts	= sp->initmsg.sinit_max_attempts;

	asoc->max_init_timeo =
		 msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);

	/* Allocate storage for the ssnmap after the inbound and outbound
	 * streams have been negotiated during Init.
	 */
	asoc->ssnmap = NULL;

	/* Set the local window size for receive.
	 * This is also the rcvbuf space per association.
	 * RFC 6 - A SCTP receiver MUST be able to receive a minimum of
	 * 1500 bytes in one SCTP packet.
	 */
	if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
		asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
	else
		asoc->rwnd = sk->sk_rcvbuf/2;

	asoc->a_rwnd = asoc->rwnd;

	asoc->rwnd_over = 0;
	asoc->rwnd_press = 0;

	/* Use my own max window until I learn something better.  */
	asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;

	/* Initialize the association's outbound buffer accounting.  */
	asoc->sndbuf_used = 0;

	/* Initialize the receive memory counter */
	atomic_set(&asoc->rmem_alloc, 0);

	init_waitqueue_head(&asoc->wait);

	asoc->c.my_vtag = sctp_generate_tag(ep);
	asoc->peer.i.init_tag = 0;     /* INIT needs a vtag of 0. */
	asoc->c.peer_vtag = 0;
	asoc->c.my_ttag   = 0;
	asoc->c.peer_ttag = 0;
	asoc->c.my_port = ep->base.bind_addr.port;

	asoc->c.initial_tsn = sctp_generate_tsn(ep);

	asoc->next_tsn = asoc->c.initial_tsn;

	asoc->ctsn_ack_point = asoc->next_tsn - 1;
	asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
	asoc->highest_sacked = asoc->ctsn_ack_point;
	asoc->last_cwr_tsn = asoc->ctsn_ack_point;
	asoc->unack_data = 0;

	/* ADDIP Section 4.1 Asconf Chunk Procedures
	 *
	 * When an endpoint has an ASCONF signaled change to be sent to the
	 * remote endpoint it should do the following:
	 * ...
	 * A2) a serial number should be assigned to the chunk. The serial
	 * number SHOULD be a monotonically increasing number. The serial
	 * numbers SHOULD be initialized at the start of the
	 * association to the same value as the initial TSN.
	 */
	asoc->addip_serial = asoc->c.initial_tsn;

	INIT_LIST_HEAD(&asoc->addip_chunk_list);
	INIT_LIST_HEAD(&asoc->asconf_ack_list);

	/* Make an empty list of remote transport addresses.  */
	INIT_LIST_HEAD(&asoc->peer.transport_addr_list);
	asoc->peer.transport_count = 0;

	/* RFC 2960 5.1 Normal Establishment of an Association
	 *
	 * After the reception of the first data chunk in an
	 * association the endpoint must immediately respond with a
	 * sack to acknowledge the data chunk.  Subsequent
	 * acknowledgements should be done as described in Section
	 * 6.2.
	 */
	asoc->peer.sack_needed = 1;
	asoc->peer.sack_cnt = 0;
	asoc->peer.sack_generation = 1;

	/* Assume that the peer will tell us if it recognizes ASCONF
	 * as part of INIT exchange.
	 * The sctp_addip_noauth option is there for backward compatibility
	 * and will revert old behavior.
	 */
	asoc->peer.asconf_capable = 0;
	if (net->sctp.addip_noauth)
		asoc->peer.asconf_capable = 1;
	asoc->asconf_addr_del_pending = NULL;
	asoc->src_out_of_asoc_ok = 0;
	asoc->new_transport = NULL;

	/* Create an input queue.  */
	sctp_inq_init(&asoc->base.inqueue);
	sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);

	/* Create an output queue.  */
	sctp_outq_init(asoc, &asoc->outqueue);

	if (!sctp_ulpq_init(&asoc->ulpq, asoc))
		goto fail_init;

	memset(&asoc->peer.tsn_map, 0, sizeof(struct sctp_tsnmap));

	asoc->need_ecne = 0;

	asoc->assoc_id = 0;

	/* Assume that peer would support both address types unless we are
	 * told otherwise.
	 */
	asoc->peer.ipv4_address = 1;
	if (asoc->base.sk->sk_family == PF_INET6)
		asoc->peer.ipv6_address = 1;
	INIT_LIST_HEAD(&asoc->asocs);

	asoc->autoclose = sp->autoclose;

	asoc->default_stream = sp->default_stream;
	asoc->default_ppid = sp->default_ppid;
	asoc->default_flags = sp->default_flags;
	asoc->default_context = sp->default_context;
	asoc->default_timetolive = sp->default_timetolive;
	asoc->default_rcv_context = sp->default_rcv_context;

	/* SCTP_GET_ASSOC_STATS COUNTERS */
	memset(&asoc->stats, 0, sizeof(struct sctp_priv_assoc_stats));

	/* AUTH related initializations */
	INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
	err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp);
	if (err)
		goto fail_init;

	asoc->active_key_id = ep->active_key_id;
	asoc->asoc_shared_key = NULL;

	asoc->default_hmac_id = 0;
	/* Save the hmacs and chunks list into this association */
	if (ep->auth_hmacs_list)
		memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
			ntohs(ep->auth_hmacs_list->param_hdr.length));
	if (ep->auth_chunk_list)
		memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
			ntohs(ep->auth_chunk_list->param_hdr.length));

	/* Get the AUTH random number for this association */
	p = (sctp_paramhdr_t *)asoc->c.auth_random;
	p->type = SCTP_PARAM_RANDOM;
	p->length = htons(sizeof(sctp_paramhdr_t) + SCTP_AUTH_RANDOM_LENGTH);
	get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);

	return asoc;

fail_init:
	/* Drop the references taken at the top of this function. */
	sock_put(asoc->base.sk);
	sctp_endpoint_put(asoc->ep);
	return NULL;
}
337
338
339struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
340 const struct sock *sk,
341 sctp_scope_t scope,
342 gfp_t gfp)
343{
344 struct sctp_association *asoc;
345
346 asoc = kzalloc(sizeof(*asoc), gfp);
347 if (!asoc)
348 goto fail;
349
350 if (!sctp_association_init(asoc, ep, sk, scope, gfp))
351 goto fail_init;
352
353 SCTP_DBG_OBJCNT_INC(assoc);
354
355 pr_debug("Created asoc %p\n", asoc);
356
357 return asoc;
358
359fail_init:
360 kfree(asoc);
361fail:
362 return NULL;
363}
364
365
366
367
368void sctp_association_free(struct sctp_association *asoc)
369{
370 struct sock *sk = asoc->base.sk;
371 struct sctp_transport *transport;
372 struct list_head *pos, *temp;
373 int i;
374
375
376
377
378 if (!asoc->temp) {
379 list_del(&asoc->asocs);
380
381
382
383
384 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
385 sk->sk_ack_backlog--;
386 }
387
388
389
390
391 asoc->base.dead = true;
392
393
394 sctp_outq_free(&asoc->outqueue);
395
396
397 sctp_ulpq_free(&asoc->ulpq);
398
399
400 sctp_inq_free(&asoc->base.inqueue);
401
402 sctp_tsnmap_free(&asoc->peer.tsn_map);
403
404
405 sctp_ssnmap_free(asoc->ssnmap);
406
407
408 sctp_bind_addr_free(&asoc->base.bind_addr);
409
410
411
412
413
414
415 for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
416 if (del_timer(&asoc->timers[i]))
417 sctp_association_put(asoc);
418 }
419
420
421 kfree(asoc->peer.cookie);
422 kfree(asoc->peer.peer_random);
423 kfree(asoc->peer.peer_chunks);
424 kfree(asoc->peer.peer_hmacs);
425
426
427 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
428 transport = list_entry(pos, struct sctp_transport, transports);
429 list_del_rcu(pos);
430 sctp_transport_free(transport);
431 }
432
433 asoc->peer.transport_count = 0;
434
435 sctp_asconf_queue_teardown(asoc);
436
437
438 if (asoc->asconf_addr_del_pending != NULL)
439 kfree(asoc->asconf_addr_del_pending);
440
441
442 sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
443
444
445 sctp_auth_key_put(asoc->asoc_shared_key);
446
447 sctp_association_put(asoc);
448}
449
450
451static void sctp_association_destroy(struct sctp_association *asoc)
452{
453 if (unlikely(!asoc->base.dead)) {
454 WARN(1, "Attempt to destroy undead association %p!\n", asoc);
455 return;
456 }
457
458 sctp_endpoint_put(asoc->ep);
459 sock_put(asoc->base.sk);
460
461 if (asoc->assoc_id != 0) {
462 spin_lock_bh(&sctp_assocs_id_lock);
463 idr_remove(&sctp_assocs_id, asoc->assoc_id);
464 spin_unlock_bh(&sctp_assocs_id_lock);
465 }
466
467 WARN_ON(atomic_read(&asoc->rmem_alloc));
468
469 kfree(asoc);
470 SCTP_DBG_OBJCNT_DEC(assoc);
471}
472
473
474void sctp_assoc_set_primary(struct sctp_association *asoc,
475 struct sctp_transport *transport)
476{
477 int changeover = 0;
478
479
480
481
482 if (asoc->peer.primary_path != NULL &&
483 asoc->peer.primary_path != transport)
484 changeover = 1 ;
485
486 asoc->peer.primary_path = transport;
487
488
489 memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
490 sizeof(union sctp_addr));
491
492
493
494
495 if ((transport->state == SCTP_ACTIVE) ||
496 (transport->state == SCTP_UNKNOWN))
497 asoc->peer.active_path = transport;
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513 if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
514 return;
515
516 if (transport->cacc.changeover_active)
517 transport->cacc.cycling_changeover = changeover;
518
519
520
521
522 transport->cacc.changeover_active = changeover;
523
524
525
526
527 transport->cacc.next_tsn_at_change = asoc->next_tsn;
528}
529
530
/* Remove a transport from an association, transferring any of its
 * pending (in-flight) chunks to the active path and repointing every
 * cached transport reference that still names @peer.
 */
void sctp_assoc_rm_peer(struct sctp_association *asoc,
			struct sctp_transport *peer)
{
	struct list_head	*pos;
	struct sctp_transport	*transport;

	pr_debug("%s: association:%p addr:%pISpc\n",
		 __func__, asoc, &peer->ipaddr.sa);

	/* If we are going to remove the current retran_path, update it
	 * to the next peer before removing this peer from the list.
	 */
	if (asoc->peer.retran_path == peer)
		sctp_assoc_update_retran_path(asoc);

	/* Remove this peer from the list (RCU-safe for concurrent readers). */
	list_del_rcu(&peer->transports);

	/* Get the first transport of asoc.  */
	pos = asoc->peer.transport_addr_list.next;
	transport = list_entry(pos, struct sctp_transport, transports);

	/* Update any entries that match the peer to be deleted.  */
	if (asoc->peer.primary_path == peer)
		sctp_assoc_set_primary(asoc, transport);
	if (asoc->peer.active_path == peer)
		asoc->peer.active_path = transport;
	if (asoc->peer.retran_path == peer)
		asoc->peer.retran_path = transport;
	if (asoc->peer.last_data_from == peer)
		asoc->peer.last_data_from = transport;

	/* If we remove the transport an INIT was last sent to, set it to
	 * NULL. Combined with the update of the retran path above, this
	 * will cause the next INIT to be sent to the next available
	 * transport.
	 */
	if (asoc->init_last_sent_to == peer)
		asoc->init_last_sent_to = NULL;

	/* If we remove the transport an SHUTDOWN was last sent to, set it
	 * to NULL. Combined with the update of the retran path above, this
	 * will cause the next SHUTDOWN to be sent to the next available
	 * transport.
	 */
	if (asoc->shutdown_last_sent_to == peer)
		asoc->shutdown_last_sent_to = NULL;

	/* If we remove the transport an ASCONF was last sent to, set it to
	 * NULL.
	 */
	if (asoc->addip_last_asconf &&
	    asoc->addip_last_asconf->transport == peer)
		asoc->addip_last_asconf->transport = NULL;

	/* If we have something on the transmitted list, we have to
	 * save it off.  The best place is the active path.
	 */
	if (!list_empty(&peer->transmitted)) {
		struct sctp_transport *active = asoc->peer.active_path;
		struct sctp_chunk *ch;

		/* Reset the transport of each chunk on this list */
		list_for_each_entry(ch, &peer->transmitted,
					transmitted_list) {
			ch->transport = NULL;
			ch->rtt_in_progress = 0;
		}

		list_splice_tail_init(&peer->transmitted,
					&active->transmitted);

		/* Start a T3 timer here in case it wasn't running so
		 * that these migrated packets have a chance to get
		 * retransmitted.  mod_timer() returning 0 means the timer
		 * was not pending, so take a reference for it.
		 */
		if (!timer_pending(&active->T3_rtx_timer))
			if (!mod_timer(&active->T3_rtx_timer,
					jiffies + active->rto))
				sctp_transport_hold(active);
	}

	asoc->peer.transport_count--;

	sctp_transport_free(peer);
}
617
618
/* Add a transport address to an association.
 *
 * If the address is already known, the existing transport is returned
 * (confirming it if it was SCTP_UNKNOWN).  Otherwise a new transport is
 * created, initialized from the association defaults, routed, and
 * appended to the peer's transport list.  Returns NULL on allocation
 * failure.
 */
struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
					   const union sctp_addr *addr,
					   const gfp_t gfp,
					   const int peer_state)
{
	struct net *net = sock_net(asoc->base.sk);
	struct sctp_transport *peer;
	struct sctp_sock *sp;
	unsigned short port;

	sp = sctp_sk(asoc->base.sk);

	/* AF_INET and AF_INET6 share common port field. */
	port = ntohs(addr->v4.sin_port);

	pr_debug("%s: association:%p addr:%pISpc state:%d\n", __func__,
		 asoc, &addr->sa, peer_state);

	/* Set the port if it has not been set yet.  */
	if (0 == asoc->peer.port)
		asoc->peer.port = port;

	/* Check to see if this is a duplicate. */
	peer = sctp_assoc_lookup_paddr(asoc, addr);
	if (peer) {
		/* An UNKNOWN peer that we re-learn via a COOKIE or
		 * user request is now confirmed active.
		 */
		if (peer->state == SCTP_UNKNOWN) {
			peer->state = SCTP_ACTIVE;
		}
		return peer;
	}

	peer = sctp_transport_new(net, addr, gfp);
	if (!peer)
		return NULL;

	sctp_transport_set_owner(peer, asoc);

	/* Initialize the peer's heartbeat interval based on the
	 * association configured value.
	 */
	peer->hbinterval = asoc->hbinterval;

	/* Set the path max_retrans.  */
	peer->pathmaxrxt = asoc->pathmaxrxt;

	/* And the partial failure retrans threshold */
	peer->pf_retrans = asoc->pf_retrans;

	/* Initialize the peer's SACK delay timeout based on the
	 * association configured value.
	 */
	peer->sackdelay = asoc->sackdelay;
	peer->sackfreq = asoc->sackfreq;

	/* Enable/disable heartbeat, SACK delay, and path MTU discovery
	 * based on association setting.
	 */
	peer->param_flags = asoc->param_flags;

	/* Route the transport before reading its path MTU below. */
	sctp_transport_route(peer, NULL, sp);

	/* Initialize the pmtu of the transport. */
	if (peer->param_flags & SPP_PMTUD_DISABLE) {
		if (asoc->pathmtu)
			peer->pathmtu = asoc->pathmtu;
		else
			peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
	}

	/* If this is the first transport addr on this association,
	 * initialize the association PMTU to the peer's PMTU.
	 * If not and the current association PMTU is higher than the new
	 * peer's PMTU, reset the association PMTU to the new peer's PMTU.
	 */
	if (asoc->pathmtu)
		asoc->pathmtu = min_t(int, peer->pathmtu, asoc->pathmtu);
	else
		asoc->pathmtu = peer->pathmtu;

	pr_debug("%s: association:%p PMTU set to %d\n", __func__, asoc,
		 asoc->pathmtu);

	peer->pmtu_pending = 0;

	asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);

	/* The asoc->peer.port might not be meaningful yet, but
	 * initialize the packet structure anyway.
	 */
	sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
			 asoc->peer.port);

	/* 7.2.1 Slow-Start
	 *
	 * o The initial cwnd before DATA transmission or after a sufficiently
	 *   long idle period MUST be set to
	 *      min(4*MTU, max(2*MTU, 4380 bytes))
	 *
	 * o The initial value of ssthresh MAY be arbitrarily high
	 *   (for example, implementations MAY use the size of the
	 *   receiver advertised window).
	 */
	peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));

	/* At this point, we may not have the receiver's advertised window,
	 * so initialize ssthresh to the default value and it will be set
	 * later when we process the INIT.
	 */
	peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;

	peer->partial_bytes_acked = 0;
	peer->flight_size = 0;
	peer->burst_limited = 0;

	/* Set the transport's RTO.initial value */
	peer->rto = asoc->rto_initial;
	sctp_max_rto(asoc, peer);

	/* Set the peer's active state. */
	peer->state = peer_state;

	/* Attach the remote transport to our asoc (RCU-visible list). */
	list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
	asoc->peer.transport_count++;

	/* If we do not yet have a primary path, set one.  */
	if (!asoc->peer.primary_path) {
		sctp_assoc_set_primary(asoc, peer);
		asoc->peer.retran_path = peer;
	}

	if (asoc->peer.active_path == asoc->peer.retran_path &&
	    peer->state != SCTP_UNCONFIRMED) {
		asoc->peer.retran_path = peer;
	}

	return peer;
}
761
762
763void sctp_assoc_del_peer(struct sctp_association *asoc,
764 const union sctp_addr *addr)
765{
766 struct list_head *pos;
767 struct list_head *temp;
768 struct sctp_transport *transport;
769
770 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
771 transport = list_entry(pos, struct sctp_transport, transports);
772 if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
773
774 sctp_assoc_rm_peer(asoc, transport);
775 break;
776 }
777 }
778}
779
780
781struct sctp_transport *sctp_assoc_lookup_paddr(
782 const struct sctp_association *asoc,
783 const union sctp_addr *address)
784{
785 struct sctp_transport *t;
786
787
788
789 list_for_each_entry(t, &asoc->peer.transport_addr_list,
790 transports) {
791 if (sctp_cmp_addr_exact(address, &t->ipaddr))
792 return t;
793 }
794
795 return NULL;
796}
797
798
799void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
800 struct sctp_transport *primary)
801{
802 struct sctp_transport *temp;
803 struct sctp_transport *t;
804
805 list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
806 transports) {
807
808 if (t != primary)
809 sctp_assoc_rm_peer(asoc, t);
810 }
811}
812
813
814
815
816
/* Engage in transport control operations.
 * Mark the transport up or down and send a notification to the user.
 * Select and update the new active and retran paths.
 */
void sctp_assoc_control_transport(struct sctp_association *asoc,
				  struct sctp_transport *transport,
				  sctp_transport_cmd_t command,
				  sctp_sn_error_t error)
{
	struct sctp_transport *t = NULL;
	struct sctp_transport *first;
	struct sctp_transport *second;
	struct sctp_ulpevent *event;
	struct sockaddr_storage addr;
	int spc_state = 0;
	bool ulp_notify = true;

	/* Record the transition on the transport.  */
	switch (command) {
	case SCTP_TRANSPORT_UP:
		/* If we are moving from UNCONFIRMED state due
		 * to heartbeat success, report the SCTP_ADDR_CONFIRMED
		 * state to the user, otherwise report SCTP_ADDR_AVAILABLE.
		 */
		if (SCTP_UNCONFIRMED == transport->state &&
		    SCTP_HEARTBEAT_SUCCESS == error)
			spc_state = SCTP_ADDR_CONFIRMED;
		else
			spc_state = SCTP_ADDR_AVAILABLE;
		/* Don't inform ULP about transition from PF to
		 * active state and set cwnd to 1 MTU, see SCTP
		 * Quick failover draft section 5.1, point 5
		 */
		if (transport->state == SCTP_PF) {
			ulp_notify = false;
			transport->cwnd = asoc->pathmtu;
		}
		transport->state = SCTP_ACTIVE;
		break;

	case SCTP_TRANSPORT_DOWN:
		/* If the transport was never confirmed, do not transition it
		 * to inactive state.  Also, release the cached route since
		 * there may be a better route next time.
		 */
		if (transport->state != SCTP_UNCONFIRMED)
			transport->state = SCTP_INACTIVE;
		else {
			dst_release(transport->dst);
			transport->dst = NULL;
		}

		spc_state = SCTP_ADDR_UNREACHABLE;
		break;

	case SCTP_TRANSPORT_PF:
		/* Potentially-failed: no user notification. */
		transport->state = SCTP_PF;
		ulp_notify = false;
		break;

	default:
		return;
	}

	/* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the
	 * user.
	 */
	if (ulp_notify) {
		memset(&addr, 0, sizeof(struct sockaddr_storage));
		memcpy(&addr, &transport->ipaddr,
		       transport->af_specific->sockaddr_len);
		event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
					0, spc_state, error, GFP_ATOMIC);
		if (event)
			sctp_ulpq_tail_event(&asoc->ulpq, event);
	}

	/* Select new active and retran paths.
	 *
	 * We keep track of the two most recently used active transports:
	 * "first" is the most recently heard from, "second" the runner-up.
	 * INACTIVE, UNCONFIRMED and PF transports are skipped.
	 */
	first = NULL; second = NULL;

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			    transports) {

		if ((t->state == SCTP_INACTIVE) ||
		    (t->state == SCTP_UNCONFIRMED) ||
		    (t->state == SCTP_PF))
			continue;
		if (!first || t->last_time_heard > first->last_time_heard) {
			second = first;
			first = t;
		}
		if (!second || t->last_time_heard > second->last_time_heard)
			second = t;
	}

	/* RFC 2960 6.4 Multi-Homed SCTP Endpoints
	 *
	 * By default, an endpoint should always transmit to the
	 * primary path, unless the SCTP user explicitly specifies the
	 * destination transport address (and possibly source
	 * transport address) to use.
	 *
	 * [If the primary is active but not most recent, bump the most
	 * recently used transport.]
	 */
	if (((asoc->peer.primary_path->state == SCTP_ACTIVE) ||
	     (asoc->peer.primary_path->state == SCTP_UNKNOWN)) &&
	    first != asoc->peer.primary_path) {
		second = first;
		first = asoc->peer.primary_path;
	}

	/* If we failed to find a usable transport, just camp on the
	 * primary, even if it is inactive.
	 */
	if (!first) {
		first = asoc->peer.primary_path;
		second = asoc->peer.primary_path;
	}

	/* Set the active and retran transports.  */
	asoc->peer.active_path = first;
	asoc->peer.retran_path = second;
}
944
945
/* Hold a reference to an association. */
void sctp_association_hold(struct sctp_association *asoc)
{
	atomic_inc(&asoc->base.refcnt);
}
950
951
952
953
/* Release a reference to an association and clean up if this was the
 * last reference.
 */
void sctp_association_put(struct sctp_association *asoc)
{
	if (atomic_dec_and_test(&asoc->base.refcnt))
		sctp_association_destroy(asoc);
}
959
960
961
962
963__u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
964{
965
966
967
968
969
970 __u32 retval = asoc->next_tsn;
971 asoc->next_tsn++;
972 asoc->unack_data++;
973
974 return retval;
975}
976
977
978
979
980int sctp_cmp_addr_exact(const union sctp_addr *ss1,
981 const union sctp_addr *ss2)
982{
983 struct sctp_af *af;
984
985 af = sctp_get_af_specific(ss1->sa.sa_family);
986 if (unlikely(!af))
987 return 0;
988
989 return af->cmp_addr(ss1, ss2);
990}
991
992
993
994
995
996struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
997{
998 struct sctp_chunk *chunk;
999
1000
1001
1002
1003 if (asoc->need_ecne)
1004 chunk = sctp_make_ecne(asoc, asoc->last_ecne_tsn);
1005 else
1006 chunk = NULL;
1007
1008 return chunk;
1009}
1010
1011
1012
1013
1014struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
1015 __u32 tsn)
1016{
1017 struct sctp_transport *active;
1018 struct sctp_transport *match;
1019 struct sctp_transport *transport;
1020 struct sctp_chunk *chunk;
1021 __be32 key = htonl(tsn);
1022
1023 match = NULL;
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040 active = asoc->peer.active_path;
1041
1042 list_for_each_entry(chunk, &active->transmitted,
1043 transmitted_list) {
1044
1045 if (key == chunk->subh.data_hdr->tsn) {
1046 match = active;
1047 goto out;
1048 }
1049 }
1050
1051
1052 list_for_each_entry(transport, &asoc->peer.transport_addr_list,
1053 transports) {
1054
1055 if (transport == active)
1056 continue;
1057 list_for_each_entry(chunk, &transport->transmitted,
1058 transmitted_list) {
1059 if (key == chunk->subh.data_hdr->tsn) {
1060 match = transport;
1061 goto out;
1062 }
1063 }
1064 }
1065out:
1066 return match;
1067}
1068
1069
1070struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
1071 struct net *net,
1072 const union sctp_addr *laddr,
1073 const union sctp_addr *paddr)
1074{
1075 struct sctp_transport *transport;
1076
1077 if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
1078 (htons(asoc->peer.port) == paddr->v4.sin_port) &&
1079 net_eq(sock_net(asoc->base.sk), net)) {
1080 transport = sctp_assoc_lookup_paddr(asoc, paddr);
1081 if (!transport)
1082 goto out;
1083
1084 if (sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
1085 sctp_sk(asoc->base.sk)))
1086 goto out;
1087 }
1088 transport = NULL;
1089
1090out:
1091 return transport;
1092}
1093
1094
/* Do delayed input processing.  This is scheduled by sctp_rcv().
 * Pops chunks off the association inqueue and runs each through the
 * SCTP state machine.
 */
static void sctp_assoc_bh_rcv(struct work_struct *work)
{
	struct sctp_association *asoc =
		container_of(work, struct sctp_association,
			     base.inqueue.immediate);
	struct net *net = sock_net(asoc->base.sk);
	struct sctp_endpoint *ep;
	struct sctp_chunk *chunk;
	struct sctp_inq *inqueue;
	int state;
	sctp_subtype_t subtype;
	int error = 0;

	/* The association should be held so we should be safe.  */
	ep = asoc->ep;

	inqueue = &asoc->base.inqueue;
	sctp_association_hold(asoc);
	while (NULL != (chunk = sctp_inq_pop(inqueue))) {
		state = asoc->state;
		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);

		/* SCTP-AUTH, Section 6.3:
		 *    The receiver has a list of chunk types which it expects
		 *    to be received only after an AUTH-chunk.  This list has
		 *    been sent to the peer during the association setup.  It
		 *    MUST silently discard these chunks if they are not placed
		 *    after an AUTH chunk in the packet.
		 *
		 * NOTE(review): the skipped chunk is not freed here —
		 * looks like a potential leak; confirm against the
		 * inqueue/chunk ownership rules.
		 */
		if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
			continue;

		/* Remember where the last DATA chunk came from so we
		 * know where to send the SACK.
		 */
		if (sctp_chunk_is_data(chunk))
			asoc->peer.last_data_from = chunk->transport;
		else {
			SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
			asoc->stats.ictrlchunks++;
			if (chunk->chunk_hdr->type == SCTP_CID_SACK)
				asoc->stats.isacks++;
		}

		if (chunk->transport)
			chunk->transport->last_time_heard = jiffies;

		/* Run through the state machine.  */
		error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
				   state, ep, asoc, chunk, GFP_ATOMIC);

		/* Check to see if the association is freed in response to
		 * the incoming chunk.  If so, get out of the while loop.
		 */
		if (asoc->base.dead)
			break;

		/* If there is an error on chunk, discard this packet. */
		if (error && chunk)
			chunk->pdiscard = 1;
	}
	sctp_association_put(asoc);
}
1158
1159
/* This routine moves an association from its old sk to a new sk.
 * Used when a TCP-style association is accepted onto a new socket.
 */
void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
{
	struct sctp_sock *newsp = sctp_sk(newsk);
	struct sock *oldsk = assoc->base.sk;

	/* Delete the association from the old endpoint's list of
	 * associations.
	 */
	list_del_init(&assoc->asocs);

	/* Decrement the backlog value for a TCP-style socket.  */
	if (sctp_style(oldsk, TCP))
		oldsk->sk_ack_backlog--;

	/* Release references to the old endpoint and the sock.  */
	sctp_endpoint_put(assoc->ep);
	sock_put(assoc->base.sk);

	/* Get a reference to the new endpoint.  */
	assoc->ep = newsp->ep;
	sctp_endpoint_hold(assoc->ep);

	/* Get a reference to the new sock.  */
	assoc->base.sk = newsk;
	sock_hold(assoc->base.sk);

	/* Add the association to the new endpoint's list of associations.  */
	sctp_endpoint_add_asoc(newsp->ep, assoc);
}
1189
1190
/* Update an association based on a received INIT or INIT-ACK carried in
 * @new.  Handles both the restart case (asoc already established) and
 * the duplicate-COOKIE case (asoc still being set up).
 */
void sctp_assoc_update(struct sctp_association *asoc,
		       struct sctp_association *new)
{
	struct sctp_transport *trans;
	struct list_head *pos, *temp;

	/* Copy in new parameters of peer.  */
	asoc->c = new->c;
	asoc->peer.rwnd = new->peer.rwnd;
	asoc->peer.sack_needed = new->peer.sack_needed;
	asoc->peer.i = new->peer.i;
	sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
			 asoc->peer.i.initial_tsn, GFP_ATOMIC);

	/* Remove any peer addresses not present in the new association,
	 * and reset the surviving transports on a restart.
	 */
	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		trans = list_entry(pos, struct sctp_transport, transports);
		if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
			sctp_assoc_rm_peer(asoc, trans);
			continue;
		}

		if (asoc->state >= SCTP_STATE_ESTABLISHED)
			sctp_transport_reset(trans);
	}

	/* If the case is A (association restart), use
	 * initial_tsn as next_tsn. If the case is B, use
	 * current next_tsn in case data sent to peer
	 * has been discarded and needs retransmission.
	 */
	if (asoc->state >= SCTP_STATE_ESTABLISHED) {
		asoc->next_tsn = new->next_tsn;
		asoc->ctsn_ack_point = new->ctsn_ack_point;
		asoc->adv_peer_ack_point = new->adv_peer_ack_point;

		/* Reinitialize SSN for both local streams
		 * and peer's streams.
		 */
		sctp_ssnmap_clear(asoc->ssnmap);

		/* Flush the ULP reassembly and ordered queue.
		 * Any data there will now be stale and will
		 * cause problems.
		 */
		sctp_ulpq_flush(&asoc->ulpq);

		/* reset the overall association error count so
		 * that the restarted association doesn't get torn
		 * down immediately.
		 */
		asoc->overall_error_count = 0;

	} else {
		/* Add any peer addresses from the new association. */
		list_for_each_entry(trans, &new->peer.transport_addr_list,
				transports) {
			if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))
				sctp_assoc_add_peer(asoc, &trans->ipaddr,
						    GFP_ATOMIC, trans->state);
		}

		asoc->ctsn_ack_point = asoc->next_tsn - 1;
		asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
		if (!asoc->ssnmap) {
			/* Move the ssnmap. */
			asoc->ssnmap = new->ssnmap;
			new->ssnmap = NULL;
		}

		if (!asoc->assoc_id) {
			/* get a new association id since we don't have one
			 * yet.
			 */
			sctp_assoc_set_id(asoc, GFP_ATOMIC);
		}
	}

	/* SCTP-AUTH: Save the peer parameters from the new associations
	 * and also move the association shared keys over
	 */
	kfree(asoc->peer.peer_random);
	asoc->peer.peer_random = new->peer.peer_random;
	new->peer.peer_random = NULL;

	kfree(asoc->peer.peer_chunks);
	asoc->peer.peer_chunks = new->peer.peer_chunks;
	new->peer.peer_chunks = NULL;

	kfree(asoc->peer.peer_hmacs);
	asoc->peer.peer_hmacs = new->peer.peer_hmacs;
	new->peer.peer_hmacs = NULL;

	sctp_auth_key_put(asoc->asoc_shared_key);
	sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
}
1287
1288
1289
1290
1291
1292
/* Update the retran path for restart purposes.
 *
 * Walks the transport list circularly, starting just after the current
 * retran path, looking for the next ACTIVE or UNKNOWN transport.  If
 * none is found, falls back to the first transport that is at least
 * confirmed (tracked in @next); if even that fails, the retran path is
 * left unchanged.
 */
void sctp_assoc_update_retran_path(struct sctp_association *asoc)
{
	struct sctp_transport *t, *next;
	struct list_head *head = &asoc->peer.transport_addr_list;
	struct list_head *pos;

	/* Only one transport: nothing to rotate to. */
	if (asoc->peer.transport_count == 1)
		return;

	/* Find the next transport in a round-robin fashion. */
	t = asoc->peer.retran_path;
	pos = &t->transports;
	next = NULL;

	while (1) {
		/* Skip the head, which is not itself a transport. */
		if (pos->next == head)
			pos = head->next;
		else
			pos = pos->next;

		t = list_entry(pos, struct sctp_transport, transports);

		/* We have exhausted the list, but didn't find any
		 * other active transports.  If so, use the next
		 * position.
		 */
		if (t == asoc->peer.retran_path) {
			t = next;
			break;
		}

		/* Try to find an active transport. */
		if ((t->state == SCTP_ACTIVE) ||
		    (t->state == SCTP_UNKNOWN)) {
			break;
		} else {
			/* Keep track of the next transport in case
			 * we don't find any active transport.
			 */
			if (t->state != SCTP_UNCONFIRMED && !next)
				next = t;
		}
	}

	if (t)
		asoc->peer.retran_path = t;
	else
		t = asoc->peer.retran_path;

	pr_debug("%s: association:%p addr:%pISpc\n", __func__, asoc,
		 &t->ipaddr.sa);
}
1347
1348
1349struct sctp_transport *sctp_assoc_choose_alter_transport(
1350 struct sctp_association *asoc, struct sctp_transport *last_sent_to)
1351{
1352
1353
1354
1355
1356 if (!last_sent_to)
1357 return asoc->peer.active_path;
1358 else {
1359 if (last_sent_to == asoc->peer.retran_path)
1360 sctp_assoc_update_retran_path(asoc);
1361 return asoc->peer.retran_path;
1362 }
1363}
1364
1365
1366
1367
1368void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
1369{
1370 struct sctp_transport *t;
1371 __u32 pmtu = 0;
1372
1373 if (!asoc)
1374 return;
1375
1376
1377 list_for_each_entry(t, &asoc->peer.transport_addr_list,
1378 transports) {
1379 if (t->pmtu_pending && t->dst) {
1380 sctp_transport_update_pmtu(sk, t, dst_mtu(t->dst));
1381 t->pmtu_pending = 0;
1382 }
1383 if (!pmtu || (t->pathmtu < pmtu))
1384 pmtu = t->pathmtu;
1385 }
1386
1387 if (pmtu) {
1388 asoc->pathmtu = pmtu;
1389 asoc->frag_point = sctp_frag_point(asoc, pmtu);
1390 }
1391
1392 pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc,
1393 asoc->pathmtu, asoc->frag_point);
1394}
1395
1396
1397static inline int sctp_peer_needs_update(struct sctp_association *asoc)
1398{
1399 struct net *net = sock_net(asoc->base.sk);
1400 switch (asoc->state) {
1401 case SCTP_STATE_ESTABLISHED:
1402 case SCTP_STATE_SHUTDOWN_PENDING:
1403 case SCTP_STATE_SHUTDOWN_RECEIVED:
1404 case SCTP_STATE_SHUTDOWN_SENT:
1405 if ((asoc->rwnd > asoc->a_rwnd) &&
1406 ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
1407 (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
1408 asoc->pathmtu)))
1409 return 1;
1410 break;
1411 default:
1412 break;
1413 }
1414 return 0;
1415}
1416
1417
/* Increase asoc's rwnd by len and send any window update SACK that
 * might be needed.  Called when data is read off the socket.
 */
void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
{
	struct sctp_chunk *sack;
	struct timer_list *timer;

	/* First pay back any rwnd overage before growing the window. */
	if (asoc->rwnd_over) {
		if (asoc->rwnd_over >= len) {
			asoc->rwnd_over -= len;
		} else {
			asoc->rwnd += (len - asoc->rwnd_over);
			asoc->rwnd_over = 0;
		}
	} else {
		asoc->rwnd += len;
	}

	/* If we had window pressure, start recovering it
	 * once our rwnd had reached the accumulated pressure
	 * threshold.  The idea is to recover slowly, but up
	 * to the initial advertised window.
	 */
	if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
		int change = min(asoc->pathmtu, asoc->rwnd_press);
		asoc->rwnd += change;
		asoc->rwnd_press -= change;
	}

	pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
		 asoc->a_rwnd);

	/* Send a window update SACK if the rwnd has increased by at least the
	 * minimum of the association's PMTU and half of the receive buffer.
	 * The algorithm used is similar to the one described in
	 * Section 4.2.3.3 of RFC 1122.
	 */
	if (sctp_peer_needs_update(asoc)) {
		asoc->a_rwnd = asoc->rwnd;

		pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
			 "a_rwnd:%u\n", __func__, asoc, asoc->rwnd,
			 asoc->a_rwnd);

		sack = sctp_make_sack(asoc);
		if (!sack)
			return;

		asoc->peer.sack_needed = 0;

		sctp_outq_tail(&asoc->outqueue, sack);

		/* Stop the SACK timer.  A pending timer holds a reference
		 * on the association, so drop it if it was still pending.
		 */
		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
		if (del_timer(timer))
			sctp_association_put(asoc);
	}
}
1475
1476
/* Decrease asoc's rwnd by len.  Called when data is queued toward the
 * user; tracks overage and memory-pressure accounting when the window
 * is exhausted.
 */
void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
{
	int rx_count;
	int over = 0;

	if (unlikely(!asoc->rwnd || asoc->rwnd_over))
		pr_debug("%s: association:%p has asoc->rwnd:%u, "
			 "asoc->rwnd_over:%u!\n", __func__, asoc,
			 asoc->rwnd, asoc->rwnd_over);

	/* Pick the receive-memory counter per the endpoint's rcvbuf policy:
	 * per-association accounting or the shared socket counter.
	 */
	if (asoc->ep->rcvbuf_policy)
		rx_count = atomic_read(&asoc->rmem_alloc);
	else
		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);

	/* If we've reached or overflowed our receive buffer, announce
	 * a 0 rwnd if rwnd would still be positive.  Store the
	 * the potential pressure overflow so that the window can be restored
	 * back to original value.
	 */
	if (rx_count >= asoc->base.sk->sk_rcvbuf)
		over = 1;

	if (asoc->rwnd >= len) {
		asoc->rwnd -= len;
		if (over) {
			asoc->rwnd_press += asoc->rwnd;
			asoc->rwnd = 0;
		}
	} else {
		asoc->rwnd_over = len - asoc->rwnd;
		asoc->rwnd = 0;
	}

	pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
		 asoc->rwnd_press);
}
1515
1516
1517
1518
1519int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
1520 sctp_scope_t scope, gfp_t gfp)
1521{
1522 int flags;
1523
1524
1525
1526
1527 flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
1528 if (asoc->peer.ipv4_address)
1529 flags |= SCTP_ADDR4_PEERSUPP;
1530 if (asoc->peer.ipv6_address)
1531 flags |= SCTP_ADDR6_PEERSUPP;
1532
1533 return sctp_bind_addr_copy(sock_net(asoc->base.sk),
1534 &asoc->base.bind_addr,
1535 &asoc->ep->base.bind_addr,
1536 scope, gfp, flags);
1537}
1538
1539
1540int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
1541 struct sctp_cookie *cookie,
1542 gfp_t gfp)
1543{
1544 int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
1545 int var_size3 = cookie->raw_addr_list_len;
1546 __u8 *raw = (__u8 *)cookie->peer_init + var_size2;
1547
1548 return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
1549 asoc->ep->base.bind_addr.port, gfp);
1550}
1551
1552
1553int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
1554 const union sctp_addr *laddr)
1555{
1556 int found = 0;
1557
1558 if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
1559 sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
1560 sctp_sk(asoc->base.sk)))
1561 found = 1;
1562
1563 return found;
1564}
1565
1566
/* Set an association id for a given association.
 * Allocates a cyclic id from the global idr under sctp_assocs_id_lock.
 * Returns 0 on success (or if an id is already set), negative errno on
 * allocation failure.
 */
int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
{
	/* May sleep only if the caller's gfp mask allows it. */
	bool preload = gfp & __GFP_WAIT;
	int ret;

	/* If the id is already assigned, keep it. */
	if (asoc->assoc_id)
		return 0;

	/* Preload memory outside the spinlock when sleeping is allowed,
	 * then allocate with GFP_NOWAIT while holding the lock.
	 */
	if (preload)
		idr_preload(gfp);
	spin_lock_bh(&sctp_assocs_id_lock);
	/* 0 is not a valid assoc_id, must be >= 1 */
	ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, 1, 0, GFP_NOWAIT);
	spin_unlock_bh(&sctp_assocs_id_lock);
	if (preload)
		idr_preload_end();
	if (ret < 0)
		return ret;

	asoc->assoc_id = (sctp_assoc_t)ret;
	return 0;
}
1590
1591
1592static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
1593{
1594 struct sctp_chunk *asconf;
1595 struct sctp_chunk *tmp;
1596
1597 list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
1598 list_del_init(&asconf->list);
1599 sctp_chunk_free(asconf);
1600 }
1601}
1602
1603
1604static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
1605{
1606 struct sctp_chunk *ack;
1607 struct sctp_chunk *tmp;
1608
1609 list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
1610 transmitted_list) {
1611 list_del_init(&ack->transmitted_list);
1612 sctp_chunk_free(ack);
1613 }
1614}
1615
1616
/* Clean up the ASCONF_ACK queue.
 * Drop cached ACKs the peer can no longer ask for again, i.e. every ACK
 * whose serial precedes the peer's current ASCONF serial.
 */
void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
{
	struct sctp_chunk *ack;
	struct sctp_chunk *tmp;

	/* We can remove all the entries from the queue up to
	 * the "Peer-Sequence-Number" — the list is kept in serial
	 * order, so stop at the first entry matching the peer's
	 * current serial.
	 */
	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
				transmitted_list) {
		if (ack->subh.addip_hdr->serial ==
				htonl(asoc->peer.addip_serial))
			break;

		list_del_init(&ack->transmitted_list);
		sctp_chunk_free(ack);
	}
}
1635
1636
1637struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
1638 const struct sctp_association *asoc,
1639 __be32 serial)
1640{
1641 struct sctp_chunk *ack;
1642
1643
1644
1645
1646 list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
1647 if (ack->subh.addip_hdr->serial == serial) {
1648 sctp_chunk_hold(ack);
1649 return ack;
1650 }
1651 }
1652
1653 return NULL;
1654}
1655
/* Tear down all ASCONF state for an association: cached ACKs, the
 * pending ASCONF queue, and the last ASCONF sent.
 */
void sctp_asconf_queue_teardown(struct sctp_association *asoc)
{
	/* Free any cached ASCONF_ACK chunk. */
	sctp_assoc_free_asconf_acks(asoc);

	/* Free the ASCONF queue. */
	sctp_assoc_free_asconf_queue(asoc);

	/* Free any cached ASCONF chunk. */
	if (asoc->addip_last_asconf)
		sctp_chunk_free(asoc->addip_last_asconf);
}
1668