/*
 *  net/dccp/proto.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License version 2 as
 *	published by the Free Software Foundation.
 */
#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/checksum.h>

#include <net/inet_sock.h>
#include <net/inet_common.h>
#include <net/sock.h>
#include <net/xfrm.h>

#include <asm/ioctls.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/poll.h>

#include "ccid.h"
#include "dccp.h"
#include "feat.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;

EXPORT_SYMBOL_GPL(dccp_statistics);

struct percpu_counter dccp_orphan_count;
EXPORT_SYMBOL_GPL(dccp_orphan_count);

struct inet_hashinfo dccp_hashinfo;
EXPORT_SYMBOL_GPL(dccp_hashinfo);
/* the maximum queue length for tx in packets. 0 is no limit */
int sysctl_dccp_tx_qlen __read_mostly = 5;

#ifdef CONFIG_IP_DCCP_DEBUG
static const char *dccp_state_name(const int state)
{
	static const char *const dccp_state_names[] = {
	[DCCP_OPEN]		= "OPEN",
	[DCCP_REQUESTING]	= "REQUESTING",
	[DCCP_PARTOPEN]		= "PARTOPEN",
	[DCCP_LISTEN]		= "LISTEN",
	[DCCP_RESPOND]		= "RESPOND",
	[DCCP_CLOSING]		= "CLOSING",
	[DCCP_ACTIVE_CLOSEREQ]	= "CLOSEREQ",
	[DCCP_PASSIVE_CLOSE]	= "PASSIVE_CLOSE",
	[DCCP_PASSIVE_CLOSEREQ]	= "PASSIVE_CLOSEREQ",
	[DCCP_TIME_WAIT]	= "TIME_WAIT",
	[DCCP_CLOSED]		= "CLOSED",
	};

	if (state >= DCCP_MAX_STATES)
		return "INVALID STATE!";
	else
		return dccp_state_names[state];
}
#endif

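/*
 * dccp_set_state  -  move a socket to a new DCCP state
 *
 * Updates the MIB counters tracking established connections, unhashes the
 * socket and releases its port on transition to DCCP_CLOSED, and finally
 * records the new state via inet_sk_set_state().
 */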
void dccp_set_state(struct sock *sk, const int state)
{
	const int oldstate = sk->sk_state;

	dccp_pr_debug("%s(%p) %s --> %s\n", dccp_role(sk), sk,
		      dccp_state_name(oldstate), dccp_state_name(state));
	WARN_ON(state == oldstate);

	switch (state) {
	case DCCP_OPEN:
		if (oldstate != DCCP_OPEN)
			DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
		/* Client retransmits all Confirm options until entering OPEN */
		if (oldstate == DCCP_PARTOPEN)
			dccp_feat_list_purge(&dccp_sk(sk)->dccps_featneg);
		break;

	case DCCP_CLOSED:
		if (oldstate == DCCP_OPEN || oldstate == DCCP_ACTIVE_CLOSEREQ ||
		    oldstate == DCCP_CLOSING)
			DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash != NULL &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(sk);
		/* fall through */
	default:
		if (oldstate == DCCP_OPEN)
			DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
	}

	/*
	 * Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	inet_sk_set_state(sk, state);
}

EXPORT_SYMBOL_GPL(dccp_set_state);

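/*
 * dccp_finish_passive_close  -  answer a Close/CloseReq received from the peer
 *
 * Sends the required Reset or Close packet and moves the socket to its next
 * state (CLOSED resp. CLOSING), depending on which passive-close state it is in.
 */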
static void dccp_finish_passive_close(struct sock *sk)
{
	switch (sk->sk_state) {
	case DCCP_PASSIVE_CLOSE:
		/* Node (client or server) has received Close packet. */
		dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
		dccp_set_state(sk, DCCP_CLOSED);
		break;
	case DCCP_PASSIVE_CLOSEREQ:
		/*
		 * Client received CloseReq. We set the `active' flag so that
		 * dccp_send_close() retransmits the Close as per RFC 4340, 8.3.
		 */
		dccp_send_close(sk, 1);
		dccp_set_state(sk, DCCP_CLOSING);
	}
}

void dccp_done(struct sock *sk)
{
	dccp_set_state(sk, DCCP_CLOSED);
	dccp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}

EXPORT_SYMBOL_GPL(dccp_done);

const char *dccp_packet_name(const int type)
{
	static const char *const dccp_packet_names[] = {
		[DCCP_PKT_REQUEST]  = "REQUEST",
		[DCCP_PKT_RESPONSE] = "RESPONSE",
		[DCCP_PKT_DATA]	    = "DATA",
		[DCCP_PKT_ACK]	    = "ACK",
		[DCCP_PKT_DATAACK]  = "DATAACK",
		[DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
		[DCCP_PKT_CLOSE]    = "CLOSE",
		[DCCP_PKT_RESET]    = "RESET",
		[DCCP_PKT_SYNC]	    = "SYNC",
		[DCCP_PKT_SYNCACK]  = "SYNCACK",
	};

	if (type >= DCCP_NR_PKT_TYPES)
		return "INVALID";
	else
		return dccp_packet_names[type];
}

EXPORT_SYMBOL_GPL(dccp_packet_name);

static void dccp_sk_destruct(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
	dp->dccps_hc_tx_ccid = NULL;
	inet_sock_destruct(sk);
}

int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_rto		= DCCP_TIMEOUT_INIT;
	icsk->icsk_syn_retries	= sysctl_dccp_request_retries;
	sk->sk_state		= DCCP_CLOSED;
	sk->sk_write_space	= dccp_write_space;
	sk->sk_destruct		= dccp_sk_destruct;
	icsk->icsk_sync_mss	= dccp_sync_mss;
	dp->dccps_mss_cache	= 536;
	dp->dccps_rate_last	= jiffies;
	dp->dccps_role		= DCCP_ROLE_UNDEFINED;
	dp->dccps_service	= DCCP_SERVICE_CODE_IS_ABSENT;
	dp->dccps_tx_qlen	= sysctl_dccp_tx_qlen;

	dccp_init_xmit_timers(sk);

	INIT_LIST_HEAD(&dp->dccps_featneg);
	/* control socket doesn't need feature negotiation */
	if (likely(ctl_sock_initialized))
		return dccp_feat_init(sk);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_init_sock);

void dccp_destroy_sock(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	__skb_queue_purge(&sk->sk_write_queue);
	if (sk->sk_send_head != NULL) {
		kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	/* Clean up a referenced DCCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash != NULL)
		inet_put_port(sk);

	kfree(dp->dccps_service_list);
	dp->dccps_service_list = NULL;

	if (dp->dccps_hc_rx_ackvec != NULL) {
		dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
		dp->dccps_hc_rx_ackvec = NULL;
	}
	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
	dp->dccps_hc_rx_ccid = NULL;

	/* clean up feature negotiation state */
	dccp_feat_list_purge(&dp->dccps_featneg);
}

EXPORT_SYMBOL_GPL(dccp_destroy_sock);

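/*
 * dccp_listen_start  -  switch a socket to the listening role
 *
 * Marks the socket as a listener, finalises the feature-negotiation settings
 * made so far and hands over to the generic inet_csk listen machinery.
 */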
static inline int dccp_listen_start(struct sock *sk, int backlog)
{
	struct dccp_sock *dp = dccp_sk(sk);

	dp->dccps_role = DCCP_ROLE_LISTEN;
	/* do not start to listen if feature negotiation setup fails */
	if (dccp_feat_finalise_settings(dp))
		return -EPROTO;
	return inet_csk_listen_start(sk, backlog);
}

static inline int dccp_need_reset(int state)
{
	return state != DCCP_CLOSED && state != DCCP_LISTEN &&
	       state != DCCP_REQUESTING;
}

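/*
 * dccp_disconnect  -  abort the connection and return the socket to CLOSED
 *
 * Called via sk->sk_prot->disconnect (e.g. from dccp_close() when lingering
 * with a zero timeout): resets the peer if a connection exists, purges all
 * queues and timers, and clears the connection-specific addressing state.
 */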
int dccp_disconnect(struct sock *sk, int flags)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	const int old_state = sk->sk_state;

	if (old_state != DCCP_CLOSED)
		dccp_set_state(sk, DCCP_CLOSED);

	/*
	 * This corresponds to the ABORT function of RFC793, sec. 3.8
	 * TCP uses a RST segment, DCCP a Reset packet with Code 2, "Aborted".
	 */
	if (old_state == DCCP_LISTEN) {
		inet_csk_listen_stop(sk);
	} else if (dccp_need_reset(old_state)) {
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		sk->sk_err = ECONNRESET;
	} else if (old_state == DCCP_REQUESTING)
		sk->sk_err = ECONNRESET;

	dccp_clear_xmit_timers(sk);
	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
	dp->dccps_hc_rx_ccid = NULL;

	__skb_queue_purge(&sk->sk_receive_queue);
	__skb_queue_purge(&sk->sk_write_queue);
	if (sk->sk_send_head != NULL) {
		__kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	inet->inet_dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);

	icsk->icsk_backoff = 0;
	inet_csk_delack_init(sk);
	__sk_dst_reset(sk);

	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);

	sk->sk_error_report(sk);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_disconnect);

/*
 *	Wait for a DCCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
__poll_t dccp_poll(struct file *file, struct socket *sock,
		   poll_table *wait)
{
	__poll_t mask;
	struct sock *sk = sock->sk;

	sock_poll_wait(file, sock, wait);
	if (sk->sk_state == DCCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = EPOLLERR;

	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
		mask |= EPOLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
		if (atomic_read(&sk->sk_rmem_alloc) > 0)
			mask |= EPOLLIN | EPOLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_is_writeable(sk)) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {  /* send SIGIO later */
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_is_writeable(sk))
					mask |= EPOLLOUT | EPOLLWRNORM;
			}
		}
	}
	return mask;
}

EXPORT_SYMBOL_GPL(dccp_poll);

int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int rc = -ENOTCONN;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN)
		goto out;

	switch (cmd) {
	case SIOCINQ: {
		struct sk_buff *skb;
		unsigned long amount = 0;

		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL) {
			/*
			 * We will only return the amount of this packet since
			 * that is all that will be read.
			 */
			amount = skb->len;
		}
		rc = put_user(amount, (int __user *)arg);
	}
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}
out:
	release_sock(sk);
	return rc;
}

EXPORT_SYMBOL_GPL(dccp_ioctl);

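/*
 * dccp_setsockopt_service  -  handle the DCCP_SOCKOPT_SERVICE option
 *
 * The first 32-bit word of optval is the primary service code; any further
 * words form an optional list of additional service codes to accept.
 */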
static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
				   char __user *optval, unsigned int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_service_list *sl = NULL;

	if (service == DCCP_SERVICE_INVALID_VALUE ||
	    optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
		return -EINVAL;

	if (optlen > sizeof(service)) {
		sl = kmalloc(optlen, GFP_KERNEL);
		if (sl == NULL)
			return -ENOMEM;

		sl->dccpsl_nr = optlen / sizeof(u32) - 1;
		if (copy_from_user(sl->dccpsl_list,
				   optval + sizeof(service),
				   optlen - sizeof(service)) ||
		    dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
			kfree(sl);
			return -EFAULT;
		}
	}

	lock_sock(sk);
	dp->dccps_service = service;

	kfree(dp->dccps_service_list);

	dp->dccps_service_list = sl;
	release_sock(sk);
	return 0;
}

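/*
 * dccp_setsockopt_cscov  -  register a partial checksum coverage preference
 * @cscov: value in 0..15 as per RFC 4340, 9.2.1 (0 = full coverage)
 * @rx:    true to constrain what we accept, false to set what we send
 *
 * A non-zero value is expanded into the list {cscov, ..., 15} and registered
 * with feature negotiation as acceptable Minimum Checksum Coverage values.
 */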
static int dccp_setsockopt_cscov(struct sock *sk, int cscov, bool rx)
{
	u8 *list, len;
	int i, rc;

	if (cscov < 0 || cscov > 15)
		return -EINVAL;
	/*
	 * Populate a list of permissible values, in the range cscov...15. This
	 * is necessary since feature negotiation of single values only works
	 * if both sides incidentally choose the same value. Since the list
	 * starts lowest-value first, negotiation will pick the smallest shared
	 * value.
	 */
	if (cscov == 0)
		return 0;
	len = 16 - cscov;

	list = kmalloc(len, GFP_KERNEL);
	if (list == NULL)
		return -ENOBUFS;

	for (i = 0; i < len; i++)
		list[i] = cscov++;

	rc = dccp_feat_register_sp(sk, DCCPF_MIN_CSUM_COVER, rx, list, len);

	if (rc == 0) {
		if (rx)
			dccp_sk(sk)->dccps_pcrlen = cscov;
		else
			dccp_sk(sk)->dccps_pcslen = cscov;
	}
	kfree(list);
	return rc;
}

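/*
 * dccp_setsockopt_ccid  -  register a CCID preference list for TX, RX or both
 *
 * optval holds an array of 1..DCCP_FEAT_MAX_SP_VALS CCID numbers in order of
 * preference, which is fed to feature negotiation for the CCID feature.
 */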
static int dccp_setsockopt_ccid(struct sock *sk, int type,
				char __user *optval, unsigned int optlen)
{
	u8 *val;
	int rc = 0;

	if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS)
		return -EINVAL;

	val = memdup_user(optval, optlen);
	if (IS_ERR(val))
		return PTR_ERR(val);

	lock_sock(sk);
	if (type == DCCP_SOCKOPT_TX_CCID || type == DCCP_SOCKOPT_CCID)
		rc = dccp_feat_register_sp(sk, DCCPF_CCID, 1, val, optlen);

	if (!rc && (type == DCCP_SOCKOPT_RX_CCID || type == DCCP_SOCKOPT_CCID))
		rc = dccp_feat_register_sp(sk, DCCPF_CCID, 0, val, optlen);
	release_sock(sk);

	kfree(val);
	return rc;
}

static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
			      char __user *optval, unsigned int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	int val, err = 0;

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CHANGE_L:
	case DCCP_SOCKOPT_CHANGE_R:
		DCCP_WARN("sockopt(CHANGE_L/R) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CCID:
	case DCCP_SOCKOPT_RX_CCID:
	case DCCP_SOCKOPT_TX_CCID:
		return dccp_setsockopt_ccid(sk, optname, optval, optlen);
	}

	if (optlen < (int)sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	if (optname == DCCP_SOCKOPT_SERVICE)
		return dccp_setsockopt_service(sk, val, optval, optlen);

	lock_sock(sk);
	switch (optname) {
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		if (dp->dccps_role != DCCP_ROLE_SERVER)
			err = -EOPNOTSUPP;
		else
			dp->dccps_server_timewait = (val != 0);
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, false);
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, true);
		break;
	case DCCP_SOCKOPT_QPOLICY_ID:
		if (sk->sk_state != DCCP_CLOSED)
			err = -EISCONN;
		else if (val < 0 || val >= DCCPQ_POLICY_MAX)
			err = -EINVAL;
		else
			dp->dccps_qpolicy = val;
		break;
	case DCCP_SOCKOPT_QPOLICY_TXQLEN:
		if (val < 0)
			err = -EINVAL;
		else
			dp->dccps_tx_qlen = val;
		break;
	default:
		err = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return err;
}

int dccp_setsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_setsockopt);

#ifdef CONFIG_COMPAT
int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_setsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_setsockopt);
#endif

static int dccp_getsockopt_service(struct sock *sk, int len,
				   __be32 __user *optval,
				   int __user *optlen)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const struct dccp_service_list *sl;
	int err = -ENOENT, slen = 0, total_len = sizeof(u32);

	lock_sock(sk);
	if ((sl = dp->dccps_service_list) != NULL) {
		slen = sl->dccpsl_nr * sizeof(u32);
		total_len += slen;
	}

	err = -EINVAL;
	if (total_len > len)
		goto out;

	err = 0;
	if (put_user(total_len, optlen) ||
	    put_user(dp->dccps_service, optval) ||
	    (sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
		err = -EFAULT;
out:
	release_sock(sk);
	return err;
}

static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct dccp_sock *dp;
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < (int)sizeof(int))
		return -EINVAL;

	dp = dccp_sk(sk);

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_SERVICE:
		return dccp_getsockopt_service(sk, len,
					       (__be32 __user *)optval, optlen);
	case DCCP_SOCKOPT_GET_CUR_MPS:
		val = dp->dccps_mss_cache;
		break;
	case DCCP_SOCKOPT_AVAILABLE_CCIDS:
		return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen);
	case DCCP_SOCKOPT_TX_CCID:
		val = ccid_get_current_tx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_RX_CCID:
		val = ccid_get_current_rx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		val = dp->dccps_server_timewait;
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		val = dp->dccps_pcslen;
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		val = dp->dccps_pcrlen;
		break;
	case DCCP_SOCKOPT_QPOLICY_ID:
		val = dp->dccps_qpolicy;
		break;
	case DCCP_SOCKOPT_QPOLICY_TXQLEN:
		val = dp->dccps_tx_qlen;
		break;
	case 128 ... 191:
		return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	case 192 ... 255:
		return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	default:
		return -ENOPROTOOPT;
	}

	len = sizeof(val);
	if (put_user(len, optlen) || copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

int dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_getsockopt);

#ifdef CONFIG_COMPAT
int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_getsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_getsockopt);
#endif

static int dccp_msghdr_parse(struct msghdr *msg, struct sk_buff *skb)
{
	struct cmsghdr *cmsg;

	/*
	 * Assign an (opaque) qpolicy priority value to skb->priority.
	 *
	 * We are overloading this skb field for use with the qpolicy subsystem.
	 * The skb->priority is normally used for the SO_PRIORITY option, which
	 * is initialised from sk_priority. Since the assignment of sk_priority
	 * to skb->priority happens later (on layer 3), we overload this field
	 * for use with queueing priorities as long as the skb is on layer 4.
	 * The default priority value (if nothing is set) is 0.
	 */
	skb->priority = 0;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_DCCP)
			continue;

		if (cmsg->cmsg_type <= DCCP_SCM_QPOLICY_MAX &&
		    !dccp_qpolicy_param_ok(skb->sk, cmsg->cmsg_type))
			return -EINVAL;

		switch (cmsg->cmsg_type) {
		case DCCP_SCM_PRIORITY:
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u32)))
				return -EINVAL;
			skb->priority = *(__u32 *)CMSG_DATA(cmsg);
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

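/*
 * dccp_sendmsg  -  queue one datagram for transmission
 *
 * DCCP preserves message boundaries: a message larger than the current MPS
 * (dccps_mss_cache) is rejected with -EMSGSIZE rather than fragmented. The
 * packet is handed to the qpolicy queue and transmitted by dccp_write_xmit()
 * as the CCID allows.
 */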
int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const int flags = msg->msg_flags;
	const int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int rc, size;
	long timeo;

	trace_dccp_probe(sk, len);

	if (len > dp->dccps_mss_cache)
		return -EMSGSIZE;

	lock_sock(sk);

	if (dccp_qpolicy_full(sk)) {
		rc = -EAGAIN;
		goto out_release;
	}

	timeo = sock_sndtimeo(sk, noblock);

	/*
	 * Wait (up to sndtimeo) until the handshake has reached OPEN or
	 * PARTOPEN before queueing application data.
	 */
	if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
		if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_release;

	size = sk->sk_prot->max_header + len;
	release_sock(sk);
	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
	lock_sock(sk);
	if (skb == NULL)
		goto out_release;

	if (sk->sk_state == DCCP_CLOSED) {
		rc = -ENOTCONN;
		goto out_discard;
	}

	skb_reserve(skb, sk->sk_prot->max_header);
	rc = memcpy_from_msg(skb_put(skb, len), msg, len);
	if (rc != 0)
		goto out_discard;

	rc = dccp_msghdr_parse(msg, skb);
	if (rc != 0)
		goto out_discard;

	dccp_qpolicy_push(sk, skb);
	/*
	 * The xmit_timer is set when the TX CCID asked us to delay sending;
	 * if it is already pending, dccp_write_xmit() will run when it fires,
	 * otherwise try to transmit the queue right away.
	 */
	if (!timer_pending(&dp->dccps_xmit_timer))
		dccp_write_xmit(sk);
out_release:
	release_sock(sk);
	return rc ? : len;
out_discard:
	kfree_skb(skb);
	goto out_release;
}

EXPORT_SYMBOL_GPL(dccp_sendmsg);

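/*
 * dccp_recvmsg  -  receive one datagram from the socket
 *
 * Unlike TCP, each skb on the receive queue is one DCCP datagram: only the
 * head packet is returned (excess bytes are dropped and MSG_TRUNC is set),
 * and Close/CloseReq/Reset packets on the queue terminate the read with 0.
 */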
int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
		 int flags, int *addr_len)
{
	const struct dccp_hdr *dh;
	long timeo;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN) {
		len = -ENOTCONN;
		goto out;
	}

	timeo = sock_rcvtimeo(sk, nonblock);

	do {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

		if (skb == NULL)
			goto verify_sock_status;

		dh = dccp_hdr(skb);

		switch (dh->dccph_type) {
		case DCCP_PKT_DATA:
		case DCCP_PKT_DATAACK:
			goto found_ok_skb;

		case DCCP_PKT_CLOSE:
		case DCCP_PKT_CLOSEREQ:
			if (!(flags & MSG_PEEK))
				dccp_finish_passive_close(sk);
			/* fall through */
		case DCCP_PKT_RESET:
			dccp_pr_debug("found fin (%s) ok!\n",
				      dccp_packet_name(dh->dccph_type));
			len = 0;
			goto found_fin_ok;
		default:
			dccp_pr_debug("packet_type=%s\n",
				      dccp_packet_name(dh->dccph_type));
			sk_eat_skb(sk, skb);
		}
verify_sock_status:
		if (sock_flag(sk, SOCK_DONE)) {
			len = 0;
			break;
		}

		if (sk->sk_err) {
			len = sock_error(sk);
			break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			len = 0;
			break;
		}

		if (sk->sk_state == DCCP_CLOSED) {
			if (!sock_flag(sk, SOCK_DONE)) {
				/* This occurs when user tried to read from
				 * a never connected socket.
				 */
				len = -ENOTCONN;
				break;
			}
			len = 0;
			break;
		}

		if (!timeo) {
			len = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			len = sock_intr_errno(timeo);
			break;
		}

		sk_wait_data(sk, &timeo, NULL);
		continue;
	found_ok_skb:
		if (len > skb->len)
			len = skb->len;
		else if (len < skb->len)
			msg->msg_flags |= MSG_TRUNC;

		if (skb_copy_datagram_msg(skb, 0, msg, len)) {
			/* Exception. Bailout! */
			len = -EFAULT;
			break;
		}
		if (flags & MSG_TRUNC)
			len = skb->len;
	found_fin_ok:
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb);
		break;
	} while (1);
out:
	release_sock(sk);
	return len;
}

EXPORT_SYMBOL_GPL(dccp_recvmsg);

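/*
 * inet_dccp_listen  -  listen() entry point for DCCP sockets
 *
 * Mirrors inet_listen(): validates the socket type and state, records the
 * backlog and starts listening if the socket was not already in LISTEN.
 */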
int inet_dccp_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	unsigned char old_state;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
		goto out;

	old_state = sk->sk_state;
	if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
		goto out;

	sk->sk_max_ack_backlog = backlog;

	/* Really, if the socket is already in listen state
	 * we can only allow the backlog to be adjusted.
	 */
	if (old_state != DCCP_LISTEN) {
		/*
		 * FIXME: here it probably should be sk->sk_prot->listen_start
		 * see tcp_listen_start
		 */
		err = dccp_listen_start(sk, backlog);
		if (err)
			goto out;
	}
	err = 0;

out:
	release_sock(sk);
	return err;
}

EXPORT_SYMBOL_GPL(inet_dccp_listen);

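/*
 * dccp_terminate_connection  -  start the active close sequence
 *
 * Depending on the current state this either completes a passive close,
 * sends a Close and moves to CLOSING (or ACTIVE_CLOSEREQ for a server that
 * does not hold the TIMEWAIT state), or simply marks the socket CLOSED.
 */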
static void dccp_terminate_connection(struct sock *sk)
{
	u8 next_state = DCCP_CLOSED;

	switch (sk->sk_state) {
	case DCCP_PASSIVE_CLOSE:
	case DCCP_PASSIVE_CLOSEREQ:
		dccp_finish_passive_close(sk);
		break;
	case DCCP_PARTOPEN:
		dccp_pr_debug("Stop PARTOPEN timer (%p)\n", sk);
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
		/* fall through */
	case DCCP_OPEN:
		dccp_send_close(sk, 1);

		if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER &&
		    !dccp_sk(sk)->dccps_server_timewait)
			next_state = DCCP_ACTIVE_CLOSEREQ;
		else
			next_state = DCCP_CLOSING;
		/* fall through */
	default:
		dccp_set_state(sk, next_state);
	}
}

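/*
 * dccp_close  -  close() entry point
 *
 * Flushes the receive queue (aborting the connection if data was unread),
 * waits for the write queue to drain within @timeout, initiates connection
 * termination, and finally orphans the socket so the remaining teardown can
 * complete asynchronously.
 */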
void dccp_close(struct sock *sk, long timeout)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	u32 data_was_unread = 0;
	int state;

	lock_sock(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_set_state(sk, DCCP_CLOSED);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	sk_stop_timer(sk, &dp->dccps_xmit_timer);

	/*
	 * We need to flush the recv. buffs.  We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		data_was_unread += skb->len;
		__kfree_skb(skb);
	}

	/* If socket has been already reset, kill it. */
	if (sk->sk_state == DCCP_CLOSED)
		goto adjudge_to_death;

	if (data_was_unread) {
		/* Unread data was tossed, send an appropriate Reset Code */
		DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread);
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		dccp_set_state(sk, DCCP_CLOSED);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
	} else if (sk->sk_state != DCCP_CLOSED) {
		/*
		 * Normal connection termination: may need to wait if there
		 * are still packets in the TX queue that are delayed by the
		 * CCID.
		 */
		dccp_flush_write_queue(sk, &timeout);
		dccp_terminate_connection(sk);
	}

	/*
	 * Flush the write queue. This may be necessary in several cases:
	 * - we have been closed by the peer but still have application data;
	 * - abortive termination (unread data or zero linger time);
	 * - normal termination, but the queue could not be flushed in time.
	 */
	__skb_queue_purge(&sk->sk_write_queue);

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	/*
	 * It is the last release_sock in its life. It will remove backlog.
	 */
	release_sock(sk);
	/*
	 * Now socket is owned by kernel and we acquire BH lock
	 * to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	WARN_ON(sock_owned_by_user(sk));

	percpu_counter_inc(sk->sk_prot->orphan_count);

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
		goto out;

	if (sk->sk_state == DCCP_CLOSED)
		inet_csk_destroy_sock(sk);

	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}

EXPORT_SYMBOL_GPL(dccp_close);

void dccp_shutdown(struct sock *sk, int how)
{
	dccp_pr_debug("called shutdown(%x)\n", how);
}

EXPORT_SYMBOL_GPL(dccp_shutdown);

static inline int __init dccp_mib_init(void)
{
	dccp_statistics = alloc_percpu(struct dccp_mib);
	if (!dccp_statistics)
		return -ENOMEM;
	return 0;
}

static inline void dccp_mib_exit(void)
{
	free_percpu(dccp_statistics);
}

static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");

#ifdef CONFIG_IP_DCCP_DEBUG
bool dccp_debug;
module_param(dccp_debug, bool, 0644);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");

EXPORT_SYMBOL_GPL(dccp_debug);
#endif

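/*
 * dccp_init  -  module initialisation
 *
 * Sizes and allocates the established (ehash) and bind (bhash) hash tables
 * in proportion to available memory (or to the thash_entries parameter),
 * then brings up the MIBs, ack vectors, sysctls and built-in CCIDs. Any
 * failure unwinds the steps already completed.
 */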
static int __init dccp_init(void)
{
	unsigned long goal;
	unsigned long nr_pages = totalram_pages();
	int ehash_order, bhash_order, i;
	int rc;

	BUILD_BUG_ON(sizeof(struct dccp_skb_cb) >
		     FIELD_SIZEOF(struct sk_buff, cb));
	rc = percpu_counter_init(&dccp_orphan_count, 0, GFP_KERNEL);
	if (rc)
		goto out_fail;
	inet_hashinfo_init(&dccp_hashinfo);
	rc = inet_hashinfo2_init_mod(&dccp_hashinfo);
	if (rc)
		goto out_fail;
	rc = -ENOBUFS;
	dccp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("dccp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!dccp_hashinfo.bind_bucket_cachep)
		goto out_free_percpu;

	/*
	 * Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	if (nr_pages >= (128 * 1024))
		goal = nr_pages >> (21 - PAGE_SHIFT);
	else
		goal = nr_pages >> (23 - PAGE_SHIFT);

	if (thash_entries)
		goal = (thash_entries *
			sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
	for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
		;
	do {
		unsigned long hash_size = (1UL << ehash_order) * PAGE_SIZE /
					sizeof(struct inet_ehash_bucket);

		while (hash_size & (hash_size - 1))
			hash_size--;
		dccp_hashinfo.ehash_mask = hash_size - 1;
		dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
			__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, ehash_order);
	} while (!dccp_hashinfo.ehash && --ehash_order > 0);

	if (!dccp_hashinfo.ehash) {
		DCCP_CRIT("Failed to allocate DCCP established hash table");
		goto out_free_bind_bucket_cachep;
	}

	for (i = 0; i <= dccp_hashinfo.ehash_mask; i++)
		INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i);

	if (inet_ehash_locks_alloc(&dccp_hashinfo))
		goto out_free_dccp_ehash;

	bhash_order = ehash_order;

	do {
		dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
					sizeof(struct inet_bind_hashbucket);
		if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
		    bhash_order > 0)
			continue;
		dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
			__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, bhash_order);
	} while (!dccp_hashinfo.bhash && --bhash_order >= 0);

	if (!dccp_hashinfo.bhash) {
		DCCP_CRIT("Failed to allocate DCCP bind hash table");
		goto out_free_dccp_locks;
	}

	for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
		spin_lock_init(&dccp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
	}

	rc = dccp_mib_init();
	if (rc)
		goto out_free_dccp_bhash;

	rc = dccp_ackvec_init();
	if (rc)
		goto out_free_dccp_mib;

	rc = dccp_sysctl_init();
	if (rc)
		goto out_ackvec_exit;

	rc = ccid_initialize_builtins();
	if (rc)
		goto out_sysctl_exit;

	dccp_timestamping_init();

	return 0;

out_sysctl_exit:
	dccp_sysctl_exit();
out_ackvec_exit:
	dccp_ackvec_exit();
out_free_dccp_mib:
	dccp_mib_exit();
out_free_dccp_bhash:
	free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
out_free_dccp_locks:
	inet_ehash_locks_free(&dccp_hashinfo);
out_free_dccp_ehash:
	free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
out_free_bind_bucket_cachep:
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
out_free_percpu:
	percpu_counter_destroy(&dccp_orphan_count);
out_fail:
	dccp_hashinfo.bhash = NULL;
	dccp_hashinfo.ehash = NULL;
	dccp_hashinfo.bind_bucket_cachep = NULL;
	return rc;
}

static void __exit dccp_fini(void)
{
	ccid_cleanup_builtins();
	dccp_mib_exit();
	free_pages((unsigned long)dccp_hashinfo.bhash,
		   get_order(dccp_hashinfo.bhash_size *
			     sizeof(struct inet_bind_hashbucket)));
	free_pages((unsigned long)dccp_hashinfo.ehash,
		   get_order((dccp_hashinfo.ehash_mask + 1) *
			     sizeof(struct inet_ehash_bucket)));
	inet_ehash_locks_free(&dccp_hashinfo);
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
	dccp_ackvec_exit();
	dccp_sysctl_exit();
	percpu_counter_destroy(&dccp_orphan_count);
}

module_init(dccp_init);
module_exit(dccp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");