1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37#include "core.h"
38#include "port.h"
39
40#include <linux/export.h>
41#include <net/sock.h>
42
/* Pseudo socket-states used alongside the standard SS_* values in
 * sock->state; negative so they cannot collide with the enum socket_state
 * range used by connection-oriented sockets.
 */
#define SS_LISTENING -1		/* socket is listening for connections */
#define SS_READY -2		/* connectionless socket, ready to send/receive */

#define OVERLOAD_LIMIT_BASE 5000	/* base rx-queue congestion threshold (msgs) */
#define CONN_TIMEOUT_DEFAULT 8000	/* default connect() timeout, in ms */
48
/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - must be first field, so that tipc_sk() can cast a
 *      struct sock pointer directly to a struct tipc_sock pointer
 * @p: pointer to the associated TIPC port structure
 * @peer_name: the peer of the connection, if any
 * @conn_timeout: the time we can wait for an unresponded setup request (ms)
 */
struct tipc_sock {
	struct sock sk;
	struct tipc_port *p;
	struct tipc_portid peer_name;
	unsigned int conn_timeout;
};
55
/* Cast works because struct sock is the first member of struct tipc_sock */
#define tipc_sk(sk) ((struct tipc_sock *)(sk))
#define tipc_sk_port(sk) (tipc_sk(sk)->p)

/* True when a receive on @sock can complete: data is queued, or the
 * connection has been torn down (so the caller must stop waiting).
 */
#define tipc_rx_ready(sock) (!skb_queue_empty(&sock->sk->sk_receive_queue) || \
			(sock->state == SS_DISCONNECTING))
61
/* Handlers defined later in this file */
static int backlog_rcv(struct sock *sk, struct sk_buff *skb);
static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf);
static void wakeupdispatch(struct tipc_port *tport);

/* Per-socket-type operation tables (defined at bottom of file) */
static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;

static struct proto tipc_proto;

static int sockets_enabled;	/* non-zero once tipc_socket_init() succeeds */

/* Total number of messages queued across all TIPC socket receive queues;
 * used by filter_rcv() for node-wide overload protection.
 */
static atomic_t tipc_queue_size = ATOMIC_INIT(0);
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126static void advance_rx_queue(struct sock *sk)
127{
128 kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
129 atomic_dec(&tipc_queue_size);
130}
131
132
133
134
135
136
137static void discard_rx_queue(struct sock *sk)
138{
139 struct sk_buff *buf;
140
141 while ((buf = __skb_dequeue(&sk->sk_receive_queue))) {
142 atomic_dec(&tipc_queue_size);
143 kfree_skb(buf);
144 }
145}
146
147
148
149
150
151
152static void reject_rx_queue(struct sock *sk)
153{
154 struct sk_buff *buf;
155
156 while ((buf = __skb_dequeue(&sk->sk_receive_queue))) {
157 tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
158 atomic_dec(&tipc_queue_size);
159 }
160}
161
162
163
164
165
166
167
168
169
170
171
172
173
/**
 * tipc_create - create a TIPC socket
 * @net: network namespace (unused; included to satisfy create-fn signature)
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
 * @kern: caused by kernel or by userspace?
 *
 * This routine creates additional data structures used by the TIPC socket,
 * initializes them, and links them together.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	const struct proto_ops *ops;
	socket_state state;
	struct sock *sk;
	struct tipc_port *tp_ptr;

	/* Validate arguments */
	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_STREAM:
		ops = &stream_ops;
		state = SS_UNCONNECTED;
		break;
	case SOCK_SEQPACKET:
		ops = &packet_ops;
		state = SS_UNCONNECTED;
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
		ops = &msg_ops;
		state = SS_READY;
		break;
	default:
		return -EPROTOTYPE;
	}

	/* Allocate socket's protocol area */
	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
	if (sk == NULL)
		return -ENOMEM;

	/* Allocate TIPC port for socket to use */
	tp_ptr = tipc_createport_raw(sk, &dispatch, &wakeupdispatch,
				     TIPC_LOW_IMPORTANCE);
	if (unlikely(!tp_ptr)) {
		sk_free(sk);
		return -ENOMEM;
	}

	/* Finish initializing socket data structures */
	sock->ops = ops;
	sock->state = state;

	sock_init_data(sock, sk);
	sk->sk_backlog_rcv = backlog_rcv;
	tipc_sk(sk)->p = tp_ptr;
	tipc_sk(sk)->conn_timeout = CONN_TIMEOUT_DEFAULT;

	/* NOTE(review): the unlock implies tipc_createport_raw() returns with
	 * the port lock held - confirm against port.c before reworking this.
	 */
	spin_unlock_bh(tp_ptr->lock);

	if (sock->state == SS_READY) {
		tipc_set_portunreturnable(tp_ptr->ref, 1);
		if (sock->type == SOCK_DGRAM)
			tipc_set_portunreliable(tp_ptr->ref, 1);
	}

	return 0;
}
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
/**
 * release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded.  (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender!  They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Returns 0 on success, errno otherwise
 */
static int release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport;
	struct sk_buff *buf;
	int res;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
		return 0;

	tport = tipc_sk_port(sk);
	lock_sock(sk);

	/*
	 * Reject all unreceived messages, except on an active connection
	 * (which disconnects locally & sends a 'FIN+' to peer)
	 */
	while (sock->state != SS_DISCONNECTING) {
		buf = __skb_dequeue(&sk->sk_receive_queue);
		if (buf == NULL)
			break;
		atomic_dec(&tipc_queue_size);
		if (TIPC_SKB_CB(buf)->handle != 0)
			kfree_skb(buf);	/* partially-read msg: just discard */
		else {
			if ((sock->state == SS_CONNECTING) ||
			    (sock->state == SS_CONNECTED)) {
				sock->state = SS_DISCONNECTING;
				tipc_disconnect(tport->ref);
			}
			tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
		}
	}

	/*
	 * Delete TIPC port; this ensures no more messages are queued
	 * (also disconnects an active connection & sends a 'FIN-' to peer)
	 */
	res = tipc_deleteport(tport->ref);

	/* Discard any remaining (connection-based) messages in receive queue */
	discard_rx_queue(sk);

	/* Reject any messages that accumulated in backlog queue */
	sock->state = SS_DISCONNECTING;
	release_sock(sk);

	sock_put(sk);
	sock->sk = NULL;

	return res;
}
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
/**
 * bind - associate or disassociate TIPC name(s) with a socket
 * @sock: socket structure
 * @uaddr: socket address describing name(s) and desired operation
 * @uaddr_len: size of socket address data structure
 *
 * A name sequence is bound when @addr->scope is positive and withdrawn when
 * it is negative; a zero-length @uaddr_len withdraws all names from the
 * socket.  A TIPC_ADDR_NAME request is converted to a one-element sequence
 * (upper = lower) before publication.
 *
 * Returns 0 on success, errno otherwise
 *
 * NOTE(review): the socket lock is not taken here; presumably the port-level
 * publish/withdraw routines provide their own serialization - confirm.
 */
static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	u32 portref = tipc_sk_port(sock->sk)->ref;

	if (unlikely(!uaddr_len))
		return tipc_withdraw(portref, 0, NULL);

	if (uaddr_len < sizeof(struct sockaddr_tipc))
		return -EINVAL;
	if (addr->family != AF_TIPC)
		return -EAFNOSUPPORT;

	if (addr->addrtype == TIPC_ADDR_NAME)
		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
	else if (addr->addrtype != TIPC_ADDR_NAMESEQ)
		return -EAFNOSUPPORT;

	/* Name types below TIPC_RESERVED_TYPES are reserved for internal use */
	if (addr->addr.nameseq.type < TIPC_RESERVED_TYPES)
		return -EACCES;

	return (addr->scope > 0) ?
		tipc_publish(portref, addr->scope, &addr->addr.nameseq) :
		tipc_withdraw(portref, -addr->scope, &addr->addr.nameseq);
}
350
351
352
353
354
355
356
357
358
359
360
361
362
363
/**
 * get_name - get port ID of socket or peer socket
 * @sock: socket structure
 * @uaddr: area for returned socket address
 * @uaddr_len: area for returned length of socket address
 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
 *
 * Returns 0 on success, errno otherwise
 */
static int get_name(struct socket *sock, struct sockaddr *uaddr,
		    int *uaddr_len, int peer)
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct tipc_sock *tsock = tipc_sk(sock->sk);

	memset(addr, 0, sizeof(*addr));
	if (peer) {
		/* peer == 2 also accepts a torn-down connection's former peer */
		if ((sock->state != SS_CONNECTED) &&
			((peer != 2) || (sock->state != SS_DISCONNECTING)))
			return -ENOTCONN;
		addr->addr.id.ref = tsock->peer_name.ref;
		addr->addr.id.node = tsock->peer_name.node;
	} else {
		addr->addr.id.ref = tsock->p->ref;
		addr->addr.id.node = tipc_own_addr;
	}

	*uaddr_len = sizeof(*addr);
	addr->addrtype = TIPC_ADDR_ID;
	addr->family = AF_TIPC;
	addr->scope = 0;
	addr->addr.name.domain = 0;

	return 0;
}
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
/**
 * poll - read and possibly block on pollmask
 * @file: file structure associated with the socket
 * @sock: socket for which to calculate the poll bits
 * @wait: poll table
 *
 * Returns pollmask value:
 *
 * socket state		flags set
 * ------------		---------
 * unconnected		no read flags
 *			no write flags
 * connecting		POLLIN/POLLRDNORM if ACK/NACK in rx queue
 *			no write flags
 * connected		POLLIN/POLLRDNORM if data in rx queue
 *			POLLOUT if port is not congested
 * disconnecting	POLLIN/POLLRDNORM/POLLHUP
 *			no write flags
 * listening		POLLIN if SYN in rx queue
 *			no write flags
 * ready		POLLIN/POLLRDNORM if data in rx queue
 * [connectionless]	POLLOUT (since port cannot be congested)
 */
static unsigned int poll(struct file *file, struct socket *sock,
			 poll_table *wait)
{
	struct sock *sk = sock->sk;
	u32 mask = 0;

	poll_wait(file, sk_sleep(sk), wait);

	switch ((int)sock->state) {
	case SS_READY:
	case SS_CONNECTED:
		if (!tipc_sk_port(sk)->congested)
			mask |= POLLOUT;
		/* fall through: readability check is shared with the states below */
	case SS_CONNECTING:
	case SS_LISTENING:
		if (!skb_queue_empty(&sk->sk_receive_queue))
			mask |= (POLLIN | POLLRDNORM);
		break;
	case SS_DISCONNECTING:
		mask = (POLLIN | POLLRDNORM | POLLHUP);
		break;
	}

	return mask;
}
457
458
459
460
461
462
463
464
465
466
467
/**
 * dest_name_check - verify user is permitted to send to specified port name
 * @dest: destination address
 * @m: descriptor for message to be sent
 *
 * Prevents restricted configuration commands from being issued by
 * unauthorized users.
 *
 * Returns 0 if permission is granted, otherwise errno
 */
static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
{
	struct tipc_cfg_msg_hdr hdr;

	/* Ordinary application name types are always allowed */
	if (likely(dest->addr.name.name.type >= TIPC_RESERVED_TYPES))
		return 0;
	if (likely(dest->addr.name.name.type == TIPC_TOP_SRV))
		return 0;
	/* Only the configuration service is reachable among reserved types */
	if (likely(dest->addr.name.name.type != TIPC_CFG_SRV))
		return -EACCES;

	if (!m->msg_iovlen || (m->msg_iov[0].iov_len < sizeof(hdr)))
		return -EMSGSIZE;
	if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr)))
		return -EFAULT;
	/* Privileged command codes require CAP_NET_ADMIN */
	if ((ntohs(hdr.tcm_type) & 0xC000) && (!capable(CAP_NET_ADMIN)))
		return -EACCES;

	return 0;
}
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
/**
 * send_msg - send message in connectionless manner
 * @iocb: if NULL, indicates that socket lock is already held
 * @sock: socket structure
 * @m: message to send
 * @total_len: length of message
 *
 * Message must have an destination specified explicitly.
 * Used for SOCK_RDM and SOCK_DGRAM messages,
 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int send_msg(struct kiocb *iocb, struct socket *sock,
		    struct msghdr *m, size_t total_len)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
	int needs_conn;
	long timeout_val;
	int res = -EINVAL;

	if (unlikely(!dest))
		return -EDESTADDRREQ;
	if (unlikely((m->msg_namelen < sizeof(*dest)) ||
		     (dest->family != AF_TIPC)))
		return -EINVAL;
	if ((total_len > TIPC_MAX_USER_MSG_SIZE) ||
	    (m->msg_iovlen > (unsigned int)INT_MAX))
		return -EMSGSIZE;

	if (iocb)
		lock_sock(sk);

	needs_conn = (sock->state != SS_READY);
	if (unlikely(needs_conn)) {
		if (sock->state == SS_LISTENING) {
			res = -EPIPE;
			goto exit;
		}
		if (sock->state != SS_UNCONNECTED) {
			res = -EISCONN;
			goto exit;
		}
		if ((tport->published) ||
		    ((sock->type == SOCK_STREAM) && (total_len != 0))) {
			res = -EOPNOTSUPP;
			goto exit;
		}
		if (dest->addrtype == TIPC_ADDR_NAME) {
			tport->conn_type = dest->addr.name.name.type;
			tport->conn_instance = dest->addr.name.name.instance;
		}

		/* Abort any pending connection attempts (very unlikely) */
		reject_rx_queue(sk);
	}

	timeout_val = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);

	/* Retry on link congestion (-ELINKCONG) until timeout expires */
	do {
		if (dest->addrtype == TIPC_ADDR_NAME) {
			res = dest_name_check(dest, m);
			if (res)
				break;
			res = tipc_send2name(tport->ref,
					     &dest->addr.name.name,
					     dest->addr.name.domain,
					     m->msg_iovlen,
					     m->msg_iov,
					     total_len);
		} else if (dest->addrtype == TIPC_ADDR_ID) {
			res = tipc_send2port(tport->ref,
					     &dest->addr.id,
					     m->msg_iovlen,
					     m->msg_iov,
					     total_len);
		} else if (dest->addrtype == TIPC_ADDR_MCAST) {
			if (needs_conn) {
				res = -EOPNOTSUPP;
				break;
			}
			res = dest_name_check(dest, m);
			if (res)
				break;
			res = tipc_multicast(tport->ref,
					     &dest->addr.nameseq,
					     m->msg_iovlen,
					     m->msg_iov,
					     total_len);
		}
		if (likely(res != -ELINKCONG)) {
			/* A successful implied connect moves to SS_CONNECTING */
			if (needs_conn && (res >= 0))
				sock->state = SS_CONNECTING;
			break;
		}
		if (timeout_val <= 0L) {
			res = timeout_val ? timeout_val : -EWOULDBLOCK;
			break;
		}
		/* Drop socket lock while sleeping on congestion relief */
		release_sock(sk);
		timeout_val = wait_event_interruptible_timeout(*sk_sleep(sk),
			!tport->congested, timeout_val);
		lock_sock(sk);
	} while (1);

exit:
	if (iocb)
		release_sock(sk);
	return res;
}
602
603
604
605
606
607
608
609
610
611
612
613
/**
 * send_packet - send a connection-oriented message
 * @iocb: if NULL, indicates that socket lock is already held
 * @sock: socket structure
 * @m: message to send
 * @total_len: length of message
 *
 * Used for SOCK_SEQPACKET messages and SOCK_STREAM data.
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */
static int send_packet(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *m, size_t total_len)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
	long timeout_val;
	int res;

	/* Handle implied connection establishment */
	if (unlikely(dest))
		return send_msg(iocb, sock, m, total_len);

	if ((total_len > TIPC_MAX_USER_MSG_SIZE) ||
	    (m->msg_iovlen > (unsigned int)INT_MAX))
		return -EMSGSIZE;

	if (iocb)
		lock_sock(sk);

	timeout_val = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);

	/* Retry on link congestion (-ELINKCONG) until timeout expires */
	do {
		if (unlikely(sock->state != SS_CONNECTED)) {
			if (sock->state == SS_DISCONNECTING)
				res = -EPIPE;
			else
				res = -ENOTCONN;
			break;
		}

		res = tipc_send(tport->ref, m->msg_iovlen, m->msg_iov,
				total_len);
		if (likely(res != -ELINKCONG))
			break;
		if (timeout_val <= 0L) {
			res = timeout_val ? timeout_val : -EWOULDBLOCK;
			break;
		}
		/* Also wake if the connection dies while we wait */
		release_sock(sk);
		timeout_val = wait_event_interruptible_timeout(*sk_sleep(sk),
			(!tport->congested || !tport->connected), timeout_val);
		lock_sock(sk);
	} while (1);

	if (iocb)
		release_sock(sk);
	return res;
}
663
664
665
666
667
668
669
670
671
672
673
674
675
/**
 * send_stream - send stream-oriented data
 * @iocb: (unused)
 * @sock: socket structure
 * @m: data to send
 * @total_len: total length of data to be sent
 *
 * Used for SOCK_STREAM data.
 *
 * Returns the number of bytes sent on success (or partial success),
 * or errno if no data sent
 */
static int send_stream(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *m, size_t total_len)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	struct msghdr my_msg;
	struct iovec my_iov;
	struct iovec *curr_iov;
	int curr_iovlen;
	char __user *curr_start;
	u32 hdr_size;
	int curr_left;
	int bytes_to_send;
	int bytes_sent;
	int res;

	lock_sock(sk);

	/* Handle special cases where there is no connection */
	if (unlikely(sock->state != SS_CONNECTED)) {
		if (sock->state == SS_UNCONNECTED) {
			/* Zero-length send doubles as implied connect */
			res = send_packet(NULL, sock, m, total_len);
			goto exit;
		} else if (sock->state == SS_DISCONNECTING) {
			res = -EPIPE;
			goto exit;
		} else {
			res = -ENOTCONN;
			goto exit;
		}
	}

	if (unlikely(m->msg_name)) {
		res = -EISCONN;
		goto exit;
	}

	if ((total_len > (unsigned int)INT_MAX) ||
	    (m->msg_iovlen > (unsigned int)INT_MAX)) {
		res = -EMSGSIZE;
		goto exit;
	}

	/*
	 * Send each iovec entry as a sequence of packets, where each packet
	 * carries as much data as will fit in a single TIPC message
	 * (i.e. link MTU minus header, capped at TIPC_MAX_USER_MSG_SIZE).
	 * A local copy of the msghdr is used so the caller's descriptor
	 * is never modified.
	 */
	curr_iov = m->msg_iov;
	curr_iovlen = m->msg_iovlen;
	my_msg.msg_iov = &my_iov;
	my_msg.msg_iovlen = 1;
	my_msg.msg_flags = m->msg_flags;
	my_msg.msg_name = NULL;
	bytes_sent = 0;

	hdr_size = msg_hdr_sz(&tport->phdr);

	while (curr_iovlen--) {
		curr_start = curr_iov->iov_base;
		curr_left = curr_iov->iov_len;

		while (curr_left) {
			bytes_to_send = tport->max_pkt - hdr_size;
			if (bytes_to_send > TIPC_MAX_USER_MSG_SIZE)
				bytes_to_send = TIPC_MAX_USER_MSG_SIZE;
			if (curr_left < bytes_to_send)
				bytes_to_send = curr_left;
			my_iov.iov_base = curr_start;
			my_iov.iov_len = bytes_to_send;
			res = send_packet(NULL, sock, &my_msg, bytes_to_send);
			if (res < 0) {
				/* Report partial success, if any */
				if (bytes_sent)
					res = bytes_sent;
				goto exit;
			}
			curr_left -= bytes_to_send;
			curr_start += bytes_to_send;
			bytes_sent += bytes_to_send;
		}

		curr_iov++;
	}
	res = bytes_sent;
exit:
	release_sock(sk);
	return res;
}
766
767
768
769
770
771
772
773
/**
 * auto_connect - complete connection setup to a remote port
 * @sock: socket structure
 * @msg: peer's response message (ACK, or a message carrying an error code)
 *
 * Records the peer's identity, connects the local port to it, and adopts
 * the peer's message importance.  An errored response moves the socket to
 * SS_DISCONNECTING instead.
 *
 * Returns 0 on success, errno otherwise
 */
static int auto_connect(struct socket *sock, struct tipc_msg *msg)
{
	struct tipc_sock *tsock = tipc_sk(sock->sk);

	if (msg_errcode(msg)) {
		sock->state = SS_DISCONNECTING;
		return -ECONNREFUSED;
	}

	tsock->peer_name.ref = msg_origport(msg);
	tsock->peer_name.node = msg_orignode(msg);
	tipc_connect2port(tsock->p->ref, &tsock->peer_name);
	tipc_set_portimportance(tsock->p->ref, msg_importance(msg));
	sock->state = SS_CONNECTED;
	return 0;
}
790
791
792
793
794
795
796
797
798static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
799{
800 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)m->msg_name;
801
802 if (addr) {
803 addr->family = AF_TIPC;
804 addr->addrtype = TIPC_ADDR_ID;
805 addr->addr.id.ref = msg_origport(msg);
806 addr->addr.id.node = msg_orignode(msg);
807 addr->addr.name.domain = 0;
808 addr->scope = 0;
809 m->msg_namelen = sizeof(struct sockaddr_tipc);
810 }
811}
812
813
814
815
816
817
818
819
820
821
822
/**
 * anc_data_recv - optionally capture ancillary data for received message
 * @m: descriptor for message info
 * @msg: received message header
 * @tport: TIPC port associated with message
 *
 * Note: Ancillary data is not captured if not requested by receiver.
 *
 * Returns 0 if successful, otherwise errno
 */
static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
			 struct tipc_port *tport)
{
	u32 anc_data[3];
	u32 err;
	u32 dest_type;
	int has_name;
	int res;

	if (likely(m->msg_controllen == 0))
		return 0;

	/* Optionally capture errored message object(s) */
	err = msg ? msg_errcode(msg) : 0;
	if (unlikely(err)) {
		anc_data[0] = err;
		anc_data[1] = msg_data_sz(msg);
		res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
		if (res)
			return res;
		if (anc_data[1]) {
			res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
				       msg_data(msg));
			if (res)
				return res;
		}
	}

	/* Optionally capture message destination name(s) */
	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
	switch (dest_type) {
	case TIPC_NAMED_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		/* a named message has a single instance: lower == upper */
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_namelower(msg);
		break;
	case TIPC_MCAST_MSG:
		has_name = 1;
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_nameupper(msg);
		break;
	case TIPC_CONN_MSG:
		has_name = (tport->conn_type != 0);
		anc_data[0] = tport->conn_type;
		anc_data[1] = tport->conn_instance;
		anc_data[2] = tport->conn_instance;
		break;
	default:
		has_name = 0;
	}
	if (has_name) {
		res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
		if (res)
			return res;
	}

	return 0;
}
883
884
885
886
887
888
889
890
891
892
893
894
895
/**
 * recv_msg - receive packet-oriented message
 * @iocb: (unused)
 * @sock: network socket
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
 * If the complete message doesn't fit in user area, truncate it.
 *
 * Returns size of returned message data, errno otherwise
 */
static int recv_msg(struct kiocb *iocb, struct socket *sock,
		    struct msghdr *m, size_t buf_len, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	long timeout;
	unsigned int sz;
	u32 err;
	int res;

	/* Catch invalid receive requests */
	if (unlikely(!buf_len))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely(sock->state == SS_UNCONNECTED)) {
		res = -ENOTCONN;
		goto exit;
	}

	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
restart:

	/* Look for a message in receive queue; wait if necessary */
	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (sock->state == SS_DISCONNECTING) {
			res = -ENOTCONN;
			goto exit;
		}
		if (timeout <= 0L) {
			res = timeout ? timeout : -EWOULDBLOCK;
			goto exit;
		}
		release_sock(sk);
		timeout = wait_event_interruptible_timeout(*sk_sleep(sk),
							   tipc_rx_ready(sock),
							   timeout);
		lock_sock(sk);
	}

	/* Look at first message in receive queue */
	buf = skb_peek(&sk->sk_receive_queue);
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	err = msg_errcode(msg);

	/* Complete connection setup for an implied connect */
	if (unlikely(sock->state == SS_CONNECTING)) {
		res = auto_connect(sock, msg);
		if (res)
			goto exit;
	}

	/* Discard an empty non-errored message & try again */
	if ((!sz) && (!err)) {
		advance_rx_queue(sk);
		goto restart;
	}

	/* Capture sender's address (optional) */
	set_orig_addr(m, msg);

	/* Capture ancillary data (optional) */
	res = anc_data_recv(m, msg, tport);
	if (res)
		goto exit;

	/* Capture message data (if valid) & compute return value (always) */
	if (!err) {
		if (unlikely(buf_len < sz)) {
			sz = buf_len;
			m->msg_flags |= MSG_TRUNC;
		}
		res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg),
					      m->msg_iov, sz);
		if (res)
			goto exit;
		res = sz;
	} else {
		if ((sock->state == SS_READY) ||
		    ((err == TIPC_CONN_SHUTDOWN) || m->msg_control))
			res = 0;
		else
			res = -ECONNRESET;
	}

	/* Consume received message (optional) */
	if (likely(!(flags & MSG_PEEK))) {
		if ((sock->state != SS_READY) &&
		    (++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
			tipc_acknowledge(tport->ref, tport->conn_unacked);
		advance_rx_queue(sk);
	}
exit:
	release_sock(sk);
	return res;
}
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
/**
 * recv_stream - receive stream-oriented data
 * @iocb: (unused)
 * @sock: network socket
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_STREAM messages only.  If not enough data is available
 * will optionally wait for more; never truncates data.
 *
 * Returns size of returned message data, errno otherwise
 */
static int recv_stream(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *m, size_t buf_len, int flags)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	long timeout;
	unsigned int sz;
	int sz_to_copy, target, needed;
	int sz_copied = 0;
	u32 err;
	int res = 0;

	/* Catch invalid receive attempts */
	if (unlikely(!buf_len))
		return -EINVAL;

	lock_sock(sk);

	if (unlikely((sock->state == SS_UNCONNECTED) ||
		     (sock->state == SS_CONNECTING))) {
		res = -ENOTCONN;
		goto exit;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

restart:
	/* Look for a message in receive queue; wait if necessary */
	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (sock->state == SS_DISCONNECTING) {
			res = -ENOTCONN;
			goto exit;
		}
		if (timeout <= 0L) {
			res = timeout ? timeout : -EWOULDBLOCK;
			goto exit;
		}
		release_sock(sk);
		timeout = wait_event_interruptible_timeout(*sk_sleep(sk),
							   tipc_rx_ready(sock),
							   timeout);
		lock_sock(sk);
	}

	/* Look at first message in receive queue */
	buf = skb_peek(&sk->sk_receive_queue);
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	err = msg_errcode(msg);

	/* Discard an empty non-errored message & try again */
	if ((!sz) && (!err)) {
		advance_rx_queue(sk);
		goto restart;
	}

	/* Optionally capture sender's address & ancillary data of first msg */
	if (sz_copied == 0) {
		set_orig_addr(m, msg);
		res = anc_data_recv(m, msg, tport);
		if (res)
			goto exit;
	}

	/* Capture message data (if valid) & compute return value (always) */
	if (!err) {
		/* handle stores the offset already consumed from this buffer */
		u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle);

		sz -= offset;
		needed = (buf_len - sz_copied);
		sz_to_copy = (sz <= needed) ? sz : needed;

		res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg) + offset,
					      m->msg_iov, sz_to_copy);
		if (res)
			goto exit;

		sz_copied += sz_to_copy;

		if (sz_to_copy < sz) {
			/* Buffer only partially consumed: remember new offset */
			if (!(flags & MSG_PEEK))
				TIPC_SKB_CB(buf)->handle =
				(void *)(unsigned long)(offset + sz_to_copy);
			goto exit;
		}
	} else {
		if (sz_copied != 0)
			goto exit; /* can't add error msg to valid data */

		if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
			res = 0;
		else
			res = -ECONNRESET;
	}

	/* Consume received message (optional) */
	if (likely(!(flags & MSG_PEEK))) {
		if (unlikely(++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
			tipc_acknowledge(tport->ref, tport->conn_unacked);
		advance_rx_queue(sk);
	}

	/* Loop around if more data is required */
	if ((sz_copied < buf_len) &&	/* didn't get all requested data */
	    (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sz_copied < target)) &&	/* and more is ready or required */
	    (!(flags & MSG_PEEK)) &&	/* and aren't just peeking at data */
	    (!err))			/* and haven't reached a FIN */
		goto restart;

exit:
	release_sock(sk);
	return sz_copied ? sz_copied : res;
}
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base)
1136{
1137 u32 threshold;
1138 u32 imp = msg_importance(msg);
1139
1140 if (imp == TIPC_LOW_IMPORTANCE)
1141 threshold = base;
1142 else if (imp == TIPC_MEDIUM_IMPORTANCE)
1143 threshold = base * 2;
1144 else if (imp == TIPC_HIGH_IMPORTANCE)
1145 threshold = base * 100;
1146 else
1147 return 0;
1148
1149 if (msg_connected(msg))
1150 threshold *= 4;
1151
1152 return queue_size >= threshold;
1153}
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
/**
 * filter_rcv - validate incoming message
 * @sk: socket
 * @buf: message
 *
 * Enqueues message on receive queue if acceptable; optionally handles
 * disconnect indication for a connected socket.
 *
 * Called with socket lock already taken; port lock may also be taken.
 *
 * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
 */
static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
{
	struct socket *sock = sk->sk_socket;
	struct tipc_msg *msg = buf_msg(buf);
	u32 recv_q_len;

	/* Reject message if it is wrong sort of message for socket */
	if (msg_type(msg) > TIPC_DIRECT_MSG)
		return TIPC_ERR_NO_PORT;

	if (sock->state == SS_READY) {
		if (msg_connected(msg))
			return TIPC_ERR_NO_PORT;
	} else {
		if (msg_mcast(msg))
			return TIPC_ERR_NO_PORT;
		if (sock->state == SS_CONNECTED) {
			if (!msg_connected(msg) ||
			    !tipc_port_peer_msg(tipc_sk_port(sk), msg))
				return TIPC_ERR_NO_PORT;
		} else if (sock->state == SS_CONNECTING) {
			if (!msg_connected(msg) && (msg_errcode(msg) == 0))
				return TIPC_ERR_NO_PORT;
		} else if (sock->state == SS_LISTENING) {
			if (msg_connected(msg) || msg_errcode(msg))
				return TIPC_ERR_NO_PORT;
		} else if (sock->state == SS_DISCONNECTING) {
			return TIPC_ERR_NO_PORT;
		} else /* (sock->state == SS_UNCONNECTED) */ {
			if (msg_connected(msg) || msg_errcode(msg))
				return TIPC_ERR_NO_PORT;
		}
	}

	/* Reject message if there isn't room to queue it
	 * (checked against both the node-wide and per-socket thresholds)
	 */
	recv_q_len = (u32)atomic_read(&tipc_queue_size);
	if (unlikely(recv_q_len >= OVERLOAD_LIMIT_BASE)) {
		if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE))
			return TIPC_ERR_OVERLOAD;
	}
	recv_q_len = skb_queue_len(&sk->sk_receive_queue);
	if (unlikely(recv_q_len >= (OVERLOAD_LIMIT_BASE / 2))) {
		if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE / 2))
			return TIPC_ERR_OVERLOAD;
	}

	/* Enqueue message (finally!) */
	TIPC_SKB_CB(buf)->handle = 0;	/* no data consumed yet (stream offset) */
	atomic_inc(&tipc_queue_size);
	__skb_queue_tail(&sk->sk_receive_queue, buf);

	/* Initiate connection termination for an incoming 'FIN' */
	if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) {
		sock->state = SS_DISCONNECTING;
		tipc_disconnect_port(tipc_sk_port(sk));
	}

	if (waitqueue_active(sk_sleep(sk)))
		wake_up_interruptible(sk_sleep(sk));
	return TIPC_OK;
}
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238static int backlog_rcv(struct sock *sk, struct sk_buff *buf)
1239{
1240 u32 res;
1241
1242 res = filter_rcv(sk, buf);
1243 if (res)
1244 tipc_reject_msg(buf, res);
1245 return 0;
1246}
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
/**
 * dispatch - handle incoming message
 * @tport: TIPC port that received message
 * @buf: message
 *
 * Called with port lock already taken.
 *
 * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
 */
static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
{
	struct sock *sk = (struct sock *)tport->usr_handle;
	u32 res;

	/*
	 * Process message if socket is unlocked; otherwise add to backlog queue
	 *
	 * This code is based on sk_receive_skb(), but must be distinct from it
	 * since a TIPC-specific filter/reject mechanism is utilized
	 */
	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		res = filter_rcv(sk, buf);
	} else {
		if (sk_add_backlog(sk, buf, sk->sk_rcvbuf))
			res = TIPC_ERR_OVERLOAD;
		else
			res = TIPC_OK;
	}
	bh_unlock_sock(sk);

	return res;
}
1281
1282
1283
1284
1285
1286
1287
1288static void wakeupdispatch(struct tipc_port *tport)
1289{
1290 struct sock *sk = (struct sock *)tport->usr_handle;
1291
1292 if (waitqueue_active(sk_sleep(sk)))
1293 wake_up_interruptible(sk_sleep(sk));
1294}
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
/**
 * connect - establish a connection to another TIPC port
 * @sock: socket structure
 * @dest: socket address for destination port
 * @destlen: size of socket address data structure
 * @flags: file-related flags associated with socket
 *
 * Returns 0 on success, errno otherwise
 */
static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
		   int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
	struct msghdr m = {NULL,};
	struct sk_buff *buf;
	struct tipc_msg *msg;
	unsigned int timeout;
	int res;

	lock_sock(sk);

	/* For now, TIPC does not allow use of connect() with DGRAM/RDM types */
	if (sock->state == SS_READY) {
		res = -EOPNOTSUPP;
		goto exit;
	}

	/* For now, TIPC does not support a non-blocking connection setup */
	if (flags & O_NONBLOCK) {
		res = -EOPNOTSUPP;
		goto exit;
	}

	/* Issue Posix-compliant error code if socket is in the wrong state */
	if (sock->state == SS_LISTENING) {
		res = -EOPNOTSUPP;
		goto exit;
	}
	if (sock->state == SS_CONNECTING) {
		res = -EALREADY;
		goto exit;
	}
	if (sock->state != SS_UNCONNECTED) {
		res = -EISCONN;
		goto exit;
	}

	/*
	 * Reject connection attempt using multicast address
	 *
	 * Note: send_msg() validates the rest of the address fields,
	 *       so there's no need to do it here
	 */
	if (dst->addrtype == TIPC_ADDR_MCAST) {
		res = -EINVAL;
		goto exit;
	}

	/* Reject any messages already in receive queue (very unlikely) */
	reject_rx_queue(sk);

	/* Create a 'connecting' message & wait for response (empty SYN) */
	m.msg_name = dest;
	m.msg_namelen = destlen;
	res = send_msg(NULL, sock, &m, 0);
	if (res < 0)
		goto exit;

	/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
	timeout = tipc_sk(sk)->conn_timeout;
	release_sock(sk);
	res = wait_event_interruptible_timeout(*sk_sleep(sk),
			(!skb_queue_empty(&sk->sk_receive_queue) ||
			(sock->state != SS_CONNECTING)),
			timeout ? (long)msecs_to_jiffies(timeout)
				: MAX_SCHEDULE_TIMEOUT);
	lock_sock(sk);

	if (res > 0) {
		buf = skb_peek(&sk->sk_receive_queue);
		if (buf != NULL) {
			msg = buf_msg(buf);
			res = auto_connect(sock, msg);
			if (!res) {
				/* consume a bare ACK; a 'SYN+' stays queued */
				if (!msg_data_sz(msg))
					advance_rx_queue(sk);
			}
		} else {
			if (sock->state == SS_CONNECTED)
				res = -EISCONN;
			else
				res = -ECONNREFUSED;
		}
	} else {
		if (res == 0)
			res = -ETIMEDOUT;
		else
			; /* leave "res" unchanged (interrupted by signal) */
		sock->state = SS_DISCONNECTING;
	}

exit:
	release_sock(sk);
	return res;
}
1402
1403
1404
1405
1406
1407
1408
1409
1410static int listen(struct socket *sock, int len)
1411{
1412 struct sock *sk = sock->sk;
1413 int res;
1414
1415 lock_sock(sk);
1416
1417 if (sock->state != SS_UNCONNECTED)
1418 res = -EINVAL;
1419 else {
1420 sock->state = SS_LISTENING;
1421 res = 0;
1422 }
1423
1424 release_sock(sk);
1425 return res;
1426}
1427
1428
1429
1430
1431
1432
1433
1434
1435
/**
 * accept - wait for connection request
 * @sock: listening socket
 * @new_sock: new socket that is to be connected
 * @flags: file-related flags associated with socket
 *
 * Returns 0 on success, errno otherwise
 */
static int accept(struct socket *sock, struct socket *new_sock, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *buf;
	int res;

	lock_sock(sk);

	if (sock->state != SS_LISTENING) {
		res = -EINVAL;
		goto exit;
	}

	/* Wait for an incoming 'SYN' (unless non-blocking) */
	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (flags & O_NONBLOCK) {
			res = -EWOULDBLOCK;
			goto exit;
		}
		release_sock(sk);
		res = wait_event_interruptible(*sk_sleep(sk),
				(!skb_queue_empty(&sk->sk_receive_queue)));
		lock_sock(sk);
		if (res)
			goto exit;
	}

	buf = skb_peek(&sk->sk_receive_queue);

	res = tipc_create(sock_net(sock->sk), new_sock, 0, 0);
	if (!res) {
		struct sock *new_sk = new_sock->sk;
		struct tipc_sock *new_tsock = tipc_sk(new_sk);
		struct tipc_port *new_tport = new_tsock->p;
		u32 new_ref = new_tport->ref;
		struct tipc_msg *msg = buf_msg(buf);

		lock_sock(new_sk);

		/*
		 * Reject any stray messages received by new socket
		 * before the socket lock was taken (very, very unlikely)
		 */
		reject_rx_queue(new_sk);

		/* Connect new socket to its peer */
		new_tsock->peer_name.ref = msg_origport(msg);
		new_tsock->peer_name.node = msg_orignode(msg);
		tipc_connect2port(new_ref, &new_tsock->peer_name);
		new_sock->state = SS_CONNECTED;

		tipc_set_portimportance(new_ref, msg_importance(msg));
		if (msg_named(msg)) {
			new_tport->conn_type = msg_nametype(msg);
			new_tport->conn_instance = msg_nameinst(msg);
		}

		/*
		 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
		 * Respond to 'SYN+' by queuing it on new socket.
		 */
		if (!msg_data_sz(msg)) {
			struct msghdr m = {NULL,};

			advance_rx_queue(sk);
			send_packet(NULL, new_sock, &m, 0);
		} else {
			__skb_dequeue(&sk->sk_receive_queue);
			__skb_queue_head(&new_sk->sk_receive_queue, buf);
		}
		release_sock(new_sk);
	}
exit:
	release_sock(sk);
	return res;
}
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
/**
 * shutdown - shutdown socket connection
 * @sock: socket structure
 * @how: direction to close (must be SHUT_RDWR)
 *
 * Terminates connection (if necessary), then purges socket's receive queue.
 *
 * Returns 0 on success, errno otherwise
 */
static int shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	struct sk_buff *buf;
	int res;

	if (how != SHUT_RDWR)
		return -EINVAL;

	lock_sock(sk);

	switch (sock->state) {
	case SS_CONNECTING:
	case SS_CONNECTED:

		/* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
restart:
		/* Skip past any partially-read messages before rejecting */
		buf = __skb_dequeue(&sk->sk_receive_queue);
		if (buf) {
			atomic_dec(&tipc_queue_size);
			if (TIPC_SKB_CB(buf)->handle != 0) {
				kfree_skb(buf);
				goto restart;
			}
			tipc_disconnect(tport->ref);
			tipc_reject_msg(buf, TIPC_CONN_SHUTDOWN);
		} else {
			tipc_shutdown(tport->ref);
		}

		sock->state = SS_DISCONNECTING;

		/* fall through */

	case SS_DISCONNECTING:

		/* Discard any unreceived messages; wake up sleeping tasks */
		discard_rx_queue(sk);
		if (waitqueue_active(sk_sleep(sk)))
			wake_up_interruptible(sk_sleep(sk));
		res = 0;
		break;

	default:
		res = -ENOTCONN;
	}

	release_sock(sk);
	return res;
}
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
/**
 * setsockopt - set socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: pointer to new option value
 * @ol: length of option value
 *
 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
 * (to ease compatibility for protocol-independent code).
 *
 * Returns 0 on success, errno otherwise
 */
static int setsockopt(struct socket *sock,
		      int lvl, int opt, char __user *ov, unsigned int ol)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	u32 value;
	int res;

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return 0;
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;
	if (ol < sizeof(value))
		return -EINVAL;
	res = get_user(value, (u32 __user *)ov);
	if (res)
		return res;

	lock_sock(sk);

	switch (opt) {
	case TIPC_IMPORTANCE:
		res = tipc_set_portimportance(tport->ref, value);
		break;
	case TIPC_SRC_DROPPABLE:
		if (sock->type != SOCK_STREAM)
			res = tipc_set_portunreliable(tport->ref, value);
		else
			res = -ENOPROTOOPT;
		break;
	case TIPC_DEST_DROPPABLE:
		res = tipc_set_portunreturnable(tport->ref, value);
		break;
	case TIPC_CONN_TIMEOUT:
		tipc_sk(sk)->conn_timeout = value;	/* value is in ms */
		/* no need to set "res", since already 0 at this point */
		break;
	default:
		res = -EINVAL;
	}

	release_sock(sk);

	return res;
}
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
/**
 * getsockopt - get socket option
 * @sock: socket structure
 * @lvl: option level
 * @opt: option identifier
 * @ov: receptacle for option value
 * @ol: receptacle for length of option value
 *
 * For stream sockets only, returns 0 length result for all IPPROTO_TCP
 * options (to ease compatibility for protocol-independent code).
 *
 * Returns 0 on success, errno otherwise
 */
static int getsockopt(struct socket *sock,
		      int lvl, int opt, char __user *ov, int __user *ol)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	int len;
	u32 value;
	int res;

	if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
		return put_user(0, ol);
	if (lvl != SOL_TIPC)
		return -ENOPROTOOPT;
	res = get_user(len, ol);
	if (res)
		return res;

	lock_sock(sk);

	switch (opt) {
	case TIPC_IMPORTANCE:
		res = tipc_portimportance(tport->ref, &value);
		break;
	case TIPC_SRC_DROPPABLE:
		res = tipc_portunreliable(tport->ref, &value);
		break;
	case TIPC_DEST_DROPPABLE:
		res = tipc_portunreturnable(tport->ref, &value);
		break;
	case TIPC_CONN_TIMEOUT:
		value = tipc_sk(sk)->conn_timeout;	/* value is in ms */
		/* no need to set "res", since already 0 at this point */
		break;
	case TIPC_NODE_RECVQ_DEPTH:
		value = (u32)atomic_read(&tipc_queue_size);
		break;
	case TIPC_SOCK_RECVQ_DEPTH:
		value = skb_queue_len(&sk->sk_receive_queue);
		break;
	default:
		res = -EINVAL;
	}

	release_sock(sk);

	if (res)
		return res;	/* "get" failed */

	if (len < sizeof(value))
		return -EINVAL;

	if (copy_to_user(ov, &value, sizeof(value)))
		return -EFAULT;

	return put_user(sizeof(value), ol);
}
1701
1702
1703
/* Protocol-ops for connectionless sockets (SOCK_DGRAM / SOCK_RDM) */
static const struct proto_ops msg_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= release,
	.bind		= bind,
	.connect	= connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= get_name,
	.poll		= poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= shutdown,
	.setsockopt	= setsockopt,
	.getsockopt	= getsockopt,
	.sendmsg	= send_msg,
	.recvmsg	= recv_msg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};
1724
/* Protocol-ops for connection-oriented packet sockets (SOCK_SEQPACKET) */
static const struct proto_ops packet_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= release,
	.bind		= bind,
	.connect	= connect,
	.socketpair	= sock_no_socketpair,
	.accept		= accept,
	.getname	= get_name,
	.poll		= poll,
	.ioctl		= sock_no_ioctl,
	.listen		= listen,
	.shutdown	= shutdown,
	.setsockopt	= setsockopt,
	.getsockopt	= getsockopt,
	.sendmsg	= send_packet,
	.recvmsg	= recv_msg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};
1745
/* Protocol-ops for byte-stream sockets (SOCK_STREAM) */
static const struct proto_ops stream_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.release	= release,
	.bind		= bind,
	.connect	= connect,
	.socketpair	= sock_no_socketpair,
	.accept		= accept,
	.getname	= get_name,
	.poll		= poll,
	.ioctl		= sock_no_ioctl,
	.listen		= listen,
	.shutdown	= shutdown,
	.setsockopt	= setsockopt,
	.getsockopt	= getsockopt,
	.sendmsg	= send_stream,
	.recvmsg	= recv_stream,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage
};
1766
/* AF_TIPC address-family registration entry (socket(2) dispatch) */
static const struct net_proto_family tipc_family_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_TIPC,
	.create		= tipc_create
};
1772
/* Protocol descriptor; obj_size makes sk_alloc() carve a full tipc_sock */
static struct proto tipc_proto = {
	.name		= "TIPC",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct tipc_sock)
};
1778
1779
1780
1781
1782
1783
1784int tipc_socket_init(void)
1785{
1786 int res;
1787
1788 res = proto_register(&tipc_proto, 1);
1789 if (res) {
1790 pr_err("Failed to register TIPC protocol type\n");
1791 goto out;
1792 }
1793
1794 res = sock_register(&tipc_family_ops);
1795 if (res) {
1796 pr_err("Failed to register TIPC socket type\n");
1797 proto_unregister(&tipc_proto);
1798 goto out;
1799 }
1800
1801 sockets_enabled = 1;
1802 out:
1803 return res;
1804}
1805
1806
1807
1808
1809void tipc_socket_stop(void)
1810{
1811 if (!sockets_enabled)
1812 return;
1813
1814 sockets_enabled = 0;
1815 sock_unregister(tipc_family_ops.family);
1816 proto_unregister(&tipc_proto);
1817}
1818