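/*
 * AF_UNIX: BSD Unix domain sockets.  Implements SOCK_STREAM, SOCK_DGRAM
 * and SOCK_SEQPACKET sockets over the PF_UNIX address family.
 */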
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/scm.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>

static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
static DEFINE_SPINLOCK(unix_table_lock);
static atomic_long_t unix_nr_socks;

#define unix_sockets_unbound	(&unix_socket_table[UNIX_HASH_SIZE])

#define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)

#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->secid = *UNIXSID(skb);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }
#endif
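
/* Fold a checksum of the address bytes down to a unix_socket_table bucket. */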
150static inline unsigned unix_hash_fold(__wsum n)
151{
152 unsigned hash = (__force unsigned)n;
153 hash ^= hash>>16;
154 hash ^= hash>>8;
155 return hash&(UNIX_HASH_SIZE-1);
156}
157
158#define unix_peer(sk) (unix_sk(sk)->peer)
159
160static inline int unix_our_peer(struct sock *sk, struct sock *osk)
161{
162 return unix_peer(osk) == sk;
163}
164
165static inline int unix_may_send(struct sock *sk, struct sock *osk)
166{
167 return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
168}
169
170static inline int unix_recvq_full(struct sock const *sk)
171{
172 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
173}
174
175static struct sock *unix_peer_get(struct sock *s)
176{
177 struct sock *peer;
178
179 unix_state_lock(s);
180 peer = unix_peer(s);
181 if (peer)
182 sock_hold(peer);
183 unix_state_unlock(s);
184 return peer;
185}
186
187static inline void unix_release_addr(struct unix_address *addr)
188{
189 if (atomic_dec_and_test(&addr->refcnt))
190 kfree(addr);
191}
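
/*
 * Validate a sockaddr_un handed in from user space and return its
 * significant length.  Pathname addresses are NUL-terminated here;
 * abstract addresses also get their table hash computed into *hashp.
 */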
200static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp)
201{
202 if (len <= sizeof(short) || len > sizeof(*sunaddr))
203 return -EINVAL;
204 if (!sunaddr || sunaddr->sun_family != AF_UNIX)
205 return -EINVAL;
206 if (sunaddr->sun_path[0]) {
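		/*
		 * A pathname address: sun_path need not be NUL-terminated by
		 * the caller, so terminate it here before taking strlen().
		 * The store at byte 'len' may land just past sun_path, but
		 * the kernel copy of the address is large enough for this to
		 * be safe.
		 */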
214 ((char *)sunaddr)[len] = 0;
215 len = strlen(sunaddr->sun_path)+1+sizeof(short);
216 return len;
217 }
218
219 *hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
220 return len;
221}
222
223static void __unix_remove_socket(struct sock *sk)
224{
225 sk_del_node_init(sk);
226}
227
228static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
229{
230 WARN_ON(!sk_unhashed(sk));
231 sk_add_node(sk, list);
232}
233
234static inline void unix_remove_socket(struct sock *sk)
235{
236 spin_lock(&unix_table_lock);
237 __unix_remove_socket(sk);
238 spin_unlock(&unix_table_lock);
239}
240
241static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
242{
243 spin_lock(&unix_table_lock);
244 __unix_insert_socket(list, sk);
245 spin_unlock(&unix_table_lock);
246}
247
248static struct sock *__unix_find_socket_byname(struct net *net,
249 struct sockaddr_un *sunname,
250 int len, int type, unsigned hash)
251{
252 struct sock *s;
253 struct hlist_node *node;
254
255 sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
256 struct unix_sock *u = unix_sk(s);
257
258 if (!net_eq(sock_net(s), net))
259 continue;
260
261 if (u->addr->len == len &&
262 !memcmp(u->addr->name, sunname, len))
263 goto found;
264 }
265 s = NULL;
266found:
267 return s;
268}
269
270static inline struct sock *unix_find_socket_byname(struct net *net,
271 struct sockaddr_un *sunname,
272 int len, int type,
273 unsigned hash)
274{
275 struct sock *s;
276
277 spin_lock(&unix_table_lock);
278 s = __unix_find_socket_byname(net, sunname, len, type, hash);
279 if (s)
280 sock_hold(s);
281 spin_unlock(&unix_table_lock);
282 return s;
283}
284
285static struct sock *unix_find_socket_byinode(struct inode *i)
286{
287 struct sock *s;
288 struct hlist_node *node;
289
290 spin_lock(&unix_table_lock);
291 sk_for_each(s, node,
292 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
293 struct dentry *dentry = unix_sk(s)->dentry;
294
295 if (dentry && dentry->d_inode == i) {
296 sock_hold(s);
297 goto found;
298 }
299 }
300 s = NULL;
301found:
302 spin_unlock(&unix_table_lock);
303 return s;
304}
305
306static inline int unix_writable(struct sock *sk)
307{
308 return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
309}
310
311static void unix_write_space(struct sock *sk)
312{
313 struct socket_wq *wq;
314
315 rcu_read_lock();
316 if (unix_writable(sk)) {
317 wq = rcu_dereference(sk->sk_wq);
318 if (wq_has_sleeper(wq))
319 wake_up_interruptible_sync_poll(&wq->wait,
320 POLLOUT | POLLWRNORM | POLLWRBAND);
321 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
322 }
323 rcu_read_unlock();
324}
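
/*
 * If sk still has data queued from 'other', drop it, wake writers blocked
 * on sk, and let 'other' see ECONNRESET if it still names sk as its peer.
 */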
330static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
331{
332 if (!skb_queue_empty(&sk->sk_receive_queue)) {
333 skb_queue_purge(&sk->sk_receive_queue);
334 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
335
336
337
338
339
340 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
341 other->sk_err = ECONNRESET;
342 other->sk_error_report(other);
343 }
344 }
345}
346
347static void unix_sock_destructor(struct sock *sk)
348{
349 struct unix_sock *u = unix_sk(sk);
350
351 skb_queue_purge(&sk->sk_receive_queue);
352
353 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
354 WARN_ON(!sk_unhashed(sk));
355 WARN_ON(sk->sk_socket);
356 if (!sock_flag(sk, SOCK_DEAD)) {
357 printk(KERN_INFO "Attempt to release alive unix socket: %p\n", sk);
358 return;
359 }
360
361 if (u->addr)
362 unix_release_addr(u->addr);
363
364 atomic_long_dec(&unix_nr_socks);
365 local_bh_disable();
366 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
367 local_bh_enable();
368#ifdef UNIX_REFCNT_DEBUG
369 printk(KERN_DEBUG "UNIX %p is destroyed, %ld are still alive.\n", sk,
370 atomic_long_read(&unix_nr_socks));
371#endif
372}
373
374static int unix_release_sock(struct sock *sk, int embrion)
375{
376 struct unix_sock *u = unix_sk(sk);
377 struct dentry *dentry;
378 struct vfsmount *mnt;
379 struct sock *skpair;
380 struct sk_buff *skb;
381 int state;
382
383 unix_remove_socket(sk);
384
385
386 unix_state_lock(sk);
387 sock_orphan(sk);
388 sk->sk_shutdown = SHUTDOWN_MASK;
389 dentry = u->dentry;
390 u->dentry = NULL;
391 mnt = u->mnt;
392 u->mnt = NULL;
393 state = sk->sk_state;
394 sk->sk_state = TCP_CLOSE;
395 unix_state_unlock(sk);
396
397 wake_up_interruptible_all(&u->peer_wait);
398
399 skpair = unix_peer(sk);
400
401 if (skpair != NULL) {
402 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
403 unix_state_lock(skpair);
404
405 skpair->sk_shutdown = SHUTDOWN_MASK;
406 if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
407 skpair->sk_err = ECONNRESET;
408 unix_state_unlock(skpair);
409 skpair->sk_state_change(skpair);
410 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
411 }
412 sock_put(skpair);
413 unix_peer(sk) = NULL;
414 }
415
416
417
418 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
419 if (state == TCP_LISTEN)
420 unix_release_sock(skb->sk, 1);
421
422 kfree_skb(skb);
423 }
424
425 if (dentry) {
426 dput(dentry);
427 mntput(mnt);
428 }
429
430 sock_put(sk);
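
	/*
	 * Descriptors passed in SCM_RIGHTS messages we just dropped may keep
	 * other AF_UNIX sockets pinned in reference cycles; kick the garbage
	 * collector if any descriptors are still in flight.
	 */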
445 if (unix_tot_inflight)
446 unix_gc();
447
448 return 0;
449}
450
451static void init_peercred(struct sock *sk)
452{
453 put_pid(sk->sk_peer_pid);
454 if (sk->sk_peer_cred)
455 put_cred(sk->sk_peer_cred);
456 sk->sk_peer_pid = get_pid(task_tgid(current));
457 sk->sk_peer_cred = get_current_cred();
458}
459
460static void copy_peercred(struct sock *sk, struct sock *peersk)
461{
462 put_pid(sk->sk_peer_pid);
463 if (sk->sk_peer_cred)
464 put_cred(sk->sk_peer_cred);
465 sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
466 sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
467}
468
469static int unix_listen(struct socket *sock, int backlog)
470{
471 int err;
472 struct sock *sk = sock->sk;
473 struct unix_sock *u = unix_sk(sk);
474 struct pid *old_pid = NULL;
475 const struct cred *old_cred = NULL;
476
477 err = -EOPNOTSUPP;
478 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
479 goto out;
480 err = -EINVAL;
481 if (!u->addr)
482 goto out;
483 unix_state_lock(sk);
484 if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
485 goto out_unlock;
486 if (backlog > sk->sk_max_ack_backlog)
487 wake_up_interruptible_all(&u->peer_wait);
488 sk->sk_max_ack_backlog = backlog;
489 sk->sk_state = TCP_LISTEN;
490
491 init_peercred(sk);
492 err = 0;
493
494out_unlock:
495 unix_state_unlock(sk);
496 put_pid(old_pid);
497 if (old_cred)
498 put_cred(old_cred);
499out:
500 return err;
501}
502
503static int unix_release(struct socket *);
504static int unix_bind(struct socket *, struct sockaddr *, int);
505static int unix_stream_connect(struct socket *, struct sockaddr *,
506 int addr_len, int flags);
507static int unix_socketpair(struct socket *, struct socket *);
508static int unix_accept(struct socket *, struct socket *, int);
509static int unix_getname(struct socket *, struct sockaddr *, int *, int);
510static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
511static unsigned int unix_dgram_poll(struct file *, struct socket *,
512 poll_table *);
513static int unix_ioctl(struct socket *, unsigned int, unsigned long);
514static int unix_shutdown(struct socket *, int);
515static int unix_stream_sendmsg(struct kiocb *, struct socket *,
516 struct msghdr *, size_t);
517static int unix_stream_recvmsg(struct kiocb *, struct socket *,
518 struct msghdr *, size_t, int);
519static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
520 struct msghdr *, size_t);
521static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
522 struct msghdr *, size_t, int);
523static int unix_dgram_connect(struct socket *, struct sockaddr *,
524 int, int);
525static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
526 struct msghdr *, size_t);
527static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
528 struct msghdr *, size_t, int);
529
530static const struct proto_ops unix_stream_ops = {
531 .family = PF_UNIX,
532 .owner = THIS_MODULE,
533 .release = unix_release,
534 .bind = unix_bind,
535 .connect = unix_stream_connect,
536 .socketpair = unix_socketpair,
537 .accept = unix_accept,
538 .getname = unix_getname,
539 .poll = unix_poll,
540 .ioctl = unix_ioctl,
541 .listen = unix_listen,
542 .shutdown = unix_shutdown,
543 .setsockopt = sock_no_setsockopt,
544 .getsockopt = sock_no_getsockopt,
545 .sendmsg = unix_stream_sendmsg,
546 .recvmsg = unix_stream_recvmsg,
547 .mmap = sock_no_mmap,
548 .sendpage = sock_no_sendpage,
549};
550
551static const struct proto_ops unix_dgram_ops = {
552 .family = PF_UNIX,
553 .owner = THIS_MODULE,
554 .release = unix_release,
555 .bind = unix_bind,
556 .connect = unix_dgram_connect,
557 .socketpair = unix_socketpair,
558 .accept = sock_no_accept,
559 .getname = unix_getname,
560 .poll = unix_dgram_poll,
561 .ioctl = unix_ioctl,
562 .listen = sock_no_listen,
563 .shutdown = unix_shutdown,
564 .setsockopt = sock_no_setsockopt,
565 .getsockopt = sock_no_getsockopt,
566 .sendmsg = unix_dgram_sendmsg,
567 .recvmsg = unix_dgram_recvmsg,
568 .mmap = sock_no_mmap,
569 .sendpage = sock_no_sendpage,
570};
571
572static const struct proto_ops unix_seqpacket_ops = {
573 .family = PF_UNIX,
574 .owner = THIS_MODULE,
575 .release = unix_release,
576 .bind = unix_bind,
577 .connect = unix_stream_connect,
578 .socketpair = unix_socketpair,
579 .accept = unix_accept,
580 .getname = unix_getname,
581 .poll = unix_dgram_poll,
582 .ioctl = unix_ioctl,
583 .listen = unix_listen,
584 .shutdown = unix_shutdown,
585 .setsockopt = sock_no_setsockopt,
586 .getsockopt = sock_no_getsockopt,
587 .sendmsg = unix_seqpacket_sendmsg,
588 .recvmsg = unix_seqpacket_recvmsg,
589 .mmap = sock_no_mmap,
590 .sendpage = sock_no_sendpage,
591};
592
593static struct proto unix_proto = {
594 .name = "UNIX",
595 .owner = THIS_MODULE,
596 .obj_size = sizeof(struct unix_sock),
597};
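
/*
 * Lockdep class for the sk_receive_queue lock of AF_UNIX sockets,
 * assigned in unix_create1() so these queues get their own lock class.
 */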
605static struct lock_class_key af_unix_sk_receive_queue_lock_key;
606
607static struct sock *unix_create1(struct net *net, struct socket *sock)
608{
609 struct sock *sk = NULL;
610 struct unix_sock *u;
611
612 atomic_long_inc(&unix_nr_socks);
613 if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
614 goto out;
615
616 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
617 if (!sk)
618 goto out;
619
620 sock_init_data(sock, sk);
621 lockdep_set_class(&sk->sk_receive_queue.lock,
622 &af_unix_sk_receive_queue_lock_key);
623
624 sk->sk_write_space = unix_write_space;
625 sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
626 sk->sk_destruct = unix_sock_destructor;
627 u = unix_sk(sk);
628 u->dentry = NULL;
629 u->mnt = NULL;
630 spin_lock_init(&u->lock);
631 atomic_long_set(&u->inflight, 0);
632 INIT_LIST_HEAD(&u->link);
633 mutex_init(&u->readlock);
634 init_waitqueue_head(&u->peer_wait);
635 unix_insert_socket(unix_sockets_unbound, sk);
636out:
637 if (sk == NULL)
638 atomic_long_dec(&unix_nr_socks);
639 else {
640 local_bh_disable();
641 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
642 local_bh_enable();
643 }
644 return sk;
645}
646
647static int unix_create(struct net *net, struct socket *sock, int protocol,
648 int kern)
649{
650 if (protocol && protocol != PF_UNIX)
651 return -EPROTONOSUPPORT;
652
653 sock->state = SS_UNCONNECTED;
654
655 switch (sock->type) {
656 case SOCK_STREAM:
657 sock->ops = &unix_stream_ops;
658 break;
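	/*
	 * SOCK_RAW is accepted for BSD compatibility and treated as
	 * SOCK_DGRAM: note the deliberate fall-through below.
	 */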
663 case SOCK_RAW:
664 sock->type = SOCK_DGRAM;
665 case SOCK_DGRAM:
666 sock->ops = &unix_dgram_ops;
667 break;
668 case SOCK_SEQPACKET:
669 sock->ops = &unix_seqpacket_ops;
670 break;
671 default:
672 return -ESOCKTNOSUPPORT;
673 }
674
675 return unix_create1(net, sock) ? 0 : -ENOMEM;
676}
677
678static int unix_release(struct socket *sock)
679{
680 struct sock *sk = sock->sk;
681
682 if (!sk)
683 return 0;
684
685 sock->sk = NULL;
686
687 return unix_release_sock(sk, 0);
688}
689
690static int unix_autobind(struct socket *sock)
691{
692 struct sock *sk = sock->sk;
693 struct net *net = sock_net(sk);
694 struct unix_sock *u = unix_sk(sk);
695 static u32 ordernum = 1;
696 struct unix_address *addr;
697 int err;
698 unsigned int retries = 0;
699
700 mutex_lock(&u->readlock);
701
702 err = 0;
703 if (u->addr)
704 goto out;
705
706 err = -ENOMEM;
707 addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
708 if (!addr)
709 goto out;
710
711 addr->name->sun_family = AF_UNIX;
712 atomic_set(&addr->refcnt, 1);
713
714retry:
715 addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
716 addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));
717
718 spin_lock(&unix_table_lock);
719 ordernum = (ordernum+1)&0xFFFFF;
720
721 if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
722 addr->hash)) {
723 spin_unlock(&unix_table_lock);
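
		/* Name already taken: give up the CPU briefly and retry. */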
728 cond_resched();
729
730 if (retries++ == 0xFFFFF) {
731 err = -ENOSPC;
732 kfree(addr);
733 goto out;
734 }
735 goto retry;
736 }
737 addr->hash ^= sk->sk_type;
738
739 __unix_remove_socket(sk);
740 u->addr = addr;
741 __unix_insert_socket(&unix_socket_table[addr->hash], sk);
742 spin_unlock(&unix_table_lock);
743 err = 0;
744
745out: mutex_unlock(&u->readlock);
746 return err;
747}
748
749static struct sock *unix_find_other(struct net *net,
750 struct sockaddr_un *sunname, int len,
751 int type, unsigned hash, int *error)
752{
753 struct sock *u;
754 struct path path;
755 int err = 0;
756
757 if (sunname->sun_path[0]) {
758 struct inode *inode;
759 err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
760 if (err)
761 goto fail;
762 inode = path.dentry->d_inode;
763 err = inode_permission(inode, MAY_WRITE);
764 if (err)
765 goto put_fail;
766
767 err = -ECONNREFUSED;
768 if (!S_ISSOCK(inode->i_mode))
769 goto put_fail;
770 u = unix_find_socket_byinode(inode);
771 if (!u)
772 goto put_fail;
773
774 if (u->sk_type == type)
775 touch_atime(path.mnt, path.dentry);
776
777 path_put(&path);
778
779 err = -EPROTOTYPE;
780 if (u->sk_type != type) {
781 sock_put(u);
782 goto fail;
783 }
784 } else {
785 err = -ECONNREFUSED;
786 u = unix_find_socket_byname(net, sunname, len, type, hash);
787 if (u) {
788 struct dentry *dentry;
789 dentry = unix_sk(u)->dentry;
790 if (dentry)
791 touch_atime(unix_sk(u)->mnt, dentry);
792 } else
793 goto fail;
794 }
795 return u;
796
797put_fail:
798 path_put(&path);
799fail:
800 *error = err;
801 return NULL;
802}
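
/*
 * Bind to an abstract name (hashed into unix_socket_table) or create a
 * socket inode in the filesystem with vfs_mknod() and hash by that inode.
 */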
805static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
806{
807 struct sock *sk = sock->sk;
808 struct net *net = sock_net(sk);
809 struct unix_sock *u = unix_sk(sk);
810 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
811 char *sun_path = sunaddr->sun_path;
812 struct dentry *dentry = NULL;
813 struct path path;
814 int err;
815 unsigned hash;
816 struct unix_address *addr;
817 struct hlist_head *list;
818
819 err = -EINVAL;
820 if (sunaddr->sun_family != AF_UNIX)
821 goto out;
822
823 if (addr_len == sizeof(short)) {
824 err = unix_autobind(sock);
825 goto out;
826 }
827
828 err = unix_mkname(sunaddr, addr_len, &hash);
829 if (err < 0)
830 goto out;
831 addr_len = err;
832
833 mutex_lock(&u->readlock);
834
835 err = -EINVAL;
836 if (u->addr)
837 goto out_up;
838
839 err = -ENOMEM;
840 addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
841 if (!addr)
842 goto out_up;
843
844 memcpy(addr->name, sunaddr, addr_len);
845 addr->len = addr_len;
846 addr->hash = hash ^ sk->sk_type;
847 atomic_set(&addr->refcnt, 1);
848
849 if (sun_path[0]) {
850 unsigned int mode;
851 err = 0;
852
853
854
855
856 dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
857 err = PTR_ERR(dentry);
858 if (IS_ERR(dentry))
859 goto out_mknod_parent;
860
861
862
863
864 mode = S_IFSOCK |
865 (SOCK_INODE(sock)->i_mode & ~current_umask());
866 err = mnt_want_write(path.mnt);
867 if (err)
868 goto out_mknod_dput;
869 err = security_path_mknod(&path, dentry, mode, 0);
870 if (err)
871 goto out_mknod_drop_write;
872 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
873out_mknod_drop_write:
874 mnt_drop_write(path.mnt);
875 if (err)
876 goto out_mknod_dput;
877 mutex_unlock(&path.dentry->d_inode->i_mutex);
878 dput(path.dentry);
879 path.dentry = dentry;
880
881 addr->hash = UNIX_HASH_SIZE;
882 }
883
884 spin_lock(&unix_table_lock);
885
886 if (!sun_path[0]) {
887 err = -EADDRINUSE;
888 if (__unix_find_socket_byname(net, sunaddr, addr_len,
889 sk->sk_type, hash)) {
890 unix_release_addr(addr);
891 goto out_unlock;
892 }
893
894 list = &unix_socket_table[addr->hash];
895 } else {
896 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
897 u->dentry = path.dentry;
898 u->mnt = path.mnt;
899 }
900
901 err = 0;
902 __unix_remove_socket(sk);
903 u->addr = addr;
904 __unix_insert_socket(list, sk);
905
906out_unlock:
907 spin_unlock(&unix_table_lock);
908out_up:
909 mutex_unlock(&u->readlock);
910out:
911 return err;
912
913out_mknod_dput:
914 dput(dentry);
915 mutex_unlock(&path.dentry->d_inode->i_mutex);
916 path_put(&path);
917out_mknod_parent:
918 if (err == -EEXIST)
919 err = -EADDRINUSE;
920 unix_release_addr(addr);
921 goto out_up;
922}
923
924static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
925{
926 if (unlikely(sk1 == sk2) || !sk2) {
927 unix_state_lock(sk1);
928 return;
929 }
930 if (sk1 < sk2) {
931 unix_state_lock(sk1);
932 unix_state_lock_nested(sk2);
933 } else {
934 unix_state_lock(sk2);
935 unix_state_lock_nested(sk1);
936 }
937}
938
939static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
940{
941 if (unlikely(sk1 == sk2) || !sk2) {
942 unix_state_unlock(sk1);
943 return;
944 }
945 unix_state_unlock(sk1);
946 unix_state_unlock(sk2);
947}
948
949static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
950 int alen, int flags)
951{
952 struct sock *sk = sock->sk;
953 struct net *net = sock_net(sk);
954 struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
955 struct sock *other;
956 unsigned hash;
957 int err;
958
959 if (addr->sa_family != AF_UNSPEC) {
960 err = unix_mkname(sunaddr, alen, &hash);
961 if (err < 0)
962 goto out;
963 alen = err;
964
965 if (test_bit(SOCK_PASSCRED, &sock->flags) &&
966 !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
967 goto out;
968
969restart:
970 other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
971 if (!other)
972 goto out;
973
974 unix_state_double_lock(sk, other);
975
976
977 if (sock_flag(other, SOCK_DEAD)) {
978 unix_state_double_unlock(sk, other);
979 sock_put(other);
980 goto restart;
981 }
982
983 err = -EPERM;
984 if (!unix_may_send(sk, other))
985 goto out_unlock;
986
987 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
988 if (err)
989 goto out_unlock;
990
991 } else {
992
993
994
995 other = NULL;
996 unix_state_double_lock(sk, other);
997 }
998
999
1000
1001
1002 if (unix_peer(sk)) {
1003 struct sock *old_peer = unix_peer(sk);
1004 unix_peer(sk) = other;
1005 unix_state_double_unlock(sk, other);
1006
1007 if (other != old_peer)
1008 unix_dgram_disconnected(sk, old_peer);
1009 sock_put(old_peer);
1010 } else {
1011 unix_peer(sk) = other;
1012 unix_state_double_unlock(sk, other);
1013 }
1014 return 0;
1015
1016out_unlock:
1017 unix_state_double_unlock(sk, other);
1018 sock_put(other);
1019out:
1020 return err;
1021}
1022
1023static long unix_wait_for_peer(struct sock *other, long timeo)
1024{
1025 struct unix_sock *u = unix_sk(other);
1026 int sched;
1027 DEFINE_WAIT(wait);
1028
1029 prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
1030
1031 sched = !sock_flag(other, SOCK_DEAD) &&
1032 !(other->sk_shutdown & RCV_SHUTDOWN) &&
1033 unix_recvq_full(other);
1034
1035 unix_state_unlock(other);
1036
1037 if (sched)
1038 timeo = schedule_timeout(timeo);
1039
1040 finish_wait(&u->peer_wait, &wait);
1041 return timeo;
1042}
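
/*
 * Connect a SOCK_STREAM or SOCK_SEQPACKET socket: create the server-side
 * socket here and queue it on the listener so accept() can pick it up.
 */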
1044static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1045 int addr_len, int flags)
1046{
1047 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
1048 struct sock *sk = sock->sk;
1049 struct net *net = sock_net(sk);
1050 struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1051 struct sock *newsk = NULL;
1052 struct sock *other = NULL;
1053 struct sk_buff *skb = NULL;
1054 unsigned hash;
1055 int st;
1056 int err;
1057 long timeo;
1058
1059 err = unix_mkname(sunaddr, addr_len, &hash);
1060 if (err < 0)
1061 goto out;
1062 addr_len = err;
1063
1064 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
1065 (err = unix_autobind(sock)) != 0)
1066 goto out;
1067
1068 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
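
	/*
	 * Allocate the new server-side socket and the skb used to pass it to
	 * the listener before any state locks are taken, so a failed
	 * allocation never has to unwind locked state.
	 */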
1075 err = -ENOMEM;
1076
1077
1078 newsk = unix_create1(sock_net(sk), NULL);
1079 if (newsk == NULL)
1080 goto out;
1081
1082
1083 skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1084 if (skb == NULL)
1085 goto out;
1086
1087restart:
1088
1089 other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
1090 if (!other)
1091 goto out;
1092
1093
1094 unix_state_lock(other);
1095
1096
1097 if (sock_flag(other, SOCK_DEAD)) {
1098 unix_state_unlock(other);
1099 sock_put(other);
1100 goto restart;
1101 }
1102
1103 err = -ECONNREFUSED;
1104 if (other->sk_state != TCP_LISTEN)
1105 goto out_unlock;
1106 if (other->sk_shutdown & RCV_SHUTDOWN)
1107 goto out_unlock;
1108
1109 if (unix_recvq_full(other)) {
1110 err = -EAGAIN;
1111 if (!timeo)
1112 goto out_unlock;
1113
1114 timeo = unix_wait_for_peer(other, timeo);
1115
1116 err = sock_intr_errno(timeo);
1117 if (signal_pending(current))
1118 goto out;
1119 sock_put(other);
1120 goto restart;
1121 }
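
	/*
	 * Latch our own state while the peer stays locked, then re-check it
	 * under our own (nested) lock below before completing the connect.
	 */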
1134 st = sk->sk_state;
1135
1136 switch (st) {
1137 case TCP_CLOSE:
1138
1139 break;
1140 case TCP_ESTABLISHED:
1141
1142 err = -EISCONN;
1143 goto out_unlock;
1144 default:
1145 err = -EINVAL;
1146 goto out_unlock;
1147 }
1148
1149 unix_state_lock_nested(sk);
1150
1151 if (sk->sk_state != st) {
1152 unix_state_unlock(sk);
1153 unix_state_unlock(other);
1154 sock_put(other);
1155 goto restart;
1156 }
1157
1158 err = security_unix_stream_connect(sk, other, newsk);
1159 if (err) {
1160 unix_state_unlock(sk);
1161 goto out_unlock;
1162 }
1163
1164
1165
1166 sock_hold(sk);
1167 unix_peer(newsk) = sk;
1168 newsk->sk_state = TCP_ESTABLISHED;
1169 newsk->sk_type = sk->sk_type;
1170 init_peercred(newsk);
1171 newu = unix_sk(newsk);
1172 RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
1173 otheru = unix_sk(other);
1174
1175
1176 if (otheru->addr) {
1177 atomic_inc(&otheru->addr->refcnt);
1178 newu->addr = otheru->addr;
1179 }
1180 if (otheru->dentry) {
1181 newu->dentry = dget(otheru->dentry);
1182 newu->mnt = mntget(otheru->mnt);
1183 }
1184
1185
1186 copy_peercred(sk, other);
1187
1188 sock->state = SS_CONNECTED;
1189 sk->sk_state = TCP_ESTABLISHED;
1190 sock_hold(newsk);
1191
1192 smp_mb__after_atomic_inc();
1193 unix_peer(sk) = newsk;
1194
1195 unix_state_unlock(sk);
1196
1197
1198 spin_lock(&other->sk_receive_queue.lock);
1199 __skb_queue_tail(&other->sk_receive_queue, skb);
1200 spin_unlock(&other->sk_receive_queue.lock);
1201 unix_state_unlock(other);
1202 other->sk_data_ready(other, 0);
1203 sock_put(other);
1204 return 0;
1205
1206out_unlock:
1207 if (other)
1208 unix_state_unlock(other);
1209
1210out:
1211 kfree_skb(skb);
1212 if (newsk)
1213 unix_release_sock(newsk, 0);
1214 if (other)
1215 sock_put(other);
1216 return err;
1217}
1218
1219static int unix_socketpair(struct socket *socka, struct socket *sockb)
1220{
1221 struct sock *ska = socka->sk, *skb = sockb->sk;
1222
1223
1224 sock_hold(ska);
1225 sock_hold(skb);
1226 unix_peer(ska) = skb;
1227 unix_peer(skb) = ska;
1228 init_peercred(ska);
1229 init_peercred(skb);
1230
1231 if (ska->sk_type != SOCK_DGRAM) {
1232 ska->sk_state = TCP_ESTABLISHED;
1233 skb->sk_state = TCP_ESTABLISHED;
1234 socka->state = SS_CONNECTED;
1235 sockb->state = SS_CONNECTED;
1236 }
1237 return 0;
1238}
1239
1240static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1241{
1242 struct sock *sk = sock->sk;
1243 struct sock *tsk;
1244 struct sk_buff *skb;
1245 int err;
1246
1247 err = -EOPNOTSUPP;
1248 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
1249 goto out;
1250
1251 err = -EINVAL;
1252 if (sk->sk_state != TCP_LISTEN)
1253 goto out;
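
	/*
	 * A pending connection sits on the listener's receive queue as an
	 * skb whose owning sock is the already-created server-side socket.
	 */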
1259 skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1260 if (!skb) {
1261
1262 if (err == 0)
1263 err = -EINVAL;
1264 goto out;
1265 }
1266
1267 tsk = skb->sk;
1268 skb_free_datagram(sk, skb);
1269 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1270
1271
1272 unix_state_lock(tsk);
1273 newsock->state = SS_CONNECTED;
1274 sock_graft(tsk, newsock);
1275 unix_state_unlock(tsk);
1276 return 0;
1277
1278out:
1279 return err;
1280}
1281
1282
1283static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1284{
1285 struct sock *sk = sock->sk;
1286 struct unix_sock *u;
1287 DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
1288 int err = 0;
1289
1290 if (peer) {
1291 sk = unix_peer_get(sk);
1292
1293 err = -ENOTCONN;
1294 if (!sk)
1295 goto out;
1296 err = 0;
1297 } else {
1298 sock_hold(sk);
1299 }
1300
1301 u = unix_sk(sk);
1302 unix_state_lock(sk);
1303 if (!u->addr) {
1304 sunaddr->sun_family = AF_UNIX;
1305 sunaddr->sun_path[0] = 0;
1306 *uaddr_len = sizeof(short);
1307 } else {
1308 struct unix_address *addr = u->addr;
1309
1310 *uaddr_len = addr->len;
1311 memcpy(sunaddr, addr->name, *uaddr_len);
1312 }
1313 unix_state_unlock(sk);
1314 sock_put(sk);
1315out:
1316 return err;
1317}
1318
1319static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1320{
1321 int i;
1322
1323 scm->fp = UNIXCB(skb).fp;
1324 UNIXCB(skb).fp = NULL;
1325
1326 for (i = scm->fp->count-1; i >= 0; i--)
1327 unix_notinflight(scm->fp->fp[i]);
1328}
1329
1330static void unix_destruct_scm(struct sk_buff *skb)
1331{
1332 struct scm_cookie scm;
1333 memset(&scm, 0, sizeof(scm));
1334 scm.pid = UNIXCB(skb).pid;
1335 scm.cred = UNIXCB(skb).cred;
1336 if (UNIXCB(skb).fp)
1337 unix_detach_fds(&scm, skb);
1338
1339
1340
1341 scm_destroy(&scm);
1342 sock_wfree(skb);
1343}
1344
1345#define MAX_RECURSION_LEVEL 4
1346
1347static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1348{
1349 int i;
1350 unsigned char max_level = 0;
1351 int unix_sock_count = 0;
1352
1353 for (i = scm->fp->count - 1; i >= 0; i--) {
1354 struct sock *sk = unix_get_socket(scm->fp->fp[i]);
1355
1356 if (sk) {
1357 unix_sock_count++;
1358 max_level = max(max_level,
1359 unix_sk(sk)->recursion_level);
1360 }
1361 }
1362 if (unlikely(max_level > MAX_RECURSION_LEVEL))
1363 return -ETOOMANYREFS;
1364
1365
1366
1367
1368
1369
1370 UNIXCB(skb).fp = scm_fp_dup(scm->fp);
1371 if (!UNIXCB(skb).fp)
1372 return -ENOMEM;
1373
1374 if (unix_sock_count) {
1375 for (i = scm->fp->count - 1; i >= 0; i--)
1376 unix_inflight(scm->fp->fp[i]);
1377 }
1378 return max_level;
1379}
1380
1381static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1382{
1383 int err = 0;
1384
1385 UNIXCB(skb).pid = get_pid(scm->pid);
1386 if (scm->cred)
1387 UNIXCB(skb).cred = get_cred(scm->cred);
1388 UNIXCB(skb).fp = NULL;
1389 if (scm->fp && send_fds)
1390 err = unix_attach_fds(scm, skb);
1391
1392 skb->destructor = unix_destruct_scm;
1393 return err;
1394}
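
/*
 * Some applications expect write() on an AF_UNIX socket to carry
 * SCM_CREDENTIALS: attach the sender's pid and credentials when either
 * end has SO_PASSCRED set (or the receiver has no socket attached yet)
 * and none were supplied explicitly by the sender.
 */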
1401static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
1402 const struct sock *other)
1403{
1404 if (UNIXCB(skb).cred)
1405 return;
1406 if (test_bit(SOCK_PASSCRED, &sock->flags) ||
1407 !other->sk_socket ||
1408 test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) {
1409 UNIXCB(skb).pid = get_pid(task_tgid(current));
1410 UNIXCB(skb).cred = get_current_cred();
1411 }
1412}
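
/* Send an AF_UNIX datagram; SOCK_SEQPACKET records also pass through here. */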
1418static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1419 struct msghdr *msg, size_t len)
1420{
1421 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1422 struct sock *sk = sock->sk;
1423 struct net *net = sock_net(sk);
1424 struct unix_sock *u = unix_sk(sk);
1425 struct sockaddr_un *sunaddr = msg->msg_name;
1426 struct sock *other = NULL;
1427 int namelen = 0;
1428 int err;
1429 unsigned hash;
1430 struct sk_buff *skb;
1431 long timeo;
1432 struct scm_cookie tmp_scm;
1433 int max_level;
1434
1435 if (NULL == siocb->scm)
1436 siocb->scm = &tmp_scm;
1437 wait_for_unix_gc();
1438 err = scm_send(sock, msg, siocb->scm);
1439 if (err < 0)
1440 return err;
1441
1442 err = -EOPNOTSUPP;
1443 if (msg->msg_flags&MSG_OOB)
1444 goto out;
1445
1446 if (msg->msg_namelen) {
1447 err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1448 if (err < 0)
1449 goto out;
1450 namelen = err;
1451 } else {
1452 sunaddr = NULL;
1453 err = -ENOTCONN;
1454 other = unix_peer_get(sk);
1455 if (!other)
1456 goto out;
1457 }
1458
1459 if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
1460 && (err = unix_autobind(sock)) != 0)
1461 goto out;
1462
1463 err = -EMSGSIZE;
1464 if (len > sk->sk_sndbuf - 32)
1465 goto out;
1466
1467 skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
1468 if (skb == NULL)
1469 goto out;
1470
1471 err = unix_scm_to_skb(siocb->scm, skb, true);
1472 if (err < 0)
1473 goto out_free;
1474 max_level = err + 1;
1475 unix_get_secdata(siocb->scm, skb);
1476
1477 skb_reset_transport_header(skb);
1478 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
1479 if (err)
1480 goto out_free;
1481
1482 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1483
1484restart:
1485 if (!other) {
1486 err = -ECONNRESET;
1487 if (sunaddr == NULL)
1488 goto out_free;
1489
1490 other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
1491 hash, &err);
1492 if (other == NULL)
1493 goto out_free;
1494 }
1495
1496 if (sk_filter(other, skb) < 0) {
1497
1498 err = len;
1499 goto out_free;
1500 }
1501
1502 unix_state_lock(other);
1503 err = -EPERM;
1504 if (!unix_may_send(sk, other))
1505 goto out_unlock;
1506
1507 if (sock_flag(other, SOCK_DEAD)) {
1508
1509
1510
1511
1512 unix_state_unlock(other);
1513 sock_put(other);
1514
1515 err = 0;
1516 unix_state_lock(sk);
1517 if (unix_peer(sk) == other) {
1518 unix_peer(sk) = NULL;
1519 unix_state_unlock(sk);
1520
1521 unix_dgram_disconnected(sk, other);
1522 sock_put(other);
1523 err = -ECONNREFUSED;
1524 } else {
1525 unix_state_unlock(sk);
1526 }
1527
1528 other = NULL;
1529 if (err)
1530 goto out_free;
1531 goto restart;
1532 }
1533
1534 err = -EPIPE;
1535 if (other->sk_shutdown & RCV_SHUTDOWN)
1536 goto out_unlock;
1537
1538 if (sk->sk_type != SOCK_SEQPACKET) {
1539 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1540 if (err)
1541 goto out_unlock;
1542 }
1543
1544 if (unix_peer(other) != sk && unix_recvq_full(other)) {
1545 if (!timeo) {
1546 err = -EAGAIN;
1547 goto out_unlock;
1548 }
1549
1550 timeo = unix_wait_for_peer(other, timeo);
1551
1552 err = sock_intr_errno(timeo);
1553 if (signal_pending(current))
1554 goto out_free;
1555
1556 goto restart;
1557 }
1558
1559 if (sock_flag(other, SOCK_RCVTSTAMP))
1560 __net_timestamp(skb);
1561 maybe_add_creds(skb, sock, other);
1562 skb_queue_tail(&other->sk_receive_queue, skb);
1563 if (max_level > unix_sk(other)->recursion_level)
1564 unix_sk(other)->recursion_level = max_level;
1565 unix_state_unlock(other);
1566 other->sk_data_ready(other, len);
1567 sock_put(other);
1568 scm_destroy(siocb->scm);
1569 return len;
1570
1571out_unlock:
1572 unix_state_unlock(other);
1573out_free:
1574 kfree_skb(skb);
1575out:
1576 if (other)
1577 sock_put(other);
1578 scm_destroy(siocb->scm);
1579 return err;
1580}
1581
1582
1583static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1584 struct msghdr *msg, size_t len)
1585{
1586 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1587 struct sock *sk = sock->sk;
1588 struct sock *other = NULL;
1589 int err, size;
1590 struct sk_buff *skb;
1591 int sent = 0;
1592 struct scm_cookie tmp_scm;
1593 bool fds_sent = false;
1594 int max_level;
1595
1596 if (NULL == siocb->scm)
1597 siocb->scm = &tmp_scm;
1598 wait_for_unix_gc();
1599 err = scm_send(sock, msg, siocb->scm);
1600 if (err < 0)
1601 return err;
1602
1603 err = -EOPNOTSUPP;
1604 if (msg->msg_flags&MSG_OOB)
1605 goto out_err;
1606
1607 if (msg->msg_namelen) {
1608 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1609 goto out_err;
1610 } else {
1611 err = -ENOTCONN;
1612 other = unix_peer(sk);
1613 if (!other)
1614 goto out_err;
1615 }
1616
1617 if (sk->sk_shutdown & SEND_SHUTDOWN)
1618 goto pipe_err;
1619
1620 while (sent < len) {
1626 size = len-sent;
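
		/*
		 * Cap each segment at half the send buffer (minus bookkeeping
		 * space) so two messages can be in flight, and never above
		 * SKB_MAX_ALLOC.
		 */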
1629 if (size > ((sk->sk_sndbuf >> 1) - 64))
1630 size = (sk->sk_sndbuf >> 1) - 64;
1631
1632 if (size > SKB_MAX_ALLOC)
1633 size = SKB_MAX_ALLOC;
1634
1635
1636
1637
1638
1639 skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
1640 &err);
1641
1642 if (skb == NULL)
1643 goto out_err;
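
		/*
		 * The skb we got back may be smaller than requested; copy only
		 * what fits in its tail room.
		 */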
1652 size = min_t(int, size, skb_tailroom(skb));
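
		/*
		 * Attach credentials, and the passed file descriptors only on
		 * the first segment of this write.
		 */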
1656 err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
1657 if (err < 0) {
1658 kfree_skb(skb);
1659 goto out_err;
1660 }
1661 max_level = err + 1;
1662 fds_sent = true;
1663
1664 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
1665 if (err) {
1666 kfree_skb(skb);
1667 goto out_err;
1668 }
1669
1670 unix_state_lock(other);
1671
1672 if (sock_flag(other, SOCK_DEAD) ||
1673 (other->sk_shutdown & RCV_SHUTDOWN))
1674 goto pipe_err_free;
1675
1676 maybe_add_creds(skb, sock, other);
1677 skb_queue_tail(&other->sk_receive_queue, skb);
1678 if (max_level > unix_sk(other)->recursion_level)
1679 unix_sk(other)->recursion_level = max_level;
1680 unix_state_unlock(other);
1681 other->sk_data_ready(other, size);
1682 sent += size;
1683 }
1684
1685 scm_destroy(siocb->scm);
1686 siocb->scm = NULL;
1687
1688 return sent;
1689
1690pipe_err_free:
1691 unix_state_unlock(other);
1692 kfree_skb(skb);
1693pipe_err:
1694 if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
1695 send_sig(SIGPIPE, current, 0);
1696 err = -EPIPE;
1697out_err:
1698 scm_destroy(siocb->scm);
1699 siocb->scm = NULL;
1700 return sent ? : err;
1701}
1702
1703static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
1704 struct msghdr *msg, size_t len)
1705{
1706 int err;
1707 struct sock *sk = sock->sk;
1708
1709 err = sock_error(sk);
1710 if (err)
1711 return err;
1712
1713 if (sk->sk_state != TCP_ESTABLISHED)
1714 return -ENOTCONN;
1715
1716 if (msg->msg_namelen)
1717 msg->msg_namelen = 0;
1718
1719 return unix_dgram_sendmsg(kiocb, sock, msg, len);
1720}
1721
1722static int unix_seqpacket_recvmsg(struct kiocb *iocb, struct socket *sock,
1723 struct msghdr *msg, size_t size,
1724 int flags)
1725{
1726 struct sock *sk = sock->sk;
1727
1728 if (sk->sk_state != TCP_ESTABLISHED)
1729 return -ENOTCONN;
1730
1731 return unix_dgram_recvmsg(iocb, sock, msg, size, flags);
1732}
1733
1734static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1735{
1736 struct unix_sock *u = unix_sk(sk);
1737
1738 msg->msg_namelen = 0;
1739 if (u->addr) {
1740 msg->msg_namelen = u->addr->len;
1741 memcpy(msg->msg_name, u->addr->name, u->addr->len);
1742 }
1743}
1744
1745static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1746 struct msghdr *msg, size_t size,
1747 int flags)
1748{
1749 struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1750 struct scm_cookie tmp_scm;
1751 struct sock *sk = sock->sk;
1752 struct unix_sock *u = unix_sk(sk);
1753 int noblock = flags & MSG_DONTWAIT;
1754 struct sk_buff *skb;
1755 int err;
1756
1757 err = -EOPNOTSUPP;
1758 if (flags&MSG_OOB)
1759 goto out;
1760
1761 msg->msg_namelen = 0;
1762
1763 err = mutex_lock_interruptible(&u->readlock);
1764 if (err) {
1765 err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
1766 goto out;
1767 }
1768
1769 skb = skb_recv_datagram(sk, flags, noblock, &err);
1770 if (!skb) {
1771 unix_state_lock(sk);
1772
1773 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
1774 (sk->sk_shutdown & RCV_SHUTDOWN))
1775 err = 0;
1776 unix_state_unlock(sk);
1777 goto out_unlock;
1778 }
1779
1780 wake_up_interruptible_sync_poll(&u->peer_wait,
1781 POLLOUT | POLLWRNORM | POLLWRBAND);
1782
1783 if (msg->msg_name)
1784 unix_copy_addr(msg, skb->sk);
1785
1786 if (size > skb->len)
1787 size = skb->len;
1788 else if (size < skb->len)
1789 msg->msg_flags |= MSG_TRUNC;
1790
1791 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
1792 if (err)
1793 goto out_free;
1794
1795 if (sock_flag(sk, SOCK_RCVTSTAMP))
1796 __sock_recv_timestamp(msg, sk, skb);
1797
1798 if (!siocb->scm) {
1799 siocb->scm = &tmp_scm;
1800 memset(&tmp_scm, 0, sizeof(tmp_scm));
1801 }
1802 scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
1803 unix_set_secdata(siocb->scm, skb);
1804
1805 if (!(flags & MSG_PEEK)) {
1806 if (UNIXCB(skb).fp)
1807 unix_detach_fds(siocb->scm, skb);
1808 } else {
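		/*
		 * MSG_PEEK: the descriptors stay queued with the skb, so give
		 * the caller duplicate references rather than detaching them.
		 */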
1821 if (UNIXCB(skb).fp)
1822 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1823 }
1824 err = size;
1825
1826 scm_recv(sock, msg, siocb->scm, flags);
1827
1828out_free:
1829 skb_free_datagram(sk, skb);
1830out_unlock:
1831 mutex_unlock(&u->readlock);
1832out:
1833 return err;
1834}
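
/*
 * Sleep until the stream receive queue has data, an error or shutdown is
 * pending, a signal arrives, or the timeout expires.
 */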
1840static long unix_stream_data_wait(struct sock *sk, long timeo)
1841{
1842 DEFINE_WAIT(wait);
1843
1844 unix_state_lock(sk);
1845
1846 for (;;) {
1847 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1848
1849 if (!skb_queue_empty(&sk->sk_receive_queue) ||
1850 sk->sk_err ||
1851 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1852 signal_pending(current) ||
1853 !timeo)
1854 break;
1855
1856 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1857 unix_state_unlock(sk);
1858 timeo = schedule_timeout(timeo);
1859 unix_state_lock(sk);
1860 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1861 }
1862
1863 finish_wait(sk_sleep(sk), &wait);
1864 unix_state_unlock(sk);
1865 return timeo;
1866}
1867
1868
1869
1870static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1871 struct msghdr *msg, size_t size,
1872 int flags)
1873{
1874 struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1875 struct scm_cookie tmp_scm;
1876 struct sock *sk = sock->sk;
1877 struct unix_sock *u = unix_sk(sk);
1878 struct sockaddr_un *sunaddr = msg->msg_name;
1879 int copied = 0;
1880 int check_creds = 0;
1881 int target;
1882 int err = 0;
1883 long timeo;
1884
1885 err = -EINVAL;
1886 if (sk->sk_state != TCP_ESTABLISHED)
1887 goto out;
1888
1889 err = -EOPNOTSUPP;
1890 if (flags&MSG_OOB)
1891 goto out;
1892
1893 target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
1894 timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
1895
1896 msg->msg_namelen = 0;
1897
1898
1899
1900
1901
1902 if (!siocb->scm) {
1903 siocb->scm = &tmp_scm;
1904 memset(&tmp_scm, 0, sizeof(tmp_scm));
1905 }
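
	/*
	 * u->readlock serialises stream readers so queued segments are not
	 * interleaved while a reader sleeps copying data to user space.
	 */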
1907 err = mutex_lock_interruptible(&u->readlock);
1908 if (err) {
1909 err = sock_intr_errno(timeo);
1910 goto out;
1911 }
1912
1913 do {
1914 int chunk;
1915 struct sk_buff *skb;
1916
1917 unix_state_lock(sk);
1918 skb = skb_dequeue(&sk->sk_receive_queue);
1919 if (skb == NULL) {
1920 unix_sk(sk)->recursion_level = 0;
1921 if (copied >= target)
1922 goto unlock;
1923
1924
1925
1926
1927
1928 err = sock_error(sk);
1929 if (err)
1930 goto unlock;
1931 if (sk->sk_shutdown & RCV_SHUTDOWN)
1932 goto unlock;
1933
1934 unix_state_unlock(sk);
1935 err = -EAGAIN;
1936 if (!timeo)
1937 break;
1938 mutex_unlock(&u->readlock);
1939
1940 timeo = unix_stream_data_wait(sk, timeo);
1941
1942 if (signal_pending(current)
1943 || mutex_lock_interruptible(&u->readlock)) {
1944 err = sock_intr_errno(timeo);
1945 goto out;
1946 }
1947
1948 continue;
1949 unlock:
1950 unix_state_unlock(sk);
1951 break;
1952 }
1953 unix_state_unlock(sk);

		if (check_creds) {
			/*
			 * Never merge data from different senders into one
			 * read: stop if the credentials on this skb differ
			 * from those of the first segment we copied.
			 */
1957 if ((UNIXCB(skb).pid != siocb->scm->pid) ||
1958 (UNIXCB(skb).cred != siocb->scm->cred)) {
1959 skb_queue_head(&sk->sk_receive_queue, skb);
1960 sk->sk_data_ready(sk, skb->len);
1961 break;
1962 }
1963 } else {
1964
1965 scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
1966 check_creds = 1;
1967 }
1968
1969
1970 if (sunaddr) {
1971 unix_copy_addr(msg, skb->sk);
1972 sunaddr = NULL;
1973 }
1974
1975 chunk = min_t(unsigned int, skb->len, size);
1976 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
1977 skb_queue_head(&sk->sk_receive_queue, skb);
1978 sk->sk_data_ready(sk, skb->len);
1979 if (copied == 0)
1980 copied = -EFAULT;
1981 break;
1982 }
1983 copied += chunk;
1984 size -= chunk;
1985
1986
1987 if (!(flags & MSG_PEEK)) {
1988 skb_pull(skb, chunk);
1989
1990 if (UNIXCB(skb).fp)
1991 unix_detach_fds(siocb->scm, skb);
1992
1993
1994 if (skb->len) {
1995 skb_queue_head(&sk->sk_receive_queue, skb);
1996 sk->sk_data_ready(sk, skb->len);
1997 break;
1998 }
1999
2000 consume_skb(skb);
2001
2002 if (siocb->scm->fp)
2003 break;
2004 } else {
2005
2006
2007 if (UNIXCB(skb).fp)
2008 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
2009
2010
2011 skb_queue_head(&sk->sk_receive_queue, skb);
2012 sk->sk_data_ready(sk, skb->len);
2013 break;
2014 }
2015 } while (size);
2016
2017 mutex_unlock(&u->readlock);
2018 scm_recv(sock, msg, siocb->scm, flags);
2019out:
2020 return copied ? : err;
2021}
2022
2023static int unix_shutdown(struct socket *sock, int mode)
2024{
2025 struct sock *sk = sock->sk;
2026 struct sock *other;
2027
2028 mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
2029
2030 if (!mode)
2031 return 0;
2032
2033 unix_state_lock(sk);
2034 sk->sk_shutdown |= mode;
2035 other = unix_peer(sk);
2036 if (other)
2037 sock_hold(other);
2038 unix_state_unlock(sk);
2039 sk->sk_state_change(sk);
2040
2041 if (other &&
2042 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
2043
2044 int peer_mode = 0;
2045
2046 if (mode&RCV_SHUTDOWN)
2047 peer_mode |= SEND_SHUTDOWN;
2048 if (mode&SEND_SHUTDOWN)
2049 peer_mode |= RCV_SHUTDOWN;
2050 unix_state_lock(other);
2051 other->sk_shutdown |= peer_mode;
2052 unix_state_unlock(other);
2053 other->sk_state_change(other);
2054 if (peer_mode == SHUTDOWN_MASK)
2055 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
2056 else if (peer_mode & RCV_SHUTDOWN)
2057 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
2058 }
2059 if (other)
2060 sock_put(other);
2061
2062 return 0;
2063}
2064
2065static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2066{
2067 struct sock *sk = sock->sk;
2068 long amount = 0;
2069 int err;
2070
2071 switch (cmd) {
2072 case SIOCOUTQ:
2073 amount = sk_wmem_alloc_get(sk);
2074 err = put_user(amount, (int __user *)arg);
2075 break;
2076 case SIOCINQ:
2077 {
2078 struct sk_buff *skb;
2079
2080 if (sk->sk_state == TCP_LISTEN) {
2081 err = -EINVAL;
2082 break;
2083 }
2084
2085 spin_lock(&sk->sk_receive_queue.lock);
2086 if (sk->sk_type == SOCK_STREAM ||
2087 sk->sk_type == SOCK_SEQPACKET) {
2088 skb_queue_walk(&sk->sk_receive_queue, skb)
2089 amount += skb->len;
2090 } else {
2091 skb = skb_peek(&sk->sk_receive_queue);
2092 if (skb)
2093 amount = skb->len;
2094 }
2095 spin_unlock(&sk->sk_receive_queue.lock);
2096 err = put_user(amount, (int __user *)arg);
2097 break;
2098 }
2099
2100 default:
2101 err = -ENOIOCTLCMD;
2102 break;
2103 }
2104 return err;
2105}
2106
2107static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
2108{
2109 struct sock *sk = sock->sk;
2110 unsigned int mask;
2111
2112 sock_poll_wait(file, sk_sleep(sk), wait);
2113 mask = 0;
2114
2115
2116 if (sk->sk_err)
2117 mask |= POLLERR;
2118 if (sk->sk_shutdown == SHUTDOWN_MASK)
2119 mask |= POLLHUP;
2120 if (sk->sk_shutdown & RCV_SHUTDOWN)
2121 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
2122
2123
2124 if (!skb_queue_empty(&sk->sk_receive_queue))
2125 mask |= POLLIN | POLLRDNORM;
2126
2127
2128 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
2129 sk->sk_state == TCP_CLOSE)
2130 mask |= POLLHUP;
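
	/* Writable: POLLOUT whenever unix_writable() says there is buffer space. */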
2136 if (unix_writable(sk))
2137 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2138
2139 return mask;
2140}
2141
2142static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2143 poll_table *wait)
2144{
2145 struct sock *sk = sock->sk, *other;
2146 unsigned int mask, writable;
2147
2148 sock_poll_wait(file, sk_sleep(sk), wait);
2149 mask = 0;
2150
2151
2152 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
2153 mask |= POLLERR;
2154 if (sk->sk_shutdown & RCV_SHUTDOWN)
2155 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
2156 if (sk->sk_shutdown == SHUTDOWN_MASK)
2157 mask |= POLLHUP;
2158
2159
2160 if (!skb_queue_empty(&sk->sk_receive_queue))
2161 mask |= POLLIN | POLLRDNORM;
2162
2163
2164 if (sk->sk_type == SOCK_SEQPACKET) {
2165 if (sk->sk_state == TCP_CLOSE)
2166 mask |= POLLHUP;
2167
2168 if (sk->sk_state == TCP_SYN_SENT)
2169 return mask;
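
	/*
	 * The caller did not ask for write status; skip the more expensive
	 * writability and peer receive-queue checks below.
	 */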
2173 if (wait && !(wait->key & (POLLWRBAND | POLLWRNORM | POLLOUT)))
2174 return mask;
2175
2176 writable = unix_writable(sk);
2177 other = unix_peer_get(sk);
2178 if (other) {
2179 if (unix_peer(other) != sk) {
2180 sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
2181 if (unix_recvq_full(other))
2182 writable = 0;
2183 }
2184 sock_put(other);
2185 }
2186
2187 if (writable)
2188 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2189 else
2190 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
2191
2192 return mask;
2193}
2194
2195#ifdef CONFIG_PROC_FS
2196static struct sock *first_unix_socket(int *i)
2197{
2198 for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
2199 if (!hlist_empty(&unix_socket_table[*i]))
2200 return __sk_head(&unix_socket_table[*i]);
2201 }
2202 return NULL;
2203}
2204
2205static struct sock *next_unix_socket(int *i, struct sock *s)
2206{
2207 struct sock *next = sk_next(s);
2208
2209 if (next)
2210 return next;
2211
2212 for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
2213 if (!hlist_empty(&unix_socket_table[*i]))
2214 return __sk_head(&unix_socket_table[*i]);
2215 }
2216 return NULL;
2217}
2218
2219struct unix_iter_state {
2220 struct seq_net_private p;
2221 int i;
2222};
2223
2224static struct sock *unix_seq_idx(struct seq_file *seq, loff_t pos)
2225{
2226 struct unix_iter_state *iter = seq->private;
2227 loff_t off = 0;
2228 struct sock *s;
2229
2230 for (s = first_unix_socket(&iter->i); s; s = next_unix_socket(&iter->i, s)) {
2231 if (sock_net(s) != seq_file_net(seq))
2232 continue;
2233 if (off == pos)
2234 return s;
2235 ++off;
2236 }
2237 return NULL;
2238}
2239
2240static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
2241 __acquires(unix_table_lock)
2242{
2243 spin_lock(&unix_table_lock);
2244 return *pos ? unix_seq_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2245}
2246
2247static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2248{
2249 struct unix_iter_state *iter = seq->private;
2250 struct sock *sk = v;
2251 ++*pos;
2252
2253 if (v == SEQ_START_TOKEN)
2254 sk = first_unix_socket(&iter->i);
2255 else
2256 sk = next_unix_socket(&iter->i, sk);
2257 while (sk && (sock_net(sk) != seq_file_net(seq)))
2258 sk = next_unix_socket(&iter->i, sk);
2259 return sk;
2260}
2261
2262static void unix_seq_stop(struct seq_file *seq, void *v)
2263 __releases(unix_table_lock)
2264{
2265 spin_unlock(&unix_table_lock);
2266}
2267
2268static int unix_seq_show(struct seq_file *seq, void *v)
2269{
2270
2271 if (v == SEQ_START_TOKEN)
2272 seq_puts(seq, "Num RefCount Protocol Flags Type St "
2273 "Inode Path\n");
2274 else {
2275 struct sock *s = v;
2276 struct unix_sock *u = unix_sk(s);
2277 unix_state_lock(s);
2278
2279 seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
2280 s,
2281 atomic_read(&s->sk_refcnt),
2282 0,
2283 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
2284 s->sk_type,
2285 s->sk_socket ?
2286 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
2287 (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2288 sock_i_ino(s));
2289
2290 if (u->addr) {
2291 int i, len;
2292 seq_putc(seq, ' ');
2293
2294 i = 0;
2295 len = u->addr->len - sizeof(short);
2296 if (!UNIX_ABSTRACT(s))
2297 len--;
2298 else {
2299 seq_putc(seq, '@');
2300 i++;
2301 }
2302 for ( ; i < len; i++)
2303 seq_putc(seq, u->addr->name->sun_path[i]);
2304 }
2305 unix_state_unlock(s);
2306 seq_putc(seq, '\n');
2307 }
2308
2309 return 0;
2310}
2311
2312static const struct seq_operations unix_seq_ops = {
2313 .start = unix_seq_start,
2314 .next = unix_seq_next,
2315 .stop = unix_seq_stop,
2316 .show = unix_seq_show,
2317};
2318
2319static int unix_seq_open(struct inode *inode, struct file *file)
2320{
2321 return seq_open_net(inode, file, &unix_seq_ops,
2322 sizeof(struct unix_iter_state));
2323}
2324
2325static const struct file_operations unix_seq_fops = {
2326 .owner = THIS_MODULE,
2327 .open = unix_seq_open,
2328 .read = seq_read,
2329 .llseek = seq_lseek,
2330 .release = seq_release_net,
2331};
2332
2333#endif
2334
2335static const struct net_proto_family unix_family_ops = {
2336 .family = PF_UNIX,
2337 .create = unix_create,
2338 .owner = THIS_MODULE,
2339};
2340
2341
2342static int __net_init unix_net_init(struct net *net)
2343{
2344 int error = -ENOMEM;
2345
2346 net->unx.sysctl_max_dgram_qlen = 10;
2347 if (unix_sysctl_register(net))
2348 goto out;
2349
2350#ifdef CONFIG_PROC_FS
2351 if (!proc_net_fops_create(net, "unix", 0, &unix_seq_fops)) {
2352 unix_sysctl_unregister(net);
2353 goto out;
2354 }
2355#endif
2356 error = 0;
2357out:
2358 return error;
2359}
2360
2361static void __net_exit unix_net_exit(struct net *net)
2362{
2363 unix_sysctl_unregister(net);
2364 proc_net_remove(net, "unix");
2365}
2366
2367static struct pernet_operations unix_net_ops = {
2368 .init = unix_net_init,
2369 .exit = unix_net_exit,
2370};
2371
2372static int __init af_unix_init(void)
2373{
2374 int rc = -1;
2375 struct sk_buff *dummy_skb;
2376
2377 BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));
2378
2379 rc = proto_register(&unix_proto, 1);
2380 if (rc != 0) {
2381 printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
2382 __func__);
2383 goto out;
2384 }
2385
2386 sock_register(&unix_family_ops);
2387 register_pernet_subsys(&unix_net_ops);
2388out:
2389 return rc;
2390}
2391
2392static void __exit af_unix_exit(void)
2393{
2394 sock_unregister(PF_UNIX);
2395 proto_unregister(&unix_proto);
2396 unregister_pernet_subsys(&unix_net_ops);
2397}
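
/*
 * Registered as an fs_initcall(): early enough that later device initcalls
 * and early user space (e.g. modprobe via request_module()) can already
 * create AF_UNIX sockets.
 */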
2403fs_initcall(af_unix_init);
2404module_exit(af_unix_exit);
2405
2406MODULE_LICENSE("GPL");
2407MODULE_ALIAS_NETPROTO(PF_UNIX);
2408