#ifndef _SOCK_H
#define _SOCK_H

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_nulls.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/security.h>

#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/poll.h>

#include <asm/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>
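
/* Define this to get the SOCK_DBG debugging facility. */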
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
static inline void __attribute__ ((format (printf, 2, 3)))
SOCK_DEBUG(struct sock *sk, const char *msg, ...)
{
}
#endif
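
/*
 * This is the per-socket lock.  The spinlock provides synchronization
 * between user context and software interrupt processing, whereas the
 * mini-semaphore (the "owned" flag plus the wait queue) synchronizes
 * multiple lock_sock() users amongst themselves.
 */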
typedef struct {
	spinlock_t		slock;
	int			owned;
	wait_queue_head_t	wq;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
} socket_lock_t;

struct sock;
struct proto;
struct net;
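
/**
 * struct sock_common - minimal network layer representation of sockets
 * @skc_node: main hash linkage for various protocol lookup tables
 * @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
 * @skc_refcnt: reference count
 * @skc_hash: hash value used with various protocol lookup tables
 * @skc_family: network address family
 * @skc_state: connection state
 * @skc_reuse: %SO_REUSEADDR setting
 * @skc_bound_dev_if: bound device index if != 0
 * @skc_bind_node: bind hash linkage for various protocol lookup tables
 * @skc_prot: protocol handlers inside a network family
 * @skc_net: reference to the network namespace of this socket
 *
 * This is the minimal network layer representation of sockets, the header
 * shared by struct sock and struct inet_timewait_sock.
 */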
struct sock_common {
	union {
		struct hlist_node	skc_node;
		struct hlist_nulls_node skc_nulls_node;
	};
	atomic_t		skc_refcnt;

	unsigned int		skc_hash;
	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse;
	int			skc_bound_dev_if;
	struct hlist_node	skc_bind_node;
	struct proto		*skc_prot;
#ifdef CONFIG_NET_NS
	struct net		*skc_net;
#endif
};
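
/**
 * struct sock - network layer representation of sockets
 *
 * Begins with struct sock_common (aliased by the sk_* macros below) and adds
 * the receive/send buffers and queues, memory accounting, the per-socket
 * lock, the cached route, timers, and the callbacks (sk_state_change,
 * sk_data_ready, sk_write_space, sk_error_report, sk_backlog_rcv,
 * sk_destruct) that protocol implementations override.
 */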
struct sock {
	struct sock_common	__sk_common;
#define sk_node			__sk_common.skc_node
#define sk_nulls_node		__sk_common.skc_nulls_node
#define sk_refcnt		__sk_common.skc_refcnt

#define sk_copy_start		__sk_common.skc_hash
#define sk_hash			__sk_common.skc_hash
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_prot			__sk_common.skc_prot
#define sk_net			__sk_common.skc_net
	kmemcheck_bitfield_begin(flags);
	unsigned int		sk_shutdown  : 2,
				sk_no_check  : 2,
				sk_userlocks : 4,
				sk_protocol  : 8,
				sk_type      : 16;
	kmemcheck_bitfield_end(flags);
	int			sk_rcvbuf;
	socket_lock_t		sk_lock;
	struct {
		struct sk_buff *head;
		struct sk_buff *tail;
	} sk_backlog;
	wait_queue_head_t	*sk_sleep;
	struct dst_entry	*sk_dst_cache;
#ifdef CONFIG_XFRM
	struct xfrm_policy	*sk_policy[2];
#endif
	rwlock_t		sk_dst_lock;
	atomic_t		sk_rmem_alloc;
	atomic_t		sk_wmem_alloc;
	atomic_t		sk_omem_alloc;
	int			sk_sndbuf;
	struct sk_buff_head	sk_receive_queue;
	struct sk_buff_head	sk_write_queue;
#ifdef CONFIG_NET_DMA
	struct sk_buff_head	sk_async_wait_queue;
#endif
	int			sk_wmem_queued;
	int			sk_forward_alloc;
	gfp_t			sk_allocation;
	int			sk_route_caps;
	int			sk_gso_type;
	unsigned int		sk_gso_max_size;
	int			sk_rcvlowat;
	unsigned long		sk_flags;
	unsigned long		sk_lingertime;
	struct sk_buff_head	sk_error_queue;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	atomic_t		sk_drops;
	unsigned short		sk_ack_backlog;
	unsigned short		sk_max_ack_backlog;
	__u32			sk_priority;
	struct ucred		sk_peercred;
	long			sk_rcvtimeo;
	long			sk_sndtimeo;
	struct sk_filter	*sk_filter;
	void			*sk_protinfo;
	struct timer_list	sk_timer;
	ktime_t			sk_stamp;
	struct socket		*sk_socket;
	void			*sk_user_data;
	struct page		*sk_sndmsg_page;
	struct sk_buff		*sk_send_head;
	__u32			sk_sndmsg_off;
	int			sk_write_pending;
#ifdef CONFIG_SECURITY
	void			*sk_security;
#endif
	__u32			sk_mark;

	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk, int bytes);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
	void			(*sk_destruct)(struct sock *sk);
};
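
/* Hashed lists helper routines */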
static inline struct sock *__sk_head(const struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(const struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
}

static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
}

static inline struct sock *sk_next(const struct sock *sk)
{
	return sk->sk_node.next ?
		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
}

static inline struct sock *sk_nulls_next(const struct sock *sk)
{
	return (!is_a_nulls(sk->sk_nulls_node.next)) ?
		hlist_nulls_entry(sk->sk_nulls_node.next,
				  struct sock, sk_nulls_node) :
		NULL;
}

static inline int sk_unhashed(const struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline int sk_hashed(const struct sock *sk)
{
	return !sk_unhashed(sk);
}

static __inline__ void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static __inline__ void sk_nulls_node_init(struct hlist_nulls_node *node)
{
	node->pprev = NULL;
}

static __inline__ void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

static __inline__ int __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return 1;
	}
	return 0;
}
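
/*
 * Grab socket reference count.  This operation is valid only when @sk is
 * ALREADY grabbed, e.g. it has just been found in a hash table or a list
 * under a lock that prevents concurrent modification.
 */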
static inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->sk_refcnt);
}

static inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->sk_refcnt);
}

static __inline__ int sk_del_node_init(struct sock *sk)
{
	int rc = __sk_del_node_init(sk);

	if (rc) {
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static __inline__ int __sk_nulls_del_node_init_rcu(struct sock *sk)
{
	if (sk_hashed(sk)) {
		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
		return 1;
	}
	return 0;
}

static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk)
{
	int rc = __sk_nulls_del_node_init_rcu(sk);

	if (rc) {
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}

static __inline__ void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	sock_hold(sk);
	__sk_nulls_add_node_rcu(sk, list);
}

static __inline__ void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static __inline__ void sk_add_bind_node(struct sock *sk,
					struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_node)
#define sk_nulls_for_each(__sk, node, list) \
	hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
#define sk_nulls_for_each_rcu(__sk, node, list) \
	hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
#define sk_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_from(__sk, node, sk_node)
#define sk_nulls_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
		hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
#define sk_for_each_continue(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_continue(__sk, node, sk_node)
#define sk_for_each_safe(__sk, node, tmp, list) \
	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
#define sk_for_each_bound(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_bind_node)
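
/*
 * Illustrative sketch (not part of the original header): walking a protocol
 * hash chain with sk_for_each().  "head" and handle() are hypothetical, and
 * the caller must hold the lock protecting the list:
 *
 *	struct sock *sk;
 *	struct hlist_node *node;
 *
 *	sk_for_each(sk, node, head)
 *		if (sk->sk_family == AF_INET)
 *			handle(sk);
 */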

enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE,
	SOCK_DBG,
	SOCK_RCVTSTAMP,
	SOCK_RCVTSTAMPNS,
	SOCK_LOCALROUTE,
	SOCK_QUEUE_SHRUNK,
	SOCK_TIMESTAMPING_TX_HARDWARE,
	SOCK_TIMESTAMPING_TX_SOFTWARE,
	SOCK_TIMESTAMPING_RX_HARDWARE,
	SOCK_TIMESTAMPING_RX_SOFTWARE,
	SOCK_TIMESTAMPING_SOFTWARE,
	SOCK_TIMESTAMPING_RAW_HARDWARE,
	SOCK_TIMESTAMPING_SYS_HARDWARE,
};

static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
	nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline int sock_flag(struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}

static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

static inline int sk_acceptq_is_full(struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}

static inline int sk_stream_min_wspace(struct sock *sk)
{
	return sk->sk_wmem_queued >> 1;
}

static inline int sk_stream_wspace(struct sock *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

extern void sk_stream_write_space(struct sock *sk);

static inline int sk_stream_memory_free(struct sock *sk)
{
	return sk->sk_wmem_queued < sk->sk_sndbuf;
}
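
/* The per-socket spinlock must be held here. */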
static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	if (!sk->sk_backlog.tail) {
		sk->sk_backlog.head = sk->sk_backlog.tail = skb;
	} else {
		sk->sk_backlog.tail->next = skb;
		sk->sk_backlog.tail = skb;
	}
	skb->next = NULL;
}

static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	return sk->sk_backlog_rcv(sk, skb);
}

#define sk_wait_event(__sk, __timeo, __condition)			\
	({	int __rc;						\
		release_sock(__sk);					\
		__rc = __condition;					\
		if (!__rc) {						\
			*(__timeo) = schedule_timeout(*(__timeo));	\
		}							\
		lock_sock(__sk);					\
		__rc = __condition;					\
		__rc;							\
	})
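
/*
 * sk_wait_event() releases the (held) socket lock, sleeps for up to
 * *(__timeo) jiffies unless __condition is already true, then re-acquires
 * the lock and re-evaluates the condition.  Illustrative sketch of the usual
 * pattern (hypothetical caller variables rc and timeo; compare the
 * sk_wait_data() helper declared below):
 *
 *	DEFINE_WAIT(wait);
 *
 *	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
 *	rc = sk_wait_event(sk, timeo,
 *			   !skb_queue_empty(&sk->sk_receive_queue));
 *	finish_wait(sk->sk_sleep, &wait);
 */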

extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
extern int sk_stream_error(struct sock *sk, int flags, int err);
extern void sk_stream_kill_queues(struct sock *sk);

extern int sk_wait_data(struct sock *sk, long *timeo);

struct request_sock_ops;
struct timewait_sock_ops;
struct inet_hashinfo;
struct raw_hashinfo;
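
/*
 * Networking protocol blocks we attach to sockets:
 * the socket layer -> transport layer interface.
 */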
struct proto {
	void			(*close)(struct sock *sk,
					 long timeout);
	int			(*connect)(struct sock *sk,
					   struct sockaddr *uaddr,
					   int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept) (struct sock *sk, int flags, int *err);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	void			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					unsigned int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					int __user *option);
#ifdef CONFIG_COMPAT
	int			(*compat_setsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					unsigned int optlen);
	int			(*compat_getsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					int __user *option);
#endif
	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg, size_t len);
	int			(*recvmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg,
					   size_t len, int noblock, int flags,
					   int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv) (struct sock *sk,
						struct sk_buff *skb);

	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

#ifdef CONFIG_PROC_FS
	unsigned int		inuse_idx;
#endif

	void			(*enter_memory_pressure)(struct sock *sk);
	atomic_t		*memory_allocated;
	struct percpu_counter	*sockets_allocated;
	int			*memory_pressure;
	int			*sysctl_mem;
	int			*sysctl_wmem;
	int			*sysctl_rmem;
	int			max_header;

	struct kmem_cache	*slab;
	unsigned int		obj_size;
	int			slab_flags;

	struct percpu_counter	*orphan_count;

	struct request_sock_ops	*rsk_prot;
	struct timewait_sock_ops *twsk_prot;

	union {
		struct inet_hashinfo	*hashinfo;
		struct udp_table	*udp_table;
		struct raw_hashinfo	*raw_hash;
	} h;

	struct module		*owner;

	char			name[32];

	struct list_head	node;
#ifdef SOCK_REFCNT_DEBUG
	atomic_t		socks;
#endif
};

extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);

#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
	atomic_inc(&sk->sk_prot->socks);
}

static inline void sk_refcnt_debug_dec(struct sock *sk)
{
	atomic_dec(&sk->sk_prot->socks);
	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
}

static inline void sk_refcnt_debug_release(const struct sock *sk)
{
	if (atomic_read(&sk->sk_refcnt) != 1)
		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
		       sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
}
#else
#define sk_refcnt_debug_inc(sk) do { } while (0)
#define sk_refcnt_debug_dec(sk) do { } while (0)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif

#ifdef CONFIG_PROC_FS
extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
#else
static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
		int inc)
{
}
#endif

static inline void __sk_prot_rehash(struct sock *sk)
{
	sk->sk_prot->unhash(sk);
	sk->sk_prot->hash(sk);
}

#define SOCK_DESTROY_TIME (10*HZ)

#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8

struct sock_iocb {
	struct list_head	list;

	int			flags;
	int			size;
	struct socket		*sock;
	struct sock		*sk;
	struct scm_cookie	*scm;
	struct msghdr		*msg, async_msg;
	struct kiocb		*kiocb;
};

static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
{
	return (struct sock_iocb *)iocb->private;
}

static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
{
	return si->kiocb;
}

struct socket_alloc {
	struct socket socket;
	struct inode vfs_inode;
};

static inline struct socket *SOCKET_I(struct inode *inode)
{
	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
}

static inline struct inode *SOCK_INODE(struct socket *socket)
{
	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}
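
/*
 * Socket memory accounting: protocols that set ->memory_allocated charge
 * forward-allocated memory in SK_MEM_QUANTUM (one page) units via
 * __sk_mem_schedule() and return it with __sk_mem_reclaim().
 */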
extern int __sk_mem_schedule(struct sock *sk, int size, int kind);
extern void __sk_mem_reclaim(struct sock *sk);

#define SK_MEM_QUANTUM ((int)PAGE_SIZE)
#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
#define SK_MEM_SEND	0
#define SK_MEM_RECV	1

static inline int sk_mem_pages(int amt)
{
	return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
}

static inline int sk_has_account(struct sock *sk)
{
	return !!sk->sk_prot->memory_allocated;
}

static inline int sk_wmem_schedule(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return 1;
	return size <= sk->sk_forward_alloc ||
		__sk_mem_schedule(sk, size, SK_MEM_SEND);
}

static inline int sk_rmem_schedule(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return 1;
	return size <= sk->sk_forward_alloc ||
		__sk_mem_schedule(sk, size, SK_MEM_RECV);
}

static inline void sk_mem_reclaim(struct sock *sk)
{
	if (!sk_has_account(sk))
		return;
	if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk);
}

static inline void sk_mem_reclaim_partial(struct sock *sk)
{
	if (!sk_has_account(sk))
		return;
	if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk);
}

static inline void sk_mem_charge(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return;
	sk->sk_forward_alloc -= size;
}

static inline void sk_mem_uncharge(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return;
	sk->sk_forward_alloc += size;
}

static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
	sk->sk_wmem_queued -= skb->truesize;
	sk_mem_uncharge(sk, skb->truesize);
	__kfree_skb(skb);
}
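
/*
 * Used by processes to "lock" a socket state, so that interrupts and bottom
 * half handlers won't change it from under us.  While the lock is owned,
 * incoming packets are deferred to the backlog queue, which release_sock()
 * runs before handing the lock back.
 */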
#define sock_owned_by_user(sk)	((sk)->sk_lock.owned)
#define sock_lock_init_class_and_name(sk, sname, skey, name, key)	\
do {									\
	sk->sk_lock.owned = 0;						\
	init_waitqueue_head(&sk->sk_lock.wq);				\
	spin_lock_init(&(sk)->sk_lock.slock);				\
	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
			sizeof((sk)->sk_lock));				\
	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
			(skey), (sname));				\
	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
} while (0)

extern void lock_sock_nested(struct sock *sk, int subclass);

static inline void lock_sock(struct sock *sk)
{
	lock_sock_nested(sk, 0);
}

extern void release_sock(struct sock *sk);

#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
#define bh_lock_sock_nested(__sk) \
				spin_lock_nested(&((__sk)->sk_lock.slock), \
				SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))
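
/*
 * Illustrative locking sketch (not from the original header).  Process
 * context takes the sleeping lock:
 *
 *	lock_sock(sk);
 *	... touch state that softirq handlers also use ...
 *	release_sock(sk);
 *
 * while receive softirq context uses the underlying spinlock and defers
 * work to the backlog whenever a process owns the socket:
 *
 *	bh_lock_sock(sk);
 *	if (!sock_owned_by_user(sk))
 *		sk_backlog_rcv(sk, skb);
 *	else
 *		sk_add_backlog(sk, skb);
 *	bh_unlock_sock(sk);
 */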

extern struct sock		*sk_alloc(struct net *net, int family,
					  gfp_t priority,
					  struct proto *prot);
extern void			sk_free(struct sock *sk);
extern void			sk_release_kernel(struct sock *sk);
extern struct sock		*sk_clone(const struct sock *sk,
					  const gfp_t priority);

extern struct sk_buff		*sock_wmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern struct sk_buff		*sock_rmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern void			sock_wfree(struct sk_buff *skb);
extern void			sock_rfree(struct sk_buff *skb);

extern int			sock_setsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						unsigned int optlen);

extern int			sock_getsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int __user *optlen);
extern struct sk_buff		*sock_alloc_send_skb(struct sock *sk,
						     unsigned long size,
						     int noblock,
						     int *errcode);
extern struct sk_buff		*sock_alloc_send_pskb(struct sock *sk,
						      unsigned long header_len,
						      unsigned long data_len,
						      int noblock,
						      int *errcode);
extern void			*sock_kmalloc(struct sock *sk, int size,
					      gfp_t priority);
extern void			sock_kfree_s(struct sock *sk, void *mem, int size);
extern void			sk_send_sigurg(struct sock *sk);
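
/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * does not implement a particular function.
 */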
extern int			sock_no_bind(struct socket *,
					     struct sockaddr *, int);
extern int			sock_no_connect(struct socket *,
						struct sockaddr *, int, int);
extern int			sock_no_socketpair(struct socket *,
						   struct socket *);
extern int			sock_no_accept(struct socket *,
					       struct socket *, int);
extern int			sock_no_getname(struct socket *,
						struct sockaddr *, int *, int);
extern unsigned int		sock_no_poll(struct file *, struct socket *,
					     struct poll_table_struct *);
extern int			sock_no_ioctl(struct socket *, unsigned int,
					      unsigned long);
extern int			sock_no_listen(struct socket *, int);
extern int			sock_no_shutdown(struct socket *, int);
extern int			sock_no_getsockopt(struct socket *, int , int,
						   char __user *, int __user *);
extern int			sock_no_setsockopt(struct socket *, int, int,
						   char __user *, unsigned int);
extern int			sock_no_sendmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t);
extern int			sock_no_recvmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t, int);
extern int			sock_no_mmap(struct file *file,
					     struct socket *sock,
					     struct vm_area_struct *vma);
extern ssize_t			sock_no_sendpage(struct socket *sock,
						 struct page *page,
						 int offset, size_t size,
						 int flags);
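
/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * uses the inet style: they simply delegate to the struct proto
 * handlers reachable through sk->sk_prot.
 */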
extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen);
extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size, int flags);
extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, unsigned int optlen);
extern int compat_sock_common_getsockopt(struct socket *sock, int level,
		int optname, char __user *optval, int __user *optlen);
extern int compat_sock_common_setsockopt(struct socket *sock, int level,
		int optname, char __user *optval, unsigned int optlen);

extern void sk_common_release(struct sock *sk);

extern void sock_init_data(struct socket *sock, struct sock *sk);

static inline void sk_filter_release(struct sk_filter *fp)
{
	if (atomic_dec_and_test(&fp->refcnt))
		kfree(fp);
}

static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	atomic_sub(size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	atomic_inc(&fp->refcnt);
	atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
}
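
/*
 * Socket reference counting: sock_hold() and sock_put() operate on
 * sk_refcnt; sock_put() releases the socket with sk_free() once the last
 * reference is dropped.
 */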
static inline void sock_put(struct sock *sk)
{
	if (atomic_dec_and_test(&sk->sk_refcnt))
		sk_free(sk);
}

extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
			  const int nested);

static inline void sk_set_socket(struct sock *sk, struct socket *sock)
{
	sk->sk_socket = sock;
}
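
/*
 * Detach the socket from its owning process context: mark it dead and drop
 * the struct socket and wait-queue linkage.  sock_graft() below does the
 * reverse, attaching a sock to a struct socket.
 */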
static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sock_set_flag(sk, SOCK_DEAD);
	sk_set_socket(sk, NULL);
	sk->sk_sleep = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_sleep = &parent->wait;
	parent->sk = sk;
	sk_set_socket(sk, parent);
	security_sock_graft(sk, parent);
	write_unlock_bh(&sk->sk_callback_lock);
}

extern int sock_i_uid(struct sock *sk);
extern unsigned long sock_i_ino(struct sock *sk);

static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
	return sk->sk_dst_cache;
}

static inline struct dst_entry *
sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	read_lock(&sk->sk_dst_lock);
	dst = sk->sk_dst_cache;
	if (dst)
		dst_hold(dst);
	read_unlock(&sk->sk_dst_lock);
	return dst;
}

static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = dst;
	dst_release(old_dst);
}

static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_set(sk, dst);
	write_unlock(&sk->sk_dst_lock);
}

static inline void
__sk_dst_reset(struct sock *sk)
{
	struct dst_entry *old_dst;

	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = NULL;
	dst_release(old_dst);
}

static inline void
sk_dst_reset(struct sock *sk)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_reset(sk);
	write_unlock(&sk->sk_dst_lock);
}

extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);

extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);

static inline int sk_can_gso(const struct sock *sk)
{
	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
}

extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);

static inline int skb_copy_to_page(struct sock *sk, char __user *from,
				   struct sk_buff *skb, struct page *page,
				   int off, int copy)
{
	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from,
						      page_address(page) + off,
						      copy, 0, &err);
		if (err)
			return err;
		skb->csum = csum_block_add(skb->csum, csum, skb->len);
	} else if (copy_from_user(page_address(page) + off, from, copy))
		return -EFAULT;

	skb->len += copy;
	skb->data_len += copy;
	skb->truesize += copy;
	sk->sk_wmem_queued += copy;
	sk_mem_charge(sk, copy);
	return 0;
}
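
/**
 * sk_wmem_alloc_get - returns write allocations
 * @sk: socket
 *
 * Returns sk_wmem_alloc minus the initial offset of one; that extra
 * reference is held by the socket itself so that freeing is deferred until
 * all in-flight packets have been released.
 */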
static inline int sk_wmem_alloc_get(const struct sock *sk)
{
	return atomic_read(&sk->sk_wmem_alloc) - 1;
}
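
/**
 * sk_rmem_alloc_get - returns read allocations
 * @sk: socket
 *
 * Returns sk_rmem_alloc
 */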
static inline int sk_rmem_alloc_get(const struct sock *sk)
{
	return atomic_read(&sk->sk_rmem_alloc);
}
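
/**
 * sk_has_allocations - check whether the socket has read or write allocations
 * @sk: socket
 *
 * Returns true if the socket has outstanding read or write allocations.
 */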
static inline int sk_has_allocations(const struct sock *sk)
{
	return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
}
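
/**
 * sk_has_sleeper - check if there are any waiting processes
 * @sk: socket
 *
 * Returns true if the socket has processes sleeping on sk_sleep.  The
 * smp_mb__after_lock() below pairs with the smp_mb() in sock_poll_wait():
 * the waiter's queue addition must be visible before the waker tests the
 * queue, otherwise the wakeup can be missed and the waiter may sleep
 * forever.
 */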
static inline int sk_has_sleeper(struct sock *sk)
{
	smp_mb__after_lock();
	return sk->sk_sleep && waitqueue_active(sk->sk_sleep);
}
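
/**
 * sock_poll_wait - place memory barrier behind the poll_wait call
 * @filp:         file
 * @wait_address: socket wait queue
 * @p:            poll_table
 *
 * See the comment above sk_has_sleeper().
 */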
static inline void sock_poll_wait(struct file *filp,
		wait_queue_head_t *wait_address, poll_table *p)
{
	if (p && wait_address) {
		poll_wait(filp, wait_address, p);
		smp_mb();
	}
}
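
/*
 * skb_set_owner_w()/skb_set_owner_r() charge a buffer against the socket's
 * write/read memory and set the destructor (sock_wfree/sock_rfree) that
 * will uncharge it when the skb is freed.
 */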
static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_wfree;
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
}

static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
}

extern void sk_reset_timer(struct sock *sk, struct timer_list *timer,
			   unsigned long expires);

extern void sk_stop_timer(struct sock *sk, struct timer_list *timer);

extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		return -ENOMEM;
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->sk_error_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);
	return 0;
}
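
/*
 * Recover an error report and clear it atomically
 */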
static inline int sock_error(struct sock *sk)
{
	int err;
	if (likely(!sk->sk_err))
		return 0;
	err = xchg(&sk->sk_err, 0);
	return -err;
}

static inline unsigned long sock_wspace(struct sock *sk)
{
	int amt = 0;

	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
		if (amt < 0)
			amt = 0;
	}
	return amt;
}

static inline void sk_wake_async(struct sock *sk, int how, int band)
{
	if (sk->sk_socket && sk->sk_socket->fasync_list)
		sock_wake_async(sk->sk_socket, how, band);
}

#define SOCK_MIN_SNDBUF 2048
#define SOCK_MIN_RCVBUF 256

static inline void sk_stream_moderate_sndbuf(struct sock *sk)
{
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
		sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
	}
}

struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);

static inline struct page *sk_stream_alloc_page(struct sock *sk)
{
	struct page *page = NULL;

	page = alloc_pages(sk->sk_allocation, 0);
	if (!page) {
		sk->sk_prot->enter_memory_pressure(sk);
		sk_stream_moderate_sndbuf(sk);
	}
	return page;
}
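
/*
 * Default write policy as shown to user space via poll/select/SIGIO:
 * the socket counts as writable while less than half of sk_sndbuf is
 * committed to in-flight data.
 */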
static inline int sock_writeable(const struct sock *sk)
{
	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
}

static inline gfp_t gfp_any(void)
{
	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}

static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_rcvtimeo;
}

static inline long sock_sndtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_sndtimeo;
}

static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
	return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
}
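
/*
 * Alas, with timeout socket operations are not restartable.
 * Compare this to poll().
 */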
static inline int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}

extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
				  struct sk_buff *skb);

static __inline__ void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
	ktime_t kt = skb->tstamp;
	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);

	if (sock_flag(sk, SOCK_RCVTSTAMP) ||
	    sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE) ||
	    (kt.tv64 && sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE)) ||
	    (hwtstamps->hwtstamp.tv64 &&
	     sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE)) ||
	    (hwtstamps->syststamp.tv64 &&
	     sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE)))
		__sock_recv_timestamp(msg, sk, skb);
	else
		sk->sk_stamp = kt;
}
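
/**
 * sock_tx_timestamp - check whether the outgoing packet is to be time stamped
 * @msg:  outgoing packet
 * @sk:   socket sending this packet
 * @shtx: filled with instructions for time stamping
 *
 * Currently only depends on the SOCK_TIMESTAMPING* flags.
 */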
extern int sock_tx_timestamp(struct msghdr *msg,
			     struct sock *sk,
			     union skb_shared_tx *shtx);
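
/*
 * sk_eat_skb() unlinks a consumed buffer from the receive queue; it must be
 * called with the socket locked (or interrupts disabled) so the queue
 * operation is safe.  With CONFIG_NET_DMA, data copied early by the DMA
 * engine is parked on sk_async_wait_queue instead of being freed.
 */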
#ifdef CONFIG_NET_DMA
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	if (!copied_early)
		__kfree_skb(skb);
	else
		__skb_queue_tail(&sk->sk_async_wait_queue, skb);
}
#else
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}
#endif

static inline
struct net *sock_net(const struct sock *sk)
{
#ifdef CONFIG_NET_NS
	return sk->sk_net;
#else
	return &init_net;
#endif
}

static inline
void sock_net_set(struct sock *sk, struct net *net)
{
#ifdef CONFIG_NET_NS
	sk->sk_net = net;
#endif
}
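
/*
 * Kernel sockets, e.g. rtnl or icmp_socket, are a part of a namespace but
 * should not hold a reference to it, so that the namespace can be stopped;
 * sockets switched with sk_change_net() must be released with
 * sk_release_kernel().
 */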
static inline void sk_change_net(struct sock *sk, struct net *net)
{
	put_net(sock_net(sk));
	sock_net_set(sk, hold_net(net));
}

static inline struct sock *skb_steal_sock(struct sk_buff *skb)
{
	if (unlikely(skb->sk)) {
		struct sock *sk = skb->sk;

		skb->destructor = NULL;
		skb->sk = NULL;
		return sk;
	}
	return NULL;
}

extern void sock_enable_timestamp(struct sock *sk, int flag);
extern int sock_get_timestamp(struct sock *, struct timeval __user *);
extern int sock_get_timestampns(struct sock *, struct timespec __user *);
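
/*
 * Enable debug/info messages: net_msg_warn gates the NETDEBUG() and
 * LIMIT_NETDEBUG() macros below.
 */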
extern int net_msg_warn;
#define NETDEBUG(fmt, args...) \
	do { if (net_msg_warn) printk(fmt,##args); } while (0)

#define LIMIT_NETDEBUG(fmt, args...) \
	do { if (net_msg_warn && net_ratelimit()) printk(fmt,##args); } while(0)

extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

extern void sk_init(void);

extern int sysctl_optmem_max;

extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;

#endif	/* _SOCK_H */