#ifndef _SOCK_H
#define _SOCK_H

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_nulls.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/security.h>
#include <linux/slab.h>

#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/poll.h>

#include <linux/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>

#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
static inline void __attribute__ ((format (printf, 2, 3)))
SOCK_DEBUG(struct sock *sk, const char *msg, ...)
{
}
#endif
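
/*
 * SOCK_DEBUG() only produces output for sockets that have the SOCK_DBG flag
 * set, which userspace requests with setsockopt(SOL_SOCKET, SO_DEBUG, ...).
 * A typical call site in protocol code looks roughly like this (the format
 * string and arguments are illustrative only):
 *
 *	SOCK_DEBUG(sk, "%s: state %d\n", __func__, sk->sk_state);
 */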
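/* This is the per-socket lock.  The spinlock (slock) provides synchronization
 * between user context and softirq processing, while "owned" plus the wait
 * queue (wq) act as a mutex-like construct serializing user-context lockers:
 * lock_sock() marks the socket owned and sleeps on wq if someone else already
 * owns it, and release_sock() clears "owned", processes any backlogged
 * packets and wakes the next waiter.  dep_map lets lockdep track this
 * mutex-alike construct in addition to slock itself.
 */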
typedef struct {
	spinlock_t		slock;
	int			owned;
	wait_queue_head_t	wq;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
} socket_lock_t;

struct sock;
struct proto;
struct net;

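/*
 * struct sock_common is the minimal network-layer representation of a
 * socket: the part shared by struct sock and the lookup-only variants such
 * as timewait sockets, so that hash-table lookup code can work on either.
 *
 *	@skc_daddr: foreign IPv4 address
 *	@skc_rcv_saddr: bound local IPv4 address
 *	@skc_hash / @skc_u16hashes: hash value(s) used by protocol lookup tables
 *	@skc_family: network address family
 *	@skc_state: connection state
 *	@skc_reuse: SO_REUSEADDR setting
 *	@skc_bound_dev_if: device index bound with SO_BINDTODEVICE, or 0
 *	@skc_bind_node / @skc_portaddr_node: bind / port-addr hash linkage
 *	@skc_prot: protocol handler table (struct proto)
 *	@skc_net: owning network namespace
 *	@skc_node / @skc_nulls_node: main hash-table linkage
 *	@skc_refcnt: reference count
 *
 * Fields between skc_dontcopy_begin[] and skc_dontcopy_end[] are the part
 * that is not copied when a socket is cloned.
 */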
struct sock_common {
	__be32			skc_daddr;
	__be32			skc_rcv_saddr;

	union  {
		unsigned int	skc_hash;
		__u16		skc_u16hashes[2];
	};
	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse;
	int			skc_bound_dev_if;
	union {
		struct hlist_node	skc_bind_node;
		struct hlist_nulls_node skc_portaddr_node;
	};
	struct proto		*skc_prot;
#ifdef CONFIG_NET_NS
	struct net		*skc_net;
#endif

	int			skc_dontcopy_begin[0];

	union {
		struct hlist_node	skc_node;
		struct hlist_nulls_node skc_nulls_node;
	};
	int			skc_tx_queue_mapping;
	atomic_t		skc_refcnt;

	int			skc_dontcopy_end[0];
};
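/*
 * struct sock is the full network-layer representation of a socket.  It must
 * begin with struct sock_common so that code which only knows about the
 * common part (including timewait socket lookup) can treat both layouts
 * interchangeably; the #defines below simply alias the common fields.
 */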
struct sock {
	struct sock_common	__sk_common;
#define sk_node			__sk_common.skc_node
#define sk_nulls_node		__sk_common.skc_nulls_node
#define sk_refcnt		__sk_common.skc_refcnt
#define sk_tx_queue_mapping	__sk_common.skc_tx_queue_mapping

#define sk_dontcopy_begin	__sk_common.skc_dontcopy_begin
#define sk_dontcopy_end		__sk_common.skc_dontcopy_end
#define sk_hash			__sk_common.skc_hash
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_prot			__sk_common.skc_prot
#define sk_net			__sk_common.skc_net
	socket_lock_t		sk_lock;
	struct sk_buff_head	sk_receive_queue;
	struct {
		atomic_t	rmem_alloc;
		int		len;
		struct sk_buff	*head;
		struct sk_buff	*tail;
	} sk_backlog;
#define sk_rmem_alloc sk_backlog.rmem_alloc
	int			sk_forward_alloc;
#ifdef CONFIG_RPS
	__u32			sk_rxhash;
#endif
	atomic_t		sk_drops;
	int			sk_rcvbuf;

	struct sk_filter __rcu	*sk_filter;
	struct socket_wq __rcu	*sk_wq;

#ifdef CONFIG_NET_DMA
	struct sk_buff_head	sk_async_wait_queue;
#endif

#ifdef CONFIG_XFRM
	struct xfrm_policy	*sk_policy[2];
#endif
	unsigned long		sk_flags;
	struct dst_entry	*sk_dst_cache;
	spinlock_t		sk_dst_lock;
	atomic_t		sk_wmem_alloc;
	atomic_t		sk_omem_alloc;
	int			sk_sndbuf;
	struct sk_buff_head	sk_write_queue;
	kmemcheck_bitfield_begin(flags);
	unsigned int		sk_shutdown  : 2,
				sk_no_check  : 2,
				sk_userlocks : 4,
				sk_protocol  : 8,
				sk_type      : 16;
	kmemcheck_bitfield_end(flags);
	int			sk_wmem_queued;
	gfp_t			sk_allocation;
	int			sk_route_caps;
	int			sk_route_nocaps;
	int			sk_gso_type;
	unsigned int		sk_gso_max_size;
	int			sk_rcvlowat;
	unsigned long		sk_lingertime;
	struct sk_buff_head	sk_error_queue;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	unsigned short		sk_ack_backlog;
	unsigned short		sk_max_ack_backlog;
	__u32			sk_priority;
	struct pid		*sk_peer_pid;
	const struct cred	*sk_peer_cred;
	long			sk_rcvtimeo;
	long			sk_sndtimeo;
	void			*sk_protinfo;
	struct timer_list	sk_timer;
	ktime_t			sk_stamp;
	struct socket		*sk_socket;
	void			*sk_user_data;
	struct page		*sk_sndmsg_page;
	struct sk_buff		*sk_send_head;
	__u32			sk_sndmsg_off;
	int			sk_write_pending;
#ifdef CONFIG_SECURITY
	void			*sk_security;
#endif
	__u32			sk_mark;
	u32			sk_classid;
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk, int bytes);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
	void			(*sk_destruct)(struct sock *sk);
};

static inline struct sock *sk_entry(const struct hlist_node *node)
{
	return hlist_entry(node, struct sock, sk_node);
}

static inline struct sock *__sk_head(const struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(const struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
}

static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
}

static inline struct sock *sk_next(const struct sock *sk)
{
	return sk->sk_node.next ?
		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
}

static inline struct sock *sk_nulls_next(const struct sock *sk)
{
	return (!is_a_nulls(sk->sk_nulls_node.next)) ?
		hlist_nulls_entry(sk->sk_nulls_node.next,
				  struct sock, sk_nulls_node) :
		NULL;
}

static inline int sk_unhashed(const struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline int sk_hashed(const struct sock *sk)
{
	return !sk_unhashed(sk);
}

static __inline__ void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static __inline__ void sk_nulls_node_init(struct hlist_nulls_node *node)
{
	node->pprev = NULL;
}

static __inline__ void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

static __inline__ int __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return 1;
	}
	return 0;
}

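/*
 * sock_hold() is only valid when the caller already holds a reference to the
 * socket (for example because it was just found in a hash table under the
 * appropriate lock); it cannot resurrect a socket whose refcount has already
 * dropped to zero.  Every sock_hold() must be paired with a later sock_put()
 * (or __sock_put() where the caller knows the count stays above zero).
 */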
static inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->sk_refcnt);
}

static inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->sk_refcnt);
}

static __inline__ int sk_del_node_init(struct sock *sk)
{
	int rc = __sk_del_node_init(sk);

	if (rc) {
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}
#define sk_del_node_init_rcu(sk)	sk_del_node_init(sk)

static __inline__ int __sk_nulls_del_node_init_rcu(struct sock *sk)
{
	if (sk_hashed(sk)) {
		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
		return 1;
	}
	return 0;
}

static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk)
{
	int rc = __sk_nulls_del_node_init_rcu(sk);

	if (rc) {
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static __inline__ void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	hlist_add_head_rcu(&sk->sk_node, list);
}

static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}

static __inline__ void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	sock_hold(sk);
	__sk_nulls_add_node_rcu(sk, list);
}

static __inline__ void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static __inline__ void sk_add_bind_node(struct sock *sk,
					struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_node)
#define sk_for_each_rcu(__sk, node, list) \
	hlist_for_each_entry_rcu(__sk, node, list, sk_node)
#define sk_nulls_for_each(__sk, node, list) \
	hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
#define sk_nulls_for_each_rcu(__sk, node, list) \
	hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
#define sk_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_from(__sk, node, sk_node)
#define sk_nulls_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
		hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
#define sk_for_each_safe(__sk, node, tmp, list) \
	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
#define sk_for_each_bound(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_bind_node)

enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE,
	SOCK_DBG,
	SOCK_RCVTSTAMP,
	SOCK_RCVTSTAMPNS,
	SOCK_LOCALROUTE,
	SOCK_QUEUE_SHRUNK,
	SOCK_TIMESTAMPING_TX_HARDWARE,
	SOCK_TIMESTAMPING_TX_SOFTWARE,
	SOCK_TIMESTAMPING_RX_HARDWARE,
	SOCK_TIMESTAMPING_RX_SOFTWARE,
	SOCK_TIMESTAMPING_SOFTWARE,
	SOCK_TIMESTAMPING_RAW_HARDWARE,
	SOCK_TIMESTAMPING_SYS_HARDWARE,
	SOCK_FASYNC,
	SOCK_RXQ_OVFL,
};
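
/*
 * sock_set_flag()/sock_reset_flag() below use the non-atomic __set_bit()/
 * __clear_bit(), so they are meant to be called while the socket is locked
 * or otherwise exclusively owned; sock_flag() is a plain test_bit() and may
 * be used by lockless readers.  A typical (purely illustrative) pattern:
 *
 *	lock_sock(sk);
 *	sock_set_flag(sk, SOCK_LINGER);
 *	release_sock(sk);
 *
 *	if (sock_flag(sk, SOCK_DEAD))
 *		return -EPIPE;
 */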

static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
	nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline int sock_flag(struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}

static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

static inline int sk_acceptq_is_full(struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}

static inline int sk_stream_min_wspace(struct sock *sk)
{
	return sk->sk_wmem_queued >> 1;
}

static inline int sk_stream_wspace(struct sock *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

extern void sk_stream_write_space(struct sock *sk);

static inline int sk_stream_memory_free(struct sock *sk)
{
	return sk->sk_wmem_queued < sk->sk_sndbuf;
}

static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	skb_dst_force(skb);

	if (!sk->sk_backlog.tail)
		sk->sk_backlog.head = skb;
	else
		sk->sk_backlog.tail->next = skb;

	sk->sk_backlog.tail = skb;
	skb->next = NULL;
}

static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
{
	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);

	return qsize + skb->truesize > sk->sk_rcvbuf;
}

static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	if (sk_rcvqueues_full(sk, skb))
		return -ENOBUFS;

	__sk_add_backlog(sk, skb);
	sk->sk_backlog.len += skb->truesize;
	return 0;
}

static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	return sk->sk_backlog_rcv(sk, skb);
}
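
/*
 * __sk_add_backlog()/sk_add_backlog() must be called with the per-socket
 * spinlock held (bh_lock_sock()).  Roughly, a protocol receive path running
 * in softirq context does something like the following sketch (real callers
 * such as the TCP/UDP receive handlers add further checks):
 *
 *	bh_lock_sock(sk);
 *	if (!sock_owned_by_user(sk))
 *		sk_backlog_rcv(sk, skb);	<- process immediately
 *	else if (sk_add_backlog(sk, skb))
 *		kfree_skb(skb);			<- queues full, drop
 *	bh_unlock_sock(sk);
 *
 * Packets parked on the backlog are later replayed through sk_backlog_rcv()
 * by release_sock() when the user-context lock owner lets go of the socket.
 */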

static inline void sock_rps_record_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
	struct rps_sock_flow_table *sock_flow_table;

	rcu_read_lock();
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	rps_record_sock_flow(sock_flow_table, sk->sk_rxhash);
	rcu_read_unlock();
#endif
}

static inline void sock_rps_reset_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
	struct rps_sock_flow_table *sock_flow_table;

	rcu_read_lock();
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	rps_reset_sock_flow(sock_flow_table, sk->sk_rxhash);
	rcu_read_unlock();
#endif
}

static inline void sock_rps_save_rxhash(struct sock *sk, u32 rxhash)
{
#ifdef CONFIG_RPS
	if (unlikely(sk->sk_rxhash != rxhash)) {
		sock_rps_reset_flow(sk);
		sk->sk_rxhash = rxhash;
	}
#endif
}

#define sk_wait_event(__sk, __timeo, __condition)			\
	({	int __rc;						\
		release_sock(__sk);					\
		__rc = __condition;					\
		if (!__rc) {						\
			*(__timeo) = schedule_timeout(*(__timeo));	\
		}							\
		lock_sock(__sk);					\
		__rc = __condition;					\
		__rc;							\
	})
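
/*
 * sk_wait_event() drops the socket lock before sleeping so that incoming
 * packets (and other lockers) can make progress, then re-takes the lock and
 * re-evaluates the condition: the first evaluation may race with state that
 * changes while the lock is dropped, so only the value computed after
 * lock_sock() is returned.  __timeo is updated in place with the remaining
 * timeout.  Callers normally invoke it from a prepare_to_wait()-style wait
 * loop, as the sk_stream_wait_*() helpers declared just below do.
 */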

extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
extern int sk_stream_error(struct sock *sk, int flags, int err);
extern void sk_stream_kill_queues(struct sock *sk);

extern int sk_wait_data(struct sock *sk, long *timeo);

struct request_sock_ops;
struct timewait_sock_ops;
struct inet_hashinfo;
struct raw_hashinfo;

struct proto {
	void			(*close)(struct sock *sk,
					long timeout);
	int			(*connect)(struct sock *sk,
					struct sockaddr *uaddr,
					int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept) (struct sock *sk, int flags, int *err);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	void			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					unsigned int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					int __user *option);
#ifdef CONFIG_COMPAT
	int			(*compat_setsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					unsigned int optlen);
	int			(*compat_getsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					int __user *option);
	int			(*compat_ioctl)(struct sock *sk,
					unsigned int cmd, unsigned long arg);
#endif
	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg, size_t len);
	int			(*recvmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg,
					   size_t len, int noblock, int flags,
					   int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv) (struct sock *sk,
						struct sk_buff *skb);

	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	void			(*rehash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);
	void			(*clear_sk)(struct sock *sk, int size);

#ifdef CONFIG_PROC_FS
	unsigned int		inuse_idx;
#endif

	void			(*enter_memory_pressure)(struct sock *sk);
	atomic_long_t		*memory_allocated;
	struct percpu_counter	*sockets_allocated;

	int			*memory_pressure;
	long			*sysctl_mem;
	int			*sysctl_wmem;
	int			*sysctl_rmem;
	int			max_header;
	bool			no_autobind;

	struct kmem_cache	*slab;
	unsigned int		obj_size;
	int			slab_flags;

	struct percpu_counter	*orphan_count;

	struct request_sock_ops	*rsk_prot;
	struct timewait_sock_ops *twsk_prot;

	union {
		struct inet_hashinfo	*hashinfo;
		struct udp_table	*udp_table;
		struct raw_hashinfo	*raw_hash;
	} h;

	struct module		*owner;

	char			name[32];

	struct list_head	node;
#ifdef SOCK_REFCNT_DEBUG
	atomic_t		socks;
#endif
};
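
/*
 * A struct proto describes one transport protocol to the socket layer; it is
 * registered with proto_register() (which can also create the backing slab
 * cache for struct sock allocations when alloc_slab is non-zero) and removed
 * with proto_unregister().  A minimal, illustrative registration sketch for
 * a hypothetical protocol (the names here are made up, not from this header):
 *
 *	static struct proto my_proto = {
 *		.name	  = "MYPROTO",
 *		.owner	  = THIS_MODULE,
 *		.obj_size = sizeof(struct my_sock),
 *	};
 *
 *	err = proto_register(&my_proto, 1);
 *	...
 *	proto_unregister(&my_proto);
 *
 * Real protocols also fill in the operation pointers above (connect, sendmsg,
 * recvmsg, hash/unhash, ...) before registering.
 */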

extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);

#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
	atomic_inc(&sk->sk_prot->socks);
}

static inline void sk_refcnt_debug_dec(struct sock *sk)
{
	atomic_dec(&sk->sk_prot->socks);
	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
}

static inline void sk_refcnt_debug_release(const struct sock *sk)
{
	if (atomic_read(&sk->sk_refcnt) != 1)
		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
		       sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
}
#else
#define sk_refcnt_debug_inc(sk) do { } while (0)
#define sk_refcnt_debug_dec(sk) do { } while (0)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif

#ifdef CONFIG_PROC_FS
extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
#else
static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
		int inc)
{
}
#endif

static inline void __sk_prot_rehash(struct sock *sk)
{
	sk->sk_prot->unhash(sk);
	sk->sk_prot->hash(sk);
}

void sk_prot_clear_portaddr_nulls(struct sock *sk, int size);

#define SOCK_DESTROY_TIME (10*HZ)

#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8

struct sock_iocb {
	struct list_head	list;

	int			flags;
	int			size;
	struct socket		*sock;
	struct sock		*sk;
	struct scm_cookie	*scm;
	struct msghdr		*msg, async_msg;
	struct kiocb		*kiocb;
};

static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
{
	return (struct sock_iocb *)iocb->private;
}

static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
{
	return si->kiocb;
}

struct socket_alloc {
	struct socket socket;
	struct inode vfs_inode;
};

static inline struct socket *SOCKET_I(struct inode *inode)
{
	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
}

static inline struct inode *SOCK_INODE(struct socket *socket)
{
	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}

extern int __sk_mem_schedule(struct sock *sk, int size, int kind);
extern void __sk_mem_reclaim(struct sock *sk);

#define SK_MEM_QUANTUM ((int)PAGE_SIZE)
#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
#define SK_MEM_SEND	0
#define SK_MEM_RECV	1

static inline int sk_mem_pages(int amt)
{
	return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
}

static inline int sk_has_account(struct sock *sk)
{
	return !!sk->sk_prot->memory_allocated;
}

static inline int sk_wmem_schedule(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return 1;
	return size <= sk->sk_forward_alloc ||
		__sk_mem_schedule(sk, size, SK_MEM_SEND);
}

static inline int sk_rmem_schedule(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return 1;
	return size <= sk->sk_forward_alloc ||
		__sk_mem_schedule(sk, size, SK_MEM_RECV);
}

static inline void sk_mem_reclaim(struct sock *sk)
{
	if (!sk_has_account(sk))
		return;
	if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk);
}

static inline void sk_mem_reclaim_partial(struct sock *sk)
{
	if (!sk_has_account(sk))
		return;
	if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk);
}

static inline void sk_mem_charge(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return;
	sk->sk_forward_alloc -= size;
}

static inline void sk_mem_uncharge(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return;
	sk->sk_forward_alloc += size;
}

static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
	sk->sk_wmem_queued -= skb->truesize;
	sk_mem_uncharge(sk, skb->truesize);
	__kfree_skb(skb);
}
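
/*
 * Socket memory accounting: protocols that set ->memory_allocated account
 * their queued data against per-protocol limits.  sk_forward_alloc is the
 * per-socket "pre-paid" amount, charged against the protocol totals in whole
 * SK_MEM_QUANTUM (one page) units by __sk_mem_schedule() and returned by
 * __sk_mem_reclaim(); sk_mem_charge()/sk_mem_uncharge() then track actual
 * usage against that prepayment.  For example, scheduling 100 bytes on a
 * socket with sk_forward_alloc == 0 reserves one full quantum (4096 bytes on
 * most architectures), leaving sk_forward_alloc == 3996 after the charge, so
 * the next few small allocations need no global accounting at all.
 */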

#define sock_owned_by_user(sk)	((sk)->sk_lock.owned)

#define sock_lock_init_class_and_name(sk, sname, skey, name, key)	\
do {									\
	sk->sk_lock.owned = 0;						\
	init_waitqueue_head(&sk->sk_lock.wq);				\
	spin_lock_init(&(sk)->sk_lock.slock);				\
	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
			sizeof((sk)->sk_lock));				\
	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
			(skey), (sname));				\
	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
} while (0)

extern void lock_sock_nested(struct sock *sk, int subclass);

static inline void lock_sock(struct sock *sk)
{
	lock_sock_nested(sk, 0);
}

extern void release_sock(struct sock *sk);

#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
#define bh_lock_sock_nested(__sk) \
				spin_lock_nested(&((__sk)->sk_lock.slock), \
				SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))
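
/*
 * Locking summary: process context takes the heavyweight socket lock, which
 * may sleep and whose release flushes the backlog; softirq context may only
 * use the bh_lock_sock() spinlock variants.  An illustrative pattern (not
 * taken verbatim from any single caller):
 *
 *	In process context (e.g. a sendmsg or setsockopt handler):
 *		lock_sock(sk);
 *		... modify socket state, possibly sleeping ...
 *		release_sock(sk);
 *
 *	In softirq receive context:
 *		bh_lock_sock(sk);
 *		... sock_owned_by_user(sk) picks direct vs backlog handling ...
 *		bh_unlock_sock(sk);
 */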

extern bool lock_sock_fast(struct sock *sk);

static inline void unlock_sock_fast(struct sock *sk, bool slow)
{
	if (slow)
		release_sock(sk);
	else
		spin_unlock_bh(&sk->sk_lock.slock);
}
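
/*
 * lock_sock_fast() avoids the full lock_sock() overhead when the socket is
 * not owned by a user-context locker: it returns false after taking only the
 * bh spinlock, or true after falling back to the slow, sleeping lock.  The
 * return value must be fed back to unlock_sock_fast().  Sketch:
 *
 *	bool slow = lock_sock_fast(sk);
 *	... short, non-sleeping critical section ...
 *	unlock_sock_fast(sk, slow);
 */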

extern struct sock		*sk_alloc(struct net *net, int family,
					  gfp_t priority,
					  struct proto *prot);
extern void			sk_free(struct sock *sk);
extern void			sk_release_kernel(struct sock *sk);
extern struct sock		*sk_clone(const struct sock *sk,
					  const gfp_t priority);

extern struct sk_buff		*sock_wmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern struct sk_buff		*sock_rmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern void			sock_wfree(struct sk_buff *skb);
extern void			sock_rfree(struct sk_buff *skb);

extern int			sock_setsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						unsigned int optlen);

extern int			sock_getsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int __user *optlen);
extern struct sk_buff		*sock_alloc_send_skb(struct sock *sk,
						     unsigned long size,
						     int noblock,
						     int *errcode);
extern struct sk_buff		*sock_alloc_send_pskb(struct sock *sk,
						      unsigned long header_len,
						      unsigned long data_len,
						      int noblock,
						      int *errcode);
extern void *sock_kmalloc(struct sock *sk, int size,
			  gfp_t priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);
extern void sk_send_sigurg(struct sock *sk);

#ifdef CONFIG_CGROUPS
extern void sock_update_classid(struct sock *sk);
#else
static inline void sock_update_classid(struct sock *sk)
{
}
#endif

extern int			sock_no_bind(struct socket *,
					     struct sockaddr *, int);
extern int			sock_no_connect(struct socket *,
						struct sockaddr *, int, int);
extern int			sock_no_socketpair(struct socket *,
						   struct socket *);
extern int			sock_no_accept(struct socket *,
					       struct socket *, int);
extern int			sock_no_getname(struct socket *,
						struct sockaddr *, int *, int);
extern unsigned int		sock_no_poll(struct file *, struct socket *,
					     struct poll_table_struct *);
extern int			sock_no_ioctl(struct socket *, unsigned int,
					      unsigned long);
extern int			sock_no_listen(struct socket *, int);
extern int			sock_no_shutdown(struct socket *, int);
extern int			sock_no_getsockopt(struct socket *, int , int,
						   char __user *, int __user *);
extern int			sock_no_setsockopt(struct socket *, int, int,
						   char __user *, unsigned int);
extern int			sock_no_sendmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t);
extern int			sock_no_recvmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t, int);
extern int			sock_no_mmap(struct file *file,
					     struct socket *sock,
					     struct vm_area_struct *vma);
extern ssize_t			sock_no_sendpage(struct socket *sock,
						struct page *page,
						int offset, size_t size,
						int flags);

extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen);
extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size, int flags);
extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, unsigned int optlen);
extern int compat_sock_common_getsockopt(struct socket *sock, int level,
		int optname, char __user *optval, int __user *optlen);
extern int compat_sock_common_setsockopt(struct socket *sock, int level,
		int optname, char __user *optval, unsigned int optlen);

extern void sk_common_release(struct sock *sk);

extern void sock_init_data(struct socket *sock, struct sock *sk);

extern void sk_filter_release_rcu(struct rcu_head *rcu);

static inline void sk_filter_release(struct sk_filter *fp)
{
	if (atomic_dec_and_test(&fp->refcnt))
		call_rcu(&fp->rcu, sk_filter_release_rcu);
}

static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	atomic_sub(size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	atomic_inc(&fp->refcnt);
	atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
}

static inline void sock_put(struct sock *sk)
{
	if (atomic_dec_and_test(&sk->sk_refcnt))
		sk_free(sk);
}
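
/*
 * sock_put() drops a reference taken by sock_hold() (or implicitly held by
 * whoever created or looked up the socket) and frees the socket via sk_free()
 * when the last reference disappears; every hold must therefore be balanced
 * by exactly one put.
 */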

extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
			  const int nested);

static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
{
	sk->sk_tx_queue_mapping = tx_queue;
}

static inline void sk_tx_queue_clear(struct sock *sk)
{
	sk->sk_tx_queue_mapping = -1;
}

static inline int sk_tx_queue_get(const struct sock *sk)
{
	return sk ? sk->sk_tx_queue_mapping : -1;
}

static inline void sk_set_socket(struct sock *sk, struct socket *sock)
{
	sk_tx_queue_clear(sk);
	sk->sk_socket = sock;
}

static inline wait_queue_head_t *sk_sleep(struct sock *sk)
{
	BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0);
	return &rcu_dereference_raw(sk->sk_wq)->wait;
}

static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sock_set_flag(sk, SOCK_DEAD);
	sk_set_socket(sk, NULL);
	sk->sk_wq = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_wq = parent->wq;
	parent->sk = sk;
	sk_set_socket(sk, parent);
	security_sock_graft(sk, parent);
	write_unlock_bh(&sk->sk_callback_lock);
}

extern int sock_i_uid(struct sock *sk);
extern unsigned long sock_i_ino(struct sock *sk);

static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
	return rcu_dereference_check(sk->sk_dst_cache, rcu_read_lock_held() ||
						       sock_owned_by_user(sk) ||
						       lockdep_is_held(&sk->sk_lock.slock));
}

static inline struct dst_entry *
sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	rcu_read_lock();
	dst = rcu_dereference(sk->sk_dst_cache);
	if (dst)
		dst_hold(dst);
	rcu_read_unlock();
	return dst;
}

extern void sk_reset_txq(struct sock *sk);

static inline void dst_negative_advice(struct sock *sk)
{
	struct dst_entry *ndst, *dst = __sk_dst_get(sk);

	if (dst && dst->ops->negative_advice) {
		ndst = dst->ops->negative_advice(dst);

		if (ndst != dst) {
			rcu_assign_pointer(sk->sk_dst_cache, ndst);
			sk_reset_txq(sk);
		}
	}
}

static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	sk_tx_queue_clear(sk);
	old_dst = rcu_dereference_raw(sk->sk_dst_cache);
	rcu_assign_pointer(sk->sk_dst_cache, dst);
	dst_release(old_dst);
}

static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	spin_lock(&sk->sk_dst_lock);
	__sk_dst_set(sk, dst);
	spin_unlock(&sk->sk_dst_lock);
}

static inline void
__sk_dst_reset(struct sock *sk)
{
	__sk_dst_set(sk, NULL);
}

static inline void
sk_dst_reset(struct sock *sk)
{
	spin_lock(&sk->sk_dst_lock);
	__sk_dst_reset(sk);
	spin_unlock(&sk->sk_dst_lock);
}

extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);

extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);

static inline int sk_can_gso(const struct sock *sk)
{
	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
}

extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);

static inline void sk_nocaps_add(struct sock *sk, int flags)
{
	sk->sk_route_nocaps |= flags;
	sk->sk_route_caps &= ~flags;
}

static inline int skb_copy_to_page(struct sock *sk, char __user *from,
				   struct sk_buff *skb, struct page *page,
				   int off, int copy)
{
	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from,
						      page_address(page) + off,
						      copy, 0, &err);
		if (err)
			return err;
		skb->csum = csum_block_add(skb->csum, csum, skb->len);
	} else if (copy_from_user(page_address(page) + off, from, copy))
		return -EFAULT;

	skb->len	   += copy;
	skb->data_len	   += copy;
	skb->truesize	   += copy;
	sk->sk_wmem_queued += copy;
	sk_mem_charge(sk, copy);
	return 0;
}

static inline int sk_wmem_alloc_get(const struct sock *sk)
{
	return atomic_read(&sk->sk_wmem_alloc) - 1;
}

static inline int sk_rmem_alloc_get(const struct sock *sk)
{
	return atomic_read(&sk->sk_rmem_alloc);
}

static inline int sk_has_allocations(const struct sock *sk)
{
	return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
}

static inline bool wq_has_sleeper(struct socket_wq *wq)
{
	smp_mb();
	return wq && waitqueue_active(&wq->wait);
}
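
/*
 * wq_has_sleeper() is used by the wake-up side to decide whether anyone is
 * waiting on the socket's wait queue.  Its smp_mb() pairs with the barrier in
 * sock_poll_wait() below: the sleeper adds itself to the queue and then tests
 * the socket state, the waker changes the state and then tests the queue, so
 * with the paired barriers at least one side observes the other and a wake-up
 * cannot be lost.  Wake-up callbacks typically do something like:
 *
 *	rcu_read_lock();
 *	wq = rcu_dereference(sk->sk_wq);
 *	if (wq_has_sleeper(wq))
 *		wake_up_interruptible(&wq->wait);
 *	rcu_read_unlock();
 *
 * which is a condensed version of what the default sk_data_ready and
 * sk_write_space callbacks in net/core/sock.c do.
 */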

static inline void sock_poll_wait(struct file *filp,
		wait_queue_head_t *wait_address, poll_table *p)
{
	if (p && wait_address) {
		poll_wait(filp, wait_address, p);
		smp_mb();
	}
}

static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_wfree;
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
}

static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
}

extern void sk_reset_timer(struct sock *sk, struct timer_list *timer,
			   unsigned long expires);

extern void sk_stop_timer(struct sock *sk, struct timer_list *timer);

extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

extern int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);

static inline int sock_error(struct sock *sk)
{
	int err;
	if (likely(!sk->sk_err))
		return 0;
	err = xchg(&sk->sk_err, 0);
	return -err;
}
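
/*
 * sock_error() atomically fetches and clears the pending socket error
 * (sk_err) via xchg() and returns it as a negative errno, so a pending error
 * is reported to exactly one caller; 0 means no error was pending.
 */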

static inline unsigned long sock_wspace(struct sock *sk)
{
	int amt = 0;

	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
		if (amt < 0)
			amt = 0;
	}
	return amt;
}

static inline void sk_wake_async(struct sock *sk, int how, int band)
{
	if (sock_flag(sk, SOCK_FASYNC))
		sock_wake_async(sk->sk_socket, how, band);
}

#define SOCK_MIN_SNDBUF 2048
#define SOCK_MIN_RCVBUF (2048 + sizeof(struct sk_buff))

static inline void sk_stream_moderate_sndbuf(struct sock *sk)
{
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
		sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
	}
}

struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);

static inline struct page *sk_stream_alloc_page(struct sock *sk)
{
	struct page *page = NULL;

	page = alloc_pages(sk->sk_allocation, 0);
	if (!page) {
		sk->sk_prot->enter_memory_pressure(sk);
		sk_stream_moderate_sndbuf(sk);
	}
	return page;
}

static inline int sock_writeable(const struct sock *sk)
{
	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
}

static inline gfp_t gfp_any(void)
{
	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}

static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_rcvtimeo;
}

static inline long sock_sndtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_sndtimeo;
}

static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
	return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
}

static inline int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}

extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
	struct sk_buff *skb);

static __inline__ void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
	ktime_t kt = skb->tstamp;
	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);

	if (sock_flag(sk, SOCK_RCVTSTAMP) ||
	    sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE) ||
	    (kt.tv64 && sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE)) ||
	    (hwtstamps->hwtstamp.tv64 &&
	     sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE)) ||
	    (hwtstamps->syststamp.tv64 &&
	     sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE)))
		__sock_recv_timestamp(msg, sk, skb);
	else
		sk->sk_stamp = kt;
}

extern void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
				     struct sk_buff *skb);

static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
					  struct sk_buff *skb)
{
#define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL)			| \
			   (1UL << SOCK_RCVTSTAMP)			| \
			   (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)	| \
			   (1UL << SOCK_TIMESTAMPING_SOFTWARE)		| \
			   (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE)	| \
			   (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE))

	if (sk->sk_flags & FLAGS_TS_OR_DROPS)
		__sock_recv_ts_and_drops(msg, sk, skb);
	else
		sk->sk_stamp = skb->tstamp;
}
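
/*
 * sock_recv_timestamp()/sock_recv_ts_and_drops() decide, per received skb,
 * whether the slow path is needed: if the socket asked for receive
 * timestamps (SOCK_RCVTSTAMP and the SOCK_TIMESTAMPING_* flags) or for the
 * dropped-packet counter (SOCK_RXQ_OVFL), the __sock_recv_*() helpers
 * generate the corresponding control messages for recvmsg(); otherwise the
 * packet's timestamp is only remembered in sk_stamp, which is what the
 * SIOCGSTAMP/SIOCGSTAMPNS ioctls report.
 */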

extern int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags);

#ifdef CONFIG_NET_DMA
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	if (!copied_early)
		__kfree_skb(skb);
	else
		__skb_queue_tail(&sk->sk_async_wait_queue, skb);
}
#else
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}
#endif

static inline
struct net *sock_net(const struct sock *sk)
{
	return read_pnet(&sk->sk_net);
}

static inline
void sock_net_set(struct sock *sk, struct net *net)
{
	write_pnet(&sk->sk_net, net);
}

static inline void sk_change_net(struct sock *sk, struct net *net)
{
	put_net(sock_net(sk));
	sock_net_set(sk, hold_net(net));
}

static inline struct sock *skb_steal_sock(struct sk_buff *skb)
{
	if (unlikely(skb->sk)) {
		struct sock *sk = skb->sk;

		skb->destructor = NULL;
		skb->sk = NULL;
		return sk;
	}
	return NULL;
}

extern void sock_enable_timestamp(struct sock *sk, int flag);
extern int sock_get_timestamp(struct sock *, struct timeval __user *);
extern int sock_get_timestampns(struct sock *, struct timespec __user *);

extern int net_msg_warn;
#define NETDEBUG(fmt, args...) \
	do { if (net_msg_warn) printk(fmt, ##args); } while (0)

#define LIMIT_NETDEBUG(fmt, args...) \
	do { if (net_msg_warn && net_ratelimit()) printk(fmt, ##args); } while (0)

extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

extern void sk_init(void);

extern int sysctl_optmem_max;

extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;

#endif	/* _SOCK_H */