1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40#ifndef _SOCK_H
41#define _SOCK_H
42
43#include <linux/kernel.h>
44#include <linux/list.h>
45#include <linux/timer.h>
46#include <linux/cache.h>
47#include <linux/module.h>
48#include <linux/lockdep.h>
49#include <linux/netdevice.h>
50#include <linux/skbuff.h>
51#include <linux/mm.h>
52#include <linux/security.h>
53
54#include <linux/filter.h>
55
56#include <asm/atomic.h>
57#include <net/dst.h>
58#include <net/checksum.h>
59#include <net/net_namespace.h>
60
61
62
63
64
65
66
67
/* Define SOCK_DEBUGGING to compile in the per-socket SOCK_DBG debug
 * printk facility below.  It is on by default here. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
/* Emit a KERN_DEBUG message, but only for a non-NULL socket that has the
 * SOCK_DBG flag set (see sock_flag()/enum sock_flags below). */
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
/* Compiled out, but still a full statement so call sites need no #ifdef. */
#define SOCK_DEBUG(sk, msg...) do { } while (0)
#endif
75
76
77
78
79
/*
 * socket_lock_t - the hybrid socket lock.
 *
 * @slock: spinlock taken in softirq/BH context (bh_lock_sock() below) and
 *         used to serialize updates to @owned.
 * @owned: non-zero while a process context owns the socket via lock_sock();
 *         tested by sock_owned_by_user().
 * @wq:    wait queue of tasks sleeping until @owned is released.
 * @dep_map: lockdep validation map, only present with CONFIG_DEBUG_LOCK_ALLOC
 *         (initialized in sock_lock_init_class_and_name() below).
 */
typedef struct {
	spinlock_t		slock;
	int			owned;
	wait_queue_head_t	wq;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} socket_lock_t;

/* Forward declarations; full definitions appear later in this header. */
struct sock;
struct proto;
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
/**
 * struct sock_common - minimal network-layer representation of a socket
 * @skc_family: network address family
 * @skc_state: connection state
 * @skc_reuse: address-reuse setting (SO_REUSEADDR style; confirm with callers)
 * @skc_bound_dev_if: bound device ifindex, or 0 if unbound
 * @skc_node: main hash linkage for protocol lookup tables
 * @skc_bind_node: bind hash linkage for protocol lookup tables
 * @skc_refcnt: reference count (see sock_hold()/sock_put())
 * @skc_hash: hash value used with the protocol lookup tables
 * @skc_prot: protocol handlers within a network family
 * @skc_net: network namespace this socket belongs to
 *
 * This is the shared header embedded first in struct sock (aliased there
 * via the sk_* #defines), so protocol-independent code can operate on it.
 */
struct sock_common {
	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse;
	int			skc_bound_dev_if;
	struct hlist_node	skc_node;
	struct hlist_node	skc_bind_node;
	atomic_t		skc_refcnt;
	unsigned int		skc_hash;
	struct proto		*skc_prot;
	struct net		*skc_net;
};
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
/*
 * struct sock - network-layer representation of a socket.
 *
 * Layout note: __sk_common must remain the FIRST member so that code can
 * cast between struct sock and struct sock_common; the #defines directly
 * below it alias the shared fields under the conventional sk_* names.
 * Do not add anything before __sk_common.
 */
struct sock {
	struct sock_common	__sk_common;
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_node			__sk_common.skc_node
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_refcnt		__sk_common.skc_refcnt
#define sk_hash			__sk_common.skc_hash
#define sk_prot			__sk_common.skc_prot
#define sk_net			__sk_common.skc_net
	/* Bitfields: shutdown mask (RCV/SEND_SHUTDOWN), checksum-disable
	 * policy, and SOCK_*_LOCK userlock bits (see #defines later on). */
	unsigned char		sk_shutdown : 2,
				sk_no_check : 2,
				sk_userlocks : 4;
	unsigned char		sk_protocol;	/* protocol within the family */
	unsigned short		sk_type;	/* socket type (SOCK_STREAM etc.) */
	int			sk_rcvbuf;	/* receive buffer size limit (bytes) */
	socket_lock_t		sk_lock;	/* hybrid process/BH lock, see above */
	/* Packets queued while the socket is owned by process context;
	 * spliced off and processed by release_sock() via sk_backlog_rcv. */
	struct {
		struct sk_buff *head;
		struct sk_buff *tail;
	} sk_backlog;
	wait_queue_head_t	*sk_sleep;	/* parent socket's wait queue (sock_graft) */
	struct dst_entry	*sk_dst_cache;	/* cached route, guarded by sk_dst_lock */
	struct xfrm_policy	*sk_policy[2];	/* per-direction IPsec policy */
	rwlock_t		sk_dst_lock;
	atomic_t		sk_rmem_alloc;	/* receive queue memory charged */
	atomic_t		sk_wmem_alloc;	/* transmit queue memory charged */
	atomic_t		sk_omem_alloc;	/* "other" (option) memory charged */
	int			sk_sndbuf;	/* send buffer size limit (bytes) */
	struct sk_buff_head	sk_receive_queue;
	struct sk_buff_head	sk_write_queue;
	struct sk_buff_head	sk_async_wait_queue;	/* DMA-offloaded skbs (CONFIG_NET_DMA) */
	int			sk_wmem_queued;	/* persistent write-queue size */
	int			sk_forward_alloc;	/* pre-charged, not-yet-used memory */
	gfp_t			sk_allocation;	/* allocation mode for this socket */
	int			sk_route_caps;	/* route capability flags (NETIF_F_*) */
	int			sk_gso_type;	/* GSO type expected by sk_can_gso() */
	int			sk_rcvlowat;	/* SO_RCVLOWAT watermark */
	unsigned long 		sk_flags;	/* bitmap of enum sock_flags */
	unsigned long	        sk_lingertime;
	struct sk_buff_head	sk_error_queue;	/* fed by sock_queue_err_skb() */
	struct proto		*sk_prot_creator;	/* protocol that allocated this sock */
	rwlock_t		sk_callback_lock;	/* guards sk_socket/sk_sleep/callbacks */
	int			sk_err,		/* last error, consumed by sock_error() */
				sk_err_soft;	/* transient errors that don't kill the socket */
	unsigned short		sk_ack_backlog;		/* current accept-queue length */
	unsigned short		sk_max_ack_backlog;	/* listen() backlog limit */
	__u32			sk_priority;
	struct ucred		sk_peercred;	/* credentials of the peer (confirm setter) */
	long			sk_rcvtimeo;	/* SO_RCVTIMEO, in jiffies */
	long			sk_sndtimeo;	/* SO_SNDTIMEO, in jiffies */
	struct sk_filter      	*sk_filter;	/* BPF socket filter, RCU-protected */
	void			*sk_protinfo;	/* protocol private data */
	struct timer_list	sk_timer;
	ktime_t			sk_stamp;	/* time of last received packet */
	struct socket		*sk_socket;	/* owning struct socket, NULL once orphaned */
	void			*sk_user_data;
	struct page		*sk_sndmsg_page;	/* cached page for sendmsg */
	struct sk_buff		*sk_send_head;
	__u32			sk_sndmsg_off;
	int			sk_write_pending;
	void			*sk_security;	/* LSM security blob */
	/* State-change callbacks, invoked by the protocol layer: */
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk, int bytes);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
	void                    (*sk_destruct)(struct sock *sk);
};
267
268
269
270
/* Return the first socket on @head; caller must know the list is
 * non-empty (no NULL check). */
static inline struct sock *__sk_head(const struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

/* Return the first socket on @head, or NULL if the list is empty. */
static inline struct sock *sk_head(const struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

/* Return the socket following @sk on its hash list, or NULL at the end. */
static inline struct sock *sk_next(const struct sock *sk)
{
	return sk->sk_node.next ?
		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
}

/* True if @sk is not currently on any main hash list. */
static inline int sk_unhashed(const struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

/* True if @sk is on a main hash list. */
static inline int sk_hashed(const struct sock *sk)
{
	return !sk_unhashed(sk);
}

/* Mark @node as unhashed so hlist_unhashed() reports it correctly. */
static __inline__ void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

/* Unlink @sk from its hash list without re-initializing the node. */
static __inline__ void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

/* Unlink @sk and reset its node; returns 1 if it was hashed, 0 otherwise.
 * Does NOT drop the list's reference — see sk_del_node_init() for that. */
static __inline__ int __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return 1;
	}
	return 0;
}
316
317
318
319
320
321
322
/* Grab a reference on @sk.  Caller must already hold one (or otherwise
 * guarantee the socket cannot be freed concurrently). */
static inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->sk_refcnt);
}

/* Drop a reference that is known not to be the last one (no free path);
 * use sock_put() when the reference might be the final one. */
static inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->sk_refcnt);
}

/* Unhash @sk and drop the reference the hash list held on it.
 * Returns 1 if the socket was actually on a list. */
static __inline__ int sk_del_node_init(struct sock *sk)
{
	int rc = __sk_del_node_init(sk);

	if (rc) {
		/* The list's reference must not have been the last one. */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

/* Add @sk to hash list @list without taking a reference. */
static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

/* Add @sk to hash list @list; the list holds a reference on the socket. */
static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

/* Unlink @sk from its bind hash list (no refcounting on bind lists). */
static __inline__ void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

/* Add @sk to bind hash list @list (no refcounting on bind lists). */
static __inline__ void sk_add_bind_node(struct sock *sk,
					struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}
369
/* Iterators over socket hash lists, thin wrappers around the hlist
 * iterators keyed on sk_node / sk_bind_node.  The _from/_continue forms
 * guard against a NULL starting socket before seeding @node. */
#define sk_for_each(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_node)
#define sk_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_from(__sk, node, sk_node)
#define sk_for_each_continue(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_continue(__sk, node, sk_node)
/* _safe variant tolerates deletion of the current entry via @tmp. */
#define sk_for_each_safe(__sk, node, tmp, list) \
	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
#define sk_for_each_bound(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_bind_node)
382
383
/* Bit numbers for sk->sk_flags; tested/set via the sock_flag() helpers
 * below.  (Semantics noted where clear from use in this header; others
 * follow the usual socket-option meanings — confirm at call sites.) */
enum sock_flags {
	SOCK_DEAD,		/* socket detached from its struct socket (sock_orphan) */
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE,
	SOCK_DBG,		/* enables SOCK_DEBUG() output for this socket */
	SOCK_RCVTSTAMP,		/* deliver rx timestamps (sock_recv_timestamp) */
	SOCK_RCVTSTAMPNS,
	SOCK_LOCALROUTE,
	SOCK_QUEUE_SHRUNK,	/* write queue was shrunk (sk_stream_free_skb) */
};
401
/* Copy the whole flag word from @osk to @nsk (used when cloning). */
static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
	nsk->sk_flags = osk->sk_flags;
}

/* Set @flag; uses the non-atomic __set_bit, so callers must serialize
 * concurrent flag updates (e.g. own the socket lock). */
static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

/* Clear @flag; same non-atomic caveat as sock_set_flag(). */
static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

/* Test whether @flag is set on @sk. */
static inline int sock_flag(struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}
421
/* Accept-queue accounting for listening sockets.  These counters are not
 * atomic; callers presumably serialize via the socket lock — confirm. */
static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

/* True when the accept queue has exceeded the listen() backlog limit. */
static inline int sk_acceptq_is_full(struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}
436
437
438
439
/* Minimum write space needed before waking writers: half of what is
 * currently queued. */
static inline int sk_stream_min_wspace(struct sock *sk)
{
	return sk->sk_wmem_queued / 2;
}

/* Remaining send-buffer space in bytes (can go negative transiently). */
static inline int sk_stream_wspace(struct sock *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

extern void sk_stream_write_space(struct sock *sk);

/* True while the queued write data is still under the send-buffer limit. */
static inline int sk_stream_memory_free(struct sock *sk)
{
	return sk->sk_wmem_queued < sk->sk_sndbuf;
}
456
extern void sk_stream_rfree(struct sk_buff *skb);

/* Charge a received skb to @sk: bump sk_rmem_alloc by its true size,
 * consume pre-charged forward_alloc, and arrange for sk_stream_rfree to
 * undo the accounting when the skb is freed. */
static inline void sk_stream_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sk_stream_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk->sk_forward_alloc -= skb->truesize;
}

/* Free a write-queue skb: return its memory to forward_alloc, note that
 * the queue shrank (SOCK_QUEUE_SHRUNK), and free the skb directly. */
static inline void sk_stream_free_skb(struct sock *sk, struct sk_buff *skb)
{
	skb_truesize_check(skb);
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
	sk->sk_wmem_queued   -= skb->truesize;
	sk->sk_forward_alloc += skb->truesize;
	__kfree_skb(skb);
}
475
476
477static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
478{
479 if (!sk->sk_backlog.tail) {
480 sk->sk_backlog.head = sk->sk_backlog.tail = skb;
481 } else {
482 sk->sk_backlog.tail->next = skb;
483 sk->sk_backlog.tail = skb;
484 }
485 skb->next = NULL;
486}
487
/* Sleep until @__condition becomes true or *@__timeo expires.  Drops the
 * socket lock while sleeping and re-takes it before re-testing, so the
 * condition is always evaluated with the lock held on return.  Updates
 * *@__timeo with the remaining time; evaluates to the final condition. */
#define sk_wait_event(__sk, __timeo, __condition)		\
	({	int __rc;					\
		release_sock(__sk);				\
		__rc = __condition;				\
		if (!__rc) {					\
			*(__timeo) = schedule_timeout(*(__timeo));	\
		}						\
		lock_sock(__sk);				\
		__rc = __condition;				\
		__rc;						\
	})
499
500extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
501extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
502extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
503extern int sk_stream_error(struct sock *sk, int flags, int err);
504extern void sk_stream_kill_queues(struct sock *sk);
505
506extern int sk_wait_data(struct sock *sk, long *timeo);
507
508struct request_sock_ops;
509struct timewait_sock_ops;
510
511
512
513
514
/*
 * struct proto - per-protocol operations and global accounting.
 *
 * Networking protocols (TCP, UDP, ...) fill in the function table and
 * register themselves with proto_register().  Optional hooks may be NULL
 * depending on the protocol — confirm with the callers of each hook.
 */
struct proto {
	void			(*close)(struct sock *sk, 
					long timeout);
	int			(*connect)(struct sock *sk,
				        struct sockaddr *uaddr, 
					int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept) (struct sock *sk, int flags, int *err);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	int			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level, 
					int optname, char __user *optval,
					int optlen);
	int			(*getsockopt)(struct sock *sk, int level, 
					int optname, char __user *optval, 
					int __user *option);  	 
	/* 32-bit-compat variants of the sockopt handlers. */
	int			(*compat_setsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					int optlen);
	int			(*compat_getsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					int __user *option);
	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg, size_t len);
	int			(*recvmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg,
					size_t len, int noblock, int flags, 
					int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk, 
					struct sockaddr *uaddr, int addr_len);

	/* Handler for packets deferred to the backlog (see sk_add_backlog). */
	int			(*backlog_rcv) (struct sock *sk, 
						struct sk_buff *skb);

	/* Keeping track of sk's, looking them up, and port selection/inheritance. */
	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

#ifdef CONFIG_SMP
	/* Per-protocol "in use" counters; on SMP these are per-CPU and
	 * accessed via the hooks generated by DEFINE_PROTO_INUSE(). */
	void			(*inuse_add)(struct proto *prot, int inc);
	int			(*inuse_getval)(const struct proto *prot);
	int			*inuse_ptr;
#else
	int			inuse;
#endif

	/* Memory pressure: protocol-global accounting and thresholds. */
	void			(*enter_memory_pressure)(void);
	atomic_t		*memory_allocated;	/* Current allocated memory. */
	atomic_t		*sockets_allocated;	/* Current number of sockets. */
	int			*memory_pressure;	/* flag: under pressure */
	int			*sysctl_mem;		/* min/pressure/max limits */
	int			*sysctl_wmem;
	int			*sysctl_rmem;
	int			max_header;		/* largest protocol header, see sk_stream_alloc_pskb */

	struct kmem_cache	*slab;		/* cache for struct sock allocation */
	unsigned int		obj_size;	/* size of the protocol's sock struct */

	atomic_t		*orphan_count;

	struct request_sock_ops	*rsk_prot;
	struct timewait_sock_ops *twsk_prot;

	struct module		*owner;

	char			name[32];

	struct list_head	node;	/* linkage on the global protocol list */
#ifdef SOCK_REFCNT_DEBUG
	atomic_t		socks;	/* live-socket counter for leak debugging */
#endif
};
604
605
606
607
608
609
610
#ifdef CONFIG_SMP
/* Generate per-CPU "in use" accounting for protocol NAME: a per-CPU
 * counter plus the add/getval hooks wired into struct proto via
 * REF_PROTO_INUSE().  The getval hook sums across all possible CPUs. */
# define DEFINE_PROTO_INUSE(NAME)			\
static DEFINE_PER_CPU(int, NAME##_inuse);		\
static void NAME##_inuse_add(struct proto *prot, int inc)	\
{							\
	__get_cpu_var(NAME##_inuse) += inc;		\
}							\
							\
static int NAME##_inuse_getval(const struct proto *prot)\
{							\
	int res = 0, cpu;				\
							\
	for_each_possible_cpu(cpu)			\
		res += per_cpu(NAME##_inuse, cpu);	\
	return res;					\
}
/* Struct-initializer fragment referencing the generated hooks. */
# define REF_PROTO_INUSE(NAME)				\
	.inuse_add = NAME##_inuse_add,			\
	.inuse_getval = NAME##_inuse_getval,
#else
/* UP build: struct proto carries a plain int counter instead. */
# define DEFINE_PROTO_INUSE(NAME)
# define REF_PROTO_INUSE(NAME)
#endif
634
635extern int proto_register(struct proto *prot, int alloc_slab);
636extern void proto_unregister(struct proto *prot);
637
#ifdef SOCK_REFCNT_DEBUG
/* Socket-leak debugging: count live sockets per protocol and report
 * releases and delayed destruction. */
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
	atomic_inc(&sk->sk_prot->socks);
}

static inline void sk_refcnt_debug_dec(struct sock *sk)
{
	atomic_dec(&sk->sk_prot->socks);
	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
}

/* Warn when a socket is being released while references remain. */
static inline void sk_refcnt_debug_release(const struct sock *sk)
{
	if (atomic_read(&sk->sk_refcnt) != 1)
		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
		       sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
}
#else
/* Compiled out unless SOCK_REFCNT_DEBUG is defined. */
#define sk_refcnt_debug_inc(sk) do { } while (0)
#define sk_refcnt_debug_dec(sk) do { } while (0)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif
662
663
/* Bump the protocol's "sockets in use" counter (per-CPU on SMP via the
 * inuse_add hook, plain int on UP). */
static __inline__ void sock_prot_inc_use(struct proto *prot)
{
#ifdef CONFIG_SMP
	prot->inuse_add(prot, 1);
#else
	prot->inuse++;
#endif
}

/* Decrement the protocol's "sockets in use" counter. */
static __inline__ void sock_prot_dec_use(struct proto *prot)
{
#ifdef CONFIG_SMP
	prot->inuse_add(prot, -1);
#else
	prot->inuse--;
#endif
}

/* Read the counter; on SMP this sums the per-CPU values and may be
 * transiently inexact while counts change. */
static __inline__ int sock_prot_inuse(struct proto *proto)
{
#ifdef CONFIG_SMP
	return proto->inuse_getval(proto);
#else
	return proto->inuse;
#endif
}
690
691
692
693
/* Re-insert @sk into its protocol's lookup table, e.g. after a field
 * feeding the hash changed.  NOTE(review): unhash/hash are not atomic as
 * a pair — caller must prevent concurrent lookups from racing; confirm
 * locking at call sites. */
static inline void __sk_prot_rehash(struct sock *sk)
{
	sk->sk_prot->unhash(sk);
	sk->sk_prot->hash(sk);
}
699
700
/* Time to linger before forcibly destroying a socket. */
#define SOCK_DESTROY_TIME (10*HZ)

/* Ports below this are reserved for privileged users. */
#define PROT_SOCK	1024

/* Values for sk_shutdown (2-bit field in struct sock). */
#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

/* Bits for sk_userlocks: which limits/bindings userspace pinned. */
#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8

/* Per-request state carried through an (async) socket I/O operation;
 * stored in kiocb->private (see kiocb_to_siocb below). */
struct sock_iocb {
	struct list_head	list;

	int			flags;
	int			size;
	struct socket		*sock;
	struct sock		*sk;
	struct scm_cookie	*scm;
	struct msghdr		*msg, async_msg;
	struct kiocb		*kiocb;
};
727
/* A struct sock_iocb rides in the kiocb's private pointer; convert in
 * both directions. */
static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
{
	return (struct sock_iocb *)iocb->private;
}

static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
{
	return si->kiocb;
}
737
/* A socket and its VFS inode are allocated together so each can be
 * recovered from the other with container_of(). */
struct socket_alloc {
	struct socket socket;
	struct inode vfs_inode;
};

/* Map a socket inode back to its struct socket. */
static inline struct socket *SOCKET_I(struct inode *inode)
{
	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
}

/* Map a struct socket to its backing inode. */
static inline struct inode *SOCK_INODE(struct socket *socket)
{
	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}
752
extern void __sk_stream_mem_reclaim(struct sock *sk);
extern int sk_stream_mem_schedule(struct sock *sk, int size, int kind);

/* Stream memory is charged to the protocol in page-sized quanta. */
#define SK_STREAM_MEM_QUANTUM ((int)PAGE_SIZE)

/* Number of quanta needed to cover @amt bytes (rounded up). */
static inline int sk_stream_pages(int amt)
{
	return DIV_ROUND_UP(amt, SK_STREAM_MEM_QUANTUM);
}

/* Return surplus pre-charged memory to the protocol's global pool once a
 * full quantum has accumulated. */
static inline void sk_stream_mem_reclaim(struct sock *sk)
{
	if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM)
		__sk_stream_mem_reclaim(sk);
}

/* Can @skb be charged to the receive side?  Either it fits in the
 * pre-charged forward_alloc or we schedule more (kind==1: receive). */
static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
{
	return (int)skb->truesize <= sk->sk_forward_alloc ||
		sk_stream_mem_schedule(sk, skb->truesize, 1);
}

/* Same for @size bytes on the send side (kind==0: send). */
static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
{
	return size <= sk->sk_forward_alloc ||
		sk_stream_mem_schedule(sk, size, 0);
}
780
781
782
783
784
785
786
787
788
789
790
791
792
793
/* True while a process context holds the socket via lock_sock(). */
#define sock_owned_by_user(sk)	((sk)->sk_lock.owned)

/* Initialize a socket's hybrid lock, giving the spinlock and the sleep
 * lock their own lockdep classes/names (to keep per-family lock classes
 * distinct for lockdep). */
#define sock_lock_init_class_and_name(sk, sname, skey, name, key) 	\
do {									\
	sk->sk_lock.owned = 0;					\
	init_waitqueue_head(&sk->sk_lock.wq);				\
	spin_lock_init(&(sk)->sk_lock.slock);				\
	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
			sizeof((sk)->sk_lock));				\
	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
			(skey), (sname));				\
	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
} while (0)

extern void FASTCALL(lock_sock_nested(struct sock *sk, int subclass));

/* Take process-context ownership of the socket (may sleep). */
static inline void lock_sock(struct sock *sk)
{
	lock_sock_nested(sk, 0);
}

extern void FASTCALL(release_sock(struct sock *sk));

/* BH context locking of the socket spinlock.  The _nested form is for
 * the one legitimate case of taking two socket spinlocks at once. */
#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
#define bh_lock_sock_nested(__sk) \
				spin_lock_nested(&((__sk)->sk_lock.slock), \
				SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))
830
831extern struct sock *sk_alloc(struct net *net, int family,
832 gfp_t priority,
833 struct proto *prot);
834extern void sk_free(struct sock *sk);
835extern struct sock *sk_clone(const struct sock *sk,
836 const gfp_t priority);
837
838extern struct sk_buff *sock_wmalloc(struct sock *sk,
839 unsigned long size, int force,
840 gfp_t priority);
841extern struct sk_buff *sock_rmalloc(struct sock *sk,
842 unsigned long size, int force,
843 gfp_t priority);
844extern void sock_wfree(struct sk_buff *skb);
845extern void sock_rfree(struct sk_buff *skb);
846
847extern int sock_setsockopt(struct socket *sock, int level,
848 int op, char __user *optval,
849 int optlen);
850
851extern int sock_getsockopt(struct socket *sock, int level,
852 int op, char __user *optval,
853 int __user *optlen);
854extern struct sk_buff *sock_alloc_send_skb(struct sock *sk,
855 unsigned long size,
856 int noblock,
857 int *errcode);
858extern void *sock_kmalloc(struct sock *sk, int size,
859 gfp_t priority);
860extern void sock_kfree_s(struct sock *sk, void *mem, int size);
861extern void sk_send_sigurg(struct sock *sk);
862
863
864
865
866
867extern int sock_no_bind(struct socket *,
868 struct sockaddr *, int);
869extern int sock_no_connect(struct socket *,
870 struct sockaddr *, int, int);
871extern int sock_no_socketpair(struct socket *,
872 struct socket *);
873extern int sock_no_accept(struct socket *,
874 struct socket *, int);
875extern int sock_no_getname(struct socket *,
876 struct sockaddr *, int *, int);
877extern unsigned int sock_no_poll(struct file *, struct socket *,
878 struct poll_table_struct *);
879extern int sock_no_ioctl(struct socket *, unsigned int,
880 unsigned long);
881extern int sock_no_listen(struct socket *, int);
882extern int sock_no_shutdown(struct socket *, int);
883extern int sock_no_getsockopt(struct socket *, int , int,
884 char __user *, int __user *);
885extern int sock_no_setsockopt(struct socket *, int, int,
886 char __user *, int);
887extern int sock_no_sendmsg(struct kiocb *, struct socket *,
888 struct msghdr *, size_t);
889extern int sock_no_recvmsg(struct kiocb *, struct socket *,
890 struct msghdr *, size_t, int);
891extern int sock_no_mmap(struct file *file,
892 struct socket *sock,
893 struct vm_area_struct *vma);
894extern ssize_t sock_no_sendpage(struct socket *sock,
895 struct page *page,
896 int offset, size_t size,
897 int flags);
898
899
900
901
902
903extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
904 char __user *optval, int __user *optlen);
905extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
906 struct msghdr *msg, size_t size, int flags);
907extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
908 char __user *optval, int optlen);
909extern int compat_sock_common_getsockopt(struct socket *sock, int level,
910 int optname, char __user *optval, int __user *optlen);
911extern int compat_sock_common_setsockopt(struct socket *sock, int level,
912 int optname, char __user *optval, int optlen);
913
914extern void sk_common_release(struct sock *sk);
915
916
917
918
919
920
921extern void sock_init_data(struct socket *sock, struct sock *sk);
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
/*
 * Run the socket's attached BPF filter (and LSM hook) against @skb.
 *
 * Returns 0 to accept, -EPERM if the filter dropped the packet, or the
 * security hook's error.  On acceptance the skb may be trimmed to the
 * length the filter returned.  The filter pointer is read under
 * rcu_read_lock_bh(), matching RCU-managed updates of sk->sk_filter.
 */
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock_bh();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = sk_run_filter(skb, filter->insns,
				filter->len);
		/* Filter verdict 0 means drop; otherwise trim to pkt_len. */
		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock_bh();

	return err;
}
957
958
959
960
961
962
963
964
965
/* Drop a reference on @fp, freeing it when the last reference goes. */
static inline void sk_filter_release(struct sk_filter *fp)
{
	if (atomic_dec_and_test(&fp->refcnt))
		kfree(fp);
}

/* Uncharge the filter's memory from the socket's option-memory budget
 * and drop the socket's reference on it. */
static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	atomic_sub(size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

/* Take a reference on @fp and charge its memory to the socket. */
static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	atomic_inc(&fp->refcnt);
	atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
}
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
/* Drop a reference on @sk; frees the socket when the count hits zero. */
static inline void sock_put(struct sock *sk)
{
	if (atomic_dec_and_test(&sk->sk_refcnt))
		sk_free(sk);
}
1017
1018extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
1019 const int nested);
1020
1021
1022
1023
1024
1025
1026
1027
/* Detach @sk from its struct socket: mark it dead and clear the back
 * pointers under sk_callback_lock so callback users see a consistent
 * state. */
static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sock_set_flag(sk, SOCK_DEAD);
	sk->sk_socket = NULL;
	sk->sk_sleep  = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

/* Attach @sk to @parent (the VFS-level socket): wire up the wait queue
 * and mutual pointers, again under sk_callback_lock. */
static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_sleep = &parent->wait;
	parent->sk = sk;
	sk->sk_socket = parent;
	security_sock_graft(sk, parent);
	write_unlock_bh(&sk->sk_callback_lock);
}
1046
1047extern int sock_i_uid(struct sock *sk);
1048extern unsigned long sock_i_ino(struct sock *sk);
1049
/* Lockless read of the cached route; caller must deal with the lack of a
 * reference/lock (the sk_dst_get() variant below takes both). */
static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
	return sk->sk_dst_cache;
}

/* Read the cached route under sk_dst_lock and return it with a
 * reference held (caller releases with dst_release), or NULL. */
static inline struct dst_entry *
sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	read_lock(&sk->sk_dst_lock);
	dst = sk->sk_dst_cache;
	if (dst)
		dst_hold(dst);
	read_unlock(&sk->sk_dst_lock);
	return dst;
}

/* Replace the cached route, releasing the old one; caller must hold
 * sk_dst_lock for writing (see sk_dst_set). */
static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = dst;
	dst_release(old_dst);
}

/* Locked variant of __sk_dst_set(). */
static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_set(sk, dst);
	write_unlock(&sk->sk_dst_lock);
}

/* Drop the cached route (lock held by caller). */
static inline void
__sk_dst_reset(struct sock *sk)
{
	struct dst_entry *old_dst;

	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = NULL;
	dst_release(old_dst);
}

/* Locked variant of __sk_dst_reset(). */
static inline void
sk_dst_reset(struct sock *sk)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_reset(sk);
	write_unlock(&sk->sk_dst_lock);
}
1104
1105extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
1106
1107extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
1108
/* True if the route's capabilities allow this socket's GSO type. */
static inline int sk_can_gso(const struct sock *sk)
{
	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
}

extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);

/* Account @skb against the socket's write queue: its true size joins
 * sk_wmem_queued and is consumed from the pre-charged forward_alloc. */
static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb)
{
	sk->sk_wmem_queued += skb->truesize;
	sk->sk_forward_alloc -= skb->truesize;
}
1121
/*
 * Copy @copy bytes of user data into @page at @off for transmit via @skb.
 *
 * If the skb needs a software checksum (CHECKSUM_NONE) the copy and the
 * checksum are computed in one pass and folded into skb->csum; otherwise
 * a plain copy_from_user suffices.  On success the skb's length/truesize
 * and the socket's write-memory accounting are updated.
 *
 * Returns 0 on success or a negative errno on fault.
 */
static inline int skb_copy_to_page(struct sock *sk, char __user *from,
				   struct sk_buff *skb, struct page *page,
				   int off, int copy)
{
	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from,
						     page_address(page) + off,
							    copy, 0, &err);
		if (err)
			return err;
		skb->csum = csum_block_add(skb->csum, csum, skb->len);
	} else if (copy_from_user(page_address(page) + off, from, copy))
		return -EFAULT;

	skb->len	     += copy;
	skb->data_len	     += copy;
	skb->truesize	     += copy;
	sk->sk_wmem_queued   += copy;
	sk->sk_forward_alloc -= copy;
	return 0;
}
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
/* Charge an outgoing skb to @sk: takes a socket reference (released by
 * the sock_wfree destructor) and adds the skb's true size to the write
 * memory in flight. */
static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = sock_wfree;
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
}

/* Charge a received skb to @sk's receive memory; no socket reference is
 * taken here (sock_rfree only uncharges). */
static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
}
1168
1169extern void sk_reset_timer(struct sock *sk, struct timer_list* timer,
1170 unsigned long expires);
1171
1172extern void sk_stop_timer(struct sock *sk, struct timer_list* timer);
1173
1174extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
1175
/* Queue @skb on the socket's error queue, subject to the receive-buffer
 * limit, and notify the reader unless the socket is already dead.
 * Returns 0 on success or -ENOMEM when the rcvbuf budget is exhausted. */
static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Cast to unsigned so a negative sk_rcvbuf rejects everything. */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		return -ENOMEM;
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->sk_error_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);
	return 0;
}
1190
1191
1192
1193
1194
/* Consume and return the pending socket error as a negative errno, or 0.
 * xchg() atomically clears sk_err so the error is reported exactly once
 * even with concurrent readers; the unlocked pre-check is a fast path. */
static inline int sock_error(struct sock *sk)
{
	int err;
	if (likely(!sk->sk_err))
		return 0;
	err = xchg(&sk->sk_err, 0);
	return -err;
}
1203
1204static inline unsigned long sock_wspace(struct sock *sk)
1205{
1206 int amt = 0;
1207
1208 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
1209 amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
1210 if (amt < 0)
1211 amt = 0;
1212 }
1213 return amt;
1214}
1215
/* Deliver an async (SIGIO-style) notification if anyone registered for
 * fasync on the owning socket. */
static inline void sk_wake_async(struct sock *sk, int how, int band)
{
	if (sk->sk_socket && sk->sk_socket->fasync_list)
		sock_wake_async(sk->sk_socket, how, band);
}
1221
1222#define SOCK_MIN_SNDBUF 2048
1223#define SOCK_MIN_RCVBUF 256
1224
/* Under memory pressure, shrink the send buffer toward half of what is
 * queued, but never below SOCK_MIN_SNDBUF — unless userspace pinned the
 * size with SO_SNDBUF (SOCK_SNDBUF_LOCK). */
static inline void sk_stream_moderate_sndbuf(struct sock *sk)
{
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2);
		sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
	}
}
1232
/*
 * Allocate an skb for stream transmit with room for @size bytes of data
 * plus the protocol's maximum header, charging @mem extra truesize.
 *
 * On success the data area is positioned so exactly @size bytes of
 * tailroom remain (header space reserved up front).  If the memory
 * charge fails the skb is freed; if allocation itself fails the protocol
 * is pushed into memory-pressure mode and the sndbuf is moderated.
 * Returns the skb or NULL.
 */
static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
						   int size, int mem,
						   gfp_t gfp)
{
	struct sk_buff *skb;

	/* Align so the eventual payload starts on a 4-byte boundary. */
	size = ALIGN(size, 4);

	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
	if (skb) {
		skb->truesize += mem;
		if (sk_stream_wmem_schedule(sk, skb->truesize)) {
			/* Leave exactly @size bytes of tailroom. */
			skb_reserve(skb, skb_tailroom(skb) - size);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure();
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

/* Convenience wrapper: no extra truesize charge. */
static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk,
						  int size,
						  gfp_t gfp)
{
	return sk_stream_alloc_pskb(sk, size, 0, gfp);
}
1267
1268static inline struct page *sk_stream_alloc_page(struct sock *sk)
1269{
1270 struct page *page = NULL;
1271
1272 page = alloc_pages(sk->sk_allocation, 0);
1273 if (!page) {
1274 sk->sk_prot->enter_memory_pressure();
1275 sk_stream_moderate_sndbuf(sk);
1276 }
1277 return page;
1278}
1279
1280
1281
1282
/* Writable when less than half the send buffer is in flight. */
static inline int sock_writeable(const struct sock *sk) 
{
	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2);
}

/* Allocation mode appropriate for the current context. */
static inline gfp_t gfp_any(void)
{
	return in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
}

/* Effective receive timeout in jiffies; 0 when non-blocking. */
static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_rcvtimeo;
}

/* Effective send timeout in jiffies; 0 when non-blocking. */
static inline long sock_sndtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_sndtimeo;
}

/* How many bytes must arrive before a receive returns: the full @len
 * when waiting for all data, else min(SO_RCVLOWAT, len), at least 1. */
static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
	return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
}

/* Errno for a signal interruption: -ERESTARTSYS only if the wait had no
 * finite timeout (so the restarted call loses nothing). */
static inline int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}
1315
1316extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
1317 struct sk_buff *skb);
1318
/* Pass the skb's receive timestamp to the user (as a cmsg, via the out-
 * of-line helper) when SOCK_RCVTSTAMP is set; otherwise just remember it
 * in sk_stamp for later SIOCGSTAMP-style queries. */
static __inline__ void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
	ktime_t kt = skb->tstamp;

	if (sock_flag(sk, SOCK_RCVTSTAMP))
		__sock_recv_timestamp(msg, sk, skb);
	else
		sk->sk_stamp = kt;
}
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
#ifdef CONFIG_NET_DMA
/* Remove @skb from the receive queue once consumed.  With NET_DMA, an
 * skb whose copy was offloaded early must stay alive until the DMA
 * completes, so it is parked on sk_async_wait_queue instead of freed. */
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	if (!copied_early)
		__kfree_skb(skb);
	else
		__skb_queue_tail(&sk->sk_async_wait_queue, skb);
}
#else
/* Without NET_DMA the skb is simply unlinked and freed. */
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}
#endif
1355
1356extern void sock_enable_timestamp(struct sock *sk);
1357extern int sock_get_timestamp(struct sock *, struct timeval __user *);
1358extern int sock_get_timestampns(struct sock *, struct timespec __user *);
1359
1360
1361
1362
/* Global switch for networking debug messages (a sysctl elsewhere). */
extern int net_msg_warn;
#define NETDEBUG(fmt, args...) \
	do { if (net_msg_warn) printk(fmt,##args); } while (0)

/* Rate-limited variant for messages an attacker could trigger at will. */
#define LIMIT_NETDEBUG(fmt, args...) \
	do { if (net_msg_warn && net_ratelimit()) printk(fmt,##args); } while(0)
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
/* Legacy sleep bracket: PRE opens a scope, registers the current task on
 * the socket's wait queue and drops the socket lock; POST re-takes the
 * lock, deregisters, and closes the scope.  The pair must be used
 * together in the same function (note the unbalanced braces). */
#define SOCK_SLEEP_PRE(sk) 	{ struct task_struct *tsk = current; \
				DECLARE_WAITQUEUE(wait, tsk); \
				tsk->state = TASK_INTERRUPTIBLE; \
				add_wait_queue((sk)->sk_sleep, &wait); \
				release_sock(sk);

#define SOCK_SLEEP_POST(sk)	tsk->state = TASK_RUNNING; \
				remove_wait_queue((sk)->sk_sleep, &wait); \
				lock_sock(sk); \
				}
1393
1394static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
1395{
1396 if (valbool)
1397 sock_set_flag(sk, bit);
1398 else
1399 sock_reset_flag(sk, bit);
1400}
1401
1402extern __u32 sysctl_wmem_max;
1403extern __u32 sysctl_rmem_max;
1404
1405extern void sk_init(void);
1406
1407#ifdef CONFIG_SYSCTL
1408extern struct ctl_table core_table[];
1409#endif
1410
1411extern int sysctl_optmem_max;
1412
1413extern __u32 sysctl_wmem_default;
1414extern __u32 sysctl_rmem_default;
1415
1416#endif
1417