linux/include/net/sock.h
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Definitions for the AF_INET socket handler.
 *
 * Version:     @(#)sock.h      1.0.4   05/13/93
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 *              Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *              Alan Cox        :       Volatiles in skbuff pointers. See
 *                                      skbuff comments. May be overdone,
 *                                      better to prove they can be removed
 *                                      than the reverse.
 *              Alan Cox        :       Added a zapped field for tcp to note
 *                                      a socket is reset and must stay shut up
 *              Alan Cox        :       New fields for options
 *      Pauline Middelink       :       identd support
 *              Alan Cox        :       Eliminate low level recv/recvfrom
 *              David S. Miller :       New socket lookup architecture.
 *              Steve Whitehouse:       Default routines for sock_ops
 *              Arnaldo C. Melo :       removed net_pinfo, tp_pinfo and made
 *                                      protinfo be just a void pointer, as the
 *                                      protocol specific parts were moved to
 *                                      respective headers and ipv4/v6, etc. now
 *                                      use private slabcaches for their socks
 *              Pedro Hortas    :       New flags field for socket options
 *
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>       /* struct sk_buff */
#include <linux/mm.h>
#include <linux/security.h>

#include <linux/filter.h>

#include <asm/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>
#include <net/net_namespace.h>

/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/* Define this to get the SOCK_DBG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
                                        printk(KERN_DEBUG msg); } while (0)
#else
#define SOCK_DEBUG(sk, msg...) do { } while (0)
#endif
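
/*
 * Example (illustrative, not from this header): a protocol can guard
 * verbose diagnostics with SOCK_DEBUG() so they only fire when the
 * application has enabled SO_DEBUG on that socket.  A hypothetical
 * receive-path message might look like:
 *
 *      SOCK_DEBUG(sk, "rcv: %d bytes queued (rcvbuf %d)\n",
 *                 atomic_read(&sk->sk_rmem_alloc), sk->sk_rcvbuf);
 */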

/* This is the per-socket lock.  The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
typedef struct {
        spinlock_t              slock;
        int                     owned;
        wait_queue_head_t       wq;
        /*
         * We express the mutex-alike socket_lock semantics
         * to the lock validator by explicitly managing
         * the slock as a lock variant (in addition to
         * the slock itself):
         */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
#endif
} socket_lock_t;

struct sock;
struct proto;
/**
 *      struct sock_common - minimal network layer representation of sockets
 *      @skc_family: network address family
 *      @skc_state: Connection state
 *      @skc_reuse: %SO_REUSEADDR setting
 *      @skc_bound_dev_if: bound device index if != 0
 *      @skc_node: main hash linkage for various protocol lookup tables
 *      @skc_bind_node: bind hash linkage for various protocol lookup tables
 *      @skc_refcnt: reference count
 *      @skc_hash: hash value used with various protocol lookup tables
 *      @skc_prot: protocol handlers inside a network family
 *      @skc_net: reference to the network namespace of this socket
 *
 *      This is the minimal network layer representation of sockets, the header
 *      for struct sock and struct inet_timewait_sock.
 */
struct sock_common {
        unsigned short          skc_family;
        volatile unsigned char  skc_state;
        unsigned char           skc_reuse;
        int                     skc_bound_dev_if;
        struct hlist_node       skc_node;
        struct hlist_node       skc_bind_node;
        atomic_t                skc_refcnt;
        unsigned int            skc_hash;
        struct proto            *skc_prot;
        struct net              *skc_net;
};

/**
  *     struct sock - network layer representation of sockets
  *     @__sk_common: shared layout with inet_timewait_sock
  *     @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
  *     @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
  *     @sk_lock:       synchronizer
  *     @sk_rcvbuf: size of receive buffer in bytes
  *     @sk_sleep: sock wait queue
  *     @sk_dst_cache: destination cache
  *     @sk_dst_lock: destination cache lock
  *     @sk_policy: flow policy
  *     @sk_rmem_alloc: receive queue bytes committed
  *     @sk_receive_queue: incoming packets
  *     @sk_wmem_alloc: transmit queue bytes committed
  *     @sk_write_queue: Packet sending queue
  *     @sk_async_wait_queue: DMA copied packets
  *     @sk_omem_alloc: "o" is "option" or "other"
  *     @sk_wmem_queued: persistent queue size
  *     @sk_forward_alloc: space allocated forward
  *     @sk_allocation: allocation mode
  *     @sk_sndbuf: size of send buffer in bytes
  *     @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, %SO_OOBINLINE settings
  *     @sk_no_check: %SO_NO_CHECK setting, whether or not to checksum packets
  *     @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
  *     @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
  *     @sk_lingertime: %SO_LINGER l_linger setting
  *     @sk_backlog: always used with the per-socket spinlock held
  *     @sk_callback_lock: used with the callbacks in the end of this struct
  *     @sk_error_queue: rarely used
  *     @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt, IPV6_ADDRFORM for instance)
  *     @sk_err: last error
  *     @sk_err_soft: errors that don't cause failure but are the cause of a persistent failure, not just 'timed out'
  *     @sk_ack_backlog: current listen backlog
  *     @sk_max_ack_backlog: listen backlog set in listen()
  *     @sk_priority: %SO_PRIORITY setting
  *     @sk_type: socket type (%SOCK_STREAM, etc)
  *     @sk_protocol: which protocol this socket belongs to in this network family
  *     @sk_peercred: %SO_PEERCRED setting
  *     @sk_rcvlowat: %SO_RCVLOWAT setting
  *     @sk_rcvtimeo: %SO_RCVTIMEO setting
  *     @sk_sndtimeo: %SO_SNDTIMEO setting
  *     @sk_filter: socket filtering instructions
  *     @sk_protinfo: private area, net family specific, when not using slab
  *     @sk_timer: sock cleanup timer
  *     @sk_stamp: time stamp of last packet received
  *     @sk_socket: Identd and reporting IO signals
  *     @sk_user_data: RPC layer private data
  *     @sk_sndmsg_page: cached page for sendmsg
  *     @sk_sndmsg_off: cached offset for sendmsg
  *     @sk_send_head: front of stuff to transmit
  *     @sk_security: used by security modules
  *     @sk_write_pending: a write to stream socket waits to start
  *     @sk_state_change: callback to indicate change in the state of the sock
  *     @sk_data_ready: callback to indicate there is data to be processed
  *     @sk_write_space: callback to indicate there is free sending buffer space available
  *     @sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
  *     @sk_backlog_rcv: callback to process the backlog
  *     @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
 */
struct sock {
        /*
         * Now struct inet_timewait_sock also uses sock_common, so please
         * don't add anything before this first member (__sk_common) --acme
         */
        struct sock_common      __sk_common;
#define sk_family               __sk_common.skc_family
#define sk_state                __sk_common.skc_state
#define sk_reuse                __sk_common.skc_reuse
#define sk_bound_dev_if         __sk_common.skc_bound_dev_if
#define sk_node                 __sk_common.skc_node
#define sk_bind_node            __sk_common.skc_bind_node
#define sk_refcnt               __sk_common.skc_refcnt
#define sk_hash                 __sk_common.skc_hash
#define sk_prot                 __sk_common.skc_prot
#define sk_net                  __sk_common.skc_net
        unsigned char           sk_shutdown : 2,
                                sk_no_check : 2,
                                sk_userlocks : 4;
        unsigned char           sk_protocol;
        unsigned short          sk_type;
        int                     sk_rcvbuf;
        socket_lock_t           sk_lock;
        /*
         * The backlog queue is special, it is always used with
         * the per-socket spinlock held and requires low latency
         * access. Therefore we special case its implementation.
         */
        struct {
                struct sk_buff *head;
                struct sk_buff *tail;
        } sk_backlog;
        wait_queue_head_t       *sk_sleep;
        struct dst_entry        *sk_dst_cache;
        struct xfrm_policy      *sk_policy[2];
        rwlock_t                sk_dst_lock;
        atomic_t                sk_rmem_alloc;
        atomic_t                sk_wmem_alloc;
        atomic_t                sk_omem_alloc;
        int                     sk_sndbuf;
        struct sk_buff_head     sk_receive_queue;
        struct sk_buff_head     sk_write_queue;
        struct sk_buff_head     sk_async_wait_queue;
        int                     sk_wmem_queued;
        int                     sk_forward_alloc;
        gfp_t                   sk_allocation;
        int                     sk_route_caps;
        int                     sk_gso_type;
        int                     sk_rcvlowat;
        unsigned long           sk_flags;
        unsigned long           sk_lingertime;
        struct sk_buff_head     sk_error_queue;
        struct proto            *sk_prot_creator;
        rwlock_t                sk_callback_lock;
        int                     sk_err,
                                sk_err_soft;
        unsigned short          sk_ack_backlog;
        unsigned short          sk_max_ack_backlog;
        __u32                   sk_priority;
        struct ucred            sk_peercred;
        long                    sk_rcvtimeo;
        long                    sk_sndtimeo;
        struct sk_filter        *sk_filter;
        void                    *sk_protinfo;
        struct timer_list       sk_timer;
        ktime_t                 sk_stamp;
        struct socket           *sk_socket;
        void                    *sk_user_data;
        struct page             *sk_sndmsg_page;
        struct sk_buff          *sk_send_head;
        __u32                   sk_sndmsg_off;
        int                     sk_write_pending;
        void                    *sk_security;
        void                    (*sk_state_change)(struct sock *sk);
        void                    (*sk_data_ready)(struct sock *sk, int bytes);
        void                    (*sk_write_space)(struct sock *sk);
        void                    (*sk_error_report)(struct sock *sk);
        int                     (*sk_backlog_rcv)(struct sock *sk,
                                                  struct sk_buff *skb);
        void                    (*sk_destruct)(struct sock *sk);
};

/*
 * Hashed lists helper routines
 */
static inline struct sock *__sk_head(const struct hlist_head *head)
{
        return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(const struct hlist_head *head)
{
        return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *sk_next(const struct sock *sk)
{
        return sk->sk_node.next ?
                hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
}

static inline int sk_unhashed(const struct sock *sk)
{
        return hlist_unhashed(&sk->sk_node);
}

static inline int sk_hashed(const struct sock *sk)
{
        return !sk_unhashed(sk);
}

static __inline__ void sk_node_init(struct hlist_node *node)
{
        node->pprev = NULL;
}

static __inline__ void __sk_del_node(struct sock *sk)
{
        __hlist_del(&sk->sk_node);
}

static __inline__ int __sk_del_node_init(struct sock *sk)
{
        if (sk_hashed(sk)) {
                __sk_del_node(sk);
                sk_node_init(&sk->sk_node);
                return 1;
        }
        return 0;
}

/* Grab the socket reference count. This operation is valid only
   when sk is ALREADY grabbed, e.g. it has been found in a hash table
   or a list and the lookup was made under a lock preventing hash table
   modifications.
 */

static inline void sock_hold(struct sock *sk)
{
        atomic_inc(&sk->sk_refcnt);
}

/* Ungrab the socket in a context which assumes that the socket refcnt
   cannot hit zero, e.g. this is true in the context of any socketcall.
 */
static inline void __sock_put(struct sock *sk)
{
        atomic_dec(&sk->sk_refcnt);
}

static __inline__ int sk_del_node_init(struct sock *sk)
{
        int rc = __sk_del_node_init(sk);

        if (rc) {
                /* paranoid for a while -acme */
                WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
                __sock_put(sk);
        }
        return rc;
}

static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
        hlist_add_head(&sk->sk_node, list);
}

static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
{
        sock_hold(sk);
        __sk_add_node(sk, list);
}

static __inline__ void __sk_del_bind_node(struct sock *sk)
{
        __hlist_del(&sk->sk_bind_node);
}

static __inline__ void sk_add_bind_node(struct sock *sk,
                                        struct hlist_head *list)
{
        hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, node, list) \
        hlist_for_each_entry(__sk, node, list, sk_node)
#define sk_for_each_from(__sk, node) \
        if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
                hlist_for_each_entry_from(__sk, node, sk_node)
#define sk_for_each_continue(__sk, node) \
        if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
                hlist_for_each_entry_continue(__sk, node, sk_node)
#define sk_for_each_safe(__sk, node, tmp, list) \
        hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
#define sk_for_each_bound(__sk, node, list) \
        hlist_for_each_entry(__sk, node, list, sk_bind_node)

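/*
 * Example (sketch): a lookup routine typically walks one hash chain with
 * sk_for_each() while holding the lock that guards the table.  The
 * my_hash_table/slot names below are hypothetical:
 *
 *      struct sock *sk;
 *      struct hlist_node *node;
 *
 *      sk_for_each(sk, node, &my_hash_table[slot])
 *              if (sk->sk_hash == hash && sk->sk_family == AF_INET)
 *                      goto found;
 */
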
/* Sock flags */
enum sock_flags {
        SOCK_DEAD,
        SOCK_DONE,
        SOCK_URGINLINE,
        SOCK_KEEPOPEN,
        SOCK_LINGER,
        SOCK_DESTROY,
        SOCK_BROADCAST,
        SOCK_TIMESTAMP,
        SOCK_ZAPPED,
        SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
        SOCK_DBG, /* %SO_DEBUG setting */
        SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
        SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
        SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
        SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
};

static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
        nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
        __set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
        __clear_bit(flag, &sk->sk_flags);
}

static inline int sock_flag(struct sock *sk, enum sock_flags flag)
{
        return test_bit(flag, &sk->sk_flags);
}

static inline void sk_acceptq_removed(struct sock *sk)
{
        sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
        sk->sk_ack_backlog++;
}

static inline int sk_acceptq_is_full(struct sock *sk)
{
        return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}
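
/*
 * Example (illustrative): a connection-oriented protocol checks the
 * accept queue before creating a child socket in a hypothetical
 * my_conn_request() handler, and balances the counter in accept():
 *
 *      if (sk_acceptq_is_full(sk))
 *              goto drop;
 *      ...
 *      sk_acceptq_added(sk);   (later paired with sk_acceptq_removed())
 */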

/*
 * Compute minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(struct sock *sk)
{
        return sk->sk_wmem_queued / 2;
}

static inline int sk_stream_wspace(struct sock *sk)
{
        return sk->sk_sndbuf - sk->sk_wmem_queued;
}

extern void sk_stream_write_space(struct sock *sk);

static inline int sk_stream_memory_free(struct sock *sk)
{
        return sk->sk_wmem_queued < sk->sk_sndbuf;
}

extern void sk_stream_rfree(struct sk_buff *skb);

static inline void sk_stream_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
        skb->sk = sk;
        skb->destructor = sk_stream_rfree;
        atomic_add(skb->truesize, &sk->sk_rmem_alloc);
        sk->sk_forward_alloc -= skb->truesize;
}

static inline void sk_stream_free_skb(struct sock *sk, struct sk_buff *skb)
{
        skb_truesize_check(skb);
        sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
        sk->sk_wmem_queued   -= skb->truesize;
        sk->sk_forward_alloc += skb->truesize;
        __kfree_skb(skb);
}

/* The per-socket spinlock must be held here. */
static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
        if (!sk->sk_backlog.tail) {
                sk->sk_backlog.head = sk->sk_backlog.tail = skb;
        } else {
                sk->sk_backlog.tail->next = skb;
                sk->sk_backlog.tail = skb;
        }
        skb->next = NULL;
}
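
/*
 * Example (sketch of the canonical pattern): softirq receive paths only
 * touch protocol state directly when no user context owns the socket,
 * and defer to the backlog otherwise; my_do_rcv() is a hypothetical
 * stand-in for the protocol's sk_backlog_rcv handler:
 *
 *      bh_lock_sock(sk);
 *      if (!sock_owned_by_user(sk))
 *              ret = my_do_rcv(sk, skb);
 *      else
 *              sk_add_backlog(sk, skb);
 *      bh_unlock_sock(sk);
 */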

#define sk_wait_event(__sk, __timeo, __condition)                       \
        ({      int __rc;                                               \
                release_sock(__sk);                                     \
                __rc = __condition;                                     \
                if (!__rc) {                                            \
                        *(__timeo) = schedule_timeout(*(__timeo));      \
                }                                                       \
                lock_sock(__sk);                                        \
                __rc = __condition;                                     \
                __rc;                                                   \
        })
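
/*
 * Example (sketch, signal handling omitted): callers already hold the
 * socket lock and loop until the condition holds or the timeout expires;
 * sk_wait_event() drops and retakes the lock around the sleep:
 *
 *      long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
 *
 *      while (!sk_stream_memory_free(sk)) {
 *              if (!timeo)
 *                      return -EAGAIN;
 *              sk_wait_event(sk, &timeo, sk_stream_memory_free(sk));
 *      }
 */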

extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
extern int sk_stream_error(struct sock *sk, int flags, int err);
extern void sk_stream_kill_queues(struct sock *sk);

extern int sk_wait_data(struct sock *sk, long *timeo);

struct request_sock_ops;
struct timewait_sock_ops;

/* Networking protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
        void                    (*close)(struct sock *sk,
                                        long timeout);
        int                     (*connect)(struct sock *sk,
                                        struct sockaddr *uaddr,
                                        int addr_len);
        int                     (*disconnect)(struct sock *sk, int flags);

        struct sock *           (*accept) (struct sock *sk, int flags, int *err);

        int                     (*ioctl)(struct sock *sk, int cmd,
                                         unsigned long arg);
        int                     (*init)(struct sock *sk);
        int                     (*destroy)(struct sock *sk);
        void                    (*shutdown)(struct sock *sk, int how);
        int                     (*setsockopt)(struct sock *sk, int level,
                                        int optname, char __user *optval,
                                        int optlen);
        int                     (*getsockopt)(struct sock *sk, int level,
                                        int optname, char __user *optval,
                                        int __user *option);
        int                     (*compat_setsockopt)(struct sock *sk,
                                        int level,
                                        int optname, char __user *optval,
                                        int optlen);
        int                     (*compat_getsockopt)(struct sock *sk,
                                        int level,
                                        int optname, char __user *optval,
                                        int __user *option);
        int                     (*sendmsg)(struct kiocb *iocb, struct sock *sk,
                                           struct msghdr *msg, size_t len);
        int                     (*recvmsg)(struct kiocb *iocb, struct sock *sk,
                                           struct msghdr *msg,
                                        size_t len, int noblock, int flags,
                                        int *addr_len);
        int                     (*sendpage)(struct sock *sk, struct page *page,
                                        int offset, size_t size, int flags);
        int                     (*bind)(struct sock *sk,
                                        struct sockaddr *uaddr, int addr_len);

        int                     (*backlog_rcv) (struct sock *sk,
                                                struct sk_buff *skb);

        /* Keeping track of sk's, looking them up, and port selection methods. */
        void                    (*hash)(struct sock *sk);
        void                    (*unhash)(struct sock *sk);
        int                     (*get_port)(struct sock *sk, unsigned short snum);

#ifdef CONFIG_SMP
        /* Keeping track of sockets in use */
        void                    (*inuse_add)(struct proto *prot, int inc);
        int                     (*inuse_getval)(const struct proto *prot);
        int                     *inuse_ptr;
#else
        int                     inuse;
#endif
        /* Memory pressure */
        void                    (*enter_memory_pressure)(void);
        atomic_t                *memory_allocated;      /* Current allocated memory. */
        atomic_t                *sockets_allocated;     /* Current number of sockets. */
        /*
         * Pressure flag: try to collapse.
         * Technical note: it is used by multiple contexts non atomically.
         * All the sk_stream_mem_schedule() is of this nature: accounting
         * is strict, actions are advisory and have some latency.
         */
        int                     *memory_pressure;
        int                     *sysctl_mem;
        int                     *sysctl_wmem;
        int                     *sysctl_rmem;
        int                     max_header;

        struct kmem_cache       *slab;
        unsigned int            obj_size;

        atomic_t                *orphan_count;

        struct request_sock_ops *rsk_prot;
        struct timewait_sock_ops *twsk_prot;

        struct module           *owner;

        char                    name[32];

        struct list_head        node;
#ifdef SOCK_REFCNT_DEBUG
        atomic_t                socks;
#endif
};
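
/*
 * Example (hypothetical): a minimal protocol block fills in the hooks it
 * needs and registers once at module init; every "my_*" name below is
 * made up for illustration:
 *
 *      static struct proto my_proto = {
 *              .name     = "MYPROTO",
 *              .owner    = THIS_MODULE,
 *              .close    = my_close,
 *              .sendmsg  = my_sendmsg,
 *              .recvmsg  = my_recvmsg,
 *              .hash     = my_hash,
 *              .unhash   = my_unhash,
 *              .obj_size = sizeof(struct my_sock),
 *      };
 *
 *      err = proto_register(&my_proto, 1);     (1 = allocate a slab cache)
 */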

/*
 * Special macros to let protos use a fast version of inuse{get|add}
 * using a static percpu variable per proto instead of an allocated one,
 * saving one dereference.
 * This might be changed if/when dynamic percpu vars become fast.
 */
#ifdef CONFIG_SMP
# define DEFINE_PROTO_INUSE(NAME)                       \
static DEFINE_PER_CPU(int, NAME##_inuse);               \
static void NAME##_inuse_add(struct proto *prot, int inc)       \
{                                                       \
        __get_cpu_var(NAME##_inuse) += inc;             \
}                                                       \
                                                        \
static int NAME##_inuse_getval(const struct proto *prot)\
{                                                       \
        int res = 0, cpu;                               \
                                                        \
        for_each_possible_cpu(cpu)                      \
                res += per_cpu(NAME##_inuse, cpu);      \
        return res;                                     \
}
# define REF_PROTO_INUSE(NAME)                          \
        .inuse_add = NAME##_inuse_add,                  \
        .inuse_getval = NAME##_inuse_getval,
#else
# define DEFINE_PROTO_INUSE(NAME)
# define REF_PROTO_INUSE(NAME)
#endif
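
/*
 * Example (sketch): a protocol opts into the fast per-cpu accounting by
 * pairing the two macros; "myproto" is a placeholder name:
 *
 *      DEFINE_PROTO_INUSE(myproto)
 *
 *      static struct proto myproto_prot = {
 *              ...
 *              REF_PROTO_INUSE(myproto)
 *      };
 */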

extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);

#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
        atomic_inc(&sk->sk_prot->socks);
}

static inline void sk_refcnt_debug_dec(struct sock *sk)
{
        atomic_dec(&sk->sk_prot->socks);
        printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
               sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
}

static inline void sk_refcnt_debug_release(const struct sock *sk)
{
        if (atomic_read(&sk->sk_refcnt) != 1)
                printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
                       sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
}
#else /* SOCK_REFCNT_DEBUG */
#define sk_refcnt_debug_inc(sk) do { } while (0)
#define sk_refcnt_debug_dec(sk) do { } while (0)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */

/* Called with local bh disabled */
static __inline__ void sock_prot_inc_use(struct proto *prot)
{
#ifdef CONFIG_SMP
        prot->inuse_add(prot, 1);
#else
        prot->inuse++;
#endif
}

static __inline__ void sock_prot_dec_use(struct proto *prot)
{
#ifdef CONFIG_SMP
        prot->inuse_add(prot, -1);
#else
        prot->inuse--;
#endif
}

static __inline__ int sock_prot_inuse(struct proto *proto)
{
#ifdef CONFIG_SMP
        return proto->inuse_getval(proto);
#else
        return proto->inuse;
#endif
}

/* With per-bucket locks this operation is not atomic, so this
 * version is no worse.
 */
static inline void __sk_prot_rehash(struct sock *sk)
{
        sk->sk_prot->unhash(sk);
        sk->sk_prot->hash(sk);
}

/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)

/* Sockets 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK       1024

#define SHUTDOWN_MASK   3
#define RCV_SHUTDOWN    1
#define SEND_SHUTDOWN   2

#define SOCK_SNDBUF_LOCK        1
#define SOCK_RCVBUF_LOCK        2
#define SOCK_BINDADDR_LOCK      4
#define SOCK_BINDPORT_LOCK      8

/* sock_iocb: used to kick off async processing of socket ios */
struct sock_iocb {
        struct list_head        list;

        int                     flags;
        int                     size;
        struct socket           *sock;
        struct sock             *sk;
        struct scm_cookie       *scm;
        struct msghdr           *msg, async_msg;
        struct kiocb            *kiocb;
};

static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
{
        return (struct sock_iocb *)iocb->private;
}

static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
{
        return si->kiocb;
}

struct socket_alloc {
        struct socket socket;
        struct inode vfs_inode;
};

static inline struct socket *SOCKET_I(struct inode *inode)
{
        return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
}

static inline struct inode *SOCK_INODE(struct socket *socket)
{
        return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}

extern void __sk_stream_mem_reclaim(struct sock *sk);
extern int sk_stream_mem_schedule(struct sock *sk, int size, int kind);

#define SK_STREAM_MEM_QUANTUM ((int)PAGE_SIZE)

static inline int sk_stream_pages(int amt)
{
        return DIV_ROUND_UP(amt, SK_STREAM_MEM_QUANTUM);
}

static inline void sk_stream_mem_reclaim(struct sock *sk)
{
        if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM)
                __sk_stream_mem_reclaim(sk);
}

static inline int sk_stream_rmem_schedule(struct sock *sk, struct sk_buff *skb)
{
        return (int)skb->truesize <= sk->sk_forward_alloc ||
                sk_stream_mem_schedule(sk, skb->truesize, 1);
}

static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
{
        return size <= sk->sk_forward_alloc ||
               sk_stream_mem_schedule(sk, size, 0);
}

/* Used by processes to "lock" a socket state, so that
 * interrupts and bottom half handlers won't change it
 * from under us. It essentially blocks any incoming
 * packets, so that we won't get any new data or any
 * packets that change the state of the socket.
 *
 * While locked, BH processing will add new packets to
 * the backlog queue.  This queue is processed by the
 * owner of the socket lock right before it is released.
 *
 * Since ~2.3.5 it is also an exclusive sleep lock serializing
 * accesses from user process context.
 */
#define sock_owned_by_user(sk)  ((sk)->sk_lock.owned)

/*
 * Macro so as to not evaluate some arguments when
 * lockdep is not enabled.
 *
 * Mark both the sk_lock and the sk_lock.slock as a
 * per-address-family lock class.
 */
#define sock_lock_init_class_and_name(sk, sname, skey, name, key)       \
do {                                                                    \
        sk->sk_lock.owned = 0;                                          \
        init_waitqueue_head(&sk->sk_lock.wq);                           \
        spin_lock_init(&(sk)->sk_lock.slock);                           \
        debug_check_no_locks_freed((void *)&(sk)->sk_lock,              \
                        sizeof((sk)->sk_lock));                         \
        lockdep_set_class_and_name(&(sk)->sk_lock.slock,                \
                        (skey), (sname));                               \
        lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);     \
} while (0)

extern void FASTCALL(lock_sock_nested(struct sock *sk, int subclass));

static inline void lock_sock(struct sock *sk)
{
        lock_sock_nested(sk, 0);
}

extern void FASTCALL(release_sock(struct sock *sk));

/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk)      spin_lock(&((__sk)->sk_lock.slock))
#define bh_lock_sock_nested(__sk) \
                                spin_lock_nested(&((__sk)->sk_lock.slock), \
                                SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk)    spin_unlock(&((__sk)->sk_lock.slock))
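
/*
 * Example (illustrative): process-context code brackets socket state
 * updates with lock_sock()/release_sock(); release_sock() also runs any
 * backlog packets that arrived while the lock was owned:
 *
 *      lock_sock(sk);
 *      sk->sk_rcvlowat = val;          (an illustrative protected update)
 *      release_sock(sk);
 */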

extern struct sock              *sk_alloc(struct net *net, int family,
                                          gfp_t priority,
                                          struct proto *prot);
extern void                     sk_free(struct sock *sk);
extern struct sock              *sk_clone(const struct sock *sk,
                                          const gfp_t priority);

extern struct sk_buff           *sock_wmalloc(struct sock *sk,
                                              unsigned long size, int force,
                                              gfp_t priority);
extern struct sk_buff           *sock_rmalloc(struct sock *sk,
                                              unsigned long size, int force,
                                              gfp_t priority);
extern void                     sock_wfree(struct sk_buff *skb);
extern void                     sock_rfree(struct sk_buff *skb);

extern int                      sock_setsockopt(struct socket *sock, int level,
                                                int op, char __user *optval,
                                                int optlen);

extern int                      sock_getsockopt(struct socket *sock, int level,
                                                int op, char __user *optval,
                                                int __user *optlen);
extern struct sk_buff           *sock_alloc_send_skb(struct sock *sk,
                                                     unsigned long size,
                                                     int noblock,
                                                     int *errcode);
extern void *sock_kmalloc(struct sock *sk, int size,
                          gfp_t priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);
extern void sk_send_sigurg(struct sock *sk);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * does not implement a particular function.
 */
extern int                      sock_no_bind(struct socket *,
                                             struct sockaddr *, int);
extern int                      sock_no_connect(struct socket *,
                                                struct sockaddr *, int, int);
extern int                      sock_no_socketpair(struct socket *,
                                                   struct socket *);
extern int                      sock_no_accept(struct socket *,
                                               struct socket *, int);
extern int                      sock_no_getname(struct socket *,
                                                struct sockaddr *, int *, int);
extern unsigned int             sock_no_poll(struct file *, struct socket *,
                                             struct poll_table_struct *);
extern int                      sock_no_ioctl(struct socket *, unsigned int,
                                              unsigned long);
extern int                      sock_no_listen(struct socket *, int);
extern int                      sock_no_shutdown(struct socket *, int);
extern int                      sock_no_getsockopt(struct socket *, int, int,
                                                   char __user *, int __user *);
extern int                      sock_no_setsockopt(struct socket *, int, int,
                                                   char __user *, int);
extern int                      sock_no_sendmsg(struct kiocb *, struct socket *,
                                                struct msghdr *, size_t);
extern int                      sock_no_recvmsg(struct kiocb *, struct socket *,
                                                struct msghdr *, size_t, int);
extern int                      sock_no_mmap(struct file *file,
                                             struct socket *sock,
                                             struct vm_area_struct *vma);
extern ssize_t                  sock_no_sendpage(struct socket *sock,
                                                struct page *page,
                                                int offset, size_t size,
                                                int flags);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * uses the inet style.
 */
extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
                                  char __user *optval, int __user *optlen);
extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
                               struct msghdr *msg, size_t size, int flags);
extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
                                  char __user *optval, int optlen);
extern int compat_sock_common_getsockopt(struct socket *sock, int level,
                int optname, char __user *optval, int __user *optlen);
extern int compat_sock_common_setsockopt(struct socket *sock, int level,
                int optname, char __user *optval, int optlen);

extern void sk_common_release(struct sock *sk);

/*
 *      Default socket callbacks and setup code
 */

/* Initialise core socket variables */
extern void sock_init_data(struct socket *sock, struct sock *sk);
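
/*
 * Example (hypothetical): a proto_ops table for a connectionless
 * protocol points unsupported entries at the sock_no_* stubs above;
 * PF_MYPROTO and the my_* handlers are made-up names:
 *
 *      static const struct proto_ops my_proto_ops = {
 *              .family   = PF_MYPROTO,
 *              .owner    = THIS_MODULE,
 *              .bind     = my_bind,
 *              .sendmsg  = my_sendmsg,
 *              .recvmsg  = my_recvmsg,
 *              .connect  = sock_no_connect,
 *              .accept   = sock_no_accept,
 *              .listen   = sock_no_listen,
 *              .mmap     = sock_no_mmap,
 *              .sendpage = sock_no_sendpage,
 *      };
 */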

/**
 *      sk_filter - run a packet through a socket filter
 *      @sk: sock associated with &sk_buff
 *      @skb: buffer to filter
 *
 * Run the filter code and then cut skb->data to the correct size returned
 * by sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is
 * smaller than pkt_len we keep the whole skb->data. This is the socket
 * level wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */

static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{
        int err;
        struct sk_filter *filter;

        err = security_sock_rcv_skb(sk, skb);
        if (err)
                return err;

        rcu_read_lock_bh();
        filter = rcu_dereference(sk->sk_filter);
        if (filter) {
                unsigned int pkt_len = sk_run_filter(skb, filter->insns,
                                filter->len);
                err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
        }
        rcu_read_unlock_bh();

        return err;
}

/**
 *      sk_filter_release - release a socket filter
 *      @fp: filter to remove
 *
 *      Remove a filter from a socket and release its resources.
 */

static inline void sk_filter_release(struct sk_filter *fp)
{
        if (atomic_dec_and_test(&fp->refcnt))
                kfree(fp);
}

static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
        unsigned int size = sk_filter_len(fp);

        atomic_sub(size, &sk->sk_omem_alloc);
        sk_filter_release(fp);
}

static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
        atomic_inc(&fp->refcnt);
        atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
}

/*
 * Socket reference counting postulates.
 *
 * * Each user of a socket SHOULD hold a reference count.
 * * Each access point to a socket (a hash table bucket, a reference from
 *   a list, a running timer, an skb in flight) MUST hold a reference count.
 * * When the reference count hits 0, it will never increase again.
 * * When the reference count hits 0, it means that no references from
 *   outside exist to this socket and the current process on the current
 *   CPU is the last user and may/should destroy this socket.
 * * sk_free is called from any context: process, BH, IRQ. When
 *   it is called, the socket has no references from outside -> sk_free
 *   may release descendant resources allocated by the socket, but by
 *   the time it is called, the socket is NOT referenced by any
 *   hash tables, lists etc.
 * * Packets, delivered from outside (from network or from another process)
 *   and enqueued on receive/error queues SHOULD NOT grab a reference count,
 *   while they sit in a queue. Otherwise, packets will leak when the socket
 *   is looked up by one CPU and unhashing is done by another CPU. This is
 *   true for udp/raw, netlink (leaks to receive and error queues), and tcp
 *   (leaks to backlog). Packet sockets do all the processing inside
 *   BR_NETPROTO_LOCK, so they do not have this race condition. UNIX sockets
 *   use a separate SMP lock, so they are safe too.
 */

/* Ungrab socket and destroy it, if it was the last reference. */
static inline void sock_put(struct sock *sk)
{
        if (atomic_dec_and_test(&sk->sk_refcnt))
                sk_free(sk);
}
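
/*
 * Example (illustrative): every holder pairs sock_hold() with sock_put();
 * the final sock_put() frees the socket through sk_free(), so the
 * pointer must not be touched afterwards:
 *
 *      sock_hold(sk);          (pin sk while we work outside the lookup lock)
 *      ...
 *      sock_put(sk);           (may free sk)
 */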

extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
                          const int nested);

/* Detach socket from process context.
 * Announce socket dead, detach it from wait queue and inode.
 * Note that the parent inode holds a reference count on this struct sock;
 * we do not release it in this function, because the protocol
 * probably wants some additional cleanups or even continuing
 * to work with this socket (TCP).
 */
static inline void sock_orphan(struct sock *sk)
{
        write_lock_bh(&sk->sk_callback_lock);
        sock_set_flag(sk, SOCK_DEAD);
        sk->sk_socket = NULL;
        sk->sk_sleep  = NULL;
        write_unlock_bh(&sk->sk_callback_lock);
}

static inline void sock_graft(struct sock *sk, struct socket *parent)
{
        write_lock_bh(&sk->sk_callback_lock);
        sk->sk_sleep = &parent->wait;
        parent->sk = sk;
        sk->sk_socket = parent;
        security_sock_graft(sk, parent);
        write_unlock_bh(&sk->sk_callback_lock);
}

extern int sock_i_uid(struct sock *sk);
extern unsigned long sock_i_ino(struct sock *sk);

static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
        return sk->sk_dst_cache;
}

static inline struct dst_entry *
sk_dst_get(struct sock *sk)
{
        struct dst_entry *dst;

        read_lock(&sk->sk_dst_lock);
        dst = sk->sk_dst_cache;
        if (dst)
                dst_hold(dst);
        read_unlock(&sk->sk_dst_lock);
        return dst;
}

static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
        struct dst_entry *old_dst;

        old_dst = sk->sk_dst_cache;
        sk->sk_dst_cache = dst;
        dst_release(old_dst);
}

static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
        write_lock(&sk->sk_dst_lock);
        __sk_dst_set(sk, dst);
        write_unlock(&sk->sk_dst_lock);
}

static inline void
__sk_dst_reset(struct sock *sk)
{
        struct dst_entry *old_dst;

        old_dst = sk->sk_dst_cache;
        sk->sk_dst_cache = NULL;
        dst_release(old_dst);
}

static inline void
sk_dst_reset(struct sock *sk)
{
        write_lock(&sk->sk_dst_lock);
        __sk_dst_reset(sk);
        write_unlock(&sk->sk_dst_lock);
}

extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);

extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);

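/*
 * Example (sketch): a transmit path revalidates the cached route and
 * falls back to a fresh lookup when the cache has been obsoleted;
 * my_route_output() is a hypothetical routing call:
 *
 *      struct dst_entry *dst = __sk_dst_check(sk, 0);
 *
 *      if (dst == NULL) {
 *              dst = my_route_output(sk);
 *              if (dst == NULL)
 *                      return -EHOSTUNREACH;
 *              __sk_dst_set(sk, dst);
 *      }
 */
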
static inline int sk_can_gso(const struct sock *sk)
{
        return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
}

extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);

static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb)
{
        sk->sk_wmem_queued   += skb->truesize;
        sk->sk_forward_alloc -= skb->truesize;
}

static inline int skb_copy_to_page(struct sock *sk, char __user *from,
                                   struct sk_buff *skb, struct page *page,
                                   int off, int copy)
{
        if (skb->ip_summed == CHECKSUM_NONE) {
                int err = 0;
                __wsum csum = csum_and_copy_from_user(from,
                                                      page_address(page) + off,
                                                      copy, 0, &err);
                if (err)
                        return err;
                skb->csum = csum_block_add(skb->csum, csum, skb->len);
        } else if (copy_from_user(page_address(page) + off, from, copy))
                return -EFAULT;

        skb->len             += copy;
        skb->data_len        += copy;
        skb->truesize        += copy;
        sk->sk_wmem_queued   += copy;
        sk->sk_forward_alloc -= copy;
        return 0;
}

/*
 *      Queue a received datagram if it will fit. Stream and sequenced
 *      protocols can't normally use this as they need to fit buffers in
 *      and play with them.
 *
 *      Inlined as it's very short and called for pretty much every
 *      packet ever received.
 */

static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
        sock_hold(sk);
        skb->sk = sk;
        skb->destructor = sock_wfree;
        atomic_add(skb->truesize, &sk->sk_wmem_alloc);
}

static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
        skb->sk = sk;
        skb->destructor = sock_rfree;
        atomic_add(skb->truesize, &sk->sk_rmem_alloc);
}

extern void sk_reset_timer(struct sock *sk, struct timer_list *timer,
                           unsigned long expires);

extern void sk_stop_timer(struct sock *sk, struct timer_list *timer);

extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
        /* Cast sk->sk_rcvbuf to unsigned... It's pointless, but reduces
           the number of warnings when compiling with -W --ANK
         */
        if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
            (unsigned)sk->sk_rcvbuf)
                return -ENOMEM;
        skb_set_owner_r(skb, sk);
        skb_queue_tail(&sk->sk_error_queue, skb);
        if (!sock_flag(sk, SOCK_DEAD))
                sk->sk_data_ready(sk, skb->len);
        return 0;
}

/*
 *      Recover an error report and clear atomically
 */

static inline int sock_error(struct sock *sk)
{
        int err;
        if (likely(!sk->sk_err))
                return 0;
        err = xchg(&sk->sk_err, 0);
        return -err;
}

static inline unsigned long sock_wspace(struct sock *sk)
{
        int amt = 0;

        if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
                amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
                if (amt < 0)
                        amt = 0;
        }
        return amt;
}

static inline void sk_wake_async(struct sock *sk, int how, int band)
{
        if (sk->sk_socket && sk->sk_socket->fasync_list)
                sock_wake_async(sk->sk_socket, how, band);
}

#define SOCK_MIN_SNDBUF 2048
#define SOCK_MIN_RCVBUF 256

static inline void sk_stream_moderate_sndbuf(struct sock *sk)
{
        if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
                sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued / 2);
                sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
        }
}

static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
                                                   int size, int mem,
                                                   gfp_t gfp)
{
        struct sk_buff *skb;

        /* The TCP header must be at least 32-bit aligned.  */
        size = ALIGN(size, 4);

        skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
        if (skb) {
                skb->truesize += mem;
                if (sk_stream_wmem_schedule(sk, skb->truesize)) {
                        /*
                         * Make sure that we have exactly size bytes
                         * available to the caller, no more, no less.
                         */
                        skb_reserve(skb, skb_tailroom(skb) - size);
                        return skb;
                }
                __kfree_skb(skb);
        } else {
                sk->sk_prot->enter_memory_pressure();
                sk_stream_moderate_sndbuf(sk);
        }
        return NULL;
}

static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk,
                                                  int size,
                                                  gfp_t gfp)
{
        return sk_stream_alloc_pskb(sk, size, 0, gfp);
}

static inline struct page *sk_stream_alloc_page(struct sock *sk)
{
        struct page *page = NULL;

        page = alloc_pages(sk->sk_allocation, 0);
        if (!page) {
                sk->sk_prot->enter_memory_pressure();
                sk_stream_moderate_sndbuf(sk);
        }
        return page;
}

/*
 *      Default write policy as shown to user space via poll/select/SIGIO
 */
static inline int sock_writeable(const struct sock *sk)
{
        return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2);
}

static inline gfp_t gfp_any(void)
{
        return in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
}

static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
{
        return noblock ? 0 : sk->sk_rcvtimeo;
}

static inline long sock_sndtimeo(const struct sock *sk, int noblock)
{
        return noblock ? 0 : sk->sk_sndtimeo;
}

static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
        return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
}

/* Alas, with a timeout, socket operations are not restartable.
 * Compare this to poll().
 */
static inline int sock_intr_errno(long timeo)
{
        return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}
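
/*
 * Example (sketch, simplified): a blocking receive path derives its
 * budget from sock_rcvtimeo() and converts an interrupting signal into
 * the right errno with sock_intr_errno():
 *
 *      long timeo = sock_rcvtimeo(sk, noblock);
 *
 *      while (skb_queue_empty(&sk->sk_receive_queue)) {
 *              if (!timeo)
 *                      return -EAGAIN;
 *              if (signal_pending(current))
 *                      return sock_intr_errno(timeo);
 *              sk_wait_data(sk, &timeo);
 *      }
 */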

extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
        struct sk_buff *skb);

static __inline__ void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
        ktime_t kt = skb->tstamp;

        if (sock_flag(sk, SOCK_RCVTSTAMP))
                __sock_recv_timestamp(msg, sk, skb);
        else
                sk->sk_stamp = kt;
}

/**
 * sk_eat_skb - Release a skb if it is no longer needed
 * @sk: socket to eat this skb from
 * @skb: socket buffer to eat
 * @copied_early: flag indicating whether DMA operations copied this data early
 *
 * This routine must be called with interrupts disabled or with the socket
 * locked so that the sk_buff queue operation is ok.
 */
#ifdef CONFIG_NET_DMA
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{
        __skb_unlink(skb, &sk->sk_receive_queue);
        if (!copied_early)
                __kfree_skb(skb);
        else
                __skb_queue_tail(&sk->sk_async_wait_queue, skb);
}
#else
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{
        __skb_unlink(skb, &sk->sk_receive_queue);
        __kfree_skb(skb);
}
#endif

extern void sock_enable_timestamp(struct sock *sk);
extern int sock_get_timestamp(struct sock *, struct timeval __user *);
extern int sock_get_timestampns(struct sock *, struct timespec __user *);

/*
 *      Enable debug/info messages
 */
extern int net_msg_warn;
#define NETDEBUG(fmt, args...) \
        do { if (net_msg_warn) printk(fmt, ##args); } while (0)

#define LIMIT_NETDEBUG(fmt, args...) \
        do { if (net_msg_warn && net_ratelimit()) printk(fmt, ##args); } while (0)

/*
 * Macros for sleeping on a socket. Use them like this:
 *
 * SOCK_SLEEP_PRE(sk)
 * if (condition)
 *      schedule();
 * SOCK_SLEEP_POST(sk)
 *
 * N.B. These are now obsolete and were, afaik, only ever used in DECnet,
 * and when the last use of them in DECnet has gone, I'm intending to
 * remove them.
 */

#define SOCK_SLEEP_PRE(sk)      { struct task_struct *tsk = current; \
                                DECLARE_WAITQUEUE(wait, tsk); \
                                tsk->state = TASK_INTERRUPTIBLE; \
                                add_wait_queue((sk)->sk_sleep, &wait); \
                                release_sock(sk);

#define SOCK_SLEEP_POST(sk)     tsk->state = TASK_RUNNING; \
                                remove_wait_queue((sk)->sk_sleep, &wait); \
                                lock_sock(sk); \
                                }

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
        if (valbool)
                sock_set_flag(sk, bit);
        else
                sock_reset_flag(sk, bit);
}

extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

extern void sk_init(void);

#ifdef CONFIG_SYSCTL
extern struct ctl_table core_table[];
#endif

extern int sysctl_optmem_max;

extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;

#endif  /* _SOCK_H */