linux/include/linux/skbuff.h
/*
 *      Definitions for the 'struct sk_buff' memory handlers.
 *
 *      Authors:
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */
  13
  14#ifndef _LINUX_SKBUFF_H
  15#define _LINUX_SKBUFF_H
  16
  17#include <linux/kernel.h>
  18#include <linux/kmemcheck.h>
  19#include <linux/compiler.h>
  20#include <linux/time.h>
  21#include <linux/cache.h>
  22
  23#include <asm/atomic.h>
  24#include <asm/types.h>
  25#include <linux/spinlock.h>
  26#include <linux/net.h>
  27#include <linux/textsearch.h>
  28#include <net/checksum.h>
  29#include <linux/rcupdate.h>
  30#include <linux/dmaengine.h>
  31#include <linux/hrtimer.h>
  32
/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE 0
#define CHECKSUM_UNNECESSARY 1
#define CHECKSUM_COMPLETE 2
#define CHECKSUM_PARTIAL 3

#define SKB_DATA_ALIGN(X)       (((X) + (SMP_CACHE_BYTES - 1)) & \
                                 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)    \
        ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
        SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)         (SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC           (SKB_MAX_ORDER(0, 2))
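
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096 and
 * SMP_CACHE_BYTES == 64): SKB_MAX_ALLOC expands to
 * SKB_WITH_OVERHEAD((4096 << 2) - 0), i.e. 16384 minus
 * sizeof(struct skb_shared_info) rounded up to a 64-byte multiple,
 * so slightly less than 16KB of usable buffer space from an order-2
 * page allocation.
 */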

/* A. Checksumming of received packets by device.
 *
 *      NONE: device failed to checksum this packet.
 *              skb->csum is undefined.
 *
 *      UNNECESSARY: device parsed the packet and verified the checksum.
 *              skb->csum is undefined.
 *            This is a bad option, but, unfortunately, many vendors do it.
 *            Apparently with the secret goal of selling you a new device
 *            when you add a new protocol to your host. E.g. IPv6. 8)
 *
 *      COMPLETE: the most generic way. The device supplied the checksum
 *          of _all_ of the packet as seen by netif_rx, in skb->csum.
 *          NOTE: even if a device supports only some protocols but is
 *          able to produce some skb->csum, it MUST use COMPLETE,
 *          not UNNECESSARY.
 *
 *      PARTIAL: identical to the case for output below.  This may occur
 *          on a packet received directly from another Linux OS, e.g.,
 *          a virtualised Linux kernel on the same host.  The packet can
 *          be treated in the same way as UNNECESSARY, except that on
 *          output (i.e., forwarding) the checksum must be filled in
 *          by the OS or the hardware.
 *
 * B. Checksumming on output.
 *
 *      NONE: the skb is checksummed by the protocol, or a checksum is not
 *      required.
 *
 *      PARTIAL: the device is required to checksum the packet as seen by
 *      hard_start_xmit from skb->csum_start to the end, and to record the
 *      checksum at skb->csum_start + skb->csum_offset.
 *
 *      A device must advertise its capabilities in dev->features, set
 *      at device setup time:
 *      NETIF_F_HW_CSUM - it is a clever device, able to checksum
 *                        everything.
 *      NETIF_F_NO_CSUM - loopback or reliable single-hop media.
 *      NETIF_F_IP_CSUM - the device is dumb, able to checksum only
 *                        TCP/UDP over IPv4. Sigh. Vendors like it this
 *                        way for some unknown reason. Though, see the
 *                        comment above about CHECKSUM_UNNECESSARY. 8)
 *      NETIF_F_IPV6_CSUM - about as dumb as the last one, but does IPv6
 *                        instead.
 *
 *      Any questions? No questions, good.              --ANK
 */
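
/*
 * Illustrative sketch (not a definitive recipe): this is roughly what the
 * stack arranges for a TCP packet when requesting CHECKSUM_PARTIAL on
 * output. The offsets here are schematic; real code derives them from the
 * headers it has just built:
 *
 *      skb->ip_summed   = CHECKSUM_PARTIAL;
 *      skb->csum_start  = skb_transport_header(skb) - skb->head;
 *      skb->csum_offset = offsetof(struct tcphdr, check);
 *
 * The device (or a software fallback) then checksums from csum_start to
 * the end of the packet and stores the result at
 * csum_start + csum_offset.
 */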

struct net_device;
struct scatterlist;
struct pipe_inode_info;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
        atomic_t use;
};
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
        atomic_t use;
        struct net_device *physindev;
        struct net_device *physoutdev;
        unsigned int mask;
        unsigned long data[32 / sizeof(unsigned long)];
};
#endif

struct sk_buff_head {
        /* These two members must be first. */
        struct sk_buff  *next;
        struct sk_buff  *prev;

        __u32           qlen;
        spinlock_t      lock;
};

struct sk_buff;

/* To allow a 64K frame to be packed as a single skb without frag_list */
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
        struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
        __u32 page_offset;
        __u32 size;
#else
        __u16 page_offset;
        __u16 size;
#endif
};

#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:   hardware time stamp transformed into duration
 *              since arbitrary point in time
 * @syststamp:  hwtstamp transformed to system time base
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp. The relation between the different kinds of time
 * stamps is as follows:
 *
 * syststamp and tstamp can be compared against each other in
 * arbitrary combinations.  The accuracy of a
 * syststamp/tstamp/"syststamp from other device" comparison is
 * limited by the accuracy of the transformation into system time
 * base. This depends on the device driver and its underlying
 * hardware.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
        ktime_t hwtstamp;
        ktime_t syststamp;
};
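
/*
 * Illustrative sketch, not from this header: a driver that has read a raw
 * hardware timestamp (here a hypothetical nanosecond counter value "ns")
 * might attach it to an skb like this:
 *
 *      struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
 *
 *      memset(hwts, 0, sizeof(*hwts));
 *      hwts->hwtstamp = ns_to_ktime(ns);
 *
 * Setting syststamp is optional and requires a driver-specific conversion
 * to the system time base. skb_hwtstamps() is defined later in this file;
 * ns_to_ktime() comes from <linux/ktime.h>.
 */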

/* Definitions for tx_flags in struct skb_shared_info */
enum {
        /* generate hardware time stamp */
        SKBTX_HW_TSTAMP = 1 << 0,

        /* generate software time stamp */
        SKBTX_SW_TSTAMP = 1 << 1,

        /* device driver is going to provide hardware time stamp */
        SKBTX_IN_PROGRESS = 1 << 2,

        /* ensure the originating sk reference is available on driver level */
        SKBTX_DRV_NEEDS_SK_REF = 1 << 3,
};

/* This data is invariant across clones and lives at
 * the end of the header data, i.e. at skb->end.
 */
struct skb_shared_info {
        unsigned short  nr_frags;
        unsigned short  gso_size;
        /* Warning: this field is not always filled in (UFO)! */
        unsigned short  gso_segs;
        unsigned short  gso_type;
        __be32          ip6_frag_id;
        __u8            tx_flags;
        struct sk_buff  *frag_list;
        struct skb_shared_hwtstamps hwtstamps;

        /*
         * Warning: all fields before dataref are cleared in __alloc_skb()
         */
        atomic_t        dataref;

        /* Intermediate layers must ensure that destructor_arg
         * remains valid until skb destructor */
        void *          destructor_arg;
        /* must be last field, see pskb_expand_head() */
        skb_frag_t      frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
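
/*
 * Illustrative sketch: given the split above, the two halves of dataref
 * can be read like this (this is the same arithmetic skb_header_cloned()
 * below uses):
 *
 *      int dataref      = atomic_read(&skb_shinfo(skb)->dataref);
 *      int payload_refs = dataref >> SKB_DATAREF_SHIFT;
 *      int total_refs   = dataref & SKB_DATAREF_MASK;
 *      int header_refs  = total_refs - payload_refs;
 */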


enum {
        SKB_FCLONE_UNAVAILABLE,
        SKB_FCLONE_ORIG,
        SKB_FCLONE_CLONE,
};

enum {
        SKB_GSO_TCPV4 = 1 << 0,
        SKB_GSO_UDP = 1 << 1,

        /* This indicates the skb is from an untrusted source. */
        SKB_GSO_DODGY = 1 << 2,

        /* This indicates the tcp segment has CWR set. */
        SKB_GSO_TCP_ECN = 1 << 3,

        SKB_GSO_TCPV6 = 1 << 4,

        SKB_GSO_FCOE = 1 << 5,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
    defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
#endif

/**
 *      struct sk_buff - socket buffer
 *      @next: Next buffer in list
 *      @prev: Previous buffer in list
 *      @sk: Socket we are owned by
 *      @tstamp: Time we arrived
 *      @dev: Device we arrived on/are leaving by
 *      @transport_header: Transport layer header
 *      @network_header: Network layer header
 *      @mac_header: Link layer header
 *      @_skb_refdst: destination entry (with norefcount bit)
 *      @sp: the security path, used for xfrm
 *      @cb: Control buffer. Free for use by every layer. Put private vars here
 *      @len: Length of actual data
 *      @data_len: Data length
 *      @mac_len: Length of link layer header
 *      @hdr_len: writable header length of cloned skb
 *      @csum: Checksum (must include start/offset pair)
 *      @csum_start: Offset from skb->head where checksumming should start
 *      @csum_offset: Offset from csum_start where checksum should be stored
 *      @local_df: allow local fragmentation
 *      @cloned: Head may be cloned (check refcnt to be sure)
 *      @nohdr: Payload reference only, must not modify header
 *      @pkt_type: Packet class
 *      @fclone: skbuff clone status
 *      @ip_summed: Driver fed us an IP checksum
 *      @priority: Packet queueing priority
 *      @users: User count - see {datagram,tcp}.c
 *      @protocol: Packet protocol from driver
 *      @truesize: Buffer size
 *      @head: Head of buffer
 *      @data: Data head pointer
 *      @tail: Tail pointer
 *      @end: End pointer
 *      @destructor: Destruct function
 *      @mark: Generic packet mark
 *      @nfct: Associated connection, if any
 *      @ipvs_property: skbuff is owned by ipvs
 *      @peeked: this packet has been seen already, so stats have been
 *              done for it, don't do them again
 *      @nf_trace: netfilter packet trace flag
 *      @nfctinfo: Relationship of this skb to the connection
 *      @nfct_reasm: netfilter conntrack re-assembly pointer
 *      @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *      @skb_iif: ifindex of device we arrived on
 *      @rxhash: the packet hash computed on receive
 *      @queue_mapping: Queue mapping for multiqueue devices
 *      @tc_index: Traffic control index
 *      @tc_verd: traffic control verdict
 *      @ndisc_nodetype: router type (from link layer)
 *      @dma_cookie: a cookie to one of several possible DMA operations
 *              done by skb DMA functions
 *      @secmark: security marking
 *      @vlan_tci: vlan tag control information
 */

struct sk_buff {
        /* These two members must be first. */
        struct sk_buff          *next;
        struct sk_buff          *prev;

        ktime_t                 tstamp;

        struct sock             *sk;
        struct net_device       *dev;

        /*
         * This is the control buffer. It is free to use for every
         * layer. Please put your private variables there. If you
         * want to keep them across layers you have to do a skb_clone()
         * first. This is owned by whoever has the skb queued ATM.
         */
        char                    cb[48] __aligned(8);

        unsigned long           _skb_refdst;
#ifdef CONFIG_XFRM
        struct  sec_path        *sp;
#endif
        unsigned int            len,
                                data_len;
        __u16                   mac_len,
                                hdr_len;
        union {
                __wsum          csum;
                struct {
                        __u16   csum_start;
                        __u16   csum_offset;
                };
        };
        __u32                   priority;
        kmemcheck_bitfield_begin(flags1);
        __u8                    local_df:1,
                                cloned:1,
                                ip_summed:2,
                                nohdr:1,
                                nfctinfo:3;
        __u8                    pkt_type:3,
                                fclone:2,
                                ipvs_property:1,
                                peeked:1,
                                nf_trace:1;
        kmemcheck_bitfield_end(flags1);
        __be16                  protocol;

        void                    (*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
        struct nf_conntrack     *nfct;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
        struct sk_buff          *nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
        struct nf_bridge_info   *nf_bridge;
#endif

        int                     skb_iif;
#ifdef CONFIG_NET_SCHED
        __u16                   tc_index;       /* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
        __u16                   tc_verd;        /* traffic control verdict */
#endif
#endif

        __u32                   rxhash;

        kmemcheck_bitfield_begin(flags2);
        __u16                   queue_mapping:16;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
        __u8                    ndisc_nodetype:2,
                                deliver_no_wcard:1;
#else
        __u8                    deliver_no_wcard:1;
#endif
        __u8                    ooo_okay:1;
        kmemcheck_bitfield_end(flags2);

        /* 0/13 bit hole */

#ifdef CONFIG_NET_DMA
        dma_cookie_t            dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
        __u32                   secmark;
#endif
        union {
                __u32           mark;
                __u32           dropcount;
        };

        __u16                   vlan_tci;

        sk_buff_data_t          transport_header;
        sk_buff_data_t          network_header;
        sk_buff_data_t          mac_header;
        /* These elements must be at the end, see alloc_skb() for details.  */
        sk_buff_data_t          tail;
        sk_buff_data_t          end;
        unsigned char           *head,
                                *data;
        unsigned int            truesize;
        atomic_t                users;
};

#ifdef __KERNEL__
/*
 *      Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>

#include <asm/system.h>

/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if refcount was _not_ taken
 */
#define SKB_DST_NOREF   1UL
#define SKB_DST_PTRMASK ~(SKB_DST_NOREF)
/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns the skb dst_entry, regardless of whether a reference was taken.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
        /* If refdst was not refcounted, check that we are still in an
         * rcu_read_lock section.
         */
        WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
                !rcu_read_lock_held() &&
                !rcu_read_lock_bh_held());
        return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
        skb->_skb_refdst = (unsigned long)dst;
}
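
/*
 * Illustrative sketch: the usual pairing is to take a reference before
 * attaching the dst, and to let skb_dst_drop() (in net/dst.h) release it:
 *
 *      dst_hold(dst);
 *      skb_dst_set(skb, dst);
 *
 * skb_dst_set_noref() below is the variant for a dst whose lifetime is
 * instead guaranteed by the current RCU read-side section.
 */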

extern void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst);

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
        return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
        return (struct rtable *)skb_dst(skb);
}

extern void kfree_skb(struct sk_buff *skb);
extern void consume_skb(struct sk_buff *skb);
extern void            __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
                                   gfp_t priority, int fclone, int node);
static inline struct sk_buff *alloc_skb(unsigned int size,
                                        gfp_t priority)
{
        return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
                                               gfp_t priority)
{
        return __alloc_skb(size, priority, 1, NUMA_NO_NODE);
}
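
/*
 * Illustrative sketch of the usual allocation pattern for a transmit
 * buffer (sizes here are hypothetical): allocate room for headers plus
 * payload, reserve the header space, then fill the payload with skb_put()
 * and prepend headers with skb_push() (both defined later in this file):
 *
 *      struct sk_buff *skb = alloc_skb(hdr_len + data_len, GFP_KERNEL);
 *
 *      if (!skb)
 *              return -ENOMEM;
 *      skb_reserve(skb, hdr_len);
 *      memcpy(skb_put(skb, data_len), data, data_len);
 *
 * alloc_skb_fclone() is preferred for skbs that are very likely to be
 * cloned soon (e.g. TCP transmit buffers); the clone is then carved out
 * of the same allocation.
 */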

extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);

extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
                                 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
                                gfp_t priority);
extern struct sk_buff *pskb_copy(struct sk_buff *skb,
                                 gfp_t gfp_mask);
extern int             pskb_expand_head(struct sk_buff *skb,
                                        int nhead, int ntail,
                                        gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
                                            unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
                                       int newheadroom, int newtailroom,
                                       gfp_t priority);
extern int             skb_to_sgvec(struct sk_buff *skb,
                                    struct scatterlist *sg, int offset,
                                    int len);
extern int             skb_cow_data(struct sk_buff *skb, int tailbits,
                                    struct sk_buff **trailer);
extern int             skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)        consume_skb(a)

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
                        int getfrag(void *from, char *to, int offset,
                        int len, int odd, struct sk_buff *skb),
                        void *from, int length);

struct skb_seq_state {
        __u32           lower_offset;
        __u32           upper_offset;
        __u32           frag_idx;
        __u32           stepped_offset;
        struct sk_buff  *root_skb;
        struct sk_buff  *cur_skb;
        __u8            *frag_data;
};

extern void           skb_prepare_seq_read(struct sk_buff *skb,
                                           unsigned int from, unsigned int to,
                                           struct skb_seq_state *st);
extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
                                   struct skb_seq_state *st);
extern void           skb_abort_seq_read(struct skb_seq_state *st);
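
/*
 * Illustrative sketch of the sequential-read API above: walk all bytes of
 * an skb (linear part, page frags and frag_list) without linearising it.
 * A minimal consumer, assuming a hypothetical handle_block() callback:
 *
 *      struct skb_seq_state st;
 *      const u8 *data;
 *      unsigned int consumed = 0, len;
 *
 *      skb_prepare_seq_read(skb, 0, skb->len, &st);
 *      while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *              handle_block(data, len);
 *              consumed += len;
 *      }
 *
 * skb_seq_read() returns 0 when the range is exhausted; if the walk is
 * stopped early, skb_abort_seq_read(&st) must be called to release any
 * mapped fragment.
 */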

extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
                                    unsigned int to, struct ts_config *config,
                                    struct ts_state *state);

extern __u32 __skb_get_rxhash(struct sk_buff *skb);
static inline __u32 skb_get_rxhash(struct sk_buff *skb)
{
        if (!skb->rxhash)
                skb->rxhash = __skb_get_rxhash(skb);

        return skb->rxhash;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
        return skb->head + skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
        return skb->end;
}
#endif

/* Internal */
#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
        return &skb_shinfo(skb)->hwtstamps;
}

/**
 *      skb_queue_empty - check if a queue is empty
 *      @list: queue head
 *
 *      Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
        return list->next == (struct sk_buff *)list;
}

/**
 *      skb_queue_is_last - check if skb is the last entry in the queue
 *      @list: queue head
 *      @skb: buffer
 *
 *      Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
                                     const struct sk_buff *skb)
{
        return skb->next == (struct sk_buff *)list;
}

/**
 *      skb_queue_is_first - check if skb is the first entry in the queue
 *      @list: queue head
 *      @skb: buffer
 *
 *      Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
                                      const struct sk_buff *skb)
{
        return skb->prev == (struct sk_buff *)list;
}

/**
 *      skb_queue_next - return the next packet in the queue
 *      @list: queue head
 *      @skb: current buffer
 *
 *      Return the next packet in @list after @skb.  It is only valid to
 *      call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
                                             const struct sk_buff *skb)
{
        /* This BUG_ON may seem severe, but if we just return then we
         * are going to dereference garbage.
         */
        BUG_ON(skb_queue_is_last(list, skb));
        return skb->next;
}

/**
 *      skb_queue_prev - return the prev packet in the queue
 *      @list: queue head
 *      @skb: current buffer
 *
 *      Return the prev packet in @list before @skb.  It is only valid to
 *      call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
                                             const struct sk_buff *skb)
{
        /* This BUG_ON may seem severe, but if we just return then we
         * are going to dereference garbage.
         */
        BUG_ON(skb_queue_is_first(list, skb));
        return skb->prev;
}

/**
 *      skb_get - reference buffer
 *      @skb: buffer to reference
 *
 *      Makes another reference to a socket buffer and returns a pointer
 *      to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
        atomic_inc(&skb->users);
        return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant
 * atomic changes.
 */

/**
 *      skb_cloned - is the buffer a clone
 *      @skb: buffer to check
 *
 *      Returns true if the buffer was generated with skb_clone() and is
 *      one of multiple shared copies of the buffer. Cloned buffers are
 *      shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
        return skb->cloned &&
               (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

/**
 *      skb_header_cloned - is the header a clone
 *      @skb: buffer to check
 *
 *      Returns true if modifying the header part of the buffer requires
 *      the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
        int dataref;

        if (!skb->cloned)
                return 0;

        dataref = atomic_read(&skb_shinfo(skb)->dataref);
        dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
        return dataref != 1;
}

/**
 *      skb_header_release - release reference to header
 *      @skb: buffer to operate on
 *
 *      Drop a reference to the header part of the buffer.  This is done
 *      by acquiring a payload reference.  You must not read from the header
 *      part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
        BUG_ON(skb->nohdr);
        skb->nohdr = 1;
        atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *      skb_shared - is the buffer shared
 *      @skb: buffer to check
 *
 *      Returns true if more than one person has a reference to this
 *      buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
        return atomic_read(&skb->users) != 1;
}

/**
 *      skb_share_check - check if buffer is shared and if so clone it
 *      @skb: buffer to check
 *      @pri: priority for memory allocation
 *
 *      If the buffer is shared the buffer is cloned and the old copy
 *      drops a reference. A new clone with a single reference is returned.
 *      If the buffer is not shared the original buffer is returned. When
 *      called from interrupt context or with spinlocks held, @pri must be
 *      %GFP_ATOMIC.
 *
 *      %NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
                                              gfp_t pri)
{
        might_sleep_if(pri & __GFP_WAIT);
        if (skb_shared(skb)) {
                struct sk_buff *nskb = skb_clone(skb, pri);
                kfree_skb(skb);
                skb = nskb;
        }
        return skb;
}
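
/*
 * Illustrative sketch: a receive handler that may modify the skb
 * typically starts like this (GFP_ATOMIC because it runs in softirq
 * context):
 *
 *      skb = skb_share_check(skb, GFP_ATOMIC);
 *      if (!skb)
 *              return NET_RX_DROP;
 *
 * After this the handler holds the only reference, though the data may
 * still be cloned; see skb_unshare() below when a private copy of the
 * data itself is needed.
 */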

/*
 *      Copy shared buffers into a new sk_buff. We effectively do COW on
 *      packets to handle cases where we have a local reader and forward
 *      and a couple of other messy ones. The normal one is tcpdumping
 *      a packet that's being forwarded.
 */

/**
 *      skb_unshare - make a copy of a shared buffer
 *      @skb: buffer to check
 *      @pri: priority for memory allocation
 *
 *      If the socket buffer is a clone then this function creates a new
 *      copy of the data, drops a reference count on the old copy and returns
 *      the new copy with the reference count at 1. If the buffer is not a clone
 *      the original buffer is returned. When called with a spinlock held or
 *      from interrupt state, @pri must be %GFP_ATOMIC.
 *
 *      %NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
                                          gfp_t pri)
{
        might_sleep_if(pri & __GFP_WAIT);
        if (skb_cloned(skb)) {
                struct sk_buff *nskb = skb_copy(skb, pri);
                kfree_skb(skb); /* Free our shared copy */
                skb = nskb;
        }
        return skb;
}

/**
 *      skb_peek - peek at the head of an &sk_buff_head
 *      @list_: list to peek at
 *
 *      Peek an &sk_buff. Unlike most other operations you _MUST_
 *      be careful with this one. A peek leaves the buffer on the
 *      list and someone else may run off with it. You must hold
 *      the appropriate locks or have a private queue to do this.
 *
 *      Returns %NULL for an empty list or a pointer to the head element.
 *      The reference count is not incremented and the reference is therefore
 *      volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
        struct sk_buff *list = ((struct sk_buff *)list_)->next;
        if (list == (struct sk_buff *)list_)
                list = NULL;
        return list;
}

/**
 *      skb_peek_tail - peek at the tail of an &sk_buff_head
 *      @list_: list to peek at
 *
 *      Peek an &sk_buff. Unlike most other operations you _MUST_
 *      be careful with this one. A peek leaves the buffer on the
 *      list and someone else may run off with it. You must hold
 *      the appropriate locks or have a private queue to do this.
 *
 *      Returns %NULL for an empty list or a pointer to the tail element.
 *      The reference count is not incremented and the reference is therefore
 *      volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
        struct sk_buff *list = ((struct sk_buff *)list_)->prev;
        if (list == (struct sk_buff *)list_)
                list = NULL;
        return list;
}

/**
 *      skb_queue_len   - get queue length
 *      @list_: list to measure
 *
 *      Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
        return list_->qlen;
}

/**
 *      __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *      @list: queue to initialize
 *
 *      This initializes only the list and queue length aspects of
 *      an sk_buff_head object.  This allows initializing the list
 *      aspects of an sk_buff_head without reinitializing things like
 *      the spinlock.  It can also be used for on-stack sk_buff_head
 *      objects where the spinlock is known not to be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
        list->prev = list->next = (struct sk_buff *)list;
        list->qlen = 0;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers should need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
        spin_lock_init(&list->lock);
        __skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
                struct lock_class_key *class)
{
        skb_queue_head_init(list);
        lockdep_set_class(&list->lock, class);
}
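
/*
 * Illustrative sketch of basic queue usage with the locked helpers
 * declared further down (skb_queue_tail() and skb_dequeue() take
 * list->lock themselves):
 *
 *      struct sk_buff_head q;
 *      struct sk_buff *skb;
 *
 *      skb_queue_head_init(&q);
 *      skb_queue_tail(&q, skb);                  // producer side
 *      while ((skb = skb_dequeue(&q)) != NULL)   // consumer side
 *              kfree_skb(skb);
 *
 * The __skb_*() variants do the same list surgery without locking, for
 * callers that already hold the appropriate lock.
 */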

/*
 *      Insert an sk_buff on a list.
 *
 *      The "__skb_xxxx()" functions are the non-atomic ones that
 *      can only be called with interrupts disabled.
 */
extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
                                struct sk_buff *prev, struct sk_buff *next,
                                struct sk_buff_head *list)
{
        newsk->next = next;
        newsk->prev = prev;
        next->prev  = prev->next = newsk;
        list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
                                      struct sk_buff *prev,
                                      struct sk_buff *next)
{
        struct sk_buff *first = list->next;
        struct sk_buff *last = list->prev;

        first->prev = prev;
        prev->next = first;

        last->next = next;
        next->prev = last;
}

/**
 *      skb_queue_splice - join two skb lists, this is designed for stacks
 *      @list: the new list to add
 *      @head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
                                    struct sk_buff_head *head)
{
        if (!skb_queue_empty(list)) {
                __skb_queue_splice(list, (struct sk_buff *) head, head->next);
                head->qlen += list->qlen;
        }
}

/**
 *      skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *      @list: the new list to add
 *      @head: the place to add it in the first list
 *
 *      The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
                                         struct sk_buff_head *head)
{
        if (!skb_queue_empty(list)) {
                __skb_queue_splice(list, (struct sk_buff *) head, head->next);
                head->qlen += list->qlen;
                __skb_queue_head_init(list);
        }
}

/**
 *      skb_queue_splice_tail - join two skb lists, each list being a queue
 *      @list: the new list to add
 *      @head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
                                         struct sk_buff_head *head)
{
        if (!skb_queue_empty(list)) {
                __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
                head->qlen += list->qlen;
        }
}

/**
 *      skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *      @list: the new list to add
 *      @head: the place to add it in the first list
 *
 *      Each of the lists is a queue.
 *      The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
                                              struct sk_buff_head *head)
{
        if (!skb_queue_empty(list)) {
                __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
                head->qlen += list->qlen;
                __skb_queue_head_init(list);
        }
}

/**
 *      __skb_queue_after - queue a buffer after another buffer in a list
 *      @list: list to use
 *      @prev: place after this buffer
 *      @newsk: buffer to queue
 *
 *      Queue a buffer in the middle of a list. This function takes no locks
 *      and you must therefore hold required locks before calling it.
 *
 *      A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
                                     struct sk_buff *prev,
                                     struct sk_buff *newsk)
{
        __skb_insert(newsk, prev, prev->next, list);
}

extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
                       struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
                                      struct sk_buff *next,
                                      struct sk_buff *newsk)
{
        __skb_insert(newsk, next->prev, next, list);
}

/**
 *      __skb_queue_head - queue a buffer at the list head
 *      @list: list to use
 *      @newsk: buffer to queue
 *
 *      Queue a buffer at the start of a list. This function takes no locks
 *      and you must therefore hold required locks before calling it.
 *
 *      A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
                                    struct sk_buff *newsk)
{
        __skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *      __skb_queue_tail - queue a buffer at the list tail
 *      @list: list to use
 *      @newsk: buffer to queue
 *
 *      Queue a buffer at the end of a list. This function takes no locks
 *      and you must therefore hold required locks before calling it.
 *
 *      A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
                                   struct sk_buff *newsk)
{
        __skb_queue_before(list, (struct sk_buff *)list, newsk);
}

/*
 * Remove an sk_buff from a list. _Must_ be called atomically, and with
 * the list known.
 */
extern void        skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
        struct sk_buff *next, *prev;

        list->qlen--;
        next       = skb->next;
        prev       = skb->prev;
        skb->next  = skb->prev = NULL;
        next->prev = prev;
        prev->next = next;
}

/**
 *      __skb_dequeue - remove from the head of the queue
 *      @list: list to dequeue from
 *
 *      Remove the head of the list. This function does not take any locks
 *      so must be used with appropriate locks held only. The head item is
 *      returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
        struct sk_buff *skb = skb_peek(list);
        if (skb)
                __skb_unlink(skb, list);
        return skb;
}

/**
 *      __skb_dequeue_tail - remove from the tail of the queue
 *      @list: list to dequeue from
 *
 *      Remove the tail of the list. This function does not take any locks
 *      so must be used with appropriate locks held only. The tail item is
 *      returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
        struct sk_buff *skb = skb_peek_tail(list);
        if (skb)
                __skb_unlink(skb, list);
        return skb;
}


static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
        return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
        return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
        int i, len = 0;

        for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
                len += skb_shinfo(skb)->frags[i].size;
        return len + skb_headlen(skb);
}

static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
                                      struct page *page, int off, int size)
{
        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

        frag->page                = page;
        frag->page_offset         = off;
        frag->size                = size;
        skb_shinfo(skb)->nr_frags = i + 1;
}

extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
                            int off, int size);

#define SKB_PAGE_ASSERT(skb)    BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)    BUG_ON(skb_has_frag_list(skb))
#define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
        return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
        skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
        skb_reset_tail_pointer(skb);
        skb->tail += offset;
}
#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
        return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
        skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
        skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *      Add data to an sk_buff
 */
extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
        unsigned char *tmp = skb_tail_pointer(skb);
        SKB_LINEAR_ASSERT(skb);
        skb->tail += len;
        skb->len  += len;
        return tmp;
}

extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
        skb->data -= len;
        skb->len  += len;
        return skb->data;
}

extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
        skb->len -= len;
        BUG_ON(skb->len < skb->data_len);
        return skb->data += len;
}

static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
        return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
        if (len > skb_headlen(skb) &&
            !__pskb_pull_tail(skb, len - skb_headlen(skb)))
                return NULL;
        skb->len -= len;
        return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
        return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
        if (likely(len <= skb_headlen(skb)))
                return 1;
        if (unlikely(len > skb->len))
                return 0;
        return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
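
/*
 * Illustrative sketch of the usual parsing idiom: make sure the bytes you
 * are about to dereference are in the linear area, then advance skb->data
 * past the header you have consumed (struct myhdr is hypothetical):
 *
 *      struct myhdr *hdr;
 *
 *      if (!pskb_may_pull(skb, sizeof(*hdr)))
 *              goto drop;
 *      hdr = (struct myhdr *)skb->data;
 *      skb_pull(skb, sizeof(*hdr));
 *
 * pskb_may_pull() may linearise part of a paged skb, which is why it can
 * fail and why header pointers must be re-read after calling it.
 */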

/**
 *      skb_headroom - bytes at buffer head
 *      @skb: buffer to check
 *
 *      Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
        return skb->data - skb->head;
}

/**
 *      skb_tailroom - bytes at buffer end
 *      @skb: buffer to check
 *
 *      Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
        return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *      skb_reserve - adjust headroom
 *      @skb: buffer to alter
 *      @len: bytes to move
 *
 *      Increase the headroom of an empty &sk_buff by reducing the tail
 *      room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
        skb->data += len;
        skb->tail += len;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
        return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
        skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
                                            const int offset)
{
        skb_reset_transport_header(skb);
        skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
        return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
        skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
        skb_reset_network_header(skb);
        skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
        return skb->head + skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
        return skb->mac_header != ~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
        skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
        skb_reset_mac_header(skb);
        skb->mac_header += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
        return skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
        skb->transport_header = skb->data;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
                                            const int offset)
{
        skb->transport_header = skb->data + offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
        return skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
        skb->network_header = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
        skb->network_header = skb->data + offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
        return skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
        return skb->mac_header != NULL;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
        skb->mac_header = skb->data;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
        skb->mac_header = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
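
/*
 * Illustrative sketch: on receive, a driver or the core marks the header
 * offsets as it walks the packet (ETH_HLEN comes from <linux/if_ether.h>):
 *
 *      skb_reset_mac_header(skb);      // skb->data is at the ethernet header
 *      skb_pull(skb, ETH_HLEN);
 *      skb_reset_network_header(skb);  // now it is at the IP header
 *
 * Later layers can then use skb_network_header()/skb_transport_header()
 * without caring whether the positions are stored as pointers or offsets.
 */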

static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
        return skb->csum_start - skb_headroom(skb);
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
        return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
        return skb->transport_header - skb->network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
        return skb_network_header(skb) - skb->data;
}

static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
        return pskb_may_pull(skb, skb_network_offset(skb) + len);
}

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies; it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes, network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN    2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 32 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom; you should not reduce this.
 *
 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
 * to reduce the average number of cache lines per packet.
 * get_rps_cpus(), for example, only accesses one 64-byte aligned block:
 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD     max(32, L1_CACHE_BYTES)
#endif
extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
        if (unlikely(skb->data_len)) {
                WARN_ON(1);
                return;
        }
        skb->len = len;
        skb_set_tail_pointer(skb, len);
}

extern void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
        if (skb->data_len)
                return ___pskb_trim(skb, len);
        __skb_trim(skb, len);
        return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
        return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}
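
/*
 * Illustrative sketch: trimming is how a protocol drops trailing bytes it
 * does not want, e.g. ethernet padding beyond the length claimed by an IP
 * header ("tot_len" here is hypothetical, read from that header):
 *
 *      if (skb->len > tot_len && pskb_trim(skb, tot_len))
 *              goto drop;
 *
 * pskb_trim() can return an error for paged skbs whose data is shared,
 * since trimming then has to reallocate.
 */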

/**
 *      pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *      @skb: buffer to alter
 *      @len: new length
 *
 *      This is identical to pskb_trim except that the caller knows that
 *      the skb is not cloned so we should never get an error due to
 *      out-of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
        int err = pskb_trim(skb, len);
        BUG_ON(err);
}

/**
 *      skb_orphan - orphan a buffer
 *      @skb: buffer to orphan
 *
 *      If a buffer currently has an owner then we call the owner's
 *      destructor function and make the @skb unowned. The buffer continues
 *      to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
        if (skb->destructor)
                skb->destructor(skb);
        skb->destructor = NULL;
        skb->sk         = NULL;
}

/**
 *      __skb_queue_purge - empty a list
 *      @list: list to empty
 *
 *      Delete all buffers on an &sk_buff list. Each buffer is removed from
 *      the list and one reference dropped. This function does not take the
 *      list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
        struct sk_buff *skb;
        while ((skb = __skb_dequeue(list)) != NULL)
                kfree_skb(skb);
}

/**
 *      __dev_alloc_skb - allocate an skbuff for receiving
 *      @length: length to allocate
 *      @gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *      Allocate a new &sk_buff and assign it a usage count of one. The
 *      buffer has unspecified headroom built in. Users should allocate
 *      the headroom they think they need without accounting for the
 *      built in space. The built in space is used for optimisations.
 *
 *      %NULL is returned if there is no free memory.
 */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
                                              gfp_t gfp_mask)
{
        struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
        if (likely(skb))
                skb_reserve(skb, NET_SKB_PAD);
        return skb;
}

extern struct sk_buff *dev_alloc_skb(unsigned int length);

extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
                unsigned int length, gfp_t gfp_mask);

/**
 *      netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *      @dev: network device to receive on
 *      @length: length to allocate
 *
 *      Allocate a new &sk_buff and assign it a usage count of one. The
 *      buffer has unspecified headroom built in. Users should allocate
 *      the headroom they think they need without accounting for the
 *      built in space. The built in space is used for optimisations.
 *
 *      %NULL is returned if there is no free memory. Although this function
 *      allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
                unsigned int length)
{
        return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
                unsigned int length)
{
        struct sk_buff *skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN);

        if (NET_IP_ALIGN && skb)
                skb_reserve(skb, NET_IP_ALIGN);
        return skb;
}
1566
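/*
 * Editorial usage sketch, not part of the original header: a minimal,
 * hypothetical receive path ("my_rx" and its arguments are made up)
 * showing the intended use of netdev_alloc_skb_ip_align().
 *
 *	static void my_rx(struct net_device *dev, const void *buf, int len)
 *	{
 *		struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, len);
 *
 *		if (!skb)
 *			return;		out of memory: drop the packet
 *		skb_copy_to_linear_data(skb, buf, len);
 *		skb_put(skb, len);
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);
 *	}
 *
 * The NET_IP_ALIGN reserve shifts the packet so that the IP header lands
 * on an aligned boundary after the 14 byte Ethernet header.
 */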
1567/**
1568 *      __netdev_alloc_page - allocate a page for ps-rx on a specific device
1569 *      @dev: network device to receive on
1570 *      @gfp_mask: alloc_pages_node mask
1571 *
1572 *      Allocate a new page. @dev is currently unused.
1573 *
1574 *      %NULL is returned if there is no free memory.
1575 */
1576static inline struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
1577{
1578        return alloc_pages_node(NUMA_NO_NODE, gfp_mask, 0);
1579}
1580
1581/**
1582 *      netdev_alloc_page - allocate a page for ps-rx on a specific device
1583 *      @dev: network device to receive on
1584 *
1585 *      Allocate a new page. @dev is currently unused.
1586 *
1587 *      %NULL is returned if there is no free memory.
1588 */
1589static inline struct page *netdev_alloc_page(struct net_device *dev)
1590{
1591        return __netdev_alloc_page(dev, GFP_ATOMIC);
1592}
1593
1594static inline void netdev_free_page(struct net_device *dev, struct page *page)
1595{
1596        __free_page(page);
1597}
1598
1599/**
1600 *      skb_clone_writable - is the header of a clone writable
1601 *      @skb: buffer to check
1602 *      @len: length up to which to write
1603 *
1604 *      Returns true if modifying the header part of the cloned buffer
1605 *      does not require the data to be copied.
1606 */
1607static inline int skb_clone_writable(struct sk_buff *skb, unsigned int len)
1608{
1609        return !skb_header_cloned(skb) &&
1610               skb_headroom(skb) + len <= skb->hdr_len;
1611}
1612
1613static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
1614                            int cloned)
1615{
1616        int delta = 0;
1617
1618        if (headroom < NET_SKB_PAD)
1619                headroom = NET_SKB_PAD;
1620        if (headroom > skb_headroom(skb))
1621                delta = headroom - skb_headroom(skb);
1622
1623        if (delta || cloned)
1624                return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
1625                                        GFP_ATOMIC);
1626        return 0;
1627}
1628
1629/**
1630 *      skb_cow - copy header of skb when it is required
1631 *      @skb: buffer to cow
1632 *      @headroom: needed headroom
1633 *
1634 *      If the skb passed lacks sufficient headroom or its data part
1635 *      is shared, the data is reallocated. If reallocation fails, an
1636 *      error is returned and the original skb is not changed.
1637 *
1638 *      The result is an skb whose area from skb->head to skb->tail is
1639 *      writable, with at least @headroom bytes of space at the head.
1640 */
1641static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
1642{
1643        return __skb_cow(skb, headroom, skb_cloned(skb));
1644}
1645
1646/**
1647 *      skb_cow_head - skb_cow but only making the head writable
1648 *      @skb: buffer to cow
1649 *      @headroom: needed headroom
1650 *
1651 *      This function is identical to skb_cow except that we replace the
1652 *      skb_cloned check by skb_header_cloned.  It should be used when
1653 *      you only need to push on some header and do not need to modify
1654 *      the data.
1655 */
1656static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
1657{
1658        return __skb_cow(skb, headroom, skb_header_cloned(skb));
1659}
1660
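/*
 * Editorial usage sketch, not part of the original header: making room
 * for a new header before skb_push(). "MY_HLEN" is a hypothetical
 * header length.
 *
 *	if (skb_cow_head(skb, MY_HLEN))
 *		goto drop;		reallocation failed, -ENOMEM
 *	hdr = skb_push(skb, MY_HLEN);
 *	... fill in hdr ...
 *
 * skb_cow() additionally copies when the data area itself is cloned;
 * skb_cow_head() is the cheaper choice when only header bytes are
 * written.
 */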
1661/**
1662 *      skb_padto       - pad an skbuff up to a minimal size
1663 *      @skb: buffer to pad
1664 *      @len: minimal length
1665 *
1666 *      Pads up a buffer to ensure the trailing bytes exist and are
1667 *      blanked. If the buffer already contains sufficient data it
1668 *      is untouched. Otherwise it is extended. Returns zero on
1669 *      success. The skb is freed on error.
1670 */
1671 
1672static inline int skb_padto(struct sk_buff *skb, unsigned int len)
1673{
1674        unsigned int size = skb->len;
1675        if (likely(size >= len))
1676                return 0;
1677        return skb_pad(skb, len - size);
1678}
1679
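/*
 * Editorial usage sketch, not part of the original header: the classic
 * use of skb_padto() in a driver transmit path, padding runt frames to
 * the Ethernet minimum (ETH_ZLEN). Note that on failure the skb has
 * already been freed, so it must not be touched again:
 *
 *	if (skb_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;	skb already freed
 *	... frame is now at least ETH_ZLEN bytes, hand it to the NIC ...
 */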
1680static inline int skb_add_data(struct sk_buff *skb,
1681                               char __user *from, int copy)
1682{
1683        const int off = skb->len;
1684
1685        if (skb->ip_summed == CHECKSUM_NONE) {
1686                int err = 0;
1687                __wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
1688                                                            copy, 0, &err);
1689                if (!err) {
1690                        skb->csum = csum_block_add(skb->csum, csum, off);
1691                        return 0;
1692                }
1693        } else if (!copy_from_user(skb_put(skb, copy), from, copy))
1694                return 0;
1695
1696        __skb_trim(skb, off);
1697        return -EFAULT;
1698}
1699
1700static inline int skb_can_coalesce(struct sk_buff *skb, int i,
1701                                   struct page *page, int off)
1702{
1703        if (i) {
1704                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1705
1706                return page == frag->page &&
1707                       off == frag->page_offset + frag->size;
1708        }
1709        return 0;
1710}
1711
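/*
 * Editorial usage sketch, not part of the original header: the
 * sendpage-style pattern skb_can_coalesce() supports - grow the last
 * page fragment when the new data is contiguous with it, otherwise
 * start a new fragment ("copy" is the byte count being appended).
 *
 *	int i = skb_shinfo(skb)->nr_frags;
 *
 *	if (skb_can_coalesce(skb, i, page, off)) {
 *		skb_shinfo(skb)->frags[i - 1].size += copy;
 *	} else {
 *		get_page(page);
 *		skb_fill_page_desc(skb, i, page, off, copy);
 *	}
 */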
1712static inline int __skb_linearize(struct sk_buff *skb)
1713{
1714        return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
1715}
1716
1717/**
1718 *      skb_linearize - convert paged skb to linear one
1719 *      @skb: buffer to linearize
1720 *
1721 *      If there is no free memory -ENOMEM is returned, otherwise zero
1722 *      is returned and the old skb data released.
1723 */
1724static inline int skb_linearize(struct sk_buff *skb)
1725{
1726        return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
1727}
1728
1729/**
1730 *      skb_linearize_cow - make sure skb is linear and writable
1731 *      @skb: buffer to process
1732 *
1733 *      If there is no free memory -ENOMEM is returned, otherwise zero
1734 *      is returned and the old skb data released.
1735 */
1736static inline int skb_linearize_cow(struct sk_buff *skb)
1737{
1738        return skb_is_nonlinear(skb) || skb_cloned(skb) ?
1739               __skb_linearize(skb) : 0;
1740}
1741
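/*
 * Editorial usage sketch, not part of the original header: flattening a
 * paged skb before code that expects every byte in the linear area.
 *
 *	if (skb_linearize(skb))
 *		goto drop;		-ENOMEM
 *	... all skb->len bytes are now reachable via skb->data ...
 *
 * Use skb_linearize_cow() instead when the data will also be modified,
 * so that a shared or cloned buffer is copied first.
 */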
1742/**
1743 *      skb_postpull_rcsum - update checksum for received skb after pull
1744 *      @skb: buffer to update
1745 *      @start: start of data before pull
1746 *      @len: length of data pulled
1747 *
1748 *      After doing a pull on a received packet, you need to call this to
1749 *      update the CHECKSUM_COMPLETE checksum, or set ip_summed to
1750 *      CHECKSUM_NONE so that it can be recomputed from scratch.
1751 */
1752
1753static inline void skb_postpull_rcsum(struct sk_buff *skb,
1754                                      const void *start, unsigned int len)
1755{
1756        if (skb->ip_summed == CHECKSUM_COMPLETE)
1757                skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
1758}
1759
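/*
 * Editorial usage sketch, not part of the original header: keeping a
 * CHECKSUM_COMPLETE value correct across a header pull ("hlen" is a
 * hypothetical header length already checked against skb_headlen()).
 *
 *	const void *start = skb->data;
 *
 *	__skb_pull(skb, hlen);
 *	skb_postpull_rcsum(skb, start, hlen);
 *
 * skb_pull_rcsum(), declared below, combines the two steps.
 */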
1760unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
1761
1762/**
1763 *      pskb_trim_rcsum - trim received skb and update checksum
1764 *      @skb: buffer to trim
1765 *      @len: new length
1766 *
1767 *      This is exactly the same as pskb_trim except that it ensures the
1768 *      checksum of received packets is still valid after the operation.
1769 */
1770
1771static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
1772{
1773        if (likely(len >= skb->len))
1774                return 0;
1775        if (skb->ip_summed == CHECKSUM_COMPLETE)
1776                skb->ip_summed = CHECKSUM_NONE;
1777        return __pskb_trim(skb, len);
1778}
1779
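/*
 * Editorial usage sketch, not part of the original header: trimming
 * Ethernet padding from a received IPv4 packet once the real length is
 * known, without invalidating a CHECKSUM_COMPLETE value (the pattern
 * used by the IPv4 input path):
 *
 *	if (pskb_trim_rcsum(skb, ntohs(ip_hdr(skb)->tot_len)))
 *		goto drop;
 */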
1780#define skb_queue_walk(queue, skb) \
1781                for (skb = (queue)->next;                                       \
1782                     prefetch(skb->next), (skb != (struct sk_buff *)(queue));   \
1783                     skb = skb->next)
1784
1785#define skb_queue_walk_safe(queue, skb, tmp)                                    \
1786                for (skb = (queue)->next, tmp = skb->next;                      \
1787                     skb != (struct sk_buff *)(queue);                          \
1788                     skb = tmp, tmp = skb->next)
1789
1790#define skb_queue_walk_from(queue, skb)                                         \
1791                for (; prefetch(skb->next), (skb != (struct sk_buff *)(queue)); \
1792                     skb = skb->next)
1793
1794#define skb_queue_walk_from_safe(queue, skb, tmp)                               \
1795                for (tmp = skb->next;                                           \
1796                     skb != (struct sk_buff *)(queue);                          \
1797                     skb = tmp, tmp = skb->next)
1798
1799#define skb_queue_reverse_walk(queue, skb) \
1800                for (skb = (queue)->prev;                                       \
1801                     prefetch(skb->prev), (skb != (struct sk_buff *)(queue));   \
1802                     skb = skb->prev)
1803
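/*
 * Editorial usage sketch, not part of the original header: use the
 * _safe variants when the loop may unlink the current skb, because the
 * plain walk would chase a dangling ->next. A minimal sketch, assuming
 * a hypothetical match() predicate and that the caller holds the queue
 * lock:
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_queue_walk_safe(&queue, skb, tmp) {
 *		if (match(skb)) {
 *			__skb_unlink(skb, &queue);
 *			kfree_skb(skb);
 *		}
 *	}
 */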
1804
1805static inline bool skb_has_frag_list(const struct sk_buff *skb)
1806{
1807        return skb_shinfo(skb)->frag_list != NULL;
1808}
1809
1810static inline void skb_frag_list_init(struct sk_buff *skb)
1811{
1812        skb_shinfo(skb)->frag_list = NULL;
1813}
1814
1815static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
1816{
1817        frag->next = skb_shinfo(skb)->frag_list;
1818        skb_shinfo(skb)->frag_list = frag;
1819}
1820
1821#define skb_walk_frags(skb, iter)       \
1822        for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
1823
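/*
 * Editorial usage sketch, not part of the original header: walking the
 * frag_list of a fragmented skb, e.g. to total up the sub-buffer sizes:
 *
 *	struct sk_buff *iter;
 *	unsigned int total = 0;
 *
 *	skb_walk_frags(skb, iter)
 *		total += iter->len;
 */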
1824extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
1825                                           int *peeked, int *err);
1826extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
1827                                         int noblock, int *err);
1828extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
1829                                     struct poll_table_struct *wait);
1830extern int             skb_copy_datagram_iovec(const struct sk_buff *from,
1831                                               int offset, struct iovec *to,
1832                                               int size);
1833extern int             skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
1834                                                        int hlen,
1835                                                        struct iovec *iov);
1836extern int             skb_copy_datagram_from_iovec(struct sk_buff *skb,
1837                                                    int offset,
1838                                                    const struct iovec *from,
1839                                                    int from_offset,
1840                                                    int len);
1841extern int             skb_copy_datagram_const_iovec(const struct sk_buff *from,
1842                                                     int offset,
1843                                                     const struct iovec *to,
1844                                                     int to_offset,
1845                                                     int size);
1846extern void            skb_free_datagram(struct sock *sk, struct sk_buff *skb);
1847extern void            skb_free_datagram_locked(struct sock *sk,
1848                                                struct sk_buff *skb);
1849extern int             skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
1850                                         unsigned int flags);
1851extern __wsum          skb_checksum(const struct sk_buff *skb, int offset,
1852                                    int len, __wsum csum);
1853extern int             skb_copy_bits(const struct sk_buff *skb, int offset,
1854                                     void *to, int len);
1855extern int             skb_store_bits(struct sk_buff *skb, int offset,
1856                                      const void *from, int len);
1857extern __wsum          skb_copy_and_csum_bits(const struct sk_buff *skb,
1858                                              int offset, u8 *to, int len,
1859                                              __wsum csum);
1860extern int             skb_splice_bits(struct sk_buff *skb,
1861                                                unsigned int offset,
1862                                                struct pipe_inode_info *pipe,
1863                                                unsigned int len,
1864                                                unsigned int flags);
1865extern void            skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
1866extern void            skb_split(struct sk_buff *skb,
1867                                 struct sk_buff *skb1, const u32 len);
1868extern int             skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
1869                                 int shiftlen);
1870
1871extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
1872
1873static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
1874                                       int len, void *buffer)
1875{
1876        int hlen = skb_headlen(skb);
1877
1878        if (hlen - offset >= len)
1879                return skb->data + offset;
1880
1881        if (skb_copy_bits(skb, offset, buffer, len) < 0)
1882                return NULL;
1883
1884        return buffer;
1885}
1886
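/*
 * Editorial usage sketch, not part of the original header: the common
 * skb_header_pointer() pattern - parse a header that may not be
 * contiguous in the linear area, falling back to a stack copy ("thoff"
 * is a hypothetical transport header offset).
 *
 *	struct tcphdr _tcph;
 *	const struct tcphdr *th;
 *
 *	th = skb_header_pointer(skb, thoff, sizeof(_tcph), &_tcph);
 *	if (th == NULL)
 *		return;			packet too short
 *	... read th->source, th->dest, etc.; never write through th ...
 */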
1887static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
1888                                             void *to,
1889                                             const unsigned int len)
1890{
1891        memcpy(to, skb->data, len);
1892}
1893
1894static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
1895                                                    const int offset, void *to,
1896                                                    const unsigned int len)
1897{
1898        memcpy(to, skb->data + offset, len);
1899}
1900
1901static inline void skb_copy_to_linear_data(struct sk_buff *skb,
1902                                           const void *from,
1903                                           const unsigned int len)
1904{
1905        memcpy(skb->data, from, len);
1906}
1907
1908static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
1909                                                  const int offset,
1910                                                  const void *from,
1911                                                  const unsigned int len)
1912{
1913        memcpy(skb->data + offset, from, len);
1914}
1915
1916extern void skb_init(void);
1917
1918static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
1919{
1920        return skb->tstamp;
1921}
1922
1923/**
1924 *      skb_get_timestamp - get timestamp from a skb
1925 *      @skb: skb to get stamp from
1926 *      @stamp: pointer to struct timeval to store stamp in
1927 *
1928 *      The timestamp is stored in the skb as a ktime_t value.
1929 *      This function converts it to a struct timeval and stores
1930 *      it in @stamp.
1931 */
1932static inline void skb_get_timestamp(const struct sk_buff *skb,
1933                                     struct timeval *stamp)
1934{
1935        *stamp = ktime_to_timeval(skb->tstamp);
1936}
1937
1938static inline void skb_get_timestampns(const struct sk_buff *skb,
1939                                       struct timespec *stamp)
1940{
1941        *stamp = ktime_to_timespec(skb->tstamp);
1942}
1943
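/*
 * Editorial usage sketch, not part of the original header: reading a
 * packet's receive timestamp. A ktime of 0 (net_invalid_timestamp())
 * means no stamp was taken for this skb.
 *
 *	struct timeval tv;
 *
 *	skb_get_timestamp(skb, &tv);
 *	if (tv.tv_sec == 0 && tv.tv_usec == 0)
 *		... no timestamp available ...
 */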
1944static inline void __net_timestamp(struct sk_buff *skb)
1945{
1946        skb->tstamp = ktime_get_real();
1947}
1948
1949static inline ktime_t net_timedelta(ktime_t t)
1950{
1951        return ktime_sub(ktime_get_real(), t);
1952}
1953
1954static inline ktime_t net_invalid_timestamp(void)
1955{
1956        return ktime_set(0, 0);
1957}
1958
1959extern void skb_timestamping_init(void);
1960
1961#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
1962
1963extern void skb_clone_tx_timestamp(struct sk_buff *skb);
1964extern bool skb_defer_rx_timestamp(struct sk_buff *skb);
1965
1966#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */
1967
1968static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
1969{
1970}
1971
1972static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
1973{
1974        return false;
1975}
1976
1977#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */
1978
1979/**
1980 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
1981 *
1982 * @skb: clone of the original outgoing packet
1983 * @hwtstamps: hardware time stamps
1984 *
1985 */
1986void skb_complete_tx_timestamp(struct sk_buff *skb,
1987                               struct skb_shared_hwtstamps *hwtstamps);
1988
1989/**
1990 * skb_tstamp_tx - queue clone of skb with send time stamps
1991 * @orig_skb:   the original outgoing packet
1992 * @hwtstamps:  hardware time stamps, may be NULL if not available
1993 *
1994 * If the skb has a socket associated with it, this function clones the
1995 * skb (thus sharing the actual data and optional structures), stores
1996 * the hardware time stamping information (if non-NULL) or generates a
1997 * software time stamp (otherwise), then queues the clone on the
1998 * socket's error queue.  Errors are silently ignored.
1999 */
2000extern void skb_tstamp_tx(struct sk_buff *orig_skb,
2001                        struct skb_shared_hwtstamps *hwtstamps);
2002
2003static inline void sw_tx_timestamp(struct sk_buff *skb)
2004{
2005        if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
2006            !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
2007                skb_tstamp_tx(skb, NULL);
2008}
2009
2010/**
2011 * skb_tx_timestamp() - Driver hook for transmit timestamping
2012 *
2013 * Ethernet MAC drivers should call this function in their hard_start_xmit()
2014 * function as soon as possible after giving the sk_buff to the MAC
2015 * hardware, but before freeing the sk_buff.
2016 *
2017 * @skb: A socket buffer.
2018 */
2019static inline void skb_tx_timestamp(struct sk_buff *skb)
2020{
2021        skb_clone_tx_timestamp(skb);
2022        sw_tx_timestamp(skb);
2023}
2024
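/*
 * Editorial usage sketch, not part of the original header: where the
 * hook sits in a hypothetical driver transmit routine ("my_start_xmit"
 * is made up):
 *
 *	static netdev_tx_t my_start_xmit(struct sk_buff *skb,
 *					 struct net_device *dev)
 *	{
 *		... post skb to the hardware ring ...
 *		skb_tx_timestamp(skb);	after posting, before any free
 *		return NETDEV_TX_OK;
 *	}
 */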
2025extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
2026extern __sum16 __skb_checksum_complete(struct sk_buff *skb);
2027
2028static inline int skb_csum_unnecessary(const struct sk_buff *skb)
2029{
2030        return skb->ip_summed & CHECKSUM_UNNECESSARY;
2031}
2032
2033/**
2034 *      skb_checksum_complete - Calculate checksum of an entire packet
2035 *      @skb: packet to process
2036 *
2037 *      This function calculates the checksum over the entire packet plus
2038 *      the value of skb->csum.  The latter can be used to supply the
2039 *      checksum of a pseudo header as used by TCP/UDP.  It returns the
2040 *      checksum.
2041 *
2042 *      For protocols that contain complete checksums such as ICMP/TCP/UDP,
2043 *      this function can be used to verify the checksum on received
2044 *      packets.  In that case the function should return zero if the
2045 *      checksum is correct.  In particular, this function will return zero
2046 *      if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
2047 *      hardware has already verified the correctness of the checksum.
2048 */
2049static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
2050{
2051        return skb_csum_unnecessary(skb) ?
2052               0 : __skb_checksum_complete(skb);
2053}
2054
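/*
 * Editorial usage sketch, not part of the original header: a rough,
 * UDP-style verification. skb->csum is seeded with the pseudo header
 * sum so that a correct packet folds to zero:
 *
 *	skb->csum = csum_tcpudp_nofold(saddr, daddr, skb->len,
 *				       IPPROTO_UDP, 0);
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;	bad checksum, drop
 */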
2055#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2056extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
2057static inline void nf_conntrack_put(struct nf_conntrack *nfct)
2058{
2059        if (nfct && atomic_dec_and_test(&nfct->use))
2060                nf_conntrack_destroy(nfct);
2061}
2062static inline void nf_conntrack_get(struct nf_conntrack *nfct)
2063{
2064        if (nfct)
2065                atomic_inc(&nfct->use);
2066}
2067#endif
2068#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
2069static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
2070{
2071        if (skb)
2072                atomic_inc(&skb->users);
2073}
2074static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
2075{
2076        if (skb)
2077                kfree_skb(skb);
2078}
2079#endif
2080#ifdef CONFIG_BRIDGE_NETFILTER
2081static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
2082{
2083        if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
2084                kfree(nf_bridge);
2085}
2086static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
2087{
2088        if (nf_bridge)
2089                atomic_inc(&nf_bridge->use);
2090}
2091#endif /* CONFIG_BRIDGE_NETFILTER */
2092static inline void nf_reset(struct sk_buff *skb)
2093{
2094#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2095        nf_conntrack_put(skb->nfct);
2096        skb->nfct = NULL;
2097#endif
2098#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
2099        nf_conntrack_put_reasm(skb->nfct_reasm);
2100        skb->nfct_reasm = NULL;
2101#endif
2102#ifdef CONFIG_BRIDGE_NETFILTER
2103        nf_bridge_put(skb->nf_bridge);
2104        skb->nf_bridge = NULL;
2105#endif
2106}
2107
2108/* Note: This doesn't put any conntrack and bridge info in dst. */
2109static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
2110{
2111#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2112        dst->nfct = src->nfct;
2113        nf_conntrack_get(src->nfct);
2114        dst->nfctinfo = src->nfctinfo;
2115#endif
2116#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
2117        dst->nfct_reasm = src->nfct_reasm;
2118        nf_conntrack_get_reasm(src->nfct_reasm);
2119#endif
2120#ifdef CONFIG_BRIDGE_NETFILTER
2121        dst->nf_bridge  = src->nf_bridge;
2122        nf_bridge_get(src->nf_bridge);
2123#endif
2124}
2125
2126static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
2127{
2128#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2129        nf_conntrack_put(dst->nfct);
2130#endif
2131#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
2132        nf_conntrack_put_reasm(dst->nfct_reasm);
2133#endif
2134#ifdef CONFIG_BRIDGE_NETFILTER
2135        nf_bridge_put(dst->nf_bridge);
2136#endif
2137        __nf_copy(dst, src);
2138}
2139
2140#ifdef CONFIG_NETWORK_SECMARK
2141static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
2142{
2143        to->secmark = from->secmark;
2144}
2145
2146static inline void skb_init_secmark(struct sk_buff *skb)
2147{
2148        skb->secmark = 0;
2149}
2150#else
2151static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
2152{ }
2153
2154static inline void skb_init_secmark(struct sk_buff *skb)
2155{ }
2156#endif
2157
2158static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
2159{
2160        skb->queue_mapping = queue_mapping;
2161}
2162
2163static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
2164{
2165        return skb->queue_mapping;
2166}
2167
2168static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
2169{
2170        to->queue_mapping = from->queue_mapping;
2171}
2172
2173static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
2174{
2175        skb->queue_mapping = rx_queue + 1;
2176}
2177
2178static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
2179{
2180        return skb->queue_mapping - 1;
2181}
2182
2183static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
2184{
2185        return skb->queue_mapping != 0;
2186}
2187
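/*
 * Editorial usage sketch, not part of the original header: a multiqueue
 * driver records the hardware rx queue ("ring->index" is hypothetical)
 * so a later transmit queue choice can keep the flow on the same queue:
 *
 *	skb_record_rx_queue(skb, ring->index);	in the rx handler
 *	...
 *	if (skb_rx_queue_recorded(skb))
 *		txq = skb_get_rx_queue(skb);
 */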
2188extern u16 __skb_tx_hash(const struct net_device *dev,
2189                         const struct sk_buff *skb,
2190                         unsigned int num_tx_queues);
2191
2192#ifdef CONFIG_XFRM
2193static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
2194{
2195        return skb->sp;
2196}
2197#else
2198static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
2199{
2200        return NULL;
2201}
2202#endif
2203
2204static inline int skb_is_gso(const struct sk_buff *skb)
2205{
2206        return skb_shinfo(skb)->gso_size;
2207}
2208
2209static inline int skb_is_gso_v6(const struct sk_buff *skb)
2210{
2211        return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
2212}
2213
2214extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);
2215
2216static inline bool skb_warn_if_lro(const struct sk_buff *skb)
2217{
2218        /* LRO sets gso_size but not gso_type, whereas if GSO is really
2219         * wanted then gso_type will be set. */
2220        struct skb_shared_info *shinfo = skb_shinfo(skb);
2221        if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
2222            unlikely(shinfo->gso_type == 0)) {
2223                __skb_warn_lro_forwarding(skb);
2224                return true;
2225        }
2226        return false;
2227}
2228
2229static inline void skb_forward_csum(struct sk_buff *skb)
2230{
2231        /* Unfortunately we don't support this one.  Any brave souls? */
2232        if (skb->ip_summed == CHECKSUM_COMPLETE)
2233                skb->ip_summed = CHECKSUM_NONE;
2234}
2235
2236/**
2237 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
2238 * @skb: skb to check
2239 *
2240 * Fresh skbs have their ip_summed set to CHECKSUM_NONE.
2241 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
2242 * use this helper to document places where we make this assertion.
2243 */
2244static inline void skb_checksum_none_assert(struct sk_buff *skb)
2245{
2246#ifdef DEBUG
2247        BUG_ON(skb->ip_summed != CHECKSUM_NONE);
2248#endif
2249}
2250
2251bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
2252#endif  /* __KERNEL__ */
2253#endif  /* _LINUX_SKBUFF_H */
2254