linux/include/linux/skbuff.h
   1/*
   2 *      Definitions for the 'struct sk_buff' memory handlers.
   3 *
   4 *      Authors:
   5 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
   6 *              Florian La Roche, <rzsfl@rz.uni-sb.de>
   7 *
   8 *      This program is free software; you can redistribute it and/or
   9 *      modify it under the terms of the GNU General Public License
  10 *      as published by the Free Software Foundation; either version
  11 *      2 of the License, or (at your option) any later version.
  12 */
  13
  14#ifndef _LINUX_SKBUFF_H
  15#define _LINUX_SKBUFF_H
  16
  17#include <linux/kernel.h>
  18#include <linux/kmemcheck.h>
  19#include <linux/compiler.h>
  20#include <linux/time.h>
  21#include <linux/cache.h>
  22
  23#include <linux/atomic.h>
  24#include <asm/types.h>
  25#include <linux/spinlock.h>
  26#include <linux/net.h>
  27#include <linux/textsearch.h>
  28#include <net/checksum.h>
  29#include <linux/rcupdate.h>
  30#include <linux/dmaengine.h>
  31#include <linux/hrtimer.h>
  32#include <linux/dma-mapping.h>
  33#include <linux/netdev_features.h>
  34
  35/* Don't change this without changing skb_csum_unnecessary! */
  36#define CHECKSUM_NONE 0
  37#define CHECKSUM_UNNECESSARY 1
  38#define CHECKSUM_COMPLETE 2
  39#define CHECKSUM_PARTIAL 3
  40
  41#define SKB_DATA_ALIGN(X)       (((X) + (SMP_CACHE_BYTES - 1)) & \
  42                                 ~(SMP_CACHE_BYTES - 1))
  43#define SKB_WITH_OVERHEAD(X)    \
  44        ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
  45#define SKB_MAX_ORDER(X, ORDER) \
  46        SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
  47#define SKB_MAX_HEAD(X)         (SKB_MAX_ORDER((X), 0))
  48#define SKB_MAX_ALLOC           (SKB_MAX_ORDER(0, 2))
  49
  50/* return minimum truesize of one skb containing X bytes of data */
  51#define SKB_TRUESIZE(X) ((X) +                                          \
  52                         SKB_DATA_ALIGN(sizeof(struct sk_buff)) +       \
  53                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
  54
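/*
 * Worked example of the macros above (a sketch assuming SMP_CACHE_BYTES == 64,
 * which is typical on x86):
 *
 *	SKB_DATA_ALIGN(100) == (100 + 63) & ~63 == 128
 *	SKB_TRUESIZE(100)   == 100
 *	                       + SKB_DATA_ALIGN(sizeof(struct sk_buff))
 *	                       + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 *
 * The struct sizes depend on the kernel configuration, so the second value is
 * not a fixed number; the point is that truesize charges for the metadata
 * overhead as well as for the data itself.
 */
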
  55/* A. Checksumming of received packets by device.
  56 *
  57 *      NONE: device failed to checksum this packet.
  58 *              skb->csum is undefined.
  59 *
   60 *      UNNECESSARY: device parsed the packet and verified its checksum.
   61 *              skb->csum is undefined.
   62 *            This is a bad option, but, unfortunately, many vendors do it.
   63 *            Apparently with the secret goal of selling you a new device
   64 *            when you add a new protocol to your host, e.g. IPv6. 8)
  65 *
  66 *      COMPLETE: the most generic way. Device supplied checksum of _all_
  67 *          the packet as seen by netif_rx in skb->csum.
   68 *          NOTE: Even if the device supports only some protocols but
  69 *          is able to produce some skb->csum, it MUST use COMPLETE,
  70 *          not UNNECESSARY.
  71 *
  72 *      PARTIAL: identical to the case for output below.  This may occur
  73 *          on a packet received directly from another Linux OS, e.g.,
  74 *          a virtualised Linux kernel on the same host.  The packet can
  75 *          be treated in the same way as UNNECESSARY except that on
  76 *          output (i.e., forwarding) the checksum must be filled in
  77 *          by the OS or the hardware.
  78 *
  79 * B. Checksumming on output.
  80 *
  81 *      NONE: skb is checksummed by protocol or csum is not required.
  82 *
  83 *      PARTIAL: device is required to csum packet as seen by hard_start_xmit
  84 *      from skb->csum_start to the end and to record the checksum
  85 *      at skb->csum_start + skb->csum_offset.
  86 *
  87 *      Device must show its capabilities in dev->features, set
  88 *      at device setup time.
   89 *      NETIF_F_HW_CSUM - the device is clever, it is able to checksum
   90 *                        everything.
   91 *      NETIF_F_IP_CSUM - the device is dumb. It is able to csum only
   92 *                        TCP/UDP over IPv4. Sigh. Vendors like it this
   93 *                        way for some unknown reason. Though, see the comment
   94 *                        above about CHECKSUM_UNNECESSARY. 8)
   95 *      NETIF_F_IPV6_CSUM - about as dumb as the last one, but does IPv6 instead.
  96 *
  97 *      Any questions? No questions, good.              --ANK
  98 */
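
/*
 * Illustrative sketch of how the values above are typically used
 * (hw_reported_csum stands in for whatever the driver reads from its
 * hardware descriptor; it is not a real kernel symbol):
 *
 *   Receive, device checksummed the whole packet:
 *	skb->csum = hw_reported_csum;
 *	skb->ip_summed = CHECKSUM_COMPLETE;
 *
 *   Receive, device only claims "checksum verified":
 *	skb->ip_summed = CHECKSUM_UNNECESSARY;
 *
 *   Transmit, ask the hardware to finish the checksum (TCP case):
 *	skb->ip_summed = CHECKSUM_PARTIAL;
 *	skb->csum_start = skb_transport_header(skb) - skb->head;
 *	skb->csum_offset = offsetof(struct tcphdr, check);
 *
 *   The device then writes the result at csum_start + csum_offset.
 */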
  99
 100struct net_device;
 101struct scatterlist;
 102struct pipe_inode_info;
 103
 104#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 105struct nf_conntrack {
 106        atomic_t use;
 107};
 108#endif
 109
 110#ifdef CONFIG_BRIDGE_NETFILTER
 111struct nf_bridge_info {
 112        atomic_t use;
 113        struct net_device *physindev;
 114        struct net_device *physoutdev;
 115        unsigned int mask;
 116        unsigned long data[32 / sizeof(unsigned long)];
 117};
 118#endif
 119
 120struct sk_buff_head {
 121        /* These two members must be first. */
 122        struct sk_buff  *next;
 123        struct sk_buff  *prev;
 124
 125        __u32           qlen;
 126        spinlock_t      lock;
 127};
 128
 129struct sk_buff;
 130
  131/* To allow a 64K frame to be packed as a single skb without a frag_list,
  132 * we require 64K/PAGE_SIZE pages plus 1 additional page to allow for
  133 * buffers which do not start on a page boundary.
 134 *
 135 * Since GRO uses frags we allocate at least 16 regardless of page
 136 * size.
 137 */
 138#if (65536/PAGE_SIZE + 1) < 16
 139#define MAX_SKB_FRAGS 16UL
 140#else
 141#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
 142#endif
 143
 144typedef struct skb_frag_struct skb_frag_t;
 145
 146struct skb_frag_struct {
 147        struct {
 148                struct page *p;
 149        } page;
 150#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
 151        __u32 page_offset;
 152        __u32 size;
 153#else
 154        __u16 page_offset;
 155        __u16 size;
 156#endif
 157};
 158
 159static inline unsigned int skb_frag_size(const skb_frag_t *frag)
 160{
 161        return frag->size;
 162}
 163
 164static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
 165{
 166        frag->size = size;
 167}
 168
 169static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
 170{
 171        frag->size += delta;
 172}
 173
 174static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
 175{
 176        frag->size -= delta;
 177}
 178
 179#define HAVE_HW_TIME_STAMP
 180
 181/**
 182 * struct skb_shared_hwtstamps - hardware time stamps
 183 * @hwtstamp:   hardware time stamp transformed into duration
 184 *              since arbitrary point in time
 185 * @syststamp:  hwtstamp transformed to system time base
 186 *
 187 * Software time stamps generated by ktime_get_real() are stored in
 188 * skb->tstamp. The relation between the different kinds of time
 189 * stamps is as follows:
 190 *
 191 * syststamp and tstamp can be compared against each other in
 192 * arbitrary combinations.  The accuracy of a
 193 * syststamp/tstamp/"syststamp from other device" comparison is
 194 * limited by the accuracy of the transformation into system time
 195 * base. This depends on the device driver and its underlying
 196 * hardware.
 197 *
 198 * hwtstamps can only be compared against other hwtstamps from
 199 * the same device.
 200 *
 201 * This structure is attached to packets as part of the
 202 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 203 */
 204struct skb_shared_hwtstamps {
 205        ktime_t hwtstamp;
 206        ktime_t syststamp;
 207};
 208
 209/* Definitions for tx_flags in struct skb_shared_info */
 210enum {
 211        /* generate hardware time stamp */
 212        SKBTX_HW_TSTAMP = 1 << 0,
 213
 214        /* generate software time stamp */
 215        SKBTX_SW_TSTAMP = 1 << 1,
 216
 217        /* device driver is going to provide hardware time stamp */
 218        SKBTX_IN_PROGRESS = 1 << 2,
 219
 220        /* ensure the originating sk reference is available on driver level */
 221        SKBTX_DRV_NEEDS_SK_REF = 1 << 3,
 222
 223        /* device driver supports TX zero-copy buffers */
 224        SKBTX_DEV_ZEROCOPY = 1 << 4,
 225
 226        /* generate wifi status information (where possible) */
 227        SKBTX_WIFI_STATUS = 1 << 5,
 228};
 229
 230/*
  231 * The callback notifies userspace to release buffers when skb DMA is done
  232 * in the lower device; the skb's last reference should be 0 when this is
  233 * called.  The desc field is used to track the userspace buffer index.
 234 */
 235struct ubuf_info {
 236        void (*callback)(void *);
 237        void *arg;
 238        unsigned long desc;
 239};
 240
 241/* This data is invariant across clones and lives at
 242 * the end of the header data, ie. at skb->end.
 243 */
 244struct skb_shared_info {
 245        unsigned char   nr_frags;
 246        __u8            tx_flags;
 247        unsigned short  gso_size;
 248        /* Warning: this field is not always filled in (UFO)! */
 249        unsigned short  gso_segs;
 250        unsigned short  gso_type;
 251        struct sk_buff  *frag_list;
 252        struct skb_shared_hwtstamps hwtstamps;
 253        __be32          ip6_frag_id;
 254
 255        /*
 256         * Warning : all fields before dataref are cleared in __alloc_skb()
 257         */
 258        atomic_t        dataref;
 259
 260        /* Intermediate layers must ensure that destructor_arg
 261         * remains valid until skb destructor */
 262        void *          destructor_arg;
 263
 264        /* must be last field, see pskb_expand_head() */
 265        skb_frag_t      frags[MAX_SKB_FRAGS];
 266};
 267
 268/* We divide dataref into two halves.  The higher 16 bits hold references
 269 * to the payload part of skb->data.  The lower 16 bits hold references to
 270 * the entire skb->data.  A clone of a headerless skb holds the length of
 271 * the header in skb->hdr_len.
 272 *
 273 * All users must obey the rule that the skb->data reference count must be
 274 * greater than or equal to the payload reference count.
 275 *
 276 * Holding a reference to the payload part means that the user does not
 277 * care about modifications to the header part of skb->data.
 278 */
 279#define SKB_DATAREF_SHIFT 16
 280#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
 281
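/*
 * A minimal sketch of reading the two halves described above;
 * example_dataref_split() is a hypothetical helper, not part of this API.
 */
static inline void example_dataref_split(struct skb_shared_info *shinfo,
					 int *payload_refs, int *total_refs)
{
	int dataref = atomic_read(&shinfo->dataref);

	/* high 16 bits: references to the payload only (header released) */
	*payload_refs = dataref >> SKB_DATAREF_SHIFT;
	/* low 16 bits: references to the entire skb->data */
	*total_refs   = dataref & SKB_DATAREF_MASK;
}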
 282
 283enum {
 284        SKB_FCLONE_UNAVAILABLE,
 285        SKB_FCLONE_ORIG,
 286        SKB_FCLONE_CLONE,
 287};
 288
 289enum {
 290        SKB_GSO_TCPV4 = 1 << 0,
 291        SKB_GSO_UDP = 1 << 1,
 292
 293        /* This indicates the skb is from an untrusted source. */
 294        SKB_GSO_DODGY = 1 << 2,
 295
 296        /* This indicates the tcp segment has CWR set. */
 297        SKB_GSO_TCP_ECN = 1 << 3,
 298
 299        SKB_GSO_TCPV6 = 1 << 4,
 300
 301        SKB_GSO_FCOE = 1 << 5,
 302};
 303
 304#if BITS_PER_LONG > 32
 305#define NET_SKBUFF_DATA_USES_OFFSET 1
 306#endif
 307
 308#ifdef NET_SKBUFF_DATA_USES_OFFSET
 309typedef unsigned int sk_buff_data_t;
 310#else
 311typedef unsigned char *sk_buff_data_t;
 312#endif
 313
 314#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
 315    defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
 316#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
 317#endif
 318
 319/** 
 320 *      struct sk_buff - socket buffer
 321 *      @next: Next buffer in list
 322 *      @prev: Previous buffer in list
 323 *      @tstamp: Time we arrived
 324 *      @sk: Socket we are owned by
 325 *      @dev: Device we arrived on/are leaving by
 326 *      @cb: Control buffer. Free for use by every layer. Put private vars here
 327 *      @_skb_refdst: destination entry (with norefcount bit)
 328 *      @sp: the security path, used for xfrm
 329 *      @len: Length of actual data
 330 *      @data_len: Data length
 331 *      @mac_len: Length of link layer header
 332 *      @hdr_len: writable header length of cloned skb
 333 *      @csum: Checksum (must include start/offset pair)
 334 *      @csum_start: Offset from skb->head where checksumming should start
 335 *      @csum_offset: Offset from csum_start where checksum should be stored
 336 *      @priority: Packet queueing priority
 337 *      @local_df: allow local fragmentation
 338 *      @cloned: Head may be cloned (check refcnt to be sure)
 339 *      @ip_summed: Driver fed us an IP checksum
 340 *      @nohdr: Payload reference only, must not modify header
 341 *      @nfctinfo: Relationship of this skb to the connection
 342 *      @pkt_type: Packet class
 343 *      @fclone: skbuff clone status
 344 *      @ipvs_property: skbuff is owned by ipvs
 345 *      @peeked: this packet has been seen already, so stats have been
 346 *              done for it, don't do them again
 347 *      @nf_trace: netfilter packet trace flag
 348 *      @protocol: Packet protocol from driver
 349 *      @destructor: Destruct function
 350 *      @nfct: Associated connection, if any
 351 *      @nfct_reasm: netfilter conntrack re-assembly pointer
 352 *      @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 353 *      @skb_iif: ifindex of device we arrived on
 354 *      @tc_index: Traffic control index
 355 *      @tc_verd: traffic control verdict
 356 *      @rxhash: the packet hash computed on receive
 357 *      @queue_mapping: Queue mapping for multiqueue devices
 358 *      @ndisc_nodetype: router type (from link layer)
 359 *      @ooo_okay: allow the mapping of a socket to a queue to be changed
 360 *      @l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport
 361 *              ports.
 362 *      @wifi_acked_valid: wifi_acked was set
 363 *      @wifi_acked: whether frame was acked on wifi or not
 364 *      @dma_cookie: a cookie to one of several possible DMA operations
 365 *              done by skb DMA functions
 366 *      @secmark: security marking
 367 *      @mark: Generic packet mark
 368 *      @dropcount: total number of sk_receive_queue overflows
 369 *      @vlan_tci: vlan tag control information
 370 *      @transport_header: Transport layer header
 371 *      @network_header: Network layer header
 372 *      @mac_header: Link layer header
 373 *      @tail: Tail pointer
 374 *      @end: End pointer
 375 *      @head: Head of buffer
 376 *      @data: Data head pointer
 377 *      @truesize: Buffer size
 378 *      @users: User count - see {datagram,tcp}.c
 379 */
 380
 381struct sk_buff {
 382        /* These two members must be first. */
 383        struct sk_buff          *next;
 384        struct sk_buff          *prev;
 385
 386        ktime_t                 tstamp;
 387
 388        struct sock             *sk;
 389        struct net_device       *dev;
 390
 391        /*
 392         * This is the control buffer. It is free to use for every
 393         * layer. Please put your private variables there. If you
 394         * want to keep them across layers you have to do a skb_clone()
 395         * first. This is owned by whoever has the skb queued ATM.
 396         */
 397        char                    cb[48] __aligned(8);
 398
 399        unsigned long           _skb_refdst;
 400#ifdef CONFIG_XFRM
 401        struct  sec_path        *sp;
 402#endif
 403        unsigned int            len,
 404                                data_len;
 405        __u16                   mac_len,
 406                                hdr_len;
 407        union {
 408                __wsum          csum;
 409                struct {
 410                        __u16   csum_start;
 411                        __u16   csum_offset;
 412                };
 413        };
 414        __u32                   priority;
 415        kmemcheck_bitfield_begin(flags1);
 416        __u8                    local_df:1,
 417                                cloned:1,
 418                                ip_summed:2,
 419                                nohdr:1,
 420                                nfctinfo:3;
 421        __u8                    pkt_type:3,
 422                                fclone:2,
 423                                ipvs_property:1,
 424                                peeked:1,
 425                                nf_trace:1;
 426        kmemcheck_bitfield_end(flags1);
 427        __be16                  protocol;
 428
 429        void                    (*destructor)(struct sk_buff *skb);
 430#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 431        struct nf_conntrack     *nfct;
 432#endif
 433#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
 434        struct sk_buff          *nfct_reasm;
 435#endif
 436#ifdef CONFIG_BRIDGE_NETFILTER
 437        struct nf_bridge_info   *nf_bridge;
 438#endif
 439
 440        int                     skb_iif;
 441#ifdef CONFIG_NET_SCHED
 442        __u16                   tc_index;       /* traffic control index */
 443#ifdef CONFIG_NET_CLS_ACT
 444        __u16                   tc_verd;        /* traffic control verdict */
 445#endif
 446#endif
 447
 448        __u32                   rxhash;
 449
 450        __u16                   queue_mapping;
 451        kmemcheck_bitfield_begin(flags2);
 452#ifdef CONFIG_IPV6_NDISC_NODETYPE
 453        __u8                    ndisc_nodetype:2;
 454#endif
 455        __u8                    ooo_okay:1;
 456        __u8                    l4_rxhash:1;
 457        __u8                    wifi_acked_valid:1;
 458        __u8                    wifi_acked:1;
 459        /* 10/12 bit hole (depending on ndisc_nodetype presence) */
 460        kmemcheck_bitfield_end(flags2);
 461
 462#ifdef CONFIG_NET_DMA
 463        dma_cookie_t            dma_cookie;
 464#endif
 465#ifdef CONFIG_NETWORK_SECMARK
 466        __u32                   secmark;
 467#endif
 468        union {
 469                __u32           mark;
 470                __u32           dropcount;
 471        };
 472
 473        __u16                   vlan_tci;
 474
 475        sk_buff_data_t          transport_header;
 476        sk_buff_data_t          network_header;
 477        sk_buff_data_t          mac_header;
 478        /* These elements must be at the end, see alloc_skb() for details.  */
 479        sk_buff_data_t          tail;
 480        sk_buff_data_t          end;
 481        unsigned char           *head,
 482                                *data;
 483        unsigned int            truesize;
 484        atomic_t                users;
 485};
 486
 487#ifdef __KERNEL__
 488/*
 489 *      Handling routines are only of interest to the kernel
 490 */
 491#include <linux/slab.h>
 492
 493#include <asm/system.h>
 494
 495/*
 496 * skb might have a dst pointer attached, refcounted or not.
 497 * _skb_refdst low order bit is set if refcount was _not_ taken
 498 */
 499#define SKB_DST_NOREF   1UL
 500#define SKB_DST_PTRMASK ~(SKB_DST_NOREF)
 501
 502/**
 503 * skb_dst - returns skb dst_entry
 504 * @skb: buffer
 505 *
 506 * Returns skb dst_entry, regardless of reference taken or not.
 507 */
 508static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
 509{
  510        /* If refdst was not refcounted, check that we are still in an
  511         * rcu_read_lock section
 512         */
 513        WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
 514                !rcu_read_lock_held() &&
 515                !rcu_read_lock_bh_held());
 516        return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
 517}
 518
 519/**
 520 * skb_dst_set - sets skb dst
 521 * @skb: buffer
 522 * @dst: dst entry
 523 *
 524 * Sets skb dst, assuming a reference was taken on dst and should
 525 * be released by skb_dst_drop()
 526 */
 527static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
 528{
 529        skb->_skb_refdst = (unsigned long)dst;
 530}
 531
 532extern void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst);
 533
 534/**
 535 * skb_dst_is_noref - Test if skb dst isn't refcounted
 536 * @skb: buffer
 537 */
 538static inline bool skb_dst_is_noref(const struct sk_buff *skb)
 539{
 540        return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
 541}
 542
 543static inline struct rtable *skb_rtable(const struct sk_buff *skb)
 544{
 545        return (struct rtable *)skb_dst(skb);
 546}
 547
 548extern void kfree_skb(struct sk_buff *skb);
 549extern void consume_skb(struct sk_buff *skb);
 550extern void            __kfree_skb(struct sk_buff *skb);
 551extern struct sk_buff *__alloc_skb(unsigned int size,
 552                                   gfp_t priority, int fclone, int node);
 553extern struct sk_buff *build_skb(void *data);
 554static inline struct sk_buff *alloc_skb(unsigned int size,
 555                                        gfp_t priority)
 556{
 557        return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
 558}
 559
 560static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 561                                               gfp_t priority)
 562{
 563        return __alloc_skb(size, priority, 1, NUMA_NO_NODE);
 564}
 565
 566extern void skb_recycle(struct sk_buff *skb);
 567extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);
 568
 569extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
 570extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
 571extern struct sk_buff *skb_clone(struct sk_buff *skb,
 572                                 gfp_t priority);
 573extern struct sk_buff *skb_copy(const struct sk_buff *skb,
 574                                gfp_t priority);
 575extern struct sk_buff *__pskb_copy(struct sk_buff *skb,
 576                                 int headroom, gfp_t gfp_mask);
 577
 578extern int             pskb_expand_head(struct sk_buff *skb,
 579                                        int nhead, int ntail,
 580                                        gfp_t gfp_mask);
 581extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
 582                                            unsigned int headroom);
 583extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 584                                       int newheadroom, int newtailroom,
 585                                       gfp_t priority);
 586extern int             skb_to_sgvec(struct sk_buff *skb,
 587                                    struct scatterlist *sg, int offset,
 588                                    int len);
 589extern int             skb_cow_data(struct sk_buff *skb, int tailbits,
 590                                    struct sk_buff **trailer);
 591extern int             skb_pad(struct sk_buff *skb, int pad);
 592#define dev_kfree_skb(a)        consume_skb(a)
 593
 594extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
 595                        int getfrag(void *from, char *to, int offset,
 596                        int len,int odd, struct sk_buff *skb),
 597                        void *from, int length);
 598
 599struct skb_seq_state {
 600        __u32           lower_offset;
 601        __u32           upper_offset;
 602        __u32           frag_idx;
 603        __u32           stepped_offset;
 604        struct sk_buff  *root_skb;
 605        struct sk_buff  *cur_skb;
 606        __u8            *frag_data;
 607};
 608
 609extern void           skb_prepare_seq_read(struct sk_buff *skb,
 610                                           unsigned int from, unsigned int to,
 611                                           struct skb_seq_state *st);
 612extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
 613                                   struct skb_seq_state *st);
 614extern void           skb_abort_seq_read(struct skb_seq_state *st);
 615
 616extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
 617                                    unsigned int to, struct ts_config *config,
 618                                    struct ts_state *state);
 619
 620extern void __skb_get_rxhash(struct sk_buff *skb);
 621static inline __u32 skb_get_rxhash(struct sk_buff *skb)
 622{
 623        if (!skb->rxhash)
 624                __skb_get_rxhash(skb);
 625
 626        return skb->rxhash;
 627}
 628
 629#ifdef NET_SKBUFF_DATA_USES_OFFSET
 630static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
 631{
 632        return skb->head + skb->end;
 633}
 634#else
 635static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
 636{
 637        return skb->end;
 638}
 639#endif
 640
 641/* Internal */
 642#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
 643
 644static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
 645{
 646        return &skb_shinfo(skb)->hwtstamps;
 647}
 648
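/*
 * A minimal usage sketch; example_read_hw_tstamp() is a hypothetical helper
 * showing how a consumer fetches the hardware timestamp attached to an skb.
 */
static inline ktime_t example_read_hw_tstamp(struct sk_buff *skb)
{
	/* Zero if the driver never filled in a hardware timestamp. */
	return skb_hwtstamps(skb)->hwtstamp;
}
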
 649/**
 650 *      skb_queue_empty - check if a queue is empty
 651 *      @list: queue head
 652 *
 653 *      Returns true if the queue is empty, false otherwise.
 654 */
 655static inline int skb_queue_empty(const struct sk_buff_head *list)
 656{
 657        return list->next == (struct sk_buff *)list;
 658}
 659
 660/**
 661 *      skb_queue_is_last - check if skb is the last entry in the queue
 662 *      @list: queue head
 663 *      @skb: buffer
 664 *
 665 *      Returns true if @skb is the last buffer on the list.
 666 */
 667static inline bool skb_queue_is_last(const struct sk_buff_head *list,
 668                                     const struct sk_buff *skb)
 669{
 670        return skb->next == (struct sk_buff *)list;
 671}
 672
 673/**
 674 *      skb_queue_is_first - check if skb is the first entry in the queue
 675 *      @list: queue head
 676 *      @skb: buffer
 677 *
 678 *      Returns true if @skb is the first buffer on the list.
 679 */
 680static inline bool skb_queue_is_first(const struct sk_buff_head *list,
 681                                      const struct sk_buff *skb)
 682{
 683        return skb->prev == (struct sk_buff *)list;
 684}
 685
 686/**
 687 *      skb_queue_next - return the next packet in the queue
 688 *      @list: queue head
 689 *      @skb: current buffer
 690 *
 691 *      Return the next packet in @list after @skb.  It is only valid to
 692 *      call this if skb_queue_is_last() evaluates to false.
 693 */
 694static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
 695                                             const struct sk_buff *skb)
 696{
 697        /* This BUG_ON may seem severe, but if we just return then we
 698         * are going to dereference garbage.
 699         */
 700        BUG_ON(skb_queue_is_last(list, skb));
 701        return skb->next;
 702}
 703
 704/**
 705 *      skb_queue_prev - return the prev packet in the queue
 706 *      @list: queue head
 707 *      @skb: current buffer
 708 *
 709 *      Return the prev packet in @list before @skb.  It is only valid to
 710 *      call this if skb_queue_is_first() evaluates to false.
 711 */
 712static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
 713                                             const struct sk_buff *skb)
 714{
 715        /* This BUG_ON may seem severe, but if we just return then we
 716         * are going to dereference garbage.
 717         */
 718        BUG_ON(skb_queue_is_first(list, skb));
 719        return skb->prev;
 720}
 721
 722/**
 723 *      skb_get - reference buffer
 724 *      @skb: buffer to reference
 725 *
 726 *      Makes another reference to a socket buffer and returns a pointer
 727 *      to the buffer.
 728 */
 729static inline struct sk_buff *skb_get(struct sk_buff *skb)
 730{
 731        atomic_inc(&skb->users);
 732        return skb;
 733}
 734
 735/*
  736 * If users == 1, we are the only owner and can avoid a redundant
  737 * atomic change.
 738 */
 739
 740/**
 741 *      skb_cloned - is the buffer a clone
 742 *      @skb: buffer to check
 743 *
 744 *      Returns true if the buffer was generated with skb_clone() and is
 745 *      one of multiple shared copies of the buffer. Cloned buffers are
 746 *      shared data so must not be written to under normal circumstances.
 747 */
 748static inline int skb_cloned(const struct sk_buff *skb)
 749{
 750        return skb->cloned &&
 751               (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
 752}
 753
 754/**
 755 *      skb_header_cloned - is the header a clone
 756 *      @skb: buffer to check
 757 *
 758 *      Returns true if modifying the header part of the buffer requires
 759 *      the data to be copied.
 760 */
 761static inline int skb_header_cloned(const struct sk_buff *skb)
 762{
 763        int dataref;
 764
 765        if (!skb->cloned)
 766                return 0;
 767
 768        dataref = atomic_read(&skb_shinfo(skb)->dataref);
 769        dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
 770        return dataref != 1;
 771}
 772
 773/**
 774 *      skb_header_release - release reference to header
 775 *      @skb: buffer to operate on
 776 *
 777 *      Drop a reference to the header part of the buffer.  This is done
 778 *      by acquiring a payload reference.  You must not read from the header
 779 *      part of skb->data after this.
 780 */
 781static inline void skb_header_release(struct sk_buff *skb)
 782{
 783        BUG_ON(skb->nohdr);
 784        skb->nohdr = 1;
 785        atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
 786}
 787
 788/**
 789 *      skb_shared - is the buffer shared
 790 *      @skb: buffer to check
 791 *
 792 *      Returns true if more than one person has a reference to this
 793 *      buffer.
 794 */
 795static inline int skb_shared(const struct sk_buff *skb)
 796{
 797        return atomic_read(&skb->users) != 1;
 798}
 799
 800/**
 801 *      skb_share_check - check if buffer is shared and if so clone it
 802 *      @skb: buffer to check
 803 *      @pri: priority for memory allocation
 804 *
 805 *      If the buffer is shared the buffer is cloned and the old copy
 806 *      drops a reference. A new clone with a single reference is returned.
 807 *      If the buffer is not shared the original buffer is returned. When
  808 *      called from interrupt context or with spinlocks held, @pri must
  809 *      be %GFP_ATOMIC.
 810 *
 811 *      NULL is returned on a memory allocation failure.
 812 */
 813static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
 814                                              gfp_t pri)
 815{
 816        might_sleep_if(pri & __GFP_WAIT);
 817        if (skb_shared(skb)) {
 818                struct sk_buff *nskb = skb_clone(skb, pri);
 819                kfree_skb(skb);
 820                skb = nskb;
 821        }
 822        return skb;
 823}
 824
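/*
 * A minimal usage sketch; example_rx_handler() is hypothetical.  A protocol
 * handler may be given a shared buffer (e.g. when several taps saw the same
 * packet), so it takes a private clone before modifying anything.
 */
static inline struct sk_buff *example_rx_handler(struct sk_buff *skb)
{
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return NULL;	/* clone failed; the old reference was dropped */

	/* ... it is now safe to modify skb metadata here ... */
	return skb;
}
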
 825/*
 826 *      Copy shared buffers into a new sk_buff. We effectively do COW on
  827 *      packets to handle cases where we have a local reader while the
  828 *      packet is also being forwarded, and a couple of other messy ones.
  829 *      The normal one is tcpdumping a packet that is being forwarded.
 830 */
 831
 832/**
 833 *      skb_unshare - make a copy of a shared buffer
 834 *      @skb: buffer to check
 835 *      @pri: priority for memory allocation
 836 *
 837 *      If the socket buffer is a clone then this function creates a new
 838 *      copy of the data, drops a reference count on the old copy and returns
 839 *      the new copy with the reference count at 1. If the buffer is not a clone
 840 *      the original buffer is returned. When called with a spinlock held or
  841 *      from interrupt context, @pri must be %GFP_ATOMIC.
 842 *
 843 *      %NULL is returned on a memory allocation failure.
 844 */
 845static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
 846                                          gfp_t pri)
 847{
 848        might_sleep_if(pri & __GFP_WAIT);
 849        if (skb_cloned(skb)) {
 850                struct sk_buff *nskb = skb_copy(skb, pri);
 851                kfree_skb(skb); /* Free our shared copy */
 852                skb = nskb;
 853        }
 854        return skb;
 855}
 856
 857/**
 858 *      skb_peek - peek at the head of an &sk_buff_head
 859 *      @list_: list to peek at
 860 *
 861 *      Peek an &sk_buff. Unlike most other operations you _MUST_
 862 *      be careful with this one. A peek leaves the buffer on the
 863 *      list and someone else may run off with it. You must hold
 864 *      the appropriate locks or have a private queue to do this.
 865 *
 866 *      Returns %NULL for an empty list or a pointer to the head element.
 867 *      The reference count is not incremented and the reference is therefore
 868 *      volatile. Use with caution.
 869 */
 870static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
 871{
 872        struct sk_buff *list = ((const struct sk_buff *)list_)->next;
 873        if (list == (struct sk_buff *)list_)
 874                list = NULL;
 875        return list;
 876}
 877
 878/**
 879 *      skb_peek_tail - peek at the tail of an &sk_buff_head
 880 *      @list_: list to peek at
 881 *
 882 *      Peek an &sk_buff. Unlike most other operations you _MUST_
 883 *      be careful with this one. A peek leaves the buffer on the
 884 *      list and someone else may run off with it. You must hold
 885 *      the appropriate locks or have a private queue to do this.
 886 *
 887 *      Returns %NULL for an empty list or a pointer to the tail element.
 888 *      The reference count is not incremented and the reference is therefore
 889 *      volatile. Use with caution.
 890 */
 891static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
 892{
 893        struct sk_buff *list = ((const struct sk_buff *)list_)->prev;
 894        if (list == (struct sk_buff *)list_)
 895                list = NULL;
 896        return list;
 897}
 898
 899/**
 900 *      skb_queue_len   - get queue length
 901 *      @list_: list to measure
 902 *
 903 *      Return the length of an &sk_buff queue.
 904 */
 905static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
 906{
 907        return list_->qlen;
 908}
 909
 910/**
 911 *      __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 912 *      @list: queue to initialize
 913 *
 914 *      This initializes only the list and queue length aspects of
  915 *      an sk_buff_head object.  This allows initializing the list
  916 *      aspects of an sk_buff_head without reinitializing things like
  917 *      the spinlock.  It can also be used for on-stack sk_buff_head
  918 *      objects where the spinlock is known not to be used.
 919 */
 920static inline void __skb_queue_head_init(struct sk_buff_head *list)
 921{
 922        list->prev = list->next = (struct sk_buff *)list;
 923        list->qlen = 0;
 924}
 925
 926/*
 927 * This function creates a split out lock class for each invocation;
 928 * this is needed for now since a whole lot of users of the skb-queue
 929 * infrastructure in drivers have different locking usage (in hardirq)
  930 * than the networking core (in softirq only). In the long run either the
  931 * network layer or the drivers will need annotations to consolidate the
  932 * main types of usage into 3 classes.
 933 */
 934static inline void skb_queue_head_init(struct sk_buff_head *list)
 935{
 936        spin_lock_init(&list->lock);
 937        __skb_queue_head_init(list);
 938}
 939
 940static inline void skb_queue_head_init_class(struct sk_buff_head *list,
 941                struct lock_class_key *class)
 942{
 943        skb_queue_head_init(list);
 944        lockdep_set_class(&list->lock, class);
 945}
 946
 947/*
 948 *      Insert an sk_buff on a list.
 949 *
 950 *      The "__skb_xxxx()" functions are the non-atomic ones that
 951 *      can only be called with interrupts disabled.
 952 */
 953extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
 954static inline void __skb_insert(struct sk_buff *newsk,
 955                                struct sk_buff *prev, struct sk_buff *next,
 956                                struct sk_buff_head *list)
 957{
 958        newsk->next = next;
 959        newsk->prev = prev;
 960        next->prev  = prev->next = newsk;
 961        list->qlen++;
 962}
 963
 964static inline void __skb_queue_splice(const struct sk_buff_head *list,
 965                                      struct sk_buff *prev,
 966                                      struct sk_buff *next)
 967{
 968        struct sk_buff *first = list->next;
 969        struct sk_buff *last = list->prev;
 970
 971        first->prev = prev;
 972        prev->next = first;
 973
 974        last->next = next;
 975        next->prev = last;
 976}
 977
 978/**
 979 *      skb_queue_splice - join two skb lists, this is designed for stacks
 980 *      @list: the new list to add
 981 *      @head: the place to add it in the first list
 982 */
 983static inline void skb_queue_splice(const struct sk_buff_head *list,
 984                                    struct sk_buff_head *head)
 985{
 986        if (!skb_queue_empty(list)) {
 987                __skb_queue_splice(list, (struct sk_buff *) head, head->next);
 988                head->qlen += list->qlen;
 989        }
 990}
 991
 992/**
  993 *      skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 994 *      @list: the new list to add
 995 *      @head: the place to add it in the first list
 996 *
 997 *      The list at @list is reinitialised
 998 */
 999static inline void skb_queue_splice_init(struct sk_buff_head *list,
1000                                         struct sk_buff_head *head)
1001{
1002        if (!skb_queue_empty(list)) {
1003                __skb_queue_splice(list, (struct sk_buff *) head, head->next);
1004                head->qlen += list->qlen;
1005                __skb_queue_head_init(list);
1006        }
1007}
1008
1009/**
1010 *      skb_queue_splice_tail - join two skb lists, each list being a queue
1011 *      @list: the new list to add
1012 *      @head: the place to add it in the first list
1013 */
1014static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
1015                                         struct sk_buff_head *head)
1016{
1017        if (!skb_queue_empty(list)) {
1018                __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
1019                head->qlen += list->qlen;
1020        }
1021}
1022
1023/**
 1024 *      skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
1025 *      @list: the new list to add
1026 *      @head: the place to add it in the first list
1027 *
1028 *      Each of the lists is a queue.
1029 *      The list at @list is reinitialised
1030 */
1031static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
1032                                              struct sk_buff_head *head)
1033{
1034        if (!skb_queue_empty(list)) {
1035                __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
1036                head->qlen += list->qlen;
1037                __skb_queue_head_init(list);
1038        }
1039}
1040
1041/**
 1042 *      __skb_queue_after - queue a buffer after the given buffer
1043 *      @list: list to use
1044 *      @prev: place after this buffer
1045 *      @newsk: buffer to queue
1046 *
 1047 *      Queue a buffer in the middle of a list. This function takes no locks
1048 *      and you must therefore hold required locks before calling it.
1049 *
1050 *      A buffer cannot be placed on two lists at the same time.
1051 */
1052static inline void __skb_queue_after(struct sk_buff_head *list,
1053                                     struct sk_buff *prev,
1054                                     struct sk_buff *newsk)
1055{
1056        __skb_insert(newsk, prev, prev->next, list);
1057}
1058
1059extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
1060                       struct sk_buff_head *list);
1061
1062static inline void __skb_queue_before(struct sk_buff_head *list,
1063                                      struct sk_buff *next,
1064                                      struct sk_buff *newsk)
1065{
1066        __skb_insert(newsk, next->prev, next, list);
1067}
1068
1069/**
1070 *      __skb_queue_head - queue a buffer at the list head
1071 *      @list: list to use
1072 *      @newsk: buffer to queue
1073 *
1074 *      Queue a buffer at the start of a list. This function takes no locks
1075 *      and you must therefore hold required locks before calling it.
1076 *
1077 *      A buffer cannot be placed on two lists at the same time.
1078 */
1079extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
1080static inline void __skb_queue_head(struct sk_buff_head *list,
1081                                    struct sk_buff *newsk)
1082{
1083        __skb_queue_after(list, (struct sk_buff *)list, newsk);
1084}
1085
1086/**
1087 *      __skb_queue_tail - queue a buffer at the list tail
1088 *      @list: list to use
1089 *      @newsk: buffer to queue
1090 *
1091 *      Queue a buffer at the end of a list. This function takes no locks
1092 *      and you must therefore hold required locks before calling it.
1093 *
1094 *      A buffer cannot be placed on two lists at the same time.
1095 */
1096extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
1097static inline void __skb_queue_tail(struct sk_buff_head *list,
1098                                   struct sk_buff *newsk)
1099{
1100        __skb_queue_before(list, (struct sk_buff *)list, newsk);
1101}
1102
1103/*
1104 * remove sk_buff from list. _Must_ be called atomically, and with
 1105 * the list known.
1106 */
1107extern void        skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
1108static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
1109{
1110        struct sk_buff *next, *prev;
1111
1112        list->qlen--;
1113        next       = skb->next;
1114        prev       = skb->prev;
1115        skb->next  = skb->prev = NULL;
1116        next->prev = prev;
1117        prev->next = next;
1118}
1119
1120/**
1121 *      __skb_dequeue - remove from the head of the queue
1122 *      @list: list to dequeue from
1123 *
1124 *      Remove the head of the list. This function does not take any locks
1125 *      so must be used with appropriate locks held only. The head item is
1126 *      returned or %NULL if the list is empty.
1127 */
1128extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
1129static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
1130{
1131        struct sk_buff *skb = skb_peek(list);
1132        if (skb)
1133                __skb_unlink(skb, list);
1134        return skb;
1135}
1136
1137/**
1138 *      __skb_dequeue_tail - remove from the tail of the queue
1139 *      @list: list to dequeue from
1140 *
1141 *      Remove the tail of the list. This function does not take any locks
1142 *      so must be used with appropriate locks held only. The tail item is
1143 *      returned or %NULL if the list is empty.
1144 */
1145extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
1146static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
1147{
1148        struct sk_buff *skb = skb_peek_tail(list);
1149        if (skb)
1150                __skb_unlink(skb, list);
1151        return skb;
1152}
1153
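/*
 * A minimal usage sketch of the locked queue helpers declared above;
 * example_fifo_echo() is hypothetical and assumes @q was initialised with
 * skb_queue_head_init().  skb_queue_tail()/skb_dequeue() take q->lock
 * internally, so no extra locking is needed here.
 */
static inline struct sk_buff *example_fifo_echo(struct sk_buff_head *q,
						struct sk_buff *skb)
{
	skb_queue_tail(q, skb);		/* append at the tail ...    */
	return skb_dequeue(q);		/* ... and pop from the head */
}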
1154
1155static inline int skb_is_nonlinear(const struct sk_buff *skb)
1156{
1157        return skb->data_len;
1158}
1159
1160static inline unsigned int skb_headlen(const struct sk_buff *skb)
1161{
1162        return skb->len - skb->data_len;
1163}
1164
1165static inline int skb_pagelen(const struct sk_buff *skb)
1166{
1167        int i, len = 0;
1168
1169        for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
1170                len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
1171        return len + skb_headlen(skb);
1172}
1173
1174/**
1175 * __skb_fill_page_desc - initialise a paged fragment in an skb
1176 * @skb: buffer containing fragment to be initialised
1177 * @i: paged fragment index to initialise
1178 * @page: the page to use for this fragment
 1179 * @off: the offset to the data within @page
1180 * @size: the length of the data
1181 *
 1182 * Initialises the @i'th fragment of @skb to point to @size bytes at
1183 * offset @off within @page.
1184 *
1185 * Does not take any additional reference on the fragment.
1186 */
1187static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
1188                                        struct page *page, int off, int size)
1189{
1190        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1191
1192        frag->page.p              = page;
1193        frag->page_offset         = off;
1194        skb_frag_size_set(frag, size);
1195}
1196
1197/**
1198 * skb_fill_page_desc - initialise a paged fragment in an skb
1199 * @skb: buffer containing fragment to be initialised
1200 * @i: paged fragment index to initialise
1201 * @page: the page to use for this fragment
 1202 * @off: the offset to the data within @page
1203 * @size: the length of the data
1204 *
1205 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
 1206 * @skb to point to @size bytes at offset @off within @page. In
1207 * addition updates @skb such that @i is the last fragment.
1208 *
1209 * Does not take any additional reference on the fragment.
1210 */
1211static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
1212                                      struct page *page, int off, int size)
1213{
1214        __skb_fill_page_desc(skb, i, page, off, size);
1215        skb_shinfo(skb)->nr_frags = i + 1;
1216}
1217
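/*
 * A minimal sketch of attaching one page fragment to an skb;
 * example_add_page() is hypothetical.  The caller is assumed to already hold
 * a reference on @page, and the truesize accounting is simplified here.
 */
static inline void example_add_page(struct sk_buff *skb, struct page *page,
				    unsigned int off, unsigned int size)
{
	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, off, size);

	/* The fragment bytes count towards the total length but not the
	 * linear head, so both len and data_len grow.
	 */
	skb->len      += size;
	skb->data_len += size;
	skb->truesize += size;
}
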
1218extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
1219                            int off, int size);
1220
1221#define SKB_PAGE_ASSERT(skb)    BUG_ON(skb_shinfo(skb)->nr_frags)
1222#define SKB_FRAG_ASSERT(skb)    BUG_ON(skb_has_frag_list(skb))
1223#define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))
1224
1225#ifdef NET_SKBUFF_DATA_USES_OFFSET
1226static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
1227{
1228        return skb->head + skb->tail;
1229}
1230
1231static inline void skb_reset_tail_pointer(struct sk_buff *skb)
1232{
1233        skb->tail = skb->data - skb->head;
1234}
1235
1236static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
1237{
1238        skb_reset_tail_pointer(skb);
1239        skb->tail += offset;
1240}
1241#else /* NET_SKBUFF_DATA_USES_OFFSET */
1242static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
1243{
1244        return skb->tail;
1245}
1246
1247static inline void skb_reset_tail_pointer(struct sk_buff *skb)
1248{
1249        skb->tail = skb->data;
1250}
1251
1252static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
1253{
1254        skb->tail = skb->data + offset;
1255}
1256
1257#endif /* NET_SKBUFF_DATA_USES_OFFSET */
1258
1259/*
1260 *      Add data to an sk_buff
1261 */
1262extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
1263static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
1264{
1265        unsigned char *tmp = skb_tail_pointer(skb);
1266        SKB_LINEAR_ASSERT(skb);
1267        skb->tail += len;
1268        skb->len  += len;
1269        return tmp;
1270}
1271
1272extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
1273static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
1274{
1275        skb->data -= len;
1276        skb->len  += len;
1277        return skb->data;
1278}
1279
1280extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
1281static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
1282{
1283        skb->len -= len;
1284        BUG_ON(skb->len < skb->data_len);
1285        return skb->data += len;
1286}
1287
1288static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
1289{
1290        return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
1291}
1292
1293extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
1294
1295static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
1296{
1297        if (len > skb_headlen(skb) &&
1298            !__pskb_pull_tail(skb, len - skb_headlen(skb)))
1299                return NULL;
1300        skb->len -= len;
1301        return skb->data += len;
1302}
1303
1304static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
1305{
1306        return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
1307}
1308
1309static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
1310{
1311        if (likely(len <= skb_headlen(skb)))
1312                return 1;
1313        if (unlikely(len > skb->len))
1314                return 0;
1315        return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
1316}
1317
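/*
 * A minimal parsing sketch; example_peek_header() is hypothetical.  Headers
 * must be linear (in the skb head) before they can be dereferenced directly,
 * which is exactly what pskb_may_pull() guarantees on success.
 */
static inline void *example_peek_header(struct sk_buff *skb,
					unsigned int hdr_len)
{
	if (!pskb_may_pull(skb, hdr_len))
		return NULL;	/* packet shorter than hdr_len, or pull failed */

	/* pskb_may_pull() may reallocate the head, so only (re)read
	 * skb->data after it succeeds.
	 */
	return skb->data;
}
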
1318/**
1319 *      skb_headroom - bytes at buffer head
1320 *      @skb: buffer to check
1321 *
1322 *      Return the number of bytes of free space at the head of an &sk_buff.
1323 */
1324static inline unsigned int skb_headroom(const struct sk_buff *skb)
1325{
1326        return skb->data - skb->head;
1327}
1328
1329/**
1330 *      skb_tailroom - bytes at buffer end
1331 *      @skb: buffer to check
1332 *
1333 *      Return the number of bytes of free space at the tail of an sk_buff
1334 */
1335static inline int skb_tailroom(const struct sk_buff *skb)
1336{
1337        return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
1338}
1339
1340/**
1341 *      skb_reserve - adjust headroom
1342 *      @skb: buffer to alter
1343 *      @len: bytes to move
1344 *
1345 *      Increase the headroom of an empty &sk_buff by reducing the tail
1346 *      room. This is only allowed for an empty buffer.
1347 */
1348static inline void skb_reserve(struct sk_buff *skb, int len)
1349{
1350        skb->data += len;
1351        skb->tail += len;
1352}
1353
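/*
 * A minimal transmit-side construction sketch; example_build_frame() is
 * hypothetical.  Reserve headroom first, then add the payload with
 * skb_put(), then prepend headers with skb_push().
 */
static inline struct sk_buff *example_build_frame(unsigned int hdr_len,
						  unsigned int payload_len,
						  gfp_t gfp)
{
	struct sk_buff *skb = alloc_skb(hdr_len + payload_len, gfp);
	unsigned char *payload, *hdr;

	if (!skb)
		return NULL;

	skb_reserve(skb, hdr_len);		/* leave room for headers      */
	payload = skb_put(skb, payload_len);	/* tail grows by payload_len   */
	hdr = skb_push(skb, hdr_len);		/* data moves back for headers */

	/* the caller would now copy its data into @payload and @hdr */
	(void)payload;
	(void)hdr;
	return skb;
}
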
1354static inline void skb_reset_mac_len(struct sk_buff *skb)
1355{
1356        skb->mac_len = skb->network_header - skb->mac_header;
1357}
1358
1359#ifdef NET_SKBUFF_DATA_USES_OFFSET
1360static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
1361{
1362        return skb->head + skb->transport_header;
1363}
1364
1365static inline void skb_reset_transport_header(struct sk_buff *skb)
1366{
1367        skb->transport_header = skb->data - skb->head;
1368}
1369
1370static inline void skb_set_transport_header(struct sk_buff *skb,
1371                                            const int offset)
1372{
1373        skb_reset_transport_header(skb);
1374        skb->transport_header += offset;
1375}
1376
1377static inline unsigned char *skb_network_header(const struct sk_buff *skb)
1378{
1379        return skb->head + skb->network_header;
1380}
1381
1382static inline void skb_reset_network_header(struct sk_buff *skb)
1383{
1384        skb->network_header = skb->data - skb->head;
1385}
1386
1387static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
1388{
1389        skb_reset_network_header(skb);
1390        skb->network_header += offset;
1391}
1392
1393static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
1394{
1395        return skb->head + skb->mac_header;
1396}
1397
1398static inline int skb_mac_header_was_set(const struct sk_buff *skb)
1399{
1400        return skb->mac_header != ~0U;
1401}
1402
1403static inline void skb_reset_mac_header(struct sk_buff *skb)
1404{
1405        skb->mac_header = skb->data - skb->head;
1406}
1407
1408static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
1409{
1410        skb_reset_mac_header(skb);
1411        skb->mac_header += offset;
1412}
1413
1414#else /* NET_SKBUFF_DATA_USES_OFFSET */
1415
1416static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
1417{
1418        return skb->transport_header;
1419}
1420
1421static inline void skb_reset_transport_header(struct sk_buff *skb)
1422{
1423        skb->transport_header = skb->data;
1424}
1425
1426static inline void skb_set_transport_header(struct sk_buff *skb,
1427                                            const int offset)
1428{
1429        skb->transport_header = skb->data + offset;
1430}
1431
1432static inline unsigned char *skb_network_header(const struct sk_buff *skb)
1433{
1434        return skb->network_header;
1435}
1436
1437static inline void skb_reset_network_header(struct sk_buff *skb)
1438{
1439        skb->network_header = skb->data;
1440}
1441
1442static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
1443{
1444        skb->network_header = skb->data + offset;
1445}
1446
1447static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
1448{
1449        return skb->mac_header;
1450}
1451
1452static inline int skb_mac_header_was_set(const struct sk_buff *skb)
1453{
1454        return skb->mac_header != NULL;
1455}
1456
1457static inline void skb_reset_mac_header(struct sk_buff *skb)
1458{
1459        skb->mac_header = skb->data;
1460}
1461
1462static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
1463{
1464        skb->mac_header = skb->data + offset;
1465}
1466#endif /* NET_SKBUFF_DATA_USES_OFFSET */
1467
1468static inline void skb_mac_header_rebuild(struct sk_buff *skb)
1469{
1470        if (skb_mac_header_was_set(skb)) {
1471                const unsigned char *old_mac = skb_mac_header(skb);
1472
1473                skb_set_mac_header(skb, -skb->mac_len);
1474                memmove(skb_mac_header(skb), old_mac, skb->mac_len);
1475        }
1476}
1477
1478static inline int skb_checksum_start_offset(const struct sk_buff *skb)
1479{
1480        return skb->csum_start - skb_headroom(skb);
1481}
1482
1483static inline int skb_transport_offset(const struct sk_buff *skb)
1484{
1485        return skb_transport_header(skb) - skb->data;
1486}
1487
1488static inline u32 skb_network_header_len(const struct sk_buff *skb)
1489{
1490        return skb->transport_header - skb->network_header;
1491}
1492
1493static inline int skb_network_offset(const struct sk_buff *skb)
1494{
1495        return skb_network_header(skb) - skb->data;
1496}
1497
1498static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
1499{
1500        return pskb_may_pull(skb, skb_network_offset(skb) + len);
1501}
1502
1503/*
1504 * CPUs often take a performance hit when accessing unaligned memory
1505 * locations. The actual performance hit varies, it can be small if the
1506 * hardware handles it or large if we have to take an exception and fix it
1507 * in software.
1508 *
1509 * Since an ethernet header is 14 bytes network drivers often end up with
1510 * the IP header at an unaligned offset. The IP header can be aligned by
1511 * shifting the start of the packet by 2 bytes. Drivers should do this
1512 * with:
1513 *
1514 * skb_reserve(skb, NET_IP_ALIGN);
1515 *
1516 * The downside to this alignment of the IP header is that the DMA is now
1517 * unaligned. On some architectures the cost of an unaligned DMA is high
1518 * and this cost outweighs the gains made by aligning the IP header.
1519 *
1520 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
1521 * to be overridden.
1522 */
1523#ifndef NET_IP_ALIGN
1524#define NET_IP_ALIGN    2
1525#endif
1526
1527/*
1528 * The networking layer reserves some headroom in skb data (via
1529 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
1530 * the header has to grow. In the default case, if the header has to grow
1531 * 32 bytes or less we avoid the reallocation.
1532 *
1533 * Unfortunately this headroom changes the DMA alignment of the resulting
1534 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
1535 * on some architectures. An architecture can override this value,
1536 * perhaps setting it to a cacheline in size (since that will maintain
1537 * cacheline alignment of the DMA). It must be a power of 2.
1538 *
1539 * Various parts of the networking layer expect at least 32 bytes of
1540 * headroom, you should not reduce this.
1541 *
1542 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
1543 * to reduce average number of cache lines per packet.
 1544 * get_rps_cpus() for example only accesses one 64-byte aligned block:
1545 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
1546 */
1547#ifndef NET_SKB_PAD
1548#define NET_SKB_PAD     max(32, L1_CACHE_BYTES)
1549#endif
1550
1551extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
1552
1553static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
1554{
1555        if (unlikely(skb_is_nonlinear(skb))) {
1556                WARN_ON(1);
1557                return;
1558        }
1559        skb->len = len;
1560        skb_set_tail_pointer(skb, len);
1561}
1562
1563extern void skb_trim(struct sk_buff *skb, unsigned int len);
1564
1565static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
1566{
1567        if (skb->data_len)
1568                return ___pskb_trim(skb, len);
1569        __skb_trim(skb, len);
1570        return 0;
1571}
1572
1573static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
1574{
1575        return (len < skb->len) ? __pskb_trim(skb, len) : 0;
1576}
1577
1578/**
1579 *      pskb_trim_unique - remove end from a paged unique (not cloned) buffer
1580 *      @skb: buffer to alter
1581 *      @len: new length
1582 *
1583 *      This is identical to pskb_trim except that the caller knows that
1584 *      the skb is not cloned so we should never get an error due to out-
1585 *      of-memory.
1586 */
1587static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
1588{
1589        int err = pskb_trim(skb, len);
1590        BUG_ON(err);
1591}
1592
1593/**
1594 *      skb_orphan - orphan a buffer
1595 *      @skb: buffer to orphan
1596 *
1597 *      If a buffer currently has an owner then we call the owner's
1598 *      destructor function and make the @skb unowned. The buffer continues
1599 *      to exist but is no longer charged to its former owner.
1600 */
1601static inline void skb_orphan(struct sk_buff *skb)
1602{
1603        if (skb->destructor)
1604                skb->destructor(skb);
1605        skb->destructor = NULL;
1606        skb->sk         = NULL;
1607}
1608
1609/**
1610 *      __skb_queue_purge - empty a list
1611 *      @list: list to empty
1612 *
1613 *      Delete all buffers on an &sk_buff list. Each buffer is removed from
1614 *      the list and one reference dropped. This function does not take the
1615 *      list lock and the caller must hold the relevant locks to use it.
1616 */
1617extern void skb_queue_purge(struct sk_buff_head *list);
1618static inline void __skb_queue_purge(struct sk_buff_head *list)
1619{
1620        struct sk_buff *skb;
1621        while ((skb = __skb_dequeue(list)) != NULL)
1622                kfree_skb(skb);
1623}
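
/*
 * Example (sketch, assumes a hypothetical driver-private queue): the
 * lockless variant may only be used while nobody else can reach the
 * queue, or with the queue lock held; skb_queue_purge() takes the
 * lock itself.
 *
 *	spin_lock_bh(&priv->tx_pending.lock);
 *	__skb_queue_purge(&priv->tx_pending);
 *	spin_unlock_bh(&priv->tx_pending.lock);
 */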
1624
1625/**
1626 *      __dev_alloc_skb - allocate an skbuff for receiving
1627 *      @length: length to allocate
1628 *      @gfp_mask: get_free_pages mask, passed to alloc_skb
1629 *
1630 *      Allocate a new &sk_buff and assign it a usage count of one. The
1631 *      buffer has unspecified headroom built in. Users should allocate
1632 *      the headroom they think they need without accounting for the
1633 *      built in space. The built in space is used for optimisations.
1634 *
1635 *      %NULL is returned if there is no free memory.
1636 */
1637static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
1638                                              gfp_t gfp_mask)
1639{
1640        struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
1641        if (likely(skb))
1642                skb_reserve(skb, NET_SKB_PAD);
1643        return skb;
1644}
1645
1646extern struct sk_buff *dev_alloc_skb(unsigned int length);
1647
1648extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
1649                unsigned int length, gfp_t gfp_mask);
1650
1651/**
1652 *      netdev_alloc_skb - allocate an skbuff for rx on a specific device
1653 *      @dev: network device to receive on
1654 *      @length: length to allocate
1655 *
1656 *      Allocate a new &sk_buff and assign it a usage count of one. The
1657 *      buffer has unspecified headroom built in. Users should allocate
1658 *      the headroom they think they need without accounting for the
1659 *      built in space. The built in space is used for optimisations.
1660 *
1661 *      %NULL is returned if there is no free memory. Although this function
1662 *      allocates memory it can be called from an interrupt.
1663 */
1664static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
1665                unsigned int length)
1666{
1667        return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
1668}
1669
1670static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
1671                unsigned int length, gfp_t gfp)
1672{
1673        struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
1674
1675        if (NET_IP_ALIGN && skb)
1676                skb_reserve(skb, NET_IP_ALIGN);
1677        return skb;
1678}
1679
1680static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
1681                unsigned int length)
1682{
1683        return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
1684}
1685
1686/**
1687 * skb_frag_page - retrieve the page referred to by a paged fragment
1688 * @frag: the paged fragment
1689 *
1690 * Returns the &struct page associated with @frag.
1691 */
1692static inline struct page *skb_frag_page(const skb_frag_t *frag)
1693{
1694        return frag->page.p;
1695}
1696
1697/**
1698 * __skb_frag_ref - take an additional reference on a paged fragment.
1699 * @frag: the paged fragment
1700 *
1701 * Takes an additional reference on the paged fragment @frag.
1702 */
1703static inline void __skb_frag_ref(skb_frag_t *frag)
1704{
1705        get_page(skb_frag_page(frag));
1706}
1707
1708/**
1709 * skb_frag_ref - take an additional reference on a paged fragment of an skb.
1710 * @skb: the buffer
1711 * @f: the fragment offset.
1712 *
1713 * Takes an additional reference on the @f'th paged fragment of @skb.
1714 */
1715static inline void skb_frag_ref(struct sk_buff *skb, int f)
1716{
1717        __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
1718}
1719
1720/**
1721 * __skb_frag_unref - release a reference on a paged fragment.
1722 * @frag: the paged fragment
1723 *
1724 * Releases a reference on the paged fragment @frag.
1725 */
1726static inline void __skb_frag_unref(skb_frag_t *frag)
1727{
1728        put_page(skb_frag_page(frag));
1729}
1730
1731/**
1732 * skb_frag_unref - release a reference on a paged fragment of an skb.
1733 * @skb: the buffer
1734 * @f: the fragment offset
1735 *
1736 * Releases a reference on the @f'th paged fragment of @skb.
1737 */
1738static inline void skb_frag_unref(struct sk_buff *skb, int f)
1739{
1740        __skb_frag_unref(&skb_shinfo(skb)->frags[f]);
1741}
1742
1743/**
1744 * skb_frag_address - gets the address of the data contained in a paged fragment
1745 * @frag: the paged fragment buffer
1746 *
1747 * Returns the address of the data within @frag. The page must already
1748 * be mapped.
1749 */
1750static inline void *skb_frag_address(const skb_frag_t *frag)
1751{
1752        return page_address(skb_frag_page(frag)) + frag->page_offset;
1753}
1754
1755/**
1756 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
1757 * @frag: the paged fragment buffer
1758 *
1759 * Returns the address of the data within @frag. Checks that the page
1760 * is mapped and returns %NULL otherwise.
1761 */
1762static inline void *skb_frag_address_safe(const skb_frag_t *frag)
1763{
1764        void *ptr = page_address(skb_frag_page(frag));
1765        if (unlikely(!ptr))
1766                return NULL;
1767
1768        return ptr + frag->page_offset;
1769}
1770
1771/**
1772 * __skb_frag_set_page - sets the page contained in a paged fragment
1773 * @frag: the paged fragment
1774 * @page: the page to set
1775 *
1776 * Sets the fragment @frag to contain @page.
1777 */
1778static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
1779{
1780        frag->page.p = page;
1781}
1782
1783/**
1784 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
1785 * @skb: the buffer
1786 * @f: the fragment offset
1787 * @page: the page to set
1788 *
1789 * Sets the @f'th fragment of @skb to contain @page.
1790 */
1791static inline void skb_frag_set_page(struct sk_buff *skb, int f,
1792                                     struct page *page)
1793{
1794        __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
1795}
1796
1797/**
1798 * skb_frag_dma_map - maps a paged fragment via the DMA API
1799 * @dev: the device to map the fragment to
1800 * @frag: the paged fragment to map
1801 * @offset: the offset within the fragment (starting at the
1802 *          fragment's own offset)
1803 * @size: the number of bytes to map
1804 * @dir: the direction of the mapping (%DMA_*)
1805 *
1806 * Maps the page associated with @frag to @dev.
1807 */
1808static inline dma_addr_t skb_frag_dma_map(struct device *dev,
1809                                          const skb_frag_t *frag,
1810                                          size_t offset, size_t size,
1811                                          enum dma_data_direction dir)
1812{
1813        return dma_map_page(dev, skb_frag_page(frag),
1814                            frag->page_offset + offset, size, dir);
1815}
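
/*
 * Example (sketch): DMA-mapping every paged fragment of an skb for
 * transmit.  "dmadev" would be the underlying struct device (e.g.
 * &pdev->dev); error unwinding of earlier mappings is omitted.
 *
 *	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 *		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 *		dma_addr_t addr;
 *
 *		addr = skb_frag_dma_map(dmadev, frag, 0,
 *					skb_frag_size(frag), DMA_TO_DEVICE);
 *		if (dma_mapping_error(dmadev, addr))
 *			goto unmap_failed;
 *	}
 */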
1816
1817static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
1818                                        gfp_t gfp_mask)
1819{
1820        return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
1821}
1822
1823/**
1824 *      skb_clone_writable - is the header of a clone writable
1825 *      @skb: buffer to check
1826 *      @len: length up to which to write
1827 *
1828 *      Returns true if modifying the header part of the cloned buffer
1829 *      does not require the data to be copied.
1830 */
1831static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
1832{
1833        return !skb_header_cloned(skb) &&
1834               skb_headroom(skb) + len <= skb->hdr_len;
1835}
1836
1837static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
1838                            int cloned)
1839{
1840        int delta = 0;
1841
1842        if (headroom < NET_SKB_PAD)
1843                headroom = NET_SKB_PAD;
1844        if (headroom > skb_headroom(skb))
1845                delta = headroom - skb_headroom(skb);
1846
1847        if (delta || cloned)
1848                return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
1849                                        GFP_ATOMIC);
1850        return 0;
1851}
1852
1853/**
1854 *      skb_cow - copy header of skb when it is required
1855 *      @skb: buffer to cow
1856 *      @headroom: needed headroom
1857 *
1858 *      If the skb passed lacks sufficient headroom or its data part
1859 *      is shared, data is reallocated. If reallocation fails, an error
1860 *      is returned and original skb is not changed.
1861 *
1862 *      The result is skb with writable area skb->head...skb->tail
1863 *      and at least @headroom of space at head.
1864 */
1865static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
1866{
1867        return __skb_cow(skb, headroom, skb_cloned(skb));
1868}
1869
1870/**
1871 *      skb_cow_head - skb_cow but only making the head writable
1872 *      @skb: buffer to cow
1873 *      @headroom: needed headroom
1874 *
1875 *      This function is identical to skb_cow except that we replace the
1876 *      skb_cloned check by skb_header_cloned.  It should be used when
1877 *      you only need to push on some header and do not need to modify
1878 *      the data.
1879 */
1880static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
1881{
1882        return __skb_cow(skb, headroom, skb_header_cloned(skb));
1883}
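
/*
 * Example (sketch): making room for an outer header before pushing it,
 * as an encapsulating protocol might.  "struct my_hdr" and MY_HLEN are
 * hypothetical.
 *
 *	struct my_hdr *hdr;
 *
 *	if (skb_cow_head(skb, MY_HLEN))
 *		goto drop;
 *	hdr = (struct my_hdr *)skb_push(skb, MY_HLEN);
 *	hdr->version = 1;
 */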
1884
1885/**
1886 *      skb_padto       - pad an skbuff up to a minimal size
1887 *      @skb: buffer to pad
1888 *      @len: minimal length
1889 *
1890 *      Pads up a buffer to ensure the trailing bytes exist and are
1891 *      blanked. If the buffer already contains sufficient data it
1892 *      is untouched. Otherwise it is extended. Returns zero on
1893 *      success. The skb is freed on error.
1894 */
1895 
1896static inline int skb_padto(struct sk_buff *skb, unsigned int len)
1897{
1898        unsigned int size = skb->len;
1899        if (likely(size >= len))
1900                return 0;
1901        return skb_pad(skb, len - size);
1902}
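
/*
 * Example (sketch): padding runt frames in a transmit routine.  Note
 * that the skb has already been freed when skb_padto() fails, so it
 * must not be touched again; returning NETDEV_TX_OK is the usual
 * response.
 *
 *	if (skb_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;
 */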
1903
1904static inline int skb_add_data(struct sk_buff *skb,
1905                               char __user *from, int copy)
1906{
1907        const int off = skb->len;
1908
1909        if (skb->ip_summed == CHECKSUM_NONE) {
1910                int err = 0;
1911                __wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
1912                                                            copy, 0, &err);
1913                if (!err) {
1914                        skb->csum = csum_block_add(skb->csum, csum, off);
1915                        return 0;
1916                }
1917        } else if (!copy_from_user(skb_put(skb, copy), from, copy))
1918                return 0;
1919
1920        __skb_trim(skb, off);
1921        return -EFAULT;
1922}
1923
1924static inline int skb_can_coalesce(struct sk_buff *skb, int i,
1925                                   const struct page *page, int off)
1926{
1927        if (i) {
1928                const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1929
1930                return page == skb_frag_page(frag) &&
1931                       off == frag->page_offset + skb_frag_size(frag);
1932        }
1933        return 0;
1934}
1935
1936static inline int __skb_linearize(struct sk_buff *skb)
1937{
1938        return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
1939}
1940
1941/**
1942 *      skb_linearize - convert paged skb to linear one
1943 *      @skb: buffer to linearize
1944 *
1945 *      If there is no free memory -ENOMEM is returned, otherwise zero
1946 *      is returned and the old skb data released.
1947 */
1948static inline int skb_linearize(struct sk_buff *skb)
1949{
1950        return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
1951}
1952
1953/**
1954 *      skb_linearize_cow - make sure skb is linear and writable
1955 *      @skb: buffer to process
1956 *
1957 *      If there is no free memory -ENOMEM is returned, otherwise zero
1958 *      is returned and the old skb data released.
1959 */
1960static inline int skb_linearize_cow(struct sk_buff *skb)
1961{
1962        return skb_is_nonlinear(skb) || skb_cloned(skb) ?
1963               __skb_linearize(skb) : 0;
1964}
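
/*
 * Example (sketch): a driver whose hardware cannot do scatter/gather
 * might flatten the skb before mapping skb->data; code that also needs
 * to modify the data would use skb_linearize_cow() instead.
 *
 *	if (skb_linearize(skb))
 *		goto drop;
 */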
1965
1966/**
1967 *      skb_postpull_rcsum - update checksum for received skb after pull
1968 *      @skb: buffer to update
1969 *      @start: start of data before pull
1970 *      @len: length of data pulled
1971 *
1972 *      After doing a pull on a received packet, you need to call this to
1973 *      update the CHECKSUM_COMPLETE checksum, or set ip_summed to
1974 *      CHECKSUM_NONE so that it can be recomputed from scratch.
1975 */
1976
1977static inline void skb_postpull_rcsum(struct sk_buff *skb,
1978                                      const void *start, unsigned int len)
1979{
1980        if (skb->ip_summed == CHECKSUM_COMPLETE)
1981                skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
1982}
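
/*
 * Example (sketch): removing an outer header of "hlen" bytes from a
 * received packet without invalidating a CHECKSUM_COMPLETE value; the
 * checksum adjustment uses the data start as it was before the pull.
 *
 *	skb_postpull_rcsum(skb, skb->data, hlen);
 *	skb_pull(skb, hlen);
 *
 * skb_pull_rcsum(), declared below, combines both steps.
 */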
1983
1984unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
1985
1986/**
1987 *      pskb_trim_rcsum - trim received skb and update checksum
1988 *      @skb: buffer to trim
1989 *      @len: new length
1990 *
1991 *      This is exactly the same as pskb_trim except that it ensures the
1992 *      checksum of received packets is still valid after the operation.
1993 */
1994
1995static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
1996{
1997        if (likely(len >= skb->len))
1998                return 0;
1999        if (skb->ip_summed == CHECKSUM_COMPLETE)
2000                skb->ip_summed = CHECKSUM_NONE;
2001        return __pskb_trim(skb, len);
2002}
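
/*
 * Example (sketch): trimming link-layer padding off a received IPv4
 * packet once the link-layer header has been pulled (skb->data points
 * at the IP header).  "iph" is a hypothetical pointer to the already
 * validated header.
 *
 *	if (pskb_trim_rcsum(skb, ntohs(iph->tot_len)))
 *		goto drop;
 */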
2003
2004#define skb_queue_walk(queue, skb) \
2005                for (skb = (queue)->next;                                       \
2006                     skb != (struct sk_buff *)(queue);                          \
2007                     skb = skb->next)
2008
2009#define skb_queue_walk_safe(queue, skb, tmp)                                    \
2010                for (skb = (queue)->next, tmp = skb->next;                      \
2011                     skb != (struct sk_buff *)(queue);                          \
2012                     skb = tmp, tmp = skb->next)
2013
2014#define skb_queue_walk_from(queue, skb)                                         \
2015                for (; skb != (struct sk_buff *)(queue);                        \
2016                     skb = skb->next)
2017
2018#define skb_queue_walk_from_safe(queue, skb, tmp)                               \
2019                for (tmp = skb->next;                                           \
2020                     skb != (struct sk_buff *)(queue);                          \
2021                     skb = tmp, tmp = skb->next)
2022
2023#define skb_queue_reverse_walk(queue, skb) \
2024                for (skb = (queue)->prev;                                       \
2025                     skb != (struct sk_buff *)(queue);                          \
2026                     skb = skb->prev)
2027
2028#define skb_queue_reverse_walk_safe(queue, skb, tmp)                            \
2029                for (skb = (queue)->prev, tmp = skb->prev;                      \
2030                     skb != (struct sk_buff *)(queue);                          \
2031                     skb = tmp, tmp = skb->prev)
2032
2033#define skb_queue_reverse_walk_from_safe(queue, skb, tmp)                       \
2034                for (tmp = skb->prev;                                           \
2035                     skb != (struct sk_buff *)(queue);                          \
2036                     skb = tmp, tmp = skb->prev)
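
/*
 * Example (sketch): the _safe variants must be used when the current
 * entry may be unlinked inside the loop.  The queue lock must be held
 * by the caller; matches_dead_flow() is hypothetical.
 *
 *	skb_queue_walk_safe(&sk->sk_receive_queue, skb, tmp) {
 *		if (matches_dead_flow(skb)) {
 *			__skb_unlink(skb, &sk->sk_receive_queue);
 *			kfree_skb(skb);
 *		}
 *	}
 */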
2037
2038static inline bool skb_has_frag_list(const struct sk_buff *skb)
2039{
2040        return skb_shinfo(skb)->frag_list != NULL;
2041}
2042
2043static inline void skb_frag_list_init(struct sk_buff *skb)
2044{
2045        skb_shinfo(skb)->frag_list = NULL;
2046}
2047
2048static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
2049{
2050        frag->next = skb_shinfo(skb)->frag_list;
2051        skb_shinfo(skb)->frag_list = frag;
2052}
2053
2054#define skb_walk_frags(skb, iter)       \
2055        for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
2056
2057extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
2058                                           int *peeked, int *err);
2059extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
2060                                         int noblock, int *err);
2061extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
2062                                     struct poll_table_struct *wait);
2063extern int             skb_copy_datagram_iovec(const struct sk_buff *from,
2064                                               int offset, struct iovec *to,
2065                                               int size);
2066extern int             skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
2067                                                        int hlen,
2068                                                        struct iovec *iov);
2069extern int             skb_copy_datagram_from_iovec(struct sk_buff *skb,
2070                                                    int offset,
2071                                                    const struct iovec *from,
2072                                                    int from_offset,
2073                                                    int len);
2074extern int             skb_copy_datagram_const_iovec(const struct sk_buff *from,
2075                                                     int offset,
2076                                                     const struct iovec *to,
2077                                                     int to_offset,
2078                                                     int size);
2079extern void            skb_free_datagram(struct sock *sk, struct sk_buff *skb);
2080extern void            skb_free_datagram_locked(struct sock *sk,
2081                                                struct sk_buff *skb);
2082extern int             skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
2083                                         unsigned int flags);
2084extern __wsum          skb_checksum(const struct sk_buff *skb, int offset,
2085                                    int len, __wsum csum);
2086extern int             skb_copy_bits(const struct sk_buff *skb, int offset,
2087                                     void *to, int len);
2088extern int             skb_store_bits(struct sk_buff *skb, int offset,
2089                                      const void *from, int len);
2090extern __wsum          skb_copy_and_csum_bits(const struct sk_buff *skb,
2091                                              int offset, u8 *to, int len,
2092                                              __wsum csum);
2093extern int             skb_splice_bits(struct sk_buff *skb,
2094                                                unsigned int offset,
2095                                                struct pipe_inode_info *pipe,
2096                                                unsigned int len,
2097                                                unsigned int flags);
2098extern void            skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
2099extern void            skb_split(struct sk_buff *skb,
2100                                 struct sk_buff *skb1, const u32 len);
2101extern int             skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
2102                                 int shiftlen);
2103
2104extern struct sk_buff *skb_segment(struct sk_buff *skb,
2105                                   netdev_features_t features);
2106
2107static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
2108                                       int len, void *buffer)
2109{
2110        int hlen = skb_headlen(skb);
2111
2112        if (hlen - offset >= len)
2113                return skb->data + offset;
2114
2115        if (skb_copy_bits(skb, offset, buffer, len) < 0)
2116                return NULL;
2117
2118        return buffer;
2119}
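
/*
 * Example (sketch): reading the TCP header whether or not it sits in
 * the linear area.  "thoff" is a hypothetical transport offset, and
 * struct tcphdr comes from <linux/tcp.h>.
 *
 *	struct tcphdr _tcph;
 *	const struct tcphdr *th;
 *
 *	th = skb_header_pointer(skb, thoff, sizeof(_tcph), &_tcph);
 *	if (!th)
 *		goto drop;
 */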
2120
2121static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
2122                                             void *to,
2123                                             const unsigned int len)
2124{
2125        memcpy(to, skb->data, len);
2126}
2127
2128static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
2129                                                    const int offset, void *to,
2130                                                    const unsigned int len)
2131{
2132        memcpy(to, skb->data + offset, len);
2133}
2134
2135static inline void skb_copy_to_linear_data(struct sk_buff *skb,
2136                                           const void *from,
2137                                           const unsigned int len)
2138{
2139        memcpy(skb->data, from, len);
2140}
2141
2142static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
2143                                                  const int offset,
2144                                                  const void *from,
2145                                                  const unsigned int len)
2146{
2147        memcpy(skb->data + offset, from, len);
2148}
2149
2150extern void skb_init(void);
2151
2152static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
2153{
2154        return skb->tstamp;
2155}
2156
2157/**
2158 *      skb_get_timestamp - get timestamp from a skb
2159 *      @skb: skb to get stamp from
2160 *      @stamp: pointer to struct timeval to store stamp in
2161 *
2162 *      The timestamp is stored in the skb as a ktime_t value.
2163 *      This function converts it to a struct timeval and stores
2164 *      it in @stamp.
2165 */
2166static inline void skb_get_timestamp(const struct sk_buff *skb,
2167                                     struct timeval *stamp)
2168{
2169        *stamp = ktime_to_timeval(skb->tstamp);
2170}
2171
2172static inline void skb_get_timestampns(const struct sk_buff *skb,
2173                                       struct timespec *stamp)
2174{
2175        *stamp = ktime_to_timespec(skb->tstamp);
2176}
2177
2178static inline void __net_timestamp(struct sk_buff *skb)
2179{
2180        skb->tstamp = ktime_get_real();
2181}
2182
2183static inline ktime_t net_timedelta(ktime_t t)
2184{
2185        return ktime_sub(ktime_get_real(), t);
2186}
2187
2188static inline ktime_t net_invalid_timestamp(void)
2189{
2190        return ktime_set(0, 0);
2191}
2192
2193extern void skb_timestamping_init(void);
2194
2195#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
2196
2197extern void skb_clone_tx_timestamp(struct sk_buff *skb);
2198extern bool skb_defer_rx_timestamp(struct sk_buff *skb);
2199
2200#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */
2201
2202static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
2203{
2204}
2205
2206static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
2207{
2208        return false;
2209}
2210
2211#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */
2212
2213/**
2214 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
2215 *
2216 * PHY drivers may accept clones of transmitted packets for
2217 * timestamping via their phy_driver.txtstamp method. These drivers
2218 * must call this function to return the skb back to the stack, with
2219 * or without a timestamp.
2220 *
2221 * @skb: clone of the original outgoing packet
2222 * @hwtstamps: hardware time stamps, may be NULL if not available
2223 *
2224 */
2225void skb_complete_tx_timestamp(struct sk_buff *skb,
2226                               struct skb_shared_hwtstamps *hwtstamps);
2227
2228/**
2229 * skb_tstamp_tx - queue clone of skb with send time stamps
2230 * @orig_skb:   the original outgoing packet
2231 * @hwtstamps:  hardware time stamps, may be NULL if not available
2232 *
2233 * If the skb has a socket associated, then this function clones the
2234 * skb (thus sharing the actual data and optional structures), stores
2235 * the optional hardware time stamping information (if non-NULL) or
2236 * generates a software time stamp (otherwise), then queues the clone
2237 * to the error queue of the socket.  Errors are silently ignored.
2238 */
2239extern void skb_tstamp_tx(struct sk_buff *orig_skb,
2240                        struct skb_shared_hwtstamps *hwtstamps);
2241
2242static inline void sw_tx_timestamp(struct sk_buff *skb)
2243{
2244        if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
2245            !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
2246                skb_tstamp_tx(skb, NULL);
2247}
2248
2249/**
2250 * skb_tx_timestamp() - Driver hook for transmit timestamping
2251 *
2252 * Ethernet MAC drivers should call this function in their hard_start_xmit()
2253 * function immediately before giving the sk_buff to the MAC hardware.
2254 *
2255 * @skb: A socket buffer.
2256 */
2257static inline void skb_tx_timestamp(struct sk_buff *skb)
2258{
2259        skb_clone_tx_timestamp(skb);
2260        sw_tx_timestamp(skb);
2261}
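
/*
 * Example (sketch): in a hypothetical ndo_start_xmit implementation the
 * hook sits immediately before the descriptor is handed to the MAC;
 * my_hw_queue_frame() is a made-up hardware hand-off.
 *
 *	skb_tx_timestamp(skb);
 *	my_hw_queue_frame(priv, skb);
 *	return NETDEV_TX_OK;
 */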
2262
2263/**
2264 * skb_complete_wifi_ack - deliver skb with wifi status
2265 *
2266 * @skb: the original outgoing packet
2267 * @acked: ack status
2268 *
2269 */
2270void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
2271
2272extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
2273extern __sum16 __skb_checksum_complete(struct sk_buff *skb);
2274
2275static inline int skb_csum_unnecessary(const struct sk_buff *skb)
2276{
2277        return skb->ip_summed & CHECKSUM_UNNECESSARY;
2278}
2279
2280/**
2281 *      skb_checksum_complete - Calculate checksum of an entire packet
2282 *      @skb: packet to process
2283 *
2284 *      This function calculates the checksum over the entire packet plus
2285 *      the value of skb->csum.  The latter can be used to supply the
2286 *      checksum of a pseudo header as used by TCP/UDP.  It returns the
2287 *      checksum.
2288 *
2289 *      For protocols that contain complete checksums such as ICMP/TCP/UDP,
2290 *      this function can be used to verify the checksum on received
2291 *      packets.  In that case the function should return zero if the
2292 *      checksum is correct.  In particular, this function will return zero
2293 *      if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
2294 *      hardware has already verified the correctness of the checksum.
2295 */
2296static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
2297{
2298        return skb_csum_unnecessary(skb) ?
2299               0 : __skb_checksum_complete(skb);
2300}
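
/*
 * Example (sketch): the usual receive-side pattern; a non-zero return
 * means the checksum did not verify and the packet should be dropped.
 *
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;
 */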
2301
2302#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2303extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
2304static inline void nf_conntrack_put(struct nf_conntrack *nfct)
2305{
2306        if (nfct && atomic_dec_and_test(&nfct->use))
2307                nf_conntrack_destroy(nfct);
2308}
2309static inline void nf_conntrack_get(struct nf_conntrack *nfct)
2310{
2311        if (nfct)
2312                atomic_inc(&nfct->use);
2313}
2314#endif
2315#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
2316static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
2317{
2318        if (skb)
2319                atomic_inc(&skb->users);
2320}
2321static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
2322{
2323        if (skb)
2324                kfree_skb(skb);
2325}
2326#endif
2327#ifdef CONFIG_BRIDGE_NETFILTER
2328static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
2329{
2330        if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
2331                kfree(nf_bridge);
2332}
2333static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
2334{
2335        if (nf_bridge)
2336                atomic_inc(&nf_bridge->use);
2337}
2338#endif /* CONFIG_BRIDGE_NETFILTER */
2339static inline void nf_reset(struct sk_buff *skb)
2340{
2341#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2342        nf_conntrack_put(skb->nfct);
2343        skb->nfct = NULL;
2344#endif
2345#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
2346        nf_conntrack_put_reasm(skb->nfct_reasm);
2347        skb->nfct_reasm = NULL;
2348#endif
2349#ifdef CONFIG_BRIDGE_NETFILTER
2350        nf_bridge_put(skb->nf_bridge);
2351        skb->nf_bridge = NULL;
2352#endif
2353}
2354
2355/* Note: This doesn't put any conntrack and bridge info in dst. */
2356static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
2357{
2358#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2359        dst->nfct = src->nfct;
2360        nf_conntrack_get(src->nfct);
2361        dst->nfctinfo = src->nfctinfo;
2362#endif
2363#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
2364        dst->nfct_reasm = src->nfct_reasm;
2365        nf_conntrack_get_reasm(src->nfct_reasm);
2366#endif
2367#ifdef CONFIG_BRIDGE_NETFILTER
2368        dst->nf_bridge  = src->nf_bridge;
2369        nf_bridge_get(src->nf_bridge);
2370#endif
2371}
2372
2373static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
2374{
2375#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2376        nf_conntrack_put(dst->nfct);
2377#endif
2378#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
2379        nf_conntrack_put_reasm(dst->nfct_reasm);
2380#endif
2381#ifdef CONFIG_BRIDGE_NETFILTER
2382        nf_bridge_put(dst->nf_bridge);
2383#endif
2384        __nf_copy(dst, src);
2385}
2386
2387#ifdef CONFIG_NETWORK_SECMARK
2388static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
2389{
2390        to->secmark = from->secmark;
2391}
2392
2393static inline void skb_init_secmark(struct sk_buff *skb)
2394{
2395        skb->secmark = 0;
2396}
2397#else
2398static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
2399{ }
2400
2401static inline void skb_init_secmark(struct sk_buff *skb)
2402{ }
2403#endif
2404
2405static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
2406{
2407        skb->queue_mapping = queue_mapping;
2408}
2409
2410static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
2411{
2412        return skb->queue_mapping;
2413}
2414
2415static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
2416{
2417        to->queue_mapping = from->queue_mapping;
2418}
2419
2420static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
2421{
2422        skb->queue_mapping = rx_queue + 1;
2423}
2424
2425static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
2426{
2427        return skb->queue_mapping - 1;
2428}
2429
2430static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
2431{
2432        return skb->queue_mapping != 0;
2433}
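
/*
 * Example (sketch): a multiqueue driver records which hardware ring a
 * frame arrived on so that later steering decisions can reuse it.
 * "ring" is a hypothetical per-queue structure.
 *
 *	skb_record_rx_queue(skb, ring->index);
 *	napi_gro_receive(&ring->napi, skb);
 */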
2434
2435extern u16 __skb_tx_hash(const struct net_device *dev,
2436                         const struct sk_buff *skb,
2437                         unsigned int num_tx_queues);
2438
2439#ifdef CONFIG_XFRM
2440static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
2441{
2442        return skb->sp;
2443}
2444#else
2445static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
2446{
2447        return NULL;
2448}
2449#endif
2450
2451static inline int skb_is_gso(const struct sk_buff *skb)
2452{
2453        return skb_shinfo(skb)->gso_size;
2454}
2455
2456static inline int skb_is_gso_v6(const struct sk_buff *skb)
2457{
2458        return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
2459}
2460
2461extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);
2462
2463static inline bool skb_warn_if_lro(const struct sk_buff *skb)
2464{
2465        /* LRO sets gso_size but not gso_type, whereas if GSO is really
2466         * wanted then gso_type will be set. */
2467        const struct skb_shared_info *shinfo = skb_shinfo(skb);
2468
2469        if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
2470            unlikely(shinfo->gso_type == 0)) {
2471                __skb_warn_lro_forwarding(skb);
2472                return true;
2473        }
2474        return false;
2475}
2476
2477static inline void skb_forward_csum(struct sk_buff *skb)
2478{
2479        /* Unfortunately we don't support this one.  Any brave souls? */
2480        if (skb->ip_summed == CHECKSUM_COMPLETE)
2481                skb->ip_summed = CHECKSUM_NONE;
2482}
2483
2484/**
2485 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
2486 * @skb: skb to check
2487 *
2488 * Fresh skbs have their ip_summed set to CHECKSUM_NONE.
2489 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
2490 * use this helper, to document places where we make this assertion.
2491 */
2492static inline void skb_checksum_none_assert(const struct sk_buff *skb)
2493{
2494#ifdef DEBUG
2495        BUG_ON(skb->ip_summed != CHECKSUM_NONE);
2496#endif
2497}
2498
2499bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
2500
2501static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
2502{
2503        if (irqs_disabled())
2504                return false;
2505
2506        if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
2507                return false;
2508
2509        if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
2510                return false;
2511
2512        skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
2513        if (skb_end_pointer(skb) - skb->head < skb_size)
2514                return false;
2515
2516        if (skb_shared(skb) || skb_cloned(skb))
2517                return false;
2518
2519        return true;
2520}
2521#endif  /* __KERNEL__ */
2522#endif  /* _LINUX_SKBUFF_H */
2523