linux/include/linux/skbuff.h
<<
>>
Prefs
   1/*
   2 *      Definitions for the 'struct sk_buff' memory handlers.
   3 *
   4 *      Authors:
   5 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
   6 *              Florian La Roche, <rzsfl@rz.uni-sb.de>
   7 *
   8 *      This program is free software; you can redistribute it and/or
   9 *      modify it under the terms of the GNU General Public License
  10 *      as published by the Free Software Foundation; either version
  11 *      2 of the License, or (at your option) any later version.
  12 */
  13
  14#ifndef _LINUX_SKBUFF_H
  15#define _LINUX_SKBUFF_H
  16
  17#include <linux/kernel.h>
  18#include <linux/kmemcheck.h>
  19#include <linux/compiler.h>
  20#include <linux/time.h>
  21#include <linux/bug.h>
  22#include <linux/cache.h>
  23
  24#include <linux/atomic.h>
  25#include <asm/types.h>
  26#include <linux/spinlock.h>
  27#include <linux/net.h>
  28#include <linux/textsearch.h>
  29#include <net/checksum.h>
  30#include <linux/rcupdate.h>
  31#include <linux/dmaengine.h>
  32#include <linux/hrtimer.h>
  33#include <linux/dma-mapping.h>
  34#include <linux/netdev_features.h>
  35#include <net/flow_keys.h>
  36
  37/* Don't change this without changing skb_csum_unnecessary! */
  38#define CHECKSUM_NONE 0
  39#define CHECKSUM_UNNECESSARY 1
  40#define CHECKSUM_COMPLETE 2
  41#define CHECKSUM_PARTIAL 3
  42
  43#define SKB_DATA_ALIGN(X)       (((X) + (SMP_CACHE_BYTES - 1)) & \
  44                                 ~(SMP_CACHE_BYTES - 1))
  45#define SKB_WITH_OVERHEAD(X)    \
  46        ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
  47#define SKB_MAX_ORDER(X, ORDER) \
  48        SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
  49#define SKB_MAX_HEAD(X)         (SKB_MAX_ORDER((X), 0))
  50#define SKB_MAX_ALLOC           (SKB_MAX_ORDER(0, 2))
  51
  52/* return minimum truesize of one skb containing X bytes of data */
  53#define SKB_TRUESIZE(X) ((X) +                                          \
  54                         SKB_DATA_ALIGN(sizeof(struct sk_buff)) +       \
  55                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
  56
  57/* A. Checksumming of received packets by device.
  58 *
  59 *      NONE: device failed to checksum this packet.
  60 *              skb->csum is undefined.
  61 *
  62 *      UNNECESSARY: device parsed packet and wouldbe verified checksum.
  63 *              skb->csum is undefined.
  64 *            It is bad option, but, unfortunately, many of vendors do this.
  65 *            Apparently with secret goal to sell you new device, when you
  66 *            will add new protocol to your host. F.e. IPv6. 8)
  67 *
  68 *      COMPLETE: the most generic way. Device supplied checksum of _all_
  69 *          the packet as seen by netif_rx in skb->csum.
  70 *          NOTE: Even if device supports only some protocols, but
  71 *          is able to produce some skb->csum, it MUST use COMPLETE,
  72 *          not UNNECESSARY.
  73 *
  74 *      PARTIAL: identical to the case for output below.  This may occur
  75 *          on a packet received directly from another Linux OS, e.g.,
  76 *          a virtualised Linux kernel on the same host.  The packet can
  77 *          be treated in the same way as UNNECESSARY except that on
  78 *          output (i.e., forwarding) the checksum must be filled in
  79 *          by the OS or the hardware.
  80 *
  81 * B. Checksumming on output.
  82 *
  83 *      NONE: skb is checksummed by protocol or csum is not required.
  84 *
  85 *      PARTIAL: device is required to csum packet as seen by hard_start_xmit
  86 *      from skb->csum_start to the end and to record the checksum
  87 *      at skb->csum_start + skb->csum_offset.
  88 *
  89 *      Device must show its capabilities in dev->features, set
  90 *      at device setup time.
  91 *      NETIF_F_HW_CSUM - it is clever device, it is able to checksum
  92 *                        everything.
  93 *      NETIF_F_IP_CSUM - device is dumb. It is able to csum only
  94 *                        TCP/UDP over IPv4. Sigh. Vendors like this
  95 *                        way by an unknown reason. Though, see comment above
  96 *                        about CHECKSUM_UNNECESSARY. 8)
  97 *      NETIF_F_IPV6_CSUM about as dumb as the last one but does IPv6 instead.
  98 *
  99 *      UNNECESSARY: device will do per protocol specific csum. Protocol drivers
 100 *      that do not want net to perform the checksum calculation should use
 101 *      this flag in their outgoing skbs.
 102 *      NETIF_F_FCOE_CRC  this indicates the device can do FCoE FC CRC
 103 *                        offload. Correspondingly, the FCoE protocol driver
 104 *                        stack should use CHECKSUM_UNNECESSARY.
 105 *
 106 *      Any questions? No questions, good.              --ANK
 107 */
 108
 109struct net_device;
 110struct scatterlist;
 111struct pipe_inode_info;
 112
 113#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 114struct nf_conntrack {
 115        atomic_t use;
 116};
 117#endif
 118
 119#ifdef CONFIG_BRIDGE_NETFILTER
 120struct nf_bridge_info {
 121        atomic_t                use;
 122        unsigned int            mask;
 123        struct net_device       *physindev;
 124        struct net_device       *physoutdev;
 125        unsigned long           data[32 / sizeof(unsigned long)];
 126};
 127#endif
 128
 129struct sk_buff_head {
 130        /* These two members must be first. */
 131        struct sk_buff  *next;
 132        struct sk_buff  *prev;
 133
 134        __u32           qlen;
 135        spinlock_t      lock;
 136};
 137
 138struct sk_buff;
 139
 140/* To allow 64K frame to be packed as single skb without frag_list we
 141 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
 142 * buffers which do not start on a page boundary.
 143 *
 144 * Since GRO uses frags we allocate at least 16 regardless of page
 145 * size.
 146 */
 147#if (65536/PAGE_SIZE + 1) < 16
 148#define MAX_SKB_FRAGS 16UL
 149#else
 150#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
 151#endif
 152
 153typedef struct skb_frag_struct skb_frag_t;
 154
 155struct skb_frag_struct {
 156        struct {
 157                struct page *p;
 158        } page;
 159#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
 160        __u32 page_offset;
 161        __u32 size;
 162#else
 163        __u16 page_offset;
 164        __u16 size;
 165#endif
 166};
 167
 168static inline unsigned int skb_frag_size(const skb_frag_t *frag)
 169{
 170        return frag->size;
 171}
 172
 173static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
 174{
 175        frag->size = size;
 176}
 177
 178static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
 179{
 180        frag->size += delta;
 181}
 182
 183static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
 184{
 185        frag->size -= delta;
 186}
 187
 188#define HAVE_HW_TIME_STAMP
 189
 190/**
 191 * struct skb_shared_hwtstamps - hardware time stamps
 192 * @hwtstamp:   hardware time stamp transformed into duration
 193 *              since arbitrary point in time
 194 * @syststamp:  hwtstamp transformed to system time base
 195 *
 196 * Software time stamps generated by ktime_get_real() are stored in
 197 * skb->tstamp. The relation between the different kinds of time
 198 * stamps is as follows:
 199 *
 200 * syststamp and tstamp can be compared against each other in
 201 * arbitrary combinations.  The accuracy of a
 202 * syststamp/tstamp/"syststamp from other device" comparison is
 203 * limited by the accuracy of the transformation into system time
 204 * base. This depends on the device driver and its underlying
 205 * hardware.
 206 *
 207 * hwtstamps can only be compared against other hwtstamps from
 208 * the same device.
 209 *
 210 * This structure is attached to packets as part of the
 211 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 212 */
 213struct skb_shared_hwtstamps {
 214        ktime_t hwtstamp;
 215        ktime_t syststamp;
 216};
 217
 218/* Definitions for tx_flags in struct skb_shared_info */
 219enum {
 220        /* generate hardware time stamp */
 221        SKBTX_HW_TSTAMP = 1 << 0,
 222
 223        /* generate software time stamp */
 224        SKBTX_SW_TSTAMP = 1 << 1,
 225
 226        /* device driver is going to provide hardware time stamp */
 227        SKBTX_IN_PROGRESS = 1 << 2,
 228
 229        /* device driver supports TX zero-copy buffers */
 230        SKBTX_DEV_ZEROCOPY = 1 << 3,
 231
 232        /* generate wifi status information (where possible) */
 233        SKBTX_WIFI_STATUS = 1 << 4,
 234
 235        /* This indicates at least one fragment might be overwritten
 236         * (as in vmsplice(), sendfile() ...)
 237         * If we need to compute a TX checksum, we'll need to copy
 238         * all frags to avoid possible bad checksum
 239         */
 240        SKBTX_SHARED_FRAG = 1 << 5,
 241};
 242
 243/*
 244 * The callback notifies userspace to release buffers when skb DMA is done in
 245 * lower device, the skb last reference should be 0 when calling this.
 246 * The zerocopy_success argument is true if zero copy transmit occurred,
 247 * false on data copy or out of memory error caused by data copy attempt.
 248 * The ctx field is used to track device context.
 249 * The desc field is used to track userspace buffer index.
 250 */
 251struct ubuf_info {
 252        void (*callback)(struct ubuf_info *, bool zerocopy_success);
 253        void *ctx;
 254        unsigned long desc;
 255};
 256
 257/* This data is invariant across clones and lives at
 258 * the end of the header data, ie. at skb->end.
 259 */
 260struct skb_shared_info {
 261        unsigned char   nr_frags;
 262        __u8            tx_flags;
 263        unsigned short  gso_size;
 264        /* Warning: this field is not always filled in (UFO)! */
 265        unsigned short  gso_segs;
 266        unsigned short  gso_type;
 267        struct sk_buff  *frag_list;
 268        struct skb_shared_hwtstamps hwtstamps;
 269        __be32          ip6_frag_id;
 270
 271        /*
 272         * Warning : all fields before dataref are cleared in __alloc_skb()
 273         */
 274        atomic_t        dataref;
 275
 276        /* Intermediate layers must ensure that destructor_arg
 277         * remains valid until skb destructor */
 278        void *          destructor_arg;
 279
 280        /* must be last field, see pskb_expand_head() */
 281        skb_frag_t      frags[MAX_SKB_FRAGS];
 282};
 283
 284/* We divide dataref into two halves.  The higher 16 bits hold references
 285 * to the payload part of skb->data.  The lower 16 bits hold references to
 286 * the entire skb->data.  A clone of a headerless skb holds the length of
 287 * the header in skb->hdr_len.
 288 *
 289 * All users must obey the rule that the skb->data reference count must be
 290 * greater than or equal to the payload reference count.
 291 *
 292 * Holding a reference to the payload part means that the user does not
 293 * care about modifications to the header part of skb->data.
 294 */
 295#define SKB_DATAREF_SHIFT 16
 296#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
 297
 298
 299enum {
 300        SKB_FCLONE_UNAVAILABLE,
 301        SKB_FCLONE_ORIG,
 302        SKB_FCLONE_CLONE,
 303};
 304
 305enum {
 306        SKB_GSO_TCPV4 = 1 << 0,
 307        SKB_GSO_UDP = 1 << 1,
 308
 309        /* This indicates the skb is from an untrusted source. */
 310        SKB_GSO_DODGY = 1 << 2,
 311
 312        /* This indicates the tcp segment has CWR set. */
 313        SKB_GSO_TCP_ECN = 1 << 3,
 314
 315        SKB_GSO_TCPV6 = 1 << 4,
 316
 317        SKB_GSO_FCOE = 1 << 5,
 318
 319        SKB_GSO_GRE = 1 << 6,
 320
 321        SKB_GSO_UDP_TUNNEL = 1 << 7,
 322
 323        SKB_GSO_MPLS = 1 << 8,
 324};
 325
 326#if BITS_PER_LONG > 32
 327#define NET_SKBUFF_DATA_USES_OFFSET 1
 328#endif
 329
 330#ifdef NET_SKBUFF_DATA_USES_OFFSET
 331typedef unsigned int sk_buff_data_t;
 332#else
 333typedef unsigned char *sk_buff_data_t;
 334#endif
 335
 336#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
 337    defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
 338#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
 339#endif
 340
 341/** 
 342 *      struct sk_buff - socket buffer
 343 *      @next: Next buffer in list
 344 *      @prev: Previous buffer in list
 345 *      @tstamp: Time we arrived
 346 *      @sk: Socket we are owned by
 347 *      @dev: Device we arrived on/are leaving by
 348 *      @cb: Control buffer. Free for use by every layer. Put private vars here
 349 *      @_skb_refdst: destination entry (with norefcount bit)
 350 *      @sp: the security path, used for xfrm
 351 *      @len: Length of actual data
 352 *      @data_len: Data length
 353 *      @mac_len: Length of link layer header
 354 *      @hdr_len: writable header length of cloned skb
 355 *      @csum: Checksum (must include start/offset pair)
 356 *      @csum_start: Offset from skb->head where checksumming should start
 357 *      @csum_offset: Offset from csum_start where checksum should be stored
 358 *      @priority: Packet queueing priority
 359 *      @local_df: allow local fragmentation
 360 *      @cloned: Head may be cloned (check refcnt to be sure)
 361 *      @ip_summed: Driver fed us an IP checksum
 362 *      @nohdr: Payload reference only, must not modify header
 363 *      @nfctinfo: Relationship of this skb to the connection
 364 *      @pkt_type: Packet class
 365 *      @fclone: skbuff clone status
 366 *      @ipvs_property: skbuff is owned by ipvs
 367 *      @peeked: this packet has been seen already, so stats have been
 368 *              done for it, don't do them again
 369 *      @nf_trace: netfilter packet trace flag
 370 *      @protocol: Packet protocol from driver
 371 *      @destructor: Destruct function
 372 *      @nfct: Associated connection, if any
 373 *      @nfct_reasm: netfilter conntrack re-assembly pointer
 374 *      @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 375 *      @skb_iif: ifindex of device we arrived on
 376 *      @tc_index: Traffic control index
 377 *      @tc_verd: traffic control verdict
 378 *      @rxhash: the packet hash computed on receive
 379 *      @queue_mapping: Queue mapping for multiqueue devices
 380 *      @ndisc_nodetype: router type (from link layer)
 381 *      @ooo_okay: allow the mapping of a socket to a queue to be changed
 382 *      @l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport
 383 *              ports.
 384 *      @wifi_acked_valid: wifi_acked was set
 385 *      @wifi_acked: whether frame was acked on wifi or not
 386 *      @no_fcs:  Request NIC to treat last 4 bytes as Ethernet FCS
 387 *      @dma_cookie: a cookie to one of several possible DMA operations
 388 *              done by skb DMA functions
 389  *     @napi_id: id of the NAPI struct this skb came from
 390 *      @secmark: security marking
 391 *      @mark: Generic packet mark
 392 *      @dropcount: total number of sk_receive_queue overflows
 393 *      @vlan_proto: vlan encapsulation protocol
 394 *      @vlan_tci: vlan tag control information
 395 *      @inner_protocol: Protocol (encapsulation)
 396 *      @inner_transport_header: Inner transport layer header (encapsulation)
 397 *      @inner_network_header: Network layer header (encapsulation)
 398 *      @inner_mac_header: Link layer header (encapsulation)
 399 *      @transport_header: Transport layer header
 400 *      @network_header: Network layer header
 401 *      @mac_header: Link layer header
 402 *      @tail: Tail pointer
 403 *      @end: End pointer
 404 *      @head: Head of buffer
 405 *      @data: Data head pointer
 406 *      @truesize: Buffer size
 407 *      @users: User count - see {datagram,tcp}.c
 408 */
 409
 410struct sk_buff {
 411        /* These two members must be first. */
 412        struct sk_buff          *next;
 413        struct sk_buff          *prev;
 414
 415        ktime_t                 tstamp;
 416
 417        struct sock             *sk;
 418        struct net_device       *dev;
 419
 420        /*
 421         * This is the control buffer. It is free to use for every
 422         * layer. Please put your private variables there. If you
 423         * want to keep them across layers you have to do a skb_clone()
 424         * first. This is owned by whoever has the skb queued ATM.
 425         */
 426        char                    cb[48] __aligned(8);
 427
 428        unsigned long           _skb_refdst;
 429#ifdef CONFIG_XFRM
 430        struct  sec_path        *sp;
 431#endif
 432        unsigned int            len,
 433                                data_len;
 434        __u16                   mac_len,
 435                                hdr_len;
 436        union {
 437                __wsum          csum;
 438                struct {
 439                        __u16   csum_start;
 440                        __u16   csum_offset;
 441                };
 442        };
 443        __u32                   priority;
 444        kmemcheck_bitfield_begin(flags1);
 445        __u8                    local_df:1,
 446                                cloned:1,
 447                                ip_summed:2,
 448                                nohdr:1,
 449                                nfctinfo:3;
 450        __u8                    pkt_type:3,
 451                                fclone:2,
 452                                ipvs_property:1,
 453                                peeked:1,
 454                                nf_trace:1;
 455        kmemcheck_bitfield_end(flags1);
 456        __be16                  protocol;
 457
 458        void                    (*destructor)(struct sk_buff *skb);
 459#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 460        struct nf_conntrack     *nfct;
 461#endif
 462#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
 463        struct sk_buff          *nfct_reasm;
 464#endif
 465#ifdef CONFIG_BRIDGE_NETFILTER
 466        struct nf_bridge_info   *nf_bridge;
 467#endif
 468
 469        int                     skb_iif;
 470
 471        __u32                   rxhash;
 472
 473        __be16                  vlan_proto;
 474        __u16                   vlan_tci;
 475
 476#ifdef CONFIG_NET_SCHED
 477        __u16                   tc_index;       /* traffic control index */
 478#ifdef CONFIG_NET_CLS_ACT
 479        __u16                   tc_verd;        /* traffic control verdict */
 480#endif
 481#endif
 482
 483        __u16                   queue_mapping;
 484        kmemcheck_bitfield_begin(flags2);
 485#ifdef CONFIG_IPV6_NDISC_NODETYPE
 486        __u8                    ndisc_nodetype:2;
 487#endif
 488        __u8                    pfmemalloc:1;
 489        __u8                    ooo_okay:1;
 490        __u8                    l4_rxhash:1;
 491        __u8                    wifi_acked_valid:1;
 492        __u8                    wifi_acked:1;
 493        __u8                    no_fcs:1;
 494        __u8                    head_frag:1;
 495        /* Encapsulation protocol and NIC drivers should use
 496         * this flag to indicate to each other if the skb contains
 497         * encapsulated packet or not and maybe use the inner packet
 498         * headers if needed
 499         */
 500        __u8                    encapsulation:1;
 501        /* 6/8 bit hole (depending on ndisc_nodetype presence) */
 502        kmemcheck_bitfield_end(flags2);
 503
 504#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
 505        union {
 506                unsigned int    napi_id;
 507                dma_cookie_t    dma_cookie;
 508        };
 509#endif
 510#ifdef CONFIG_NETWORK_SECMARK
 511        __u32                   secmark;
 512#endif
 513        union {
 514                __u32           mark;
 515                __u32           dropcount;
 516                __u32           reserved_tailroom;
 517        };
 518
 519        __be16                  inner_protocol;
 520        __u16                   inner_transport_header;
 521        __u16                   inner_network_header;
 522        __u16                   inner_mac_header;
 523        __u16                   transport_header;
 524        __u16                   network_header;
 525        __u16                   mac_header;
 526        /* These elements must be at the end, see alloc_skb() for details.  */
 527        sk_buff_data_t          tail;
 528        sk_buff_data_t          end;
 529        unsigned char           *head,
 530                                *data;
 531        unsigned int            truesize;
 532        atomic_t                users;
 533};
 534
 535#ifdef __KERNEL__
 536/*
 537 *      Handling routines are only of interest to the kernel
 538 */
 539#include <linux/slab.h>
 540
 541
 542#define SKB_ALLOC_FCLONE        0x01
 543#define SKB_ALLOC_RX            0x02
 544
 545/* Returns true if the skb was allocated from PFMEMALLOC reserves */
 546static inline bool skb_pfmemalloc(const struct sk_buff *skb)
 547{
 548        return unlikely(skb->pfmemalloc);
 549}
 550
 551/*
 552 * skb might have a dst pointer attached, refcounted or not.
 553 * _skb_refdst low order bit is set if refcount was _not_ taken
 554 */
 555#define SKB_DST_NOREF   1UL
 556#define SKB_DST_PTRMASK ~(SKB_DST_NOREF)
 557
 558/**
 559 * skb_dst - returns skb dst_entry
 560 * @skb: buffer
 561 *
 562 * Returns skb dst_entry, regardless of reference taken or not.
 563 */
 564static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
 565{
 566        /* If refdst was not refcounted, check we still are in a 
 567         * rcu_read_lock section
 568         */
 569        WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
 570                !rcu_read_lock_held() &&
 571                !rcu_read_lock_bh_held());
 572        return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
 573}
 574
 575/**
 576 * skb_dst_set - sets skb dst
 577 * @skb: buffer
 578 * @dst: dst entry
 579 *
 580 * Sets skb dst, assuming a reference was taken on dst and should
 581 * be released by skb_dst_drop()
 582 */
 583static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
 584{
 585        skb->_skb_refdst = (unsigned long)dst;
 586}
 587
 588extern void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
 589                                bool force);
 590
 591/**
 592 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
 593 * @skb: buffer
 594 * @dst: dst entry
 595 *
 596 * Sets skb dst, assuming a reference was not taken on dst.
 597 * If dst entry is cached, we do not take reference and dst_release
 598 * will be avoided by refdst_drop. If dst entry is not cached, we take
 599 * reference, so that last dst_release can destroy the dst immediately.
 600 */
 601static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
 602{
 603        __skb_dst_set_noref(skb, dst, false);
 604}
 605
 606/**
 607 * skb_dst_set_noref_force - sets skb dst, without taking reference
 608 * @skb: buffer
 609 * @dst: dst entry
 610 *
 611 * Sets skb dst, assuming a reference was not taken on dst.
 612 * No reference is taken and no dst_release will be called. While for
 613 * cached dsts deferred reclaim is a basic feature, for entries that are
 614 * not cached it is caller's job to guarantee that last dst_release for
 615 * provided dst happens when nobody uses it, eg. after a RCU grace period.
 616 */
 617static inline void skb_dst_set_noref_force(struct sk_buff *skb,
 618                                           struct dst_entry *dst)
 619{
 620        __skb_dst_set_noref(skb, dst, true);
 621}
 622
 623/**
 624 * skb_dst_is_noref - Test if skb dst isn't refcounted
 625 * @skb: buffer
 626 */
 627static inline bool skb_dst_is_noref(const struct sk_buff *skb)
 628{
 629        return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
 630}
 631
 632static inline struct rtable *skb_rtable(const struct sk_buff *skb)
 633{
 634        return (struct rtable *)skb_dst(skb);
 635}
 636
 637extern void kfree_skb(struct sk_buff *skb);
 638extern void kfree_skb_list(struct sk_buff *segs);
 639extern void skb_tx_error(struct sk_buff *skb);
 640extern void consume_skb(struct sk_buff *skb);
 641extern void            __kfree_skb(struct sk_buff *skb);
 642extern struct kmem_cache *skbuff_head_cache;
 643
 644extern void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
 645extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
 646                             bool *fragstolen, int *delta_truesize);
 647
 648extern struct sk_buff *__alloc_skb(unsigned int size,
 649                                   gfp_t priority, int flags, int node);
 650extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
 651static inline struct sk_buff *alloc_skb(unsigned int size,
 652                                        gfp_t priority)
 653{
 654        return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
 655}
 656
 657static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 658                                               gfp_t priority)
 659{
 660        return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
 661}
 662
 663extern struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
 664static inline struct sk_buff *alloc_skb_head(gfp_t priority)
 665{
 666        return __alloc_skb_head(priority, -1);
 667}
 668
 669extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
 670extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
 671extern struct sk_buff *skb_clone(struct sk_buff *skb,
 672                                 gfp_t priority);
 673extern struct sk_buff *skb_copy(const struct sk_buff *skb,
 674                                gfp_t priority);
 675extern struct sk_buff *__pskb_copy(struct sk_buff *skb,
 676                                 int headroom, gfp_t gfp_mask);
 677
 678extern int             pskb_expand_head(struct sk_buff *skb,
 679                                        int nhead, int ntail,
 680                                        gfp_t gfp_mask);
 681extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
 682                                            unsigned int headroom);
 683extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 684                                       int newheadroom, int newtailroom,
 685                                       gfp_t priority);
 686extern int             skb_to_sgvec(struct sk_buff *skb,
 687                                    struct scatterlist *sg, int offset,
 688                                    int len);
 689extern int             skb_cow_data(struct sk_buff *skb, int tailbits,
 690                                    struct sk_buff **trailer);
 691extern int             skb_pad(struct sk_buff *skb, int pad);
 692#define dev_kfree_skb(a)        consume_skb(a)
 693
 694extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
 695                        int getfrag(void *from, char *to, int offset,
 696                        int len,int odd, struct sk_buff *skb),
 697                        void *from, int length);
 698
 699struct skb_seq_state {
 700        __u32           lower_offset;
 701        __u32           upper_offset;
 702        __u32           frag_idx;
 703        __u32           stepped_offset;
 704        struct sk_buff  *root_skb;
 705        struct sk_buff  *cur_skb;
 706        __u8            *frag_data;
 707};
 708
 709extern void           skb_prepare_seq_read(struct sk_buff *skb,
 710                                           unsigned int from, unsigned int to,
 711                                           struct skb_seq_state *st);
 712extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
 713                                   struct skb_seq_state *st);
 714extern void           skb_abort_seq_read(struct skb_seq_state *st);
 715
 716extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
 717                                    unsigned int to, struct ts_config *config,
 718                                    struct ts_state *state);
 719
 720extern void __skb_get_rxhash(struct sk_buff *skb);
 721static inline __u32 skb_get_rxhash(struct sk_buff *skb)
 722{
 723        if (!skb->l4_rxhash)
 724                __skb_get_rxhash(skb);
 725
 726        return skb->rxhash;
 727}
 728
 729#ifdef NET_SKBUFF_DATA_USES_OFFSET
 730static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
 731{
 732        return skb->head + skb->end;
 733}
 734
 735static inline unsigned int skb_end_offset(const struct sk_buff *skb)
 736{
 737        return skb->end;
 738}
 739#else
 740static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
 741{
 742        return skb->end;
 743}
 744
 745static inline unsigned int skb_end_offset(const struct sk_buff *skb)
 746{
 747        return skb->end - skb->head;
 748}
 749#endif
 750
 751/* Internal */
 752#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
 753
 754static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
 755{
 756        return &skb_shinfo(skb)->hwtstamps;
 757}
 758
 759/**
 760 *      skb_queue_empty - check if a queue is empty
 761 *      @list: queue head
 762 *
 763 *      Returns true if the queue is empty, false otherwise.
 764 */
 765static inline int skb_queue_empty(const struct sk_buff_head *list)
 766{
 767        return list->next == (struct sk_buff *)list;
 768}
 769
 770/**
 771 *      skb_queue_is_last - check if skb is the last entry in the queue
 772 *      @list: queue head
 773 *      @skb: buffer
 774 *
 775 *      Returns true if @skb is the last buffer on the list.
 776 */
 777static inline bool skb_queue_is_last(const struct sk_buff_head *list,
 778                                     const struct sk_buff *skb)
 779{
 780        return skb->next == (struct sk_buff *)list;
 781}
 782
 783/**
 784 *      skb_queue_is_first - check if skb is the first entry in the queue
 785 *      @list: queue head
 786 *      @skb: buffer
 787 *
 788 *      Returns true if @skb is the first buffer on the list.
 789 */
 790static inline bool skb_queue_is_first(const struct sk_buff_head *list,
 791                                      const struct sk_buff *skb)
 792{
 793        return skb->prev == (struct sk_buff *)list;
 794}
 795
 796/**
 797 *      skb_queue_next - return the next packet in the queue
 798 *      @list: queue head
 799 *      @skb: current buffer
 800 *
 801 *      Return the next packet in @list after @skb.  It is only valid to
 802 *      call this if skb_queue_is_last() evaluates to false.
 803 */
 804static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
 805                                             const struct sk_buff *skb)
 806{
 807        /* This BUG_ON may seem severe, but if we just return then we
 808         * are going to dereference garbage.
 809         */
 810        BUG_ON(skb_queue_is_last(list, skb));
 811        return skb->next;
 812}
 813
 814/**
 815 *      skb_queue_prev - return the prev packet in the queue
 816 *      @list: queue head
 817 *      @skb: current buffer
 818 *
 819 *      Return the prev packet in @list before @skb.  It is only valid to
 820 *      call this if skb_queue_is_first() evaluates to false.
 821 */
 822static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
 823                                             const struct sk_buff *skb)
 824{
 825        /* This BUG_ON may seem severe, but if we just return then we
 826         * are going to dereference garbage.
 827         */
 828        BUG_ON(skb_queue_is_first(list, skb));
 829        return skb->prev;
 830}
 831
 832/**
 833 *      skb_get - reference buffer
 834 *      @skb: buffer to reference
 835 *
 836 *      Makes another reference to a socket buffer and returns a pointer
 837 *      to the buffer.
 838 */
 839static inline struct sk_buff *skb_get(struct sk_buff *skb)
 840{
 841        atomic_inc(&skb->users);
 842        return skb;
 843}
 844
 845/*
 846 * If users == 1, we are the only owner and are can avoid redundant
 847 * atomic change.
 848 */
 849
 850/**
 851 *      skb_cloned - is the buffer a clone
 852 *      @skb: buffer to check
 853 *
 854 *      Returns true if the buffer was generated with skb_clone() and is
 855 *      one of multiple shared copies of the buffer. Cloned buffers are
 856 *      shared data so must not be written to under normal circumstances.
 857 */
 858static inline int skb_cloned(const struct sk_buff *skb)
 859{
 860        return skb->cloned &&
 861               (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
 862}
 863
 864static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
 865{
 866        might_sleep_if(pri & __GFP_WAIT);
 867
 868        if (skb_cloned(skb))
 869                return pskb_expand_head(skb, 0, 0, pri);
 870
 871        return 0;
 872}
 873
 874/**
 875 *      skb_header_cloned - is the header a clone
 876 *      @skb: buffer to check
 877 *
 878 *      Returns true if modifying the header part of the buffer requires
 879 *      the data to be copied.
 880 */
 881static inline int skb_header_cloned(const struct sk_buff *skb)
 882{
 883        int dataref;
 884
 885        if (!skb->cloned)
 886                return 0;
 887
 888        dataref = atomic_read(&skb_shinfo(skb)->dataref);
 889        dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
 890        return dataref != 1;
 891}
 892
 893/**
 894 *      skb_header_release - release reference to header
 895 *      @skb: buffer to operate on
 896 *
 897 *      Drop a reference to the header part of the buffer.  This is done
 898 *      by acquiring a payload reference.  You must not read from the header
 899 *      part of skb->data after this.
 900 */
 901static inline void skb_header_release(struct sk_buff *skb)
 902{
 903        BUG_ON(skb->nohdr);
 904        skb->nohdr = 1;
 905        atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
 906}
 907
 908/**
 909 *      skb_shared - is the buffer shared
 910 *      @skb: buffer to check
 911 *
 912 *      Returns true if more than one person has a reference to this
 913 *      buffer.
 914 */
 915static inline int skb_shared(const struct sk_buff *skb)
 916{
 917        return atomic_read(&skb->users) != 1;
 918}
 919
 920/**
 921 *      skb_share_check - check if buffer is shared and if so clone it
 922 *      @skb: buffer to check
 923 *      @pri: priority for memory allocation
 924 *
 925 *      If the buffer is shared the buffer is cloned and the old copy
 926 *      drops a reference. A new clone with a single reference is returned.
 927 *      If the buffer is not shared the original buffer is returned. When
 928 *      being called from interrupt status or with spinlocks held pri must
 929 *      be GFP_ATOMIC.
 930 *
 931 *      NULL is returned on a memory allocation failure.
 932 */
 933static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
 934{
 935        might_sleep_if(pri & __GFP_WAIT);
 936        if (skb_shared(skb)) {
 937                struct sk_buff *nskb = skb_clone(skb, pri);
 938
 939                if (likely(nskb))
 940                        consume_skb(skb);
 941                else
 942                        kfree_skb(skb);
 943                skb = nskb;
 944        }
 945        return skb;
 946}
 947
 948/*
 949 *      Copy shared buffers into a new sk_buff. We effectively do COW on
 950 *      packets to handle cases where we have a local reader and forward
 951 *      and a couple of other messy ones. The normal one is tcpdumping
 952 *      a packet thats being forwarded.
 953 */
 954
 955/**
 956 *      skb_unshare - make a copy of a shared buffer
 957 *      @skb: buffer to check
 958 *      @pri: priority for memory allocation
 959 *
 960 *      If the socket buffer is a clone then this function creates a new
 961 *      copy of the data, drops a reference count on the old copy and returns
 962 *      the new copy with the reference count at 1. If the buffer is not a clone
 963 *      the original buffer is returned. When called with a spinlock held or
 964 *      from interrupt state @pri must be %GFP_ATOMIC
 965 *
 966 *      %NULL is returned on a memory allocation failure.
 967 */
 968static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
 969                                          gfp_t pri)
 970{
 971        might_sleep_if(pri & __GFP_WAIT);
 972        if (skb_cloned(skb)) {
 973                struct sk_buff *nskb = skb_copy(skb, pri);
 974                kfree_skb(skb); /* Free our shared copy */
 975                skb = nskb;
 976        }
 977        return skb;
 978}
 979
 980/**
 981 *      skb_peek - peek at the head of an &sk_buff_head
 982 *      @list_: list to peek at
 983 *
 984 *      Peek an &sk_buff. Unlike most other operations you _MUST_
 985 *      be careful with this one. A peek leaves the buffer on the
 986 *      list and someone else may run off with it. You must hold
 987 *      the appropriate locks or have a private queue to do this.
 988 *
 989 *      Returns %NULL for an empty list or a pointer to the head element.
 990 *      The reference count is not incremented and the reference is therefore
 991 *      volatile. Use with caution.
 992 */
 993static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
 994{
 995        struct sk_buff *skb = list_->next;
 996
 997        if (skb == (struct sk_buff *)list_)
 998                skb = NULL;
 999        return skb;
1000}
1001
1002/**
1003 *      skb_peek_next - peek skb following the given one from a queue
1004 *      @skb: skb to start from
1005 *      @list_: list to peek at
1006 *
1007 *      Returns %NULL when the end of the list is met or a pointer to the
1008 *      next element. The reference count is not incremented and the
1009 *      reference is therefore volatile. Use with caution.
1010 */
1011static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
1012                const struct sk_buff_head *list_)
1013{
1014        struct sk_buff *next = skb->next;
1015
1016        if (next == (struct sk_buff *)list_)
1017                next = NULL;
1018        return next;
1019}
1020
1021/**
1022 *      skb_peek_tail - peek at the tail of an &sk_buff_head
1023 *      @list_: list to peek at
1024 *
1025 *      Peek an &sk_buff. Unlike most other operations you _MUST_
1026 *      be careful with this one. A peek leaves the buffer on the
1027 *      list and someone else may run off with it. You must hold
1028 *      the appropriate locks or have a private queue to do this.
1029 *
1030 *      Returns %NULL for an empty list or a pointer to the tail element.
1031 *      The reference count is not incremented and the reference is therefore
1032 *      volatile. Use with caution.
1033 */
1034static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
1035{
1036        struct sk_buff *skb = list_->prev;
1037
1038        if (skb == (struct sk_buff *)list_)
1039                skb = NULL;
1040        return skb;
1041
1042}
1043
1044/**
1045 *      skb_queue_len   - get queue length
1046 *      @list_: list to measure
1047 *
1048 *      Return the length of an &sk_buff queue.
1049 */
1050static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
1051{
1052        return list_->qlen;
1053}
1054
1055/**
1056 *      __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
1057 *      @list: queue to initialize
1058 *
1059 *      This initializes only the list and queue length aspects of
1060 *      an sk_buff_head object.  This allows to initialize the list
1061 *      aspects of an sk_buff_head without reinitializing things like
1062 *      the spinlock.  It can also be used for on-stack sk_buff_head
1063 *      objects where the spinlock is known to not be used.
1064 */
1065static inline void __skb_queue_head_init(struct sk_buff_head *list)
1066{
1067        list->prev = list->next = (struct sk_buff *)list;
1068        list->qlen = 0;
1069}
1070
1071/*
1072 * This function creates a split out lock class for each invocation;
1073 * this is needed for now since a whole lot of users of the skb-queue
1074 * infrastructure in drivers have different locking usage (in hardirq)
1075 * than the networking core (in softirq only). In the long run either the
1076 * network layer or drivers should need annotation to consolidate the
1077 * main types of usage into 3 classes.
1078 */
1079static inline void skb_queue_head_init(struct sk_buff_head *list)
1080{
1081        spin_lock_init(&list->lock);
1082        __skb_queue_head_init(list);
1083}
1084
1085static inline void skb_queue_head_init_class(struct sk_buff_head *list,
1086                struct lock_class_key *class)
1087{
1088        skb_queue_head_init(list);
1089        lockdep_set_class(&list->lock, class);
1090}
1091
1092/*
1093 *      Insert an sk_buff on a list.
1094 *
1095 *      The "__skb_xxxx()" functions are the non-atomic ones that
1096 *      can only be called with interrupts disabled.
1097 */
1098extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
1099static inline void __skb_insert(struct sk_buff *newsk,
1100                                struct sk_buff *prev, struct sk_buff *next,
1101                                struct sk_buff_head *list)
1102{
1103        newsk->next = next;
1104        newsk->prev = prev;
1105        next->prev  = prev->next = newsk;
1106        list->qlen++;
1107}
1108
1109static inline void __skb_queue_splice(const struct sk_buff_head *list,
1110                                      struct sk_buff *prev,
1111                                      struct sk_buff *next)
1112{
1113        struct sk_buff *first = list->next;
1114        struct sk_buff *last = list->prev;
1115
1116        first->prev = prev;
1117        prev->next = first;
1118
1119        last->next = next;
1120        next->prev = last;
1121}
1122
1123/**
1124 *      skb_queue_splice - join two skb lists, this is designed for stacks
1125 *      @list: the new list to add
1126 *      @head: the place to add it in the first list
1127 */
1128static inline void skb_queue_splice(const struct sk_buff_head *list,
1129                                    struct sk_buff_head *head)
1130{
1131        if (!skb_queue_empty(list)) {
1132                __skb_queue_splice(list, (struct sk_buff *) head, head->next);
1133                head->qlen += list->qlen;
1134        }
1135}
1136
1137/**
1138 *      skb_queue_splice_init - join two skb lists and reinitialise the emptied list
1139 *      @list: the new list to add
1140 *      @head: the place to add it in the first list
1141 *
1142 *      The list at @list is reinitialised
1143 */
1144static inline void skb_queue_splice_init(struct sk_buff_head *list,
1145                                         struct sk_buff_head *head)
1146{
1147        if (!skb_queue_empty(list)) {
1148                __skb_queue_splice(list, (struct sk_buff *) head, head->next);
1149                head->qlen += list->qlen;
1150                __skb_queue_head_init(list);
1151        }
1152}
1153
1154/**
1155 *      skb_queue_splice_tail - join two skb lists, each list being a queue
1156 *      @list: the new list to add
1157 *      @head: the place to add it in the first list
1158 */
1159static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
1160                                         struct sk_buff_head *head)
1161{
1162        if (!skb_queue_empty(list)) {
1163                __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
1164                head->qlen += list->qlen;
1165        }
1166}
1167
1168/**
1169 *      skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
1170 *      @list: the new list to add
1171 *      @head: the place to add it in the first list
1172 *
1173 *      Each of the lists is a queue.
1174 *      The list at @list is reinitialised
1175 */
1176static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
1177                                              struct sk_buff_head *head)
1178{
1179        if (!skb_queue_empty(list)) {
1180                __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
1181                head->qlen += list->qlen;
1182                __skb_queue_head_init(list);
1183        }
1184}
1185
1186/**
1187 *      __skb_queue_after - queue a buffer at the list head
1188 *      @list: list to use
1189 *      @prev: place after this buffer
1190 *      @newsk: buffer to queue
1191 *
1192 *      Queue a buffer int the middle of a list. This function takes no locks
1193 *      and you must therefore hold required locks before calling it.
1194 *
1195 *      A buffer cannot be placed on two lists at the same time.
1196 */
1197static inline void __skb_queue_after(struct sk_buff_head *list,
1198                                     struct sk_buff *prev,
1199                                     struct sk_buff *newsk)
1200{
1201        __skb_insert(newsk, prev, prev->next, list);
1202}
1203
1204extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
1205                       struct sk_buff_head *list);
1206
1207static inline void __skb_queue_before(struct sk_buff_head *list,
1208                                      struct sk_buff *next,
1209                                      struct sk_buff *newsk)
1210{
1211        __skb_insert(newsk, next->prev, next, list);
1212}
1213
1214/**
1215 *      __skb_queue_head - queue a buffer at the list head
1216 *      @list: list to use
1217 *      @newsk: buffer to queue
1218 *
1219 *      Queue a buffer at the start of a list. This function takes no locks
1220 *      and you must therefore hold required locks before calling it.
1221 *
1222 *      A buffer cannot be placed on two lists at the same time.
1223 */
1224extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
1225static inline void __skb_queue_head(struct sk_buff_head *list,
1226                                    struct sk_buff *newsk)
1227{
1228        __skb_queue_after(list, (struct sk_buff *)list, newsk);
1229}
1230
1231/**
1232 *      __skb_queue_tail - queue a buffer at the list tail
1233 *      @list: list to use
1234 *      @newsk: buffer to queue
1235 *
1236 *      Queue a buffer at the end of a list. This function takes no locks
1237 *      and you must therefore hold required locks before calling it.
1238 *
1239 *      A buffer cannot be placed on two lists at the same time.
1240 */
1241extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
1242static inline void __skb_queue_tail(struct sk_buff_head *list,
1243                                   struct sk_buff *newsk)
1244{
1245        __skb_queue_before(list, (struct sk_buff *)list, newsk);
1246}
1247
1248/*
1249 * remove sk_buff from list. _Must_ be called atomically, and with
1250 * the list known..
1251 */
1252extern void        skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
1253static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
1254{
1255        struct sk_buff *next, *prev;
1256
1257        list->qlen--;
1258        next       = skb->next;
1259        prev       = skb->prev;
1260        skb->next  = skb->prev = NULL;
1261        next->prev = prev;
1262        prev->next = next;
1263}
1264
1265/**
1266 *      __skb_dequeue - remove from the head of the queue
1267 *      @list: list to dequeue from
1268 *
1269 *      Remove the head of the list. This function does not take any locks
1270 *      so must be used with appropriate locks held only. The head item is
1271 *      returned or %NULL if the list is empty.
1272 */
1273extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
1274static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
1275{
1276        struct sk_buff *skb = skb_peek(list);
1277        if (skb)
1278                __skb_unlink(skb, list);
1279        return skb;
1280}
1281
1282/**
1283 *      __skb_dequeue_tail - remove from the tail of the queue
1284 *      @list: list to dequeue from
1285 *
1286 *      Remove the tail of the list. This function does not take any locks
1287 *      so must be used with appropriate locks held only. The tail item is
1288 *      returned or %NULL if the list is empty.
1289 */
1290extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
1291static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
1292{
1293        struct sk_buff *skb = skb_peek_tail(list);
1294        if (skb)
1295                __skb_unlink(skb, list);
1296        return skb;
1297}
1298
1299
1300static inline bool skb_is_nonlinear(const struct sk_buff *skb)
1301{
1302        return skb->data_len;
1303}
1304
1305static inline unsigned int skb_headlen(const struct sk_buff *skb)
1306{
1307        return skb->len - skb->data_len;
1308}
1309
1310static inline int skb_pagelen(const struct sk_buff *skb)
1311{
1312        int i, len = 0;
1313
1314        for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
1315                len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
1316        return len + skb_headlen(skb);
1317}
1318
1319/**
1320 * __skb_fill_page_desc - initialise a paged fragment in an skb
1321 * @skb: buffer containing fragment to be initialised
1322 * @i: paged fragment index to initialise
1323 * @page: the page to use for this fragment
1324 * @off: the offset to the data with @page
1325 * @size: the length of the data
1326 *
1327 * Initialises the @i'th fragment of @skb to point to &size bytes at
1328 * offset @off within @page.
1329 *
1330 * Does not take any additional reference on the fragment.
1331 */
1332static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
1333                                        struct page *page, int off, int size)
1334{
1335        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1336
1337        /*
1338         * Propagate page->pfmemalloc to the skb if we can. The problem is
1339         * that not all callers have unique ownership of the page. If
1340         * pfmemalloc is set, we check the mapping as a mapping implies
1341         * page->index is set (index and pfmemalloc share space).
1342         * If it's a valid mapping, we cannot use page->pfmemalloc but we
1343         * do not lose pfmemalloc information as the pages would not be
1344         * allocated using __GFP_MEMALLOC.
1345         */
1346        frag->page.p              = page;
1347        frag->page_offset         = off;
1348        skb_frag_size_set(frag, size);
1349
1350        page = compound_head(page);
1351        if (page->pfmemalloc && !page->mapping)
1352                skb->pfmemalloc = true;
1353}
1354
1355/**
1356 * skb_fill_page_desc - initialise a paged fragment in an skb
1357 * @skb: buffer containing fragment to be initialised
1358 * @i: paged fragment index to initialise
1359 * @page: the page to use for this fragment
1360 * @off: the offset to the data with @page
1361 * @size: the length of the data
1362 *
1363 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
1364 * @skb to point to &size bytes at offset @off within @page. In
1365 * addition updates @skb such that @i is the last fragment.
1366 *
1367 * Does not take any additional reference on the fragment.
1368 */
1369static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
1370                                      struct page *page, int off, int size)
1371{
1372        __skb_fill_page_desc(skb, i, page, off, size);
1373        skb_shinfo(skb)->nr_frags = i + 1;
1374}
1375
1376extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
1377                            int off, int size, unsigned int truesize);
1378
1379#define SKB_PAGE_ASSERT(skb)    BUG_ON(skb_shinfo(skb)->nr_frags)
1380#define SKB_FRAG_ASSERT(skb)    BUG_ON(skb_has_frag_list(skb))
1381#define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))
1382
1383#ifdef NET_SKBUFF_DATA_USES_OFFSET
1384static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
1385{
1386        return skb->head + skb->tail;
1387}
1388
1389static inline void skb_reset_tail_pointer(struct sk_buff *skb)
1390{
1391        skb->tail = skb->data - skb->head;
1392}
1393
1394static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
1395{
1396        skb_reset_tail_pointer(skb);
1397        skb->tail += offset;
1398}
1399
1400#else /* NET_SKBUFF_DATA_USES_OFFSET */
1401static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
1402{
1403        return skb->tail;
1404}
1405
1406static inline void skb_reset_tail_pointer(struct sk_buff *skb)
1407{
1408        skb->tail = skb->data;
1409}
1410
1411static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
1412{
1413        skb->tail = skb->data + offset;
1414}
1415
1416#endif /* NET_SKBUFF_DATA_USES_OFFSET */
1417
1418/*
1419 *      Add data to an sk_buff
1420 */
1421extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
1422static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
1423{
1424        unsigned char *tmp = skb_tail_pointer(skb);
1425        SKB_LINEAR_ASSERT(skb);
1426        skb->tail += len;
1427        skb->len  += len;
1428        return tmp;
1429}
1430
1431extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
1432static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
1433{
1434        skb->data -= len;
1435        skb->len  += len;
1436        return skb->data;
1437}
1438
1439extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
1440static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
1441{
1442        skb->len -= len;
1443        BUG_ON(skb->len < skb->data_len);
1444        return skb->data += len;
1445}
1446
1447static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
1448{
1449        return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
1450}
1451
1452extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
1453
1454static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
1455{
1456        if (len > skb_headlen(skb) &&
1457            !__pskb_pull_tail(skb, len - skb_headlen(skb)))
1458                return NULL;
1459        skb->len -= len;
1460        return skb->data += len;
1461}
1462
1463static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
1464{
1465        return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
1466}
1467
1468static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
1469{
1470        if (likely(len <= skb_headlen(skb)))
1471                return 1;
1472        if (unlikely(len > skb->len))
1473                return 0;
1474        return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
1475}
1476
1477/**
1478 *      skb_headroom - bytes at buffer head
1479 *      @skb: buffer to check
1480 *
1481 *      Return the number of bytes of free space at the head of an &sk_buff.
1482 */
1483static inline unsigned int skb_headroom(const struct sk_buff *skb)
1484{
1485        return skb->data - skb->head;
1486}
1487
1488/**
1489 *      skb_tailroom - bytes at buffer end
1490 *      @skb: buffer to check
1491 *
1492 *      Return the number of bytes of free space at the tail of an sk_buff
1493 */
1494static inline int skb_tailroom(const struct sk_buff *skb)
1495{
1496        return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
1497}
1498
1499/**
1500 *      skb_availroom - bytes at buffer end
1501 *      @skb: buffer to check
1502 *
1503 *      Return the number of bytes of free space at the tail of an sk_buff
1504 *      allocated by sk_stream_alloc()
1505 */
1506static inline int skb_availroom(const struct sk_buff *skb)
1507{
1508        if (skb_is_nonlinear(skb))
1509                return 0;
1510
1511        return skb->end - skb->tail - skb->reserved_tailroom;
1512}
1513
1514/**
1515 *      skb_reserve - adjust headroom
1516 *      @skb: buffer to alter
1517 *      @len: bytes to move
1518 *
1519 *      Increase the headroom of an empty &sk_buff by reducing the tail
1520 *      room. This is only allowed for an empty buffer.
1521 */
1522static inline void skb_reserve(struct sk_buff *skb, int len)
1523{
1524        skb->data += len;
1525        skb->tail += len;
1526}
1527
1528static inline void skb_reset_inner_headers(struct sk_buff *skb)
1529{
1530        skb->inner_mac_header = skb->mac_header;
1531        skb->inner_network_header = skb->network_header;
1532        skb->inner_transport_header = skb->transport_header;
1533}
1534
1535static inline void skb_reset_mac_len(struct sk_buff *skb)
1536{
1537        skb->mac_len = skb->network_header - skb->mac_header;
1538}
1539
1540static inline unsigned char *skb_inner_transport_header(const struct sk_buff
1541                                                        *skb)
1542{
1543        return skb->head + skb->inner_transport_header;
1544}
1545
1546static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
1547{
1548        skb->inner_transport_header = skb->data - skb->head;
1549}
1550
1551static inline void skb_set_inner_transport_header(struct sk_buff *skb,
1552                                                   const int offset)
1553{
1554        skb_reset_inner_transport_header(skb);
1555        skb->inner_transport_header += offset;
1556}
1557
1558static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
1559{
1560        return skb->head + skb->inner_network_header;
1561}
1562
1563static inline void skb_reset_inner_network_header(struct sk_buff *skb)
1564{
1565        skb->inner_network_header = skb->data - skb->head;
1566}
1567
1568static inline void skb_set_inner_network_header(struct sk_buff *skb,
1569                                                const int offset)
1570{
1571        skb_reset_inner_network_header(skb);
1572        skb->inner_network_header += offset;
1573}
1574
1575static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
1576{
1577        return skb->head + skb->inner_mac_header;
1578}
1579
1580static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
1581{
1582        skb->inner_mac_header = skb->data - skb->head;
1583}
1584
1585static inline void skb_set_inner_mac_header(struct sk_buff *skb,
1586                                            const int offset)
1587{
1588        skb_reset_inner_mac_header(skb);
1589        skb->inner_mac_header += offset;
1590}
1591static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
1592{
1593        return skb->transport_header != (typeof(skb->transport_header))~0U;
1594}
1595
1596static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
1597{
1598        return skb->head + skb->transport_header;
1599}
1600
1601static inline void skb_reset_transport_header(struct sk_buff *skb)
1602{
1603        skb->transport_header = skb->data - skb->head;
1604}
1605
1606static inline void skb_set_transport_header(struct sk_buff *skb,
1607                                            const int offset)
1608{
1609        skb_reset_transport_header(skb);
1610        skb->transport_header += offset;
1611}
1612
1613static inline unsigned char *skb_network_header(const struct sk_buff *skb)
1614{
1615        return skb->head + skb->network_header;
1616}
1617
1618static inline void skb_reset_network_header(struct sk_buff *skb)
1619{
1620        skb->network_header = skb->data - skb->head;
1621}
1622
1623static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
1624{
1625        skb_reset_network_header(skb);
1626        skb->network_header += offset;
1627}
1628
1629static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
1630{
1631        return skb->head + skb->mac_header;
1632}
1633
1634static inline int skb_mac_header_was_set(const struct sk_buff *skb)
1635{
1636        return skb->mac_header != (typeof(skb->mac_header))~0U;
1637}
1638
1639static inline void skb_reset_mac_header(struct sk_buff *skb)
1640{
1641        skb->mac_header = skb->data - skb->head;
1642}
1643
1644static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
1645{
1646        skb_reset_mac_header(skb);
1647        skb->mac_header += offset;
1648}
1649
1650static inline void skb_probe_transport_header(struct sk_buff *skb,
1651                                              const int offset_hint)
1652{
1653        struct flow_keys keys;
1654
1655        if (skb_transport_header_was_set(skb))
1656                return;
1657        else if (skb_flow_dissect(skb, &keys))
1658                skb_set_transport_header(skb, keys.thoff);
1659        else
1660                skb_set_transport_header(skb, offset_hint);
1661}
1662
1663static inline void skb_mac_header_rebuild(struct sk_buff *skb)
1664{
1665        if (skb_mac_header_was_set(skb)) {
1666                const unsigned char *old_mac = skb_mac_header(skb);
1667
1668                skb_set_mac_header(skb, -skb->mac_len);
1669                memmove(skb_mac_header(skb), old_mac, skb->mac_len);
1670        }
1671}
1672
1673static inline int skb_checksum_start_offset(const struct sk_buff *skb)
1674{
1675        return skb->csum_start - skb_headroom(skb);
1676}
1677
1678static inline int skb_transport_offset(const struct sk_buff *skb)
1679{
1680        return skb_transport_header(skb) - skb->data;
1681}
1682
1683static inline u32 skb_network_header_len(const struct sk_buff *skb)
1684{
1685        return skb->transport_header - skb->network_header;
1686}
1687
1688static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
1689{
1690        return skb->inner_transport_header - skb->inner_network_header;
1691}
1692
1693static inline int skb_network_offset(const struct sk_buff *skb)
1694{
1695        return skb_network_header(skb) - skb->data;
1696}
1697
1698static inline int skb_inner_network_offset(const struct sk_buff *skb)
1699{
1700        return skb_inner_network_header(skb) - skb->data;
1701}
1702
1703static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
1704{
1705        return pskb_may_pull(skb, skb_network_offset(skb) + len);
1706}
1707
1708/*
1709 * CPUs often take a performance hit when accessing unaligned memory
1710 * locations. The actual performance hit varies, it can be small if the
1711 * hardware handles it or large if we have to take an exception and fix it
1712 * in software.
1713 *
1714 * Since an ethernet header is 14 bytes network drivers often end up with
1715 * the IP header at an unaligned offset. The IP header can be aligned by
1716 * shifting the start of the packet by 2 bytes. Drivers should do this
1717 * with:
1718 *
1719 * skb_reserve(skb, NET_IP_ALIGN);
1720 *
1721 * The downside to this alignment of the IP header is that the DMA is now
1722 * unaligned. On some architectures the cost of an unaligned DMA is high
1723 * and this cost outweighs the gains made by aligning the IP header.
1724 *
1725 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
1726 * to be overridden.
1727 */
1728#ifndef NET_IP_ALIGN
1729#define NET_IP_ALIGN    2
1730#endif
1731
1732/*
1733 * The networking layer reserves some headroom in skb data (via
1734 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
1735 * the header has to grow. In the default case, if the header has to grow
1736 * 32 bytes or less we avoid the reallocation.
1737 *
1738 * Unfortunately this headroom changes the DMA alignment of the resulting
1739 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
1740 * on some architectures. An architecture can override this value,
1741 * perhaps setting it to a cacheline in size (since that will maintain
1742 * cacheline alignment of the DMA). It must be a power of 2.
1743 *
1744 * Various parts of the networking layer expect at least 32 bytes of
1745 * headroom, you should not reduce this.
1746 *
1747 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
1748 * to reduce average number of cache lines per packet.
1749 * get_rps_cpus() for example only access one 64 bytes aligned block :
1750 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
1751 */
1752#ifndef NET_SKB_PAD
1753#define NET_SKB_PAD     max(32, L1_CACHE_BYTES)
1754#endif
1755
1756extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
1757
1758static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
1759{
1760        if (unlikely(skb_is_nonlinear(skb))) {
1761                WARN_ON(1);
1762                return;
1763        }
1764        skb->len = len;
1765        skb_set_tail_pointer(skb, len);
1766}
1767
1768extern void skb_trim(struct sk_buff *skb, unsigned int len);
1769
1770static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
1771{
1772        if (skb->data_len)
1773                return ___pskb_trim(skb, len);
1774        __skb_trim(skb, len);
1775        return 0;
1776}
1777
1778static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
1779{
1780        return (len < skb->len) ? __pskb_trim(skb, len) : 0;
1781}
1782
1783/**
1784 *      pskb_trim_unique - remove end from a paged unique (not cloned) buffer
1785 *      @skb: buffer to alter
1786 *      @len: new length
1787 *
1788 *      This is identical to pskb_trim except that the caller knows that
1789 *      the skb is not cloned so we should never get an error due to out-
1790 *      of-memory.
1791 */
1792static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
1793{
1794        int err = pskb_trim(skb, len);
1795        BUG_ON(err);
1796}
1797
1798/**
1799 *      skb_orphan - orphan a buffer
1800 *      @skb: buffer to orphan
1801 *
1802 *      If a buffer currently has an owner then we call the owner's
1803 *      destructor function and make the @skb unowned. The buffer continues
1804 *      to exist but is no longer charged to its former owner.
1805 */
1806static inline void skb_orphan(struct sk_buff *skb)
1807{
1808        if (skb->destructor) {
1809                skb->destructor(skb);
1810                skb->destructor = NULL;
1811                skb->sk         = NULL;
1812        } else {
1813                BUG_ON(skb->sk);
1814        }
1815}
1816
1817/**
1818 *      skb_orphan_frags - orphan the frags contained in a buffer
1819 *      @skb: buffer to orphan frags from
1820 *      @gfp_mask: allocation mask for replacement pages
1821 *
1822 *      For each frag in the SKB which needs a destructor (i.e. has an
1823 *      owner) create a copy of that frag and release the original
1824 *      page by calling the destructor.
1825 */
1826static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
1827{
1828        if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
1829                return 0;
1830        return skb_copy_ubufs(skb, gfp_mask);
1831}
1832
1833/**
1834 *      __skb_queue_purge - empty a list
1835 *      @list: list to empty
1836 *
1837 *      Delete all buffers on an &sk_buff list. Each buffer is removed from
1838 *      the list and one reference dropped. This function does not take the
1839 *      list lock and the caller must hold the relevant locks to use it.
1840 */
1841extern void skb_queue_purge(struct sk_buff_head *list);
1842static inline void __skb_queue_purge(struct sk_buff_head *list)
1843{
1844        struct sk_buff *skb;
1845        while ((skb = __skb_dequeue(list)) != NULL)
1846                kfree_skb(skb);
1847}
1848
1849#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
1850#define NETDEV_FRAG_PAGE_MAX_SIZE  (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
1851#define NETDEV_PAGECNT_MAX_BIAS    NETDEV_FRAG_PAGE_MAX_SIZE
1852
1853extern void *netdev_alloc_frag(unsigned int fragsz);
1854
1855extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
1856                                          unsigned int length,
1857                                          gfp_t gfp_mask);
1858
1859/**
1860 *      netdev_alloc_skb - allocate an skbuff for rx on a specific device
1861 *      @dev: network device to receive on
1862 *      @length: length to allocate
1863 *
1864 *      Allocate a new &sk_buff and assign it a usage count of one. The
1865 *      buffer has unspecified headroom built in. Users should allocate
1866 *      the headroom they think they need without accounting for the
1867 *      built in space. The built in space is used for optimisations.
1868 *
1869 *      %NULL is returned if there is no free memory. Although this function
1870 *      allocates memory it can be called from an interrupt.
1871 */
1872static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
1873                                               unsigned int length)
1874{
1875        return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
1876}
1877
1878/* legacy helper around __netdev_alloc_skb() */
1879static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
1880                                              gfp_t gfp_mask)
1881{
1882        return __netdev_alloc_skb(NULL, length, gfp_mask);
1883}
1884
1885/* legacy helper around netdev_alloc_skb() */
1886static inline struct sk_buff *dev_alloc_skb(unsigned int length)
1887{
1888        return netdev_alloc_skb(NULL, length);
1889}
1890
1891
1892static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
1893                unsigned int length, gfp_t gfp)
1894{
1895        struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
1896
1897        if (NET_IP_ALIGN && skb)
1898                skb_reserve(skb, NET_IP_ALIGN);
1899        return skb;
1900}
1901
1902static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
1903                unsigned int length)
1904{
1905        return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
1906}
1907
1908/**
1909 *      __skb_alloc_pages - allocate pages for ps-rx on a skb and preserve pfmemalloc data
1910 *      @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX
1911 *      @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used
1912 *      @order: size of the allocation
1913 *
1914 *      Allocate a new page.
1915 *
1916 *      %NULL is returned if there is no free memory.
1917*/
1918static inline struct page *__skb_alloc_pages(gfp_t gfp_mask,
1919                                              struct sk_buff *skb,
1920                                              unsigned int order)
1921{
1922        struct page *page;
1923
1924        gfp_mask |= __GFP_COLD;
1925
1926        if (!(gfp_mask & __GFP_NOMEMALLOC))
1927                gfp_mask |= __GFP_MEMALLOC;
1928
1929        page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
1930        if (skb && page && page->pfmemalloc)
1931                skb->pfmemalloc = true;
1932
1933        return page;
1934}
1935
1936/**
1937 *      __skb_alloc_page - allocate a page for ps-rx for a given skb and preserve pfmemalloc data
1938 *      @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX
1939 *      @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used
1940 *
1941 *      Allocate a new page.
1942 *
1943 *      %NULL is returned if there is no free memory.
1944 */
1945static inline struct page *__skb_alloc_page(gfp_t gfp_mask,
1946                                             struct sk_buff *skb)
1947{
1948        return __skb_alloc_pages(gfp_mask, skb, 0);
1949}
1950
1951/**
1952 *      skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
1953 *      @page: The page that was allocated from skb_alloc_page
1954 *      @skb: The skb that may need pfmemalloc set
1955 */
1956static inline void skb_propagate_pfmemalloc(struct page *page,
1957                                             struct sk_buff *skb)
1958{
1959        if (page && page->pfmemalloc)
1960                skb->pfmemalloc = true;
1961}
1962
1963/**
1964 * skb_frag_page - retrieve the page refered to by a paged fragment
1965 * @frag: the paged fragment
1966 *
1967 * Returns the &struct page associated with @frag.
1968 */
1969static inline struct page *skb_frag_page(const skb_frag_t *frag)
1970{
1971        return frag->page.p;
1972}
1973
1974/**
1975 * __skb_frag_ref - take an addition reference on a paged fragment.
1976 * @frag: the paged fragment
1977 *
1978 * Takes an additional reference on the paged fragment @frag.
1979 */
1980static inline void __skb_frag_ref(skb_frag_t *frag)
1981{
1982        get_page(skb_frag_page(frag));
1983}
1984
1985/**
1986 * skb_frag_ref - take an addition reference on a paged fragment of an skb.
1987 * @skb: the buffer
1988 * @f: the fragment offset.
1989 *
1990 * Takes an additional reference on the @f'th paged fragment of @skb.
1991 */
1992static inline void skb_frag_ref(struct sk_buff *skb, int f)
1993{
1994        __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
1995}
1996
1997/**
1998 * __skb_frag_unref - release a reference on a paged fragment.
1999 * @frag: the paged fragment
2000 *
2001 * Releases a reference on the paged fragment @frag.
2002 */
2003static inline void __skb_frag_unref(skb_frag_t *frag)
2004{
2005        put_page(skb_frag_page(frag));
2006}
2007
2008/**
2009 * skb_frag_unref - release a reference on a paged fragment of an skb.
2010 * @skb: the buffer
2011 * @f: the fragment offset
2012 *
2013 * Releases a reference on the @f'th paged fragment of @skb.
2014 */
2015static inline void skb_frag_unref(struct sk_buff *skb, int f)
2016{
2017        __skb_frag_unref(&skb_shinfo(skb)->frags[f]);
2018}
2019
2020/**
2021 * skb_frag_address - gets the address of the data contained in a paged fragment
2022 * @frag: the paged fragment buffer
2023 *
2024 * Returns the address of the data within @frag. The page must already
2025 * be mapped.
2026 */
2027static inline void *skb_frag_address(const skb_frag_t *frag)
2028{
2029        return page_address(skb_frag_page(frag)) + frag->page_offset;
2030}
2031
2032/**
2033 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
2034 * @frag: the paged fragment buffer
2035 *
2036 * Returns the address of the data within @frag. Checks that the page
2037 * is mapped and returns %NULL otherwise.
2038 */
2039static inline void *skb_frag_address_safe(const skb_frag_t *frag)
2040{
2041        void *ptr = page_address(skb_frag_page(frag));
2042        if (unlikely(!ptr))
2043                return NULL;
2044
2045        return ptr + frag->page_offset;
2046}
2047
2048/**
2049 * __skb_frag_set_page - sets the page contained in a paged fragment
2050 * @frag: the paged fragment
2051 * @page: the page to set
2052 *
2053 * Sets the fragment @frag to contain @page.
2054 */
2055static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
2056{
2057        frag->page.p = page;
2058}
2059
2060/**
2061 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
2062 * @skb: the buffer
2063 * @f: the fragment offset
2064 * @page: the page to set
2065 *
2066 * Sets the @f'th fragment of @skb to contain @page.
2067 */
2068static inline void skb_frag_set_page(struct sk_buff *skb, int f,
2069                                     struct page *page)
2070{
2071        __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
2072}
2073
2074/**
2075 * skb_frag_dma_map - maps a paged fragment via the DMA API
2076 * @dev: the device to map the fragment to
2077 * @frag: the paged fragment to map
2078 * @offset: the offset within the fragment (starting at the
2079 *          fragment's own offset)
2080 * @size: the number of bytes to map
2081 * @dir: the direction of the mapping (%PCI_DMA_*)
2082 *
2083 * Maps the page associated with @frag to @device.
2084 */
2085static inline dma_addr_t skb_frag_dma_map(struct device *dev,
2086                                          const skb_frag_t *frag,
2087                                          size_t offset, size_t size,
2088                                          enum dma_data_direction dir)
2089{
2090        return dma_map_page(dev, skb_frag_page(frag),
2091                            frag->page_offset + offset, size, dir);
2092}
2093
2094static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
2095                                        gfp_t gfp_mask)
2096{
2097        return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
2098}
2099
2100/**
2101 *      skb_clone_writable - is the header of a clone writable
2102 *      @skb: buffer to check
2103 *      @len: length up to which to write
2104 *
2105 *      Returns true if modifying the header part of the cloned buffer
2106 *      does not requires the data to be copied.
2107 */
2108static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
2109{
2110        return !skb_header_cloned(skb) &&
2111               skb_headroom(skb) + len <= skb->hdr_len;
2112}
2113
2114static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
2115                            int cloned)
2116{
2117        int delta = 0;
2118
2119        if (headroom > skb_headroom(skb))
2120                delta = headroom - skb_headroom(skb);
2121
2122        if (delta || cloned)
2123                return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
2124                                        GFP_ATOMIC);
2125        return 0;
2126}
2127
2128/**
2129 *      skb_cow - copy header of skb when it is required
2130 *      @skb: buffer to cow
2131 *      @headroom: needed headroom
2132 *
2133 *      If the skb passed lacks sufficient headroom or its data part
2134 *      is shared, data is reallocated. If reallocation fails, an error
2135 *      is returned and original skb is not changed.
2136 *
2137 *      The result is skb with writable area skb->head...skb->tail
2138 *      and at least @headroom of space at head.
2139 */
2140static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
2141{
2142        return __skb_cow(skb, headroom, skb_cloned(skb));
2143}
2144
2145/**
2146 *      skb_cow_head - skb_cow but only making the head writable
2147 *      @skb: buffer to cow
2148 *      @headroom: needed headroom
2149 *
2150 *      This function is identical to skb_cow except that we replace the
2151 *      skb_cloned check by skb_header_cloned.  It should be used when
2152 *      you only need to push on some header and do not need to modify
2153 *      the data.
2154 */
2155static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
2156{
2157        return __skb_cow(skb, headroom, skb_header_cloned(skb));
2158}
2159
2160/**
2161 *      skb_padto       - pad an skbuff up to a minimal size
2162 *      @skb: buffer to pad
2163 *      @len: minimal length
2164 *
2165 *      Pads up a buffer to ensure the trailing bytes exist and are
2166 *      blanked. If the buffer already contains sufficient data it
2167 *      is untouched. Otherwise it is extended. Returns zero on
2168 *      success. The skb is freed on error.
2169 */
2170 
2171static inline int skb_padto(struct sk_buff *skb, unsigned int len)
2172{
2173        unsigned int size = skb->len;
2174        if (likely(size >= len))
2175                return 0;
2176        return skb_pad(skb, len - size);
2177}
2178
2179static inline int skb_add_data(struct sk_buff *skb,
2180                               char __user *from, int copy)
2181{
2182        const int off = skb->len;
2183
2184        if (skb->ip_summed == CHECKSUM_NONE) {
2185                int err = 0;
2186                __wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
2187                                                            copy, 0, &err);
2188                if (!err) {
2189                        skb->csum = csum_block_add(skb->csum, csum, off);
2190                        return 0;
2191                }
2192        } else if (!copy_from_user(skb_put(skb, copy), from, copy))
2193                return 0;
2194
2195        __skb_trim(skb, off);
2196        return -EFAULT;
2197}
2198
2199static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
2200                                    const struct page *page, int off)
2201{
2202        if (i) {
2203                const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
2204
2205                return page == skb_frag_page(frag) &&
2206                       off == frag->page_offset + skb_frag_size(frag);
2207        }
2208        return false;
2209}
2210
2211static inline int __skb_linearize(struct sk_buff *skb)
2212{
2213        return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
2214}
2215
2216/**
2217 *      skb_linearize - convert paged skb to linear one
2218 *      @skb: buffer to linarize
2219 *
2220 *      If there is no free memory -ENOMEM is returned, otherwise zero
2221 *      is returned and the old skb data released.
2222 */
2223static inline int skb_linearize(struct sk_buff *skb)
2224{
2225        return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
2226}
2227
2228/**
2229 * skb_has_shared_frag - can any frag be overwritten
2230 * @skb: buffer to test
2231 *
2232 * Return true if the skb has at least one frag that might be modified
2233 * by an external entity (as in vmsplice()/sendfile())
2234 */
2235static inline bool skb_has_shared_frag(const struct sk_buff *skb)
2236{
2237        return skb_is_nonlinear(skb) &&
2238               skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
2239}
2240
2241/**
2242 *      skb_linearize_cow - make sure skb is linear and writable
2243 *      @skb: buffer to process
2244 *
2245 *      If there is no free memory -ENOMEM is returned, otherwise zero
2246 *      is returned and the old skb data released.
2247 */
2248static inline int skb_linearize_cow(struct sk_buff *skb)
2249{
2250        return skb_is_nonlinear(skb) || skb_cloned(skb) ?
2251               __skb_linearize(skb) : 0;
2252}
2253
2254/**
2255 *      skb_postpull_rcsum - update checksum for received skb after pull
2256 *      @skb: buffer to update
2257 *      @start: start of data before pull
2258 *      @len: length of data pulled
2259 *
2260 *      After doing a pull on a received packet, you need to call this to
2261 *      update the CHECKSUM_COMPLETE checksum, or set ip_summed to
2262 *      CHECKSUM_NONE so that it can be recomputed from scratch.
2263 */
2264
2265static inline void skb_postpull_rcsum(struct sk_buff *skb,
2266                                      const void *start, unsigned int len)
2267{
2268        if (skb->ip_summed == CHECKSUM_COMPLETE)
2269                skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
2270}
2271
2272unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
2273
2274/**
2275 *      pskb_trim_rcsum - trim received skb and update checksum
2276 *      @skb: buffer to trim
2277 *      @len: new length
2278 *
2279 *      This is exactly the same as pskb_trim except that it ensures the
2280 *      checksum of received packets are still valid after the operation.
2281 */
2282
2283static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
2284{
2285        if (likely(len >= skb->len))
2286                return 0;
2287        if (skb->ip_summed == CHECKSUM_COMPLETE)
2288                skb->ip_summed = CHECKSUM_NONE;
2289        return __pskb_trim(skb, len);
2290}
2291
2292#define skb_queue_walk(queue, skb) \
2293                for (skb = (queue)->next;                                       \
2294                     skb != (struct sk_buff *)(queue);                          \
2295                     skb = skb->next)
2296
2297#define skb_queue_walk_safe(queue, skb, tmp)                                    \
2298                for (skb = (queue)->next, tmp = skb->next;                      \
2299                     skb != (struct sk_buff *)(queue);                          \
2300                     skb = tmp, tmp = skb->next)
2301
2302#define skb_queue_walk_from(queue, skb)                                         \
2303                for (; skb != (struct sk_buff *)(queue);                        \
2304                     skb = skb->next)
2305
2306#define skb_queue_walk_from_safe(queue, skb, tmp)                               \
2307                for (tmp = skb->next;                                           \
2308                     skb != (struct sk_buff *)(queue);                          \
2309                     skb = tmp, tmp = skb->next)
2310
2311#define skb_queue_reverse_walk(queue, skb) \
2312                for (skb = (queue)->prev;                                       \
2313                     skb != (struct sk_buff *)(queue);                          \
2314                     skb = skb->prev)
2315
2316#define skb_queue_reverse_walk_safe(queue, skb, tmp)                            \
2317                for (skb = (queue)->prev, tmp = skb->prev;                      \
2318                     skb != (struct sk_buff *)(queue);                          \
2319                     skb = tmp, tmp = skb->prev)
2320
2321#define skb_queue_reverse_walk_from_safe(queue, skb, tmp)                       \
2322                for (tmp = skb->prev;                                           \
2323                     skb != (struct sk_buff *)(queue);                          \
2324                     skb = tmp, tmp = skb->prev)
2325
2326static inline bool skb_has_frag_list(const struct sk_buff *skb)
2327{
2328        return skb_shinfo(skb)->frag_list != NULL;
2329}
2330
2331static inline void skb_frag_list_init(struct sk_buff *skb)
2332{
2333        skb_shinfo(skb)->frag_list = NULL;
2334}
2335
2336static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
2337{
2338        frag->next = skb_shinfo(skb)->frag_list;
2339        skb_shinfo(skb)->frag_list = frag;
2340}
2341
2342#define skb_walk_frags(skb, iter)       \
2343        for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
2344
2345extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
2346                                           int *peeked, int *off, int *err);
2347extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
2348                                         int noblock, int *err);
2349extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
2350                                     struct poll_table_struct *wait);
2351extern int             skb_copy_datagram_iovec(const struct sk_buff *from,
2352                                               int offset, struct iovec *to,
2353                                               int size);
2354extern int             skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
2355                                                        int hlen,
2356                                                        struct iovec *iov);
2357extern int             skb_copy_datagram_from_iovec(struct sk_buff *skb,
2358                                                    int offset,
2359                                                    const struct iovec *from,
2360                                                    int from_offset,
2361                                                    int len);
2362extern int             zerocopy_sg_from_iovec(struct sk_buff *skb,
2363                                              const struct iovec *frm,
2364                                              int offset,
2365                                              size_t count);
2366extern int             skb_copy_datagram_const_iovec(const struct sk_buff *from,
2367                                                     int offset,
2368                                                     const struct iovec *to,
2369                                                     int to_offset,
2370                                                     int size);
2371extern void            skb_free_datagram(struct sock *sk, struct sk_buff *skb);
2372extern void            skb_free_datagram_locked(struct sock *sk,
2373                                                struct sk_buff *skb);
2374extern int             skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
2375                                         unsigned int flags);
2376extern __wsum          skb_checksum(const struct sk_buff *skb, int offset,
2377                                    int len, __wsum csum);
2378extern int             skb_copy_bits(const struct sk_buff *skb, int offset,
2379                                     void *to, int len);
2380extern int             skb_store_bits(struct sk_buff *skb, int offset,
2381                                      const void *from, int len);
2382extern __wsum          skb_copy_and_csum_bits(const struct sk_buff *skb,
2383                                              int offset, u8 *to, int len,
2384                                              __wsum csum);
2385extern int             skb_splice_bits(struct sk_buff *skb,
2386                                                unsigned int offset,
2387                                                struct pipe_inode_info *pipe,
2388                                                unsigned int len,
2389                                                unsigned int flags);
2390extern void            skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
2391extern void            skb_split(struct sk_buff *skb,
2392                                 struct sk_buff *skb1, const u32 len);
2393extern int             skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
2394                                 int shiftlen);
2395extern void            skb_scrub_packet(struct sk_buff *skb, bool xnet);
2396
2397extern struct sk_buff *skb_segment(struct sk_buff *skb,
2398                                   netdev_features_t features);
2399
2400static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
2401                                       int len, void *buffer)
2402{
2403        int hlen = skb_headlen(skb);
2404
2405        if (hlen - offset >= len)
2406                return skb->data + offset;
2407
2408        if (skb_copy_bits(skb, offset, buffer, len) < 0)
2409                return NULL;
2410
2411        return buffer;
2412}
2413
2414static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
2415                                             void *to,
2416                                             const unsigned int len)
2417{
2418        memcpy(to, skb->data, len);
2419}
2420
2421static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
2422                                                    const int offset, void *to,
2423                                                    const unsigned int len)
2424{
2425        memcpy(to, skb->data + offset, len);
2426}
2427
2428static inline void skb_copy_to_linear_data(struct sk_buff *skb,
2429                                           const void *from,
2430                                           const unsigned int len)
2431{
2432        memcpy(skb->data, from, len);
2433}
2434
2435static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
2436                                                  const int offset,
2437                                                  const void *from,
2438                                                  const unsigned int len)
2439{
2440        memcpy(skb->data + offset, from, len);
2441}
2442
2443extern void skb_init(void);
2444
2445static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
2446{
2447        return skb->tstamp;
2448}
2449
2450/**
2451 *      skb_get_timestamp - get timestamp from a skb
2452 *      @skb: skb to get stamp from
2453 *      @stamp: pointer to struct timeval to store stamp in
2454 *
2455 *      Timestamps are stored in the skb as offsets to a base timestamp.
2456 *      This function converts the offset back to a struct timeval and stores
2457 *      it in stamp.
2458 */
2459static inline void skb_get_timestamp(const struct sk_buff *skb,
2460                                     struct timeval *stamp)
2461{
2462        *stamp = ktime_to_timeval(skb->tstamp);
2463}
2464
2465static inline void skb_get_timestampns(const struct sk_buff *skb,
2466                                       struct timespec *stamp)
2467{
2468        *stamp = ktime_to_timespec(skb->tstamp);
2469}
2470
2471static inline void __net_timestamp(struct sk_buff *skb)
2472{
2473        skb->tstamp = ktime_get_real();
2474}
2475
2476static inline ktime_t net_timedelta(ktime_t t)
2477{
2478        return ktime_sub(ktime_get_real(), t);
2479}
2480
2481static inline ktime_t net_invalid_timestamp(void)
2482{
2483        return ktime_set(0, 0);
2484}
2485
2486extern void skb_timestamping_init(void);
2487
2488#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
2489
2490extern void skb_clone_tx_timestamp(struct sk_buff *skb);
2491extern bool skb_defer_rx_timestamp(struct sk_buff *skb);
2492
2493#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */
2494
2495static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
2496{
2497}
2498
2499static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
2500{
2501        return false;
2502}
2503
2504#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */
2505
2506/**
2507 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
2508 *
2509 * PHY drivers may accept clones of transmitted packets for
2510 * timestamping via their phy_driver.txtstamp method. These drivers
2511 * must call this function to return the skb back to the stack, with
2512 * or without a timestamp.
2513 *
2514 * @skb: clone of the the original outgoing packet
2515 * @hwtstamps: hardware time stamps, may be NULL if not available
2516 *
2517 */
2518void skb_complete_tx_timestamp(struct sk_buff *skb,
2519                               struct skb_shared_hwtstamps *hwtstamps);
2520
2521/**
2522 * skb_tstamp_tx - queue clone of skb with send time stamps
2523 * @orig_skb:   the original outgoing packet
2524 * @hwtstamps:  hardware time stamps, may be NULL if not available
2525 *
2526 * If the skb has a socket associated, then this function clones the
2527 * skb (thus sharing the actual data and optional structures), stores
2528 * the optional hardware time stamping information (if non NULL) or
2529 * generates a software time stamp (otherwise), then queues the clone
2530 * to the error queue of the socket.  Errors are silently ignored.
2531 */
2532extern void skb_tstamp_tx(struct sk_buff *orig_skb,
2533                        struct skb_shared_hwtstamps *hwtstamps);
2534
2535static inline void sw_tx_timestamp(struct sk_buff *skb)
2536{
2537        if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
2538            !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
2539                skb_tstamp_tx(skb, NULL);
2540}
2541
2542/**
2543 * skb_tx_timestamp() - Driver hook for transmit timestamping
2544 *
2545 * Ethernet MAC Drivers should call this function in their hard_xmit()
2546 * function immediately before giving the sk_buff to the MAC hardware.
2547 *
2548 * @skb: A socket buffer.
2549 */
2550static inline void skb_tx_timestamp(struct sk_buff *skb)
2551{
2552        skb_clone_tx_timestamp(skb);
2553        sw_tx_timestamp(skb);
2554}
2555
2556/**
2557 * skb_complete_wifi_ack - deliver skb with wifi status
2558 *
2559 * @skb: the original outgoing packet
2560 * @acked: ack status
2561 *
2562 */
2563void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
2564
2565extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
2566extern __sum16 __skb_checksum_complete(struct sk_buff *skb);
2567
2568static inline int skb_csum_unnecessary(const struct sk_buff *skb)
2569{
2570        return skb->ip_summed & CHECKSUM_UNNECESSARY;
2571}
2572
2573/**
2574 *      skb_checksum_complete - Calculate checksum of an entire packet
2575 *      @skb: packet to process
2576 *
2577 *      This function calculates the checksum over the entire packet plus
2578 *      the value of skb->csum.  The latter can be used to supply the
2579 *      checksum of a pseudo header as used by TCP/UDP.  It returns the
2580 *      checksum.
2581 *
2582 *      For protocols that contain complete checksums such as ICMP/TCP/UDP,
2583 *      this function can be used to verify that checksum on received
2584 *      packets.  In that case the function should return zero if the
2585 *      checksum is correct.  In particular, this function will return zero
2586 *      if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
2587 *      hardware has already verified the correctness of the checksum.
2588 */
2589static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
2590{
2591        return skb_csum_unnecessary(skb) ?
2592               0 : __skb_checksum_complete(skb);
2593}
2594
2595#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2596extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
2597static inline void nf_conntrack_put(struct nf_conntrack *nfct)
2598{
2599        if (nfct && atomic_dec_and_test(&nfct->use))
2600                nf_conntrack_destroy(nfct);
2601}
2602static inline void nf_conntrack_get(struct nf_conntrack *nfct)
2603{
2604        if (nfct)
2605                atomic_inc(&nfct->use);
2606}
2607#endif
2608#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
2609static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
2610{
2611        if (skb)
2612                atomic_inc(&skb->users);
2613}
2614static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
2615{
2616        if (skb)
2617                kfree_skb(skb);
2618}
2619#endif
2620#ifdef CONFIG_BRIDGE_NETFILTER
2621static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
2622{
2623        if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
2624                kfree(nf_bridge);
2625}
2626static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
2627{
2628        if (nf_bridge)
2629                atomic_inc(&nf_bridge->use);
2630}
2631#endif /* CONFIG_BRIDGE_NETFILTER */
2632static inline void nf_reset(struct sk_buff *skb)
2633{
2634#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2635        nf_conntrack_put(skb->nfct);
2636        skb->nfct = NULL;
2637#endif
2638#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
2639        nf_conntrack_put_reasm(skb->nfct_reasm);
2640        skb->nfct_reasm = NULL;
2641#endif
2642#ifdef CONFIG_BRIDGE_NETFILTER
2643        nf_bridge_put(skb->nf_bridge);
2644        skb->nf_bridge = NULL;
2645#endif
2646}
2647
2648static inline void nf_reset_trace(struct sk_buff *skb)
2649{
2650#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
2651        skb->nf_trace = 0;
2652#endif
2653}
2654
2655/* Note: This doesn't put any conntrack and bridge info in dst. */
2656static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
2657{
2658#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2659        dst->nfct = src->nfct;
2660        nf_conntrack_get(src->nfct);
2661        dst->nfctinfo = src->nfctinfo;
2662#endif
2663#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
2664        dst->nfct_reasm = src->nfct_reasm;
2665        nf_conntrack_get_reasm(src->nfct_reasm);
2666#endif
2667#ifdef CONFIG_BRIDGE_NETFILTER
2668        dst->nf_bridge  = src->nf_bridge;
2669        nf_bridge_get(src->nf_bridge);
2670#endif
2671}
2672
2673static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
2674{
2675#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2676        nf_conntrack_put(dst->nfct);
2677#endif
2678#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
2679        nf_conntrack_put_reasm(dst->nfct_reasm);
2680#endif
2681#ifdef CONFIG_BRIDGE_NETFILTER
2682        nf_bridge_put(dst->nf_bridge);
2683#endif
2684        __nf_copy(dst, src);
2685}
2686
2687#ifdef CONFIG_NETWORK_SECMARK
2688static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
2689{
2690        to->secmark = from->secmark;
2691}
2692
2693static inline void skb_init_secmark(struct sk_buff *skb)
2694{
2695        skb->secmark = 0;
2696}
2697#else
2698static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
2699{ }
2700
2701static inline void skb_init_secmark(struct sk_buff *skb)
2702{ }
2703#endif
2704
2705static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
2706{
2707        skb->queue_mapping = queue_mapping;
2708}
2709
2710static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
2711{
2712        return skb->queue_mapping;
2713}
2714
2715static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
2716{
2717        to->queue_mapping = from->queue_mapping;
2718}
2719
2720static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
2721{
2722        skb->queue_mapping = rx_queue + 1;
2723}
2724
2725static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
2726{
2727        return skb->queue_mapping - 1;
2728}
2729
2730static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
2731{
2732        return skb->queue_mapping != 0;
2733}
2734
2735extern u16 __skb_tx_hash(const struct net_device *dev,
2736                         const struct sk_buff *skb,
2737                         unsigned int num_tx_queues);
2738
2739#ifdef CONFIG_XFRM
2740static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
2741{
2742        return skb->sp;
2743}
2744#else
2745static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
2746{
2747        return NULL;
2748}
2749#endif
2750
2751/* Keeps track of mac header offset relative to skb->head.
2752 * It is useful for TSO of Tunneling protocol. e.g. GRE.
2753 * For non-tunnel skb it points to skb_mac_header() and for
2754 * tunnel skb it points to outer mac header. */
2755struct skb_gso_cb {
2756        int mac_offset;
2757};
2758#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb)
2759
2760static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
2761{
2762        return (skb_mac_header(inner_skb) - inner_skb->head) -
2763                SKB_GSO_CB(inner_skb)->mac_offset;
2764}
2765
2766static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
2767{
2768        int new_headroom, headroom;
2769        int ret;
2770
2771        headroom = skb_headroom(skb);
2772        ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
2773        if (ret)
2774                return ret;
2775
2776        new_headroom = skb_headroom(skb);
2777        SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
2778        return 0;
2779}
2780
2781static inline bool skb_is_gso(const struct sk_buff *skb)
2782{
2783        return skb_shinfo(skb)->gso_size;
2784}
2785
2786static inline bool skb_is_gso_v6(const struct sk_buff *skb)
2787{
2788        return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
2789}
2790
2791extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);
2792
2793static inline bool skb_warn_if_lro(const struct sk_buff *skb)
2794{
2795        /* LRO sets gso_size but not gso_type, whereas if GSO is really
2796         * wanted then gso_type will be set. */
2797        const struct skb_shared_info *shinfo = skb_shinfo(skb);
2798
2799        if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
2800            unlikely(shinfo->gso_type == 0)) {
2801                __skb_warn_lro_forwarding(skb);
2802                return true;
2803        }
2804        return false;
2805}
2806
2807static inline void skb_forward_csum(struct sk_buff *skb)
2808{
2809        /* Unfortunately we don't support this one.  Any brave souls? */
2810        if (skb->ip_summed == CHECKSUM_COMPLETE)
2811                skb->ip_summed = CHECKSUM_NONE;
2812}
2813
2814/**
2815 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
2816 * @skb: skb to check
2817 *
2818 * fresh skbs have their ip_summed set to CHECKSUM_NONE.
2819 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
2820 * use this helper, to document places where we make this assertion.
2821 */
2822static inline void skb_checksum_none_assert(const struct sk_buff *skb)
2823{
2824#ifdef DEBUG
2825        BUG_ON(skb->ip_summed != CHECKSUM_NONE);
2826#endif
2827}
2828
2829bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
2830
2831u32 __skb_get_poff(const struct sk_buff *skb);
2832
2833/**
2834 * skb_head_is_locked - Determine if the skb->head is locked down
2835 * @skb: skb to check
2836 *
2837 * The head on skbs build around a head frag can be removed if they are
2838 * not cloned.  This function returns true if the skb head is locked down
2839 * due to either being allocated via kmalloc, or by being a clone with
2840 * multiple references to the head.
2841 */
2842static inline bool skb_head_is_locked(const struct sk_buff *skb)
2843{
2844        return !skb->head_frag || skb_cloned(skb);
2845}
2846#endif  /* __KERNEL__ */
2847#endif  /* _LINUX_SKBUFF_H */
2848