linux/net/core/skbuff.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *      Routines having to do with the 'struct sk_buff' memory handlers.
   4 *
   5 *      Authors:        Alan Cox <alan@lxorguk.ukuu.org.uk>
   6 *                      Florian La Roche <rzsfl@rz.uni-sb.de>
   7 *
   8 *      Fixes:
   9 *              Alan Cox        :       Fixed the worst of the load
  10 *                                      balancer bugs.
  11 *              Dave Platt      :       Interrupt stacking fix.
  12 *      Richard Kooijman        :       Timestamp fixes.
  13 *              Alan Cox        :       Changed buffer format.
  14 *              Alan Cox        :       destructor hook for AF_UNIX etc.
  15 *              Linus Torvalds  :       Better skb_clone.
  16 *              Alan Cox        :       Added skb_copy.
  17 *              Alan Cox        :       Added all the changed routines Linus
  18 *                                      only put in the headers
  19 *              Ray VanTassle   :       Fixed --skb->lock in free
  20 *              Alan Cox        :       skb_copy copy arp field
  21 *              Andi Kleen      :       slabified it.
  22 *              Robert Olsson   :       Removed skb_head_pool
  23 *
  24 *      NOTE:
  25 *              The __skb_ routines should be called with interrupts
  26 *      disabled, or you better be *real* sure that the operation is atomic
  27 *      with respect to whatever list is being frobbed (e.g. via lock_sock()
  28 *      or via disabling bottom half handlers, etc).
  29 */
  30
  31/*
  32 *      The functions in this file will not compile correctly with gcc 2.4.x
  33 */
  34
  35#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  36
  37#include <linux/module.h>
  38#include <linux/types.h>
  39#include <linux/kernel.h>
  40#include <linux/mm.h>
  41#include <linux/interrupt.h>
  42#include <linux/in.h>
  43#include <linux/inet.h>
  44#include <linux/slab.h>
  45#include <linux/tcp.h>
  46#include <linux/udp.h>
  47#include <linux/sctp.h>
  48#include <linux/netdevice.h>
  49#ifdef CONFIG_NET_CLS_ACT
  50#include <net/pkt_sched.h>
  51#endif
  52#include <linux/string.h>
  53#include <linux/skbuff.h>
  54#include <linux/splice.h>
  55#include <linux/cache.h>
  56#include <linux/rtnetlink.h>
  57#include <linux/init.h>
  58#include <linux/scatterlist.h>
  59#include <linux/errqueue.h>
  60#include <linux/prefetch.h>
  61#include <linux/if_vlan.h>
  62#include <linux/mpls.h>
  63
  64#include <net/protocol.h>
  65#include <net/dst.h>
  66#include <net/sock.h>
  67#include <net/checksum.h>
  68#include <net/ip6_checksum.h>
  69#include <net/xfrm.h>
  70#include <net/mpls.h>
  71
  72#include <linux/uaccess.h>
  73#include <trace/events/skb.h>
  74#include <linux/highmem.h>
  75#include <linux/capability.h>
  76#include <linux/user_namespace.h>
  77#include <linux/indirect_call_wrapper.h>
  78
  79#include "datagram.h"
  80
  81struct kmem_cache *skbuff_head_cache __ro_after_init;
  82static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
  83#ifdef CONFIG_SKB_EXTENSIONS
  84static struct kmem_cache *skbuff_ext_cache __ro_after_init;
  85#endif
  86int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
  87EXPORT_SYMBOL(sysctl_max_skb_frags);
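/* Illustrative sketch (not in the upstream file): the locking rule from the
 * NOTE in the file header above.  skb_queue_tail() takes the queue lock
 * itself; the __skb_ variant relies on the caller already holding it (or
 * otherwise guaranteeing atomicity with respect to the list).
 */
static __maybe_unused void skb_queue_locking_example(struct sk_buff_head *q,
						     struct sk_buff *skb1,
						     struct sk_buff *skb2)
{
	unsigned long flags;

	skb_queue_tail(q, skb1);		/* locked variant */

	spin_lock_irqsave(&q->lock, flags);
	__skb_queue_tail(q, skb2);		/* caller provides the locking */
	spin_unlock_irqrestore(&q->lock, flags);
}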
  88
  89/**
  90 *      skb_panic - private function for out-of-line support
  91 *      @skb:   buffer
  92 *      @sz:    size
  93 *      @addr:  address
  94 *      @msg:   skb_over_panic or skb_under_panic
  95 *
  96 *      Out-of-line support for skb_put() and skb_push().
  97 *      Called via the wrapper skb_over_panic() or skb_under_panic().
  98 *      Keep out of line to prevent kernel bloat.
  99 *      __builtin_return_address is not used because it is not always reliable.
 100 */
 101static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
 102                      const char msg[])
 103{
 104        pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
 105                 msg, addr, skb->len, sz, skb->head, skb->data,
 106                 (unsigned long)skb->tail, (unsigned long)skb->end,
 107                 skb->dev ? skb->dev->name : "<NULL>");
 108        BUG();
 109}
 110
 111static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
 112{
 113        skb_panic(skb, sz, addr, __func__);
 114}
 115
 116static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
 117{
 118        skb_panic(skb, sz, addr, __func__);
 119}
 120
 121/*
 122 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 123 * the caller if emergency pfmemalloc reserves are being used. If it is and
 124 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 125 * may be used. Otherwise, the packet data may be discarded until enough
 126 * memory is free
 127 */
 128#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
 129         __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)
 130
 131static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
 132                               unsigned long ip, bool *pfmemalloc)
 133{
 134        void *obj;
 135        bool ret_pfmemalloc = false;
 136
 137        /*
 138         * Try a regular allocation, when that fails and we're not entitled
 139         * to the reserves, fail.
 140         */
 141        obj = kmalloc_node_track_caller(size,
 142                                        flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
 143                                        node);
 144        if (obj || !(gfp_pfmemalloc_allowed(flags)))
 145                goto out;
 146
 147        /* Try again but now we are using pfmemalloc reserves */
 148        ret_pfmemalloc = true;
 149        obj = kmalloc_node_track_caller(size, flags, node);
 150
 151out:
 152        if (pfmemalloc)
 153                *pfmemalloc = ret_pfmemalloc;
 154
 155        return obj;
 156}
 157
 158/*      Allocate a new skbuff. We do this ourselves so we can fill in a few
 159 *      'private' fields and also do memory statistics to find all the
 160 *      [BEEP] leaks.
 161 *
 162 */
 163
 164/**
 165 *      __alloc_skb     -       allocate a network buffer
 166 *      @size: size to allocate
 167 *      @gfp_mask: allocation mask
 168 *      @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 169 *              instead of head cache and allocate a cloned (child) skb.
 170 *              If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 171 *              allocations in case the data is required for writeback
 172 *      @node: numa node to allocate memory on
 173 *
 174 *      Allocate a new &sk_buff. The returned buffer has no headroom and a
 175 *      tail room of at least size bytes. The object has a reference count
 176 *      of one. The return is the buffer. On a failure the return is %NULL.
 177 *
 178 *      Buffers may only be allocated from interrupts using a @gfp_mask of
 179 *      %GFP_ATOMIC.
 180 */
 181struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 182                            int flags, int node)
 183{
 184        struct kmem_cache *cache;
 185        struct skb_shared_info *shinfo;
 186        struct sk_buff *skb;
 187        u8 *data;
 188        bool pfmemalloc;
 189
 190        cache = (flags & SKB_ALLOC_FCLONE)
 191                ? skbuff_fclone_cache : skbuff_head_cache;
 192
 193        if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
 194                gfp_mask |= __GFP_MEMALLOC;
 195
 196        /* Get the HEAD */
 197        skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
 198        if (!skb)
 199                goto out;
 200        prefetchw(skb);
 201
 202        /* We do our best to align skb_shared_info on a separate cache
 203         * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
 204         * aligned memory blocks, unless SLUB/SLAB debug is enabled.
 205         * Both skb->head and skb_shared_info are cache line aligned.
 206         */
 207        size = SKB_DATA_ALIGN(size);
 208        size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 209        data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
 210        if (!data)
 211                goto nodata;
 212        /* kmalloc(size) might give us more room than requested.
 213         * Put skb_shared_info exactly at the end of allocated zone,
 214         * to allow max possible filling before reallocation.
 215         */
 216        size = SKB_WITH_OVERHEAD(ksize(data));
 217        prefetchw(data + size);
 218
 219        /*
 220         * Only clear those fields we need to clear, not those that we will
 221         * actually initialise below. Hence, don't put any more fields after
 222         * the tail pointer in struct sk_buff!
 223         */
 224        memset(skb, 0, offsetof(struct sk_buff, tail));
 225        /* Account for allocated memory : skb + skb->head */
 226        skb->truesize = SKB_TRUESIZE(size);
 227        skb->pfmemalloc = pfmemalloc;
 228        refcount_set(&skb->users, 1);
 229        skb->head = data;
 230        skb->data = data;
 231        skb_reset_tail_pointer(skb);
 232        skb->end = skb->tail + size;
 233        skb->mac_header = (typeof(skb->mac_header))~0U;
 234        skb->transport_header = (typeof(skb->transport_header))~0U;
 235
 236        /* make sure we initialize shinfo sequentially */
 237        shinfo = skb_shinfo(skb);
 238        memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
 239        atomic_set(&shinfo->dataref, 1);
 240
 241        if (flags & SKB_ALLOC_FCLONE) {
 242                struct sk_buff_fclones *fclones;
 243
 244                fclones = container_of(skb, struct sk_buff_fclones, skb1);
 245
 246                skb->fclone = SKB_FCLONE_ORIG;
 247                refcount_set(&fclones->fclone_ref, 1);
 248
 249                fclones->skb2.fclone = SKB_FCLONE_CLONE;
 250        }
 251out:
 252        return skb;
 253nodata:
 254        kmem_cache_free(cache, skb);
 255        skb = NULL;
 256        goto out;
 257}
 258EXPORT_SYMBOL(__alloc_skb);
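/* Illustrative sketch (not in the upstream file): typical use of the
 * allocator documented above, via the alloc_skb() wrapper.  The 128-byte
 * headroom is an arbitrary example value, not a requirement of the API.
 */
static __maybe_unused struct sk_buff *alloc_skb_example(const void *payload,
							unsigned int len)
{
	struct sk_buff *skb;

	/* no headroom yet, at least 128 + len bytes of tailroom */
	skb = alloc_skb(128 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	skb_reserve(skb, 128);			/* carve out headroom for headers */
	skb_put_data(skb, payload, len);	/* append payload, updating skb->len */

	return skb;
}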
 259
 260/* Caller must provide SKB that is memset cleared */
 261static struct sk_buff *__build_skb_around(struct sk_buff *skb,
 262                                          void *data, unsigned int frag_size)
 263{
 264        struct skb_shared_info *shinfo;
 265        unsigned int size = frag_size ? : ksize(data);
 266
 267        size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 268
 269        /* Assumes caller memset cleared SKB */
 270        skb->truesize = SKB_TRUESIZE(size);
 271        refcount_set(&skb->users, 1);
 272        skb->head = data;
 273        skb->data = data;
 274        skb_reset_tail_pointer(skb);
 275        skb->end = skb->tail + size;
 276        skb->mac_header = (typeof(skb->mac_header))~0U;
 277        skb->transport_header = (typeof(skb->transport_header))~0U;
 278
 279        /* make sure we initialize shinfo sequentially */
 280        shinfo = skb_shinfo(skb);
 281        memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
 282        atomic_set(&shinfo->dataref, 1);
 283
 284        return skb;
 285}
 286
 287/**
 288 * __build_skb - build a network buffer
 289 * @data: data buffer provided by caller
 290 * @frag_size: size of data, or 0 if head was kmalloced
 291 *
 292 * Allocate a new &sk_buff. Caller provides space holding head and
 293 * skb_shared_info. @data must have been allocated by kmalloc() only if
 294 * @frag_size is 0, otherwise data should come from the page allocator
 295 *  or vmalloc()
 296 * The return is the new skb buffer.
 297 * On a failure the return is %NULL, and @data is not freed.
 298 * Notes :
  299 *  Before IO, the driver allocates only the data buffer where the NIC puts the incoming frame.
  300 *  The driver should add room at head (NET_SKB_PAD) and
  301 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)).
  302 *  After IO, the driver calls build_skb() to allocate the sk_buff and populate it
  303 *  before giving the packet to the stack.
  304 *  RX rings only contain data buffers, not full skbs.
 305 */
 306struct sk_buff *__build_skb(void *data, unsigned int frag_size)
 307{
 308        struct sk_buff *skb;
 309
 310        skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
 311        if (unlikely(!skb))
 312                return NULL;
 313
 314        memset(skb, 0, offsetof(struct sk_buff, tail));
 315
 316        return __build_skb_around(skb, data, frag_size);
 317}
 318
  319/* build_skb() is a wrapper over __build_skb() that specifically
  320 * takes care of skb->head_frag and skb->pfmemalloc.
  321 * This means that if @frag_size is not zero, then @data must be backed
  322 * by a page fragment, not kmalloc() or vmalloc().
 323 */
 324struct sk_buff *build_skb(void *data, unsigned int frag_size)
 325{
 326        struct sk_buff *skb = __build_skb(data, frag_size);
 327
 328        if (skb && frag_size) {
 329                skb->head_frag = 1;
 330                if (page_is_pfmemalloc(virt_to_head_page(data)))
 331                        skb->pfmemalloc = 1;
 332        }
 333        return skb;
 334}
 335EXPORT_SYMBOL(build_skb);
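/* Illustrative sketch (not in the upstream file): how a driver is expected to
 * size its receive buffer before handing it to build_skb(), following the
 * Notes above.  "rx_buf_len" is a made-up example parameter.
 */
static __maybe_unused struct sk_buff *build_skb_example(unsigned int rx_buf_len)
{
	unsigned int truesize = SKB_DATA_ALIGN(NET_SKB_PAD + rx_buf_len) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct sk_buff *skb;
	void *data;

	data = netdev_alloc_frag(truesize);	/* page fragment, not kmalloc() */
	if (!data)
		return NULL;

	/* NIC DMA would normally fill data + NET_SKB_PAD here */

	skb = build_skb(data, truesize);
	if (!skb) {
		skb_free_frag(data);
		return NULL;
	}

	skb_reserve(skb, NET_SKB_PAD);		/* headroom added by the driver */
	return skb;
}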
 336
 337/**
 338 * build_skb_around - build a network buffer around provided skb
 339 * @skb: sk_buff provide by caller, must be memset cleared
 340 * @data: data buffer provided by caller
 341 * @frag_size: size of data, or 0 if head was kmalloced
 342 */
 343struct sk_buff *build_skb_around(struct sk_buff *skb,
 344                                 void *data, unsigned int frag_size)
 345{
 346        if (unlikely(!skb))
 347                return NULL;
 348
 349        skb = __build_skb_around(skb, data, frag_size);
 350
 351        if (skb && frag_size) {
 352                skb->head_frag = 1;
 353                if (page_is_pfmemalloc(virt_to_head_page(data)))
 354                        skb->pfmemalloc = 1;
 355        }
 356        return skb;
 357}
 358EXPORT_SYMBOL(build_skb_around);
 359
 360#define NAPI_SKB_CACHE_SIZE     64
 361
 362struct napi_alloc_cache {
 363        struct page_frag_cache page;
 364        unsigned int skb_count;
 365        void *skb_cache[NAPI_SKB_CACHE_SIZE];
 366};
 367
 368static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
 369static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
 370
 371static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 372{
 373        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 374
 375        return page_frag_alloc(&nc->page, fragsz, gfp_mask);
 376}
 377
 378void *napi_alloc_frag(unsigned int fragsz)
 379{
 380        fragsz = SKB_DATA_ALIGN(fragsz);
 381
 382        return __napi_alloc_frag(fragsz, GFP_ATOMIC);
 383}
 384EXPORT_SYMBOL(napi_alloc_frag);
 385
 386/**
 387 * netdev_alloc_frag - allocate a page fragment
 388 * @fragsz: fragment size
 389 *
 390 * Allocates a frag from a page for receive buffer.
 391 * Uses GFP_ATOMIC allocations.
 392 */
 393void *netdev_alloc_frag(unsigned int fragsz)
 394{
 395        struct page_frag_cache *nc;
 396        void *data;
 397
 398        fragsz = SKB_DATA_ALIGN(fragsz);
 399        if (in_irq() || irqs_disabled()) {
 400                nc = this_cpu_ptr(&netdev_alloc_cache);
 401                data = page_frag_alloc(nc, fragsz, GFP_ATOMIC);
 402        } else {
 403                local_bh_disable();
 404                data = __napi_alloc_frag(fragsz, GFP_ATOMIC);
 405                local_bh_enable();
 406        }
 407        return data;
 408}
 409EXPORT_SYMBOL(netdev_alloc_frag);
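/* Illustrative sketch (not in the upstream file): the basic allocate/free
 * pairing for the page-fragment API documented above.  2048 is just an
 * example size.
 */
static __maybe_unused void netdev_frag_example(void)
{
	void *data = netdev_alloc_frag(2048);

	if (data)
		skb_free_frag(data);	/* drops the page-fragment reference */
}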
 410
 411/**
 412 *      __netdev_alloc_skb - allocate an skbuff for rx on a specific device
 413 *      @dev: network device to receive on
 414 *      @len: length to allocate
 415 *      @gfp_mask: get_free_pages mask, passed to alloc_skb
 416 *
 417 *      Allocate a new &sk_buff and assign it a usage count of one. The
 418 *      buffer has NET_SKB_PAD headroom built in. Users should allocate
 419 *      the headroom they think they need without accounting for the
 420 *      built in space. The built in space is used for optimisations.
 421 *
 422 *      %NULL is returned if there is no free memory.
 423 */
 424struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
 425                                   gfp_t gfp_mask)
 426{
 427        struct page_frag_cache *nc;
 428        struct sk_buff *skb;
 429        bool pfmemalloc;
 430        void *data;
 431
 432        len += NET_SKB_PAD;
 433
 434        if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
 435            (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
 436                skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
 437                if (!skb)
 438                        goto skb_fail;
 439                goto skb_success;
 440        }
 441
 442        len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 443        len = SKB_DATA_ALIGN(len);
 444
 445        if (sk_memalloc_socks())
 446                gfp_mask |= __GFP_MEMALLOC;
 447
 448        if (in_irq() || irqs_disabled()) {
 449                nc = this_cpu_ptr(&netdev_alloc_cache);
 450                data = page_frag_alloc(nc, len, gfp_mask);
 451                pfmemalloc = nc->pfmemalloc;
 452        } else {
 453                local_bh_disable();
 454                nc = this_cpu_ptr(&napi_alloc_cache.page);
 455                data = page_frag_alloc(nc, len, gfp_mask);
 456                pfmemalloc = nc->pfmemalloc;
 457                local_bh_enable();
 458        }
 459
 460        if (unlikely(!data))
 461                return NULL;
 462
 463        skb = __build_skb(data, len);
 464        if (unlikely(!skb)) {
 465                skb_free_frag(data);
 466                return NULL;
 467        }
 468
 469        /* use OR instead of assignment to avoid clearing of bits in mask */
 470        if (pfmemalloc)
 471                skb->pfmemalloc = 1;
 472        skb->head_frag = 1;
 473
 474skb_success:
 475        skb_reserve(skb, NET_SKB_PAD);
 476        skb->dev = dev;
 477
 478skb_fail:
 479        return skb;
 480}
 481EXPORT_SYMBOL(__netdev_alloc_skb);
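/* Illustrative sketch (not in the upstream file): minimal rx-path use of the
 * helper above through the netdev_alloc_skb() wrapper.  The caller asks only
 * for the frame length; the NET_SKB_PAD headroom is already built in.
 */
static __maybe_unused struct sk_buff *netdev_rx_example(struct net_device *dev,
							const void *frame,
							unsigned int frame_len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, frame_len);	/* uses GFP_ATOMIC internally */
	if (!skb)
		return NULL;

	skb_put_data(skb, frame, frame_len);
	/* a real driver would now set skb->protocol via eth_type_trans() */

	return skb;
}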
 482
 483/**
 484 *      __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 485 *      @napi: napi instance this buffer was allocated for
 486 *      @len: length to allocate
 487 *      @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 488 *
 489 *      Allocate a new sk_buff for use in NAPI receive.  This buffer will
 490 *      attempt to allocate the head from a special reserved region used
 491 *      only for NAPI Rx allocation.  By doing this we can save several
 492 *      CPU cycles by avoiding having to disable and re-enable IRQs.
 493 *
 494 *      %NULL is returned if there is no free memory.
 495 */
 496struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
 497                                 gfp_t gfp_mask)
 498{
 499        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 500        struct sk_buff *skb;
 501        void *data;
 502
 503        len += NET_SKB_PAD + NET_IP_ALIGN;
 504
 505        if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
 506            (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
 507                skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
 508                if (!skb)
 509                        goto skb_fail;
 510                goto skb_success;
 511        }
 512
 513        len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 514        len = SKB_DATA_ALIGN(len);
 515
 516        if (sk_memalloc_socks())
 517                gfp_mask |= __GFP_MEMALLOC;
 518
 519        data = page_frag_alloc(&nc->page, len, gfp_mask);
 520        if (unlikely(!data))
 521                return NULL;
 522
 523        skb = __build_skb(data, len);
 524        if (unlikely(!skb)) {
 525                skb_free_frag(data);
 526                return NULL;
 527        }
 528
 529        /* use OR instead of assignment to avoid clearing of bits in mask */
 530        if (nc->page.pfmemalloc)
 531                skb->pfmemalloc = 1;
 532        skb->head_frag = 1;
 533
 534skb_success:
 535        skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
 536        skb->dev = napi->dev;
 537
 538skb_fail:
 539        return skb;
 540}
 541EXPORT_SYMBOL(__napi_alloc_skb);
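/* Illustrative sketch (not in the upstream file): using the NAPI variant from
 * a driver's poll handler, where IRQs stay enabled and the per-CPU NAPI
 * page-fragment cache above is used.
 */
static __maybe_unused struct sk_buff *napi_rx_example(struct napi_struct *napi,
						      unsigned int frame_len)
{
	struct sk_buff *skb;

	/* NET_SKB_PAD + NET_IP_ALIGN headroom is reserved by the helper */
	skb = napi_alloc_skb(napi, frame_len);
	if (!skb)
		return NULL;

	/* copy or attach the received frame, then hand off with napi_gro_receive() */
	return skb;
}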
 542
 543void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
 544                     int size, unsigned int truesize)
 545{
 546        skb_fill_page_desc(skb, i, page, off, size);
 547        skb->len += size;
 548        skb->data_len += size;
 549        skb->truesize += truesize;
 550}
 551EXPORT_SYMBOL(skb_add_rx_frag);
 552
 553void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
 554                          unsigned int truesize)
 555{
 556        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 557
 558        skb_frag_size_add(frag, size);
 559        skb->len += size;
 560        skb->data_len += size;
 561        skb->truesize += truesize;
 562}
 563EXPORT_SYMBOL(skb_coalesce_rx_frag);
 564
 565static void skb_drop_list(struct sk_buff **listp)
 566{
 567        kfree_skb_list(*listp);
 568        *listp = NULL;
 569}
 570
 571static inline void skb_drop_fraglist(struct sk_buff *skb)
 572{
 573        skb_drop_list(&skb_shinfo(skb)->frag_list);
 574}
 575
 576static void skb_clone_fraglist(struct sk_buff *skb)
 577{
 578        struct sk_buff *list;
 579
 580        skb_walk_frags(skb, list)
 581                skb_get(list);
 582}
 583
 584static void skb_free_head(struct sk_buff *skb)
 585{
 586        unsigned char *head = skb->head;
 587
 588        if (skb->head_frag)
 589                skb_free_frag(head);
 590        else
 591                kfree(head);
 592}
 593
 594static void skb_release_data(struct sk_buff *skb)
 595{
 596        struct skb_shared_info *shinfo = skb_shinfo(skb);
 597        int i;
 598
 599        if (skb->cloned &&
 600            atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
 601                              &shinfo->dataref))
 602                return;
 603
 604        for (i = 0; i < shinfo->nr_frags; i++)
 605                __skb_frag_unref(&shinfo->frags[i]);
 606
 607        if (shinfo->frag_list)
 608                kfree_skb_list(shinfo->frag_list);
 609
 610        skb_zcopy_clear(skb, true);
 611        skb_free_head(skb);
 612}
 613
 614/*
 615 *      Free an skbuff by memory without cleaning the state.
 616 */
 617static void kfree_skbmem(struct sk_buff *skb)
 618{
 619        struct sk_buff_fclones *fclones;
 620
 621        switch (skb->fclone) {
 622        case SKB_FCLONE_UNAVAILABLE:
 623                kmem_cache_free(skbuff_head_cache, skb);
 624                return;
 625
 626        case SKB_FCLONE_ORIG:
 627                fclones = container_of(skb, struct sk_buff_fclones, skb1);
 628
  629                /* We usually free the clone (TX completion) before the original skb.
 630                 * This test would have no chance to be true for the clone,
 631                 * while here, branch prediction will be good.
 632                 */
 633                if (refcount_read(&fclones->fclone_ref) == 1)
 634                        goto fastpath;
 635                break;
 636
 637        default: /* SKB_FCLONE_CLONE */
 638                fclones = container_of(skb, struct sk_buff_fclones, skb2);
 639                break;
 640        }
 641        if (!refcount_dec_and_test(&fclones->fclone_ref))
 642                return;
 643fastpath:
 644        kmem_cache_free(skbuff_fclone_cache, fclones);
 645}
 646
 647void skb_release_head_state(struct sk_buff *skb)
 648{
 649        skb_dst_drop(skb);
 650        if (skb->destructor) {
 651                WARN_ON(in_irq());
 652                skb->destructor(skb);
 653        }
 654#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 655        nf_conntrack_put(skb_nfct(skb));
 656#endif
 657        skb_ext_put(skb);
 658}
 659
 660/* Free everything but the sk_buff shell. */
 661static void skb_release_all(struct sk_buff *skb)
 662{
 663        skb_release_head_state(skb);
 664        if (likely(skb->head))
 665                skb_release_data(skb);
 666}
 667
 668/**
 669 *      __kfree_skb - private function
 670 *      @skb: buffer
 671 *
 672 *      Free an sk_buff. Release anything attached to the buffer.
 673 *      Clean the state. This is an internal helper function. Users should
 674 *      always call kfree_skb
 675 */
 676
 677void __kfree_skb(struct sk_buff *skb)
 678{
 679        skb_release_all(skb);
 680        kfree_skbmem(skb);
 681}
 682EXPORT_SYMBOL(__kfree_skb);
 683
 684/**
 685 *      kfree_skb - free an sk_buff
 686 *      @skb: buffer to free
 687 *
 688 *      Drop a reference to the buffer and free it if the usage count has
 689 *      hit zero.
 690 */
 691void kfree_skb(struct sk_buff *skb)
 692{
 693        if (!skb_unref(skb))
 694                return;
 695
 696        trace_kfree_skb(skb, __builtin_return_address(0));
 697        __kfree_skb(skb);
 698}
 699EXPORT_SYMBOL(kfree_skb);
 700
 701void kfree_skb_list(struct sk_buff *segs)
 702{
 703        while (segs) {
 704                struct sk_buff *next = segs->next;
 705
 706                kfree_skb(segs);
 707                segs = next;
 708        }
 709}
 710EXPORT_SYMBOL(kfree_skb_list);
 711
 712/* Dump skb information and contents.
 713 *
 714 * Must only be called from net_ratelimit()-ed paths.
 715 *
 716 * Dumps up to can_dump_full whole packets if full_pkt, headers otherwise.
 717 */
 718void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
 719{
 720        static atomic_t can_dump_full = ATOMIC_INIT(5);
 721        struct skb_shared_info *sh = skb_shinfo(skb);
 722        struct net_device *dev = skb->dev;
 723        struct sock *sk = skb->sk;
 724        struct sk_buff *list_skb;
 725        bool has_mac, has_trans;
 726        int headroom, tailroom;
 727        int i, len, seg_len;
 728
 729        if (full_pkt)
 730                full_pkt = atomic_dec_if_positive(&can_dump_full) >= 0;
 731
 732        if (full_pkt)
 733                len = skb->len;
 734        else
 735                len = min_t(int, skb->len, MAX_HEADER + 128);
 736
 737        headroom = skb_headroom(skb);
 738        tailroom = skb_tailroom(skb);
 739
 740        has_mac = skb_mac_header_was_set(skb);
 741        has_trans = skb_transport_header_was_set(skb);
 742
 743        printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
 744               "mac=(%d,%d) net=(%d,%d) trans=%d\n"
 745               "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
 746               "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
 747               "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n",
 748               level, skb->len, headroom, skb_headlen(skb), tailroom,
 749               has_mac ? skb->mac_header : -1,
 750               has_mac ? skb_mac_header_len(skb) : -1,
 751               skb->network_header,
 752               has_trans ? skb_network_header_len(skb) : -1,
 753               has_trans ? skb->transport_header : -1,
 754               sh->tx_flags, sh->nr_frags,
 755               sh->gso_size, sh->gso_type, sh->gso_segs,
 756               skb->csum, skb->ip_summed, skb->csum_complete_sw,
 757               skb->csum_valid, skb->csum_level,
 758               skb->hash, skb->sw_hash, skb->l4_hash,
 759               ntohs(skb->protocol), skb->pkt_type, skb->skb_iif);
 760
 761        if (dev)
 762                printk("%sdev name=%s feat=0x%pNF\n",
 763                       level, dev->name, &dev->features);
 764        if (sk)
 765                printk("%ssk family=%hu type=%u proto=%u\n",
 766                       level, sk->sk_family, sk->sk_type, sk->sk_protocol);
 767
 768        if (full_pkt && headroom)
 769                print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET,
 770                               16, 1, skb->head, headroom, false);
 771
 772        seg_len = min_t(int, skb_headlen(skb), len);
 773        if (seg_len)
 774                print_hex_dump(level, "skb linear:   ", DUMP_PREFIX_OFFSET,
 775                               16, 1, skb->data, seg_len, false);
 776        len -= seg_len;
 777
 778        if (full_pkt && tailroom)
 779                print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET,
 780                               16, 1, skb_tail_pointer(skb), tailroom, false);
 781
 782        for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
 783                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 784                u32 p_off, p_len, copied;
 785                struct page *p;
 786                u8 *vaddr;
 787
 788                skb_frag_foreach_page(frag, skb_frag_off(frag),
 789                                      skb_frag_size(frag), p, p_off, p_len,
 790                                      copied) {
 791                        seg_len = min_t(int, p_len, len);
 792                        vaddr = kmap_atomic(p);
 793                        print_hex_dump(level, "skb frag:     ",
 794                                       DUMP_PREFIX_OFFSET,
 795                                       16, 1, vaddr + p_off, seg_len, false);
 796                        kunmap_atomic(vaddr);
 797                        len -= seg_len;
 798                        if (!len)
 799                                break;
 800                }
 801        }
 802
 803        if (full_pkt && skb_has_frag_list(skb)) {
 804                printk("skb fraglist:\n");
 805                skb_walk_frags(skb, list_skb)
 806                        skb_dump(level, list_skb, true);
 807        }
 808}
 809EXPORT_SYMBOL(skb_dump);
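/* Illustrative sketch (not in the upstream file): calling skb_dump() the way
 * the comment above requires, behind net_ratelimit(), dumping headers only.
 */
static __maybe_unused void skb_dump_example(const struct sk_buff *skb)
{
	if (net_ratelimit())
		skb_dump(KERN_ERR, skb, false);
}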
 810
 811/**
 812 *      skb_tx_error - report an sk_buff xmit error
 813 *      @skb: buffer that triggered an error
 814 *
 815 *      Report xmit error if a device callback is tracking this skb.
 816 *      skb must be freed afterwards.
 817 */
 818void skb_tx_error(struct sk_buff *skb)
 819{
 820        skb_zcopy_clear(skb, true);
 821}
 822EXPORT_SYMBOL(skb_tx_error);
 823
 824/**
 825 *      consume_skb - free an skbuff
 826 *      @skb: buffer to free
 827 *
 828 *      Drop a ref to the buffer and free it if the usage count has hit zero
 829 *      Functions identically to kfree_skb, but kfree_skb assumes that the frame
 830 *      is being dropped after a failure and notes that
 831 */
 832void consume_skb(struct sk_buff *skb)
 833{
 834        if (!skb_unref(skb))
 835                return;
 836
 837        trace_consume_skb(skb);
 838        __kfree_skb(skb);
 839}
 840EXPORT_SYMBOL(consume_skb);
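/* Illustrative sketch (not in the upstream file): the drop/consume split
 * documented above.  Dropped packets go through kfree_skb() so the
 * kfree_skb tracepoint (and drop monitors) see them; successfully handled
 * packets go through consume_skb().
 */
static __maybe_unused void skb_free_example(struct sk_buff *skb, bool delivered)
{
	if (delivered)
		consume_skb(skb);	/* normal end of life, not a drop */
	else
		kfree_skb(skb);		/* accounted as a packet drop */
}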
 841
 842/**
  843 *      __consume_stateless_skb - free an skbuff, assuming it is stateless
 844 *      @skb: buffer to free
 845 *
  846 *      Like consume_skb(), but this variant assumes that this is the last
  847 *      skb reference and all the head states have already been dropped.
 848 */
 849void __consume_stateless_skb(struct sk_buff *skb)
 850{
 851        trace_consume_skb(skb);
 852        skb_release_data(skb);
 853        kfree_skbmem(skb);
 854}
 855
 856void __kfree_skb_flush(void)
 857{
 858        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 859
 860        /* flush skb_cache if containing objects */
 861        if (nc->skb_count) {
 862                kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
 863                                     nc->skb_cache);
 864                nc->skb_count = 0;
 865        }
 866}
 867
 868static inline void _kfree_skb_defer(struct sk_buff *skb)
 869{
 870        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 871
 872        /* drop skb->head and call any destructors for packet */
 873        skb_release_all(skb);
 874
 875        /* record skb to CPU local list */
 876        nc->skb_cache[nc->skb_count++] = skb;
 877
 878#ifdef CONFIG_SLUB
 879        /* SLUB writes into objects when freeing */
 880        prefetchw(skb);
 881#endif
 882
 883        /* flush skb_cache if it is filled */
 884        if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
 885                kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
 886                                     nc->skb_cache);
 887                nc->skb_count = 0;
 888        }
 889}
 890void __kfree_skb_defer(struct sk_buff *skb)
 891{
 892        _kfree_skb_defer(skb);
 893}
 894
 895void napi_consume_skb(struct sk_buff *skb, int budget)
 896{
 897        if (unlikely(!skb))
 898                return;
 899
  900        /* A zero budget indicates a non-NAPI context called us, like netpoll */
 901        if (unlikely(!budget)) {
 902                dev_consume_skb_any(skb);
 903                return;
 904        }
 905
 906        if (!skb_unref(skb))
 907                return;
 908
 909        /* if reaching here SKB is ready to free */
 910        trace_consume_skb(skb);
 911
 912        /* if SKB is a clone, don't handle this case */
 913        if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
 914                __kfree_skb(skb);
 915                return;
 916        }
 917
 918        _kfree_skb_defer(skb);
 919}
 920EXPORT_SYMBOL(napi_consume_skb);
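/* Illustrative sketch (not in the upstream file): how a poll handler feeds
 * TX completions to napi_consume_skb(), passing its NAPI budget so the
 * bulk-free path is only taken in real NAPI context (budget == 0 falls back
 * to dev_consume_skb_any(), per the comment above).
 */
static __maybe_unused void tx_clean_example(struct sk_buff **done, int n, int budget)
{
	int i;

	for (i = 0; i < n; i++)
		napi_consume_skb(done[i], budget);
}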
 921
 922/* Make sure a field is enclosed inside headers_start/headers_end section */
 923#define CHECK_SKB_FIELD(field) \
 924        BUILD_BUG_ON(offsetof(struct sk_buff, field) <          \
 925                     offsetof(struct sk_buff, headers_start));  \
 926        BUILD_BUG_ON(offsetof(struct sk_buff, field) >          \
 927                     offsetof(struct sk_buff, headers_end));    \
 928
 929static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 930{
 931        new->tstamp             = old->tstamp;
 932        /* We do not copy old->sk */
 933        new->dev                = old->dev;
 934        memcpy(new->cb, old->cb, sizeof(old->cb));
 935        skb_dst_copy(new, old);
 936        __skb_ext_copy(new, old);
 937        __nf_copy(new, old, false);
 938
 939        /* Note : this field could be in headers_start/headers_end section
 940         * It is not yet because we do not want to have a 16 bit hole
 941         */
 942        new->queue_mapping = old->queue_mapping;
 943
 944        memcpy(&new->headers_start, &old->headers_start,
 945               offsetof(struct sk_buff, headers_end) -
 946               offsetof(struct sk_buff, headers_start));
 947        CHECK_SKB_FIELD(protocol);
 948        CHECK_SKB_FIELD(csum);
 949        CHECK_SKB_FIELD(hash);
 950        CHECK_SKB_FIELD(priority);
 951        CHECK_SKB_FIELD(skb_iif);
 952        CHECK_SKB_FIELD(vlan_proto);
 953        CHECK_SKB_FIELD(vlan_tci);
 954        CHECK_SKB_FIELD(transport_header);
 955        CHECK_SKB_FIELD(network_header);
 956        CHECK_SKB_FIELD(mac_header);
 957        CHECK_SKB_FIELD(inner_protocol);
 958        CHECK_SKB_FIELD(inner_transport_header);
 959        CHECK_SKB_FIELD(inner_network_header);
 960        CHECK_SKB_FIELD(inner_mac_header);
 961        CHECK_SKB_FIELD(mark);
 962#ifdef CONFIG_NETWORK_SECMARK
 963        CHECK_SKB_FIELD(secmark);
 964#endif
 965#ifdef CONFIG_NET_RX_BUSY_POLL
 966        CHECK_SKB_FIELD(napi_id);
 967#endif
 968#ifdef CONFIG_XPS
 969        CHECK_SKB_FIELD(sender_cpu);
 970#endif
 971#ifdef CONFIG_NET_SCHED
 972        CHECK_SKB_FIELD(tc_index);
 973#endif
 974
 975}
 976
 977/*
 978 * You should not add any new code to this function.  Add it to
 979 * __copy_skb_header above instead.
 980 */
 981static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
 982{
 983#define C(x) n->x = skb->x
 984
 985        n->next = n->prev = NULL;
 986        n->sk = NULL;
 987        __copy_skb_header(n, skb);
 988
 989        C(len);
 990        C(data_len);
 991        C(mac_len);
 992        n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
 993        n->cloned = 1;
 994        n->nohdr = 0;
 995        n->peeked = 0;
 996        C(pfmemalloc);
 997        n->destructor = NULL;
 998        C(tail);
 999        C(end);
1000        C(head);
1001        C(head_frag);
1002        C(data);
1003        C(truesize);
1004        refcount_set(&n->users, 1);
1005
1006        atomic_inc(&(skb_shinfo(skb)->dataref));
1007        skb->cloned = 1;
1008
1009        return n;
1010#undef C
1011}
1012
1013/**
1014 * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg
1015 * @first: first sk_buff of the msg
1016 */
1017struct sk_buff *alloc_skb_for_msg(struct sk_buff *first)
1018{
1019        struct sk_buff *n;
1020
1021        n = alloc_skb(0, GFP_ATOMIC);
1022        if (!n)
1023                return NULL;
1024
1025        n->len = first->len;
1026        n->data_len = first->len;
1027        n->truesize = first->truesize;
1028
1029        skb_shinfo(n)->frag_list = first;
1030
1031        __copy_skb_header(n, first);
1032        n->destructor = NULL;
1033
1034        return n;
1035}
1036EXPORT_SYMBOL_GPL(alloc_skb_for_msg);
1037
1038/**
1039 *      skb_morph       -       morph one skb into another
1040 *      @dst: the skb to receive the contents
1041 *      @src: the skb to supply the contents
1042 *
1043 *      This is identical to skb_clone except that the target skb is
1044 *      supplied by the user.
1045 *
1046 *      The target skb is returned upon exit.
1047 */
1048struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
1049{
1050        skb_release_all(dst);
1051        return __skb_clone(dst, src);
1052}
1053EXPORT_SYMBOL_GPL(skb_morph);
1054
1055int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
1056{
1057        unsigned long max_pg, num_pg, new_pg, old_pg;
1058        struct user_struct *user;
1059
1060        if (capable(CAP_IPC_LOCK) || !size)
1061                return 0;
1062
1063        num_pg = (size >> PAGE_SHIFT) + 2;      /* worst case */
1064        max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1065        user = mmp->user ? : current_user();
1066
1067        do {
1068                old_pg = atomic_long_read(&user->locked_vm);
1069                new_pg = old_pg + num_pg;
1070                if (new_pg > max_pg)
1071                        return -ENOBUFS;
1072        } while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) !=
1073                 old_pg);
1074
1075        if (!mmp->user) {
1076                mmp->user = get_uid(user);
1077                mmp->num_pg = num_pg;
1078        } else {
1079                mmp->num_pg += num_pg;
1080        }
1081
1082        return 0;
1083}
1084EXPORT_SYMBOL_GPL(mm_account_pinned_pages);
1085
1086void mm_unaccount_pinned_pages(struct mmpin *mmp)
1087{
1088        if (mmp->user) {
1089                atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
1090                free_uid(mmp->user);
1091        }
1092}
1093EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);
1094
1095struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size)
1096{
1097        struct ubuf_info *uarg;
1098        struct sk_buff *skb;
1099
1100        WARN_ON_ONCE(!in_task());
1101
1102        skb = sock_omalloc(sk, 0, GFP_KERNEL);
1103        if (!skb)
1104                return NULL;
1105
1106        BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
1107        uarg = (void *)skb->cb;
1108        uarg->mmp.user = NULL;
1109
1110        if (mm_account_pinned_pages(&uarg->mmp, size)) {
1111                kfree_skb(skb);
1112                return NULL;
1113        }
1114
1115        uarg->callback = sock_zerocopy_callback;
1116        uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
1117        uarg->len = 1;
1118        uarg->bytelen = size;
1119        uarg->zerocopy = 1;
1120        refcount_set(&uarg->refcnt, 1);
1121        sock_hold(sk);
1122
1123        return uarg;
1124}
1125EXPORT_SYMBOL_GPL(sock_zerocopy_alloc);
1126
1127static inline struct sk_buff *skb_from_uarg(struct ubuf_info *uarg)
1128{
1129        return container_of((void *)uarg, struct sk_buff, cb);
1130}
1131
1132struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
1133                                        struct ubuf_info *uarg)
1134{
1135        if (uarg) {
1136                const u32 byte_limit = 1 << 19;         /* limit to a few TSO */
1137                u32 bytelen, next;
1138
1139                /* realloc only when socket is locked (TCP, UDP cork),
1140                 * so uarg->len and sk_zckey access is serialized
1141                 */
1142                if (!sock_owned_by_user(sk)) {
1143                        WARN_ON_ONCE(1);
1144                        return NULL;
1145                }
1146
1147                bytelen = uarg->bytelen + size;
1148                if (uarg->len == USHRT_MAX - 1 || bytelen > byte_limit) {
1149                        /* TCP can create new skb to attach new uarg */
1150                        if (sk->sk_type == SOCK_STREAM)
1151                                goto new_alloc;
1152                        return NULL;
1153                }
1154
1155                next = (u32)atomic_read(&sk->sk_zckey);
1156                if ((u32)(uarg->id + uarg->len) == next) {
1157                        if (mm_account_pinned_pages(&uarg->mmp, size))
1158                                return NULL;
1159                        uarg->len++;
1160                        uarg->bytelen = bytelen;
1161                        atomic_set(&sk->sk_zckey, ++next);
1162
1163                        /* no extra ref when appending to datagram (MSG_MORE) */
1164                        if (sk->sk_type == SOCK_STREAM)
1165                                sock_zerocopy_get(uarg);
1166
1167                        return uarg;
1168                }
1169        }
1170
1171new_alloc:
1172        return sock_zerocopy_alloc(sk, size);
1173}
1174EXPORT_SYMBOL_GPL(sock_zerocopy_realloc);
1175
1176static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
1177{
1178        struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
1179        u32 old_lo, old_hi;
1180        u64 sum_len;
1181
1182        old_lo = serr->ee.ee_info;
1183        old_hi = serr->ee.ee_data;
1184        sum_len = old_hi - old_lo + 1ULL + len;
1185
1186        if (sum_len >= (1ULL << 32))
1187                return false;
1188
1189        if (lo != old_hi + 1)
1190                return false;
1191
1192        serr->ee.ee_data += len;
1193        return true;
1194}
1195
1196void sock_zerocopy_callback(struct ubuf_info *uarg, bool success)
1197{
1198        struct sk_buff *tail, *skb = skb_from_uarg(uarg);
1199        struct sock_exterr_skb *serr;
1200        struct sock *sk = skb->sk;
1201        struct sk_buff_head *q;
1202        unsigned long flags;
1203        u32 lo, hi;
1204        u16 len;
1205
1206        mm_unaccount_pinned_pages(&uarg->mmp);
1207
1208        /* if !len, there was only 1 call, and it was aborted
1209         * so do not queue a completion notification
1210         */
1211        if (!uarg->len || sock_flag(sk, SOCK_DEAD))
1212                goto release;
1213
1214        len = uarg->len;
1215        lo = uarg->id;
1216        hi = uarg->id + len - 1;
1217
1218        serr = SKB_EXT_ERR(skb);
1219        memset(serr, 0, sizeof(*serr));
1220        serr->ee.ee_errno = 0;
1221        serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
1222        serr->ee.ee_data = hi;
1223        serr->ee.ee_info = lo;
1224        if (!success)
1225                serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;
1226
1227        q = &sk->sk_error_queue;
1228        spin_lock_irqsave(&q->lock, flags);
1229        tail = skb_peek_tail(q);
1230        if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY ||
1231            !skb_zerocopy_notify_extend(tail, lo, len)) {
1232                __skb_queue_tail(q, skb);
1233                skb = NULL;
1234        }
1235        spin_unlock_irqrestore(&q->lock, flags);
1236
1237        sk->sk_error_report(sk);
1238
1239release:
1240        consume_skb(skb);
1241        sock_put(sk);
1242}
1243EXPORT_SYMBOL_GPL(sock_zerocopy_callback);
1244
1245void sock_zerocopy_put(struct ubuf_info *uarg)
1246{
1247        if (uarg && refcount_dec_and_test(&uarg->refcnt)) {
1248                if (uarg->callback)
1249                        uarg->callback(uarg, uarg->zerocopy);
1250                else
1251                        consume_skb(skb_from_uarg(uarg));
1252        }
1253}
1254EXPORT_SYMBOL_GPL(sock_zerocopy_put);
1255
1256void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
1257{
1258        if (uarg) {
1259                struct sock *sk = skb_from_uarg(uarg)->sk;
1260
1261                atomic_dec(&sk->sk_zckey);
1262                uarg->len--;
1263
1264                if (have_uref)
1265                        sock_zerocopy_put(uarg);
1266        }
1267}
1268EXPORT_SYMBOL_GPL(sock_zerocopy_put_abort);
1269
1270int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len)
1271{
1272        return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len);
1273}
1274EXPORT_SYMBOL_GPL(skb_zerocopy_iter_dgram);
1275
1276int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
1277                             struct msghdr *msg, int len,
1278                             struct ubuf_info *uarg)
1279{
1280        struct ubuf_info *orig_uarg = skb_zcopy(skb);
1281        struct iov_iter orig_iter = msg->msg_iter;
1282        int err, orig_len = skb->len;
1283
1284        /* An skb can only point to one uarg. This edge case happens when
1285         * TCP appends to an skb, but zerocopy_realloc triggered a new alloc.
1286         */
1287        if (orig_uarg && uarg != orig_uarg)
1288                return -EEXIST;
1289
1290        err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
1291        if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
1292                struct sock *save_sk = skb->sk;
1293
1294                /* Streams do not free skb on error. Reset to prev state. */
1295                msg->msg_iter = orig_iter;
1296                skb->sk = sk;
1297                ___pskb_trim(skb, orig_len);
1298                skb->sk = save_sk;
1299                return err;
1300        }
1301
1302        skb_zcopy_set(skb, uarg, NULL);
1303        return skb->len - orig_len;
1304}
1305EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);
1306
1307static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
1308                              gfp_t gfp_mask)
1309{
1310        if (skb_zcopy(orig)) {
1311                if (skb_zcopy(nskb)) {
1312                        /* !gfp_mask callers are verified to !skb_zcopy(nskb) */
1313                        if (!gfp_mask) {
1314                                WARN_ON_ONCE(1);
1315                                return -ENOMEM;
1316                        }
1317                        if (skb_uarg(nskb) == skb_uarg(orig))
1318                                return 0;
1319                        if (skb_copy_ubufs(nskb, GFP_ATOMIC))
1320                                return -EIO;
1321                }
1322                skb_zcopy_set(nskb, skb_uarg(orig), NULL);
1323        }
1324        return 0;
1325}
1326
1327/**
1328 *      skb_copy_ubufs  -       copy userspace skb frags buffers to kernel
1329 *      @skb: the skb to modify
1330 *      @gfp_mask: allocation priority
1331 *
1332 *      This must be called on SKBTX_DEV_ZEROCOPY skb.
1333 *      It will copy all frags into kernel and drop the reference
1334 *      to userspace pages.
1335 *
 1336 *      If this function is called from an interrupt, @gfp_mask must be
 1337 *      %GFP_ATOMIC.
1338 *
1339 *      Returns 0 on success or a negative error code on failure
1340 *      to allocate kernel memory to copy to.
1341 */
1342int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
1343{
1344        int num_frags = skb_shinfo(skb)->nr_frags;
1345        struct page *page, *head = NULL;
1346        int i, new_frags;
1347        u32 d_off;
1348
1349        if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
1350                return -EINVAL;
1351
1352        if (!num_frags)
1353                goto release;
1354
1355        new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
1356        for (i = 0; i < new_frags; i++) {
1357                page = alloc_page(gfp_mask);
1358                if (!page) {
1359                        while (head) {
1360                                struct page *next = (struct page *)page_private(head);
1361                                put_page(head);
1362                                head = next;
1363                        }
1364                        return -ENOMEM;
1365                }
1366                set_page_private(page, (unsigned long)head);
1367                head = page;
1368        }
1369
1370        page = head;
1371        d_off = 0;
1372        for (i = 0; i < num_frags; i++) {
1373                skb_frag_t *f = &skb_shinfo(skb)->frags[i];
1374                u32 p_off, p_len, copied;
1375                struct page *p;
1376                u8 *vaddr;
1377
1378                skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f),
1379                                      p, p_off, p_len, copied) {
1380                        u32 copy, done = 0;
1381                        vaddr = kmap_atomic(p);
1382
1383                        while (done < p_len) {
1384                                if (d_off == PAGE_SIZE) {
1385                                        d_off = 0;
1386                                        page = (struct page *)page_private(page);
1387                                }
1388                                copy = min_t(u32, PAGE_SIZE - d_off, p_len - done);
1389                                memcpy(page_address(page) + d_off,
1390                                       vaddr + p_off + done, copy);
1391                                done += copy;
1392                                d_off += copy;
1393                        }
1394                        kunmap_atomic(vaddr);
1395                }
1396        }
1397
1398        /* skb frags release userspace buffers */
1399        for (i = 0; i < num_frags; i++)
1400                skb_frag_unref(skb, i);
1401
1402        /* skb frags point to kernel buffers */
1403        for (i = 0; i < new_frags - 1; i++) {
1404                __skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE);
1405                head = (struct page *)page_private(head);
1406        }
1407        __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
1408        skb_shinfo(skb)->nr_frags = new_frags;
1409
1410release:
1411        skb_zcopy_clear(skb, false);
1412        return 0;
1413}
1414EXPORT_SYMBOL_GPL(skb_copy_ubufs);
1415
1416/**
1417 *      skb_clone       -       duplicate an sk_buff
1418 *      @skb: buffer to clone
1419 *      @gfp_mask: allocation priority
1420 *
1421 *      Duplicate an &sk_buff. The new one is not owned by a socket. Both
1422 *      copies share the same packet data but not structure. The new
1423 *      buffer has a reference count of 1. If the allocation fails the
1424 *      function returns %NULL otherwise the new buffer is returned.
1425 *
 1426 *      If this function is called from an interrupt, @gfp_mask must be
 1427 *      %GFP_ATOMIC.
1428 */
1429
1430struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
1431{
1432        struct sk_buff_fclones *fclones = container_of(skb,
1433                                                       struct sk_buff_fclones,
1434                                                       skb1);
1435        struct sk_buff *n;
1436
1437        if (skb_orphan_frags(skb, gfp_mask))
1438                return NULL;
1439
1440        if (skb->fclone == SKB_FCLONE_ORIG &&
1441            refcount_read(&fclones->fclone_ref) == 1) {
1442                n = &fclones->skb2;
1443                refcount_set(&fclones->fclone_ref, 2);
1444        } else {
1445                if (skb_pfmemalloc(skb))
1446                        gfp_mask |= __GFP_MEMALLOC;
1447
1448                n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
1449                if (!n)
1450                        return NULL;
1451
1452                n->fclone = SKB_FCLONE_UNAVAILABLE;
1453        }
1454
1455        return __skb_clone(n, skb);
1456}
1457EXPORT_SYMBOL(skb_clone);
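/* Illustrative sketch (not in the upstream file): cloning as described above.
 * The clone shares the packet data with the original, so a caller that wants
 * to rewrite headers must still make the head private (e.g. skb_cow_head())
 * before writing to it.
 */
static __maybe_unused struct sk_buff *skb_clone_example(struct sk_buff *skb)
{
	struct sk_buff *clone;

	clone = skb_clone(skb, GFP_ATOMIC);
	if (!clone)
		return NULL;

	if (skb_cow_head(clone, 0)) {	/* unshare the head before editing it */
		kfree_skb(clone);
		return NULL;
	}

	return clone;
}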
1458
1459void skb_headers_offset_update(struct sk_buff *skb, int off)
1460{
1461        /* Only adjust this if it actually is csum_start rather than csum */
1462        if (skb->ip_summed == CHECKSUM_PARTIAL)
1463                skb->csum_start += off;
1464        /* {transport,network,mac}_header and tail are relative to skb->head */
1465        skb->transport_header += off;
1466        skb->network_header   += off;
1467        if (skb_mac_header_was_set(skb))
1468                skb->mac_header += off;
1469        skb->inner_transport_header += off;
1470        skb->inner_network_header += off;
1471        skb->inner_mac_header += off;
1472}
1473EXPORT_SYMBOL(skb_headers_offset_update);
1474
1475void skb_copy_header(struct sk_buff *new, const struct sk_buff *old)
1476{
1477        __copy_skb_header(new, old);
1478
1479        skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
1480        skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
1481        skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
1482}
1483EXPORT_SYMBOL(skb_copy_header);
1484
1485static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
1486{
1487        if (skb_pfmemalloc(skb))
1488                return SKB_ALLOC_RX;
1489        return 0;
1490}
1491
1492/**
1493 *      skb_copy        -       create private copy of an sk_buff
1494 *      @skb: buffer to copy
1495 *      @gfp_mask: allocation priority
1496 *
1497 *      Make a copy of both an &sk_buff and its data. This is used when the
1498 *      caller wishes to modify the data and needs a private copy of the
1499 *      data to alter. Returns %NULL on failure or the pointer to the buffer
1500 *      on success. The returned buffer has a reference count of 1.
1501 *
1502 *      As by-product this function converts non-linear &sk_buff to linear
1503 *      one, so that &sk_buff becomes completely private and caller is allowed
1504 *      to modify all the data of returned buffer. This means that this
1505 *      function is not recommended for use in circumstances when only
1506 *      header is going to be modified. Use pskb_copy() instead.
1507 */
1508
1509struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
1510{
1511        int headerlen = skb_headroom(skb);
1512        unsigned int size = skb_end_offset(skb) + skb->data_len;
1513        struct sk_buff *n = __alloc_skb(size, gfp_mask,
1514                                        skb_alloc_rx_flag(skb), NUMA_NO_NODE);
1515
1516        if (!n)
1517                return NULL;
1518
1519        /* Set the data pointer */
1520        skb_reserve(n, headerlen);
1521        /* Set the tail pointer and length */
1522        skb_put(n, skb->len);
1523
1524        BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
1525
1526        skb_copy_header(n, skb);
1527        return n;
1528}
1529EXPORT_SYMBOL(skb_copy);
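/* Illustrative sketch (not in the upstream file): choosing between the full
 * copy above and pskb_copy(), per the recommendation in the kernel-doc.
 */
static __maybe_unused struct sk_buff *skb_private_copy_example(struct sk_buff *skb,
							       bool headers_only)
{
	if (headers_only)
		return pskb_copy(skb, GFP_ATOMIC);	/* frags stay shared */

	return skb_copy(skb, GFP_ATOMIC);		/* fully linear, private data */
}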
1530
1531/**
1532 *      __pskb_copy_fclone      -  create copy of an sk_buff with private head.
1533 *      @skb: buffer to copy
1534 *      @headroom: headroom of new skb
1535 *      @gfp_mask: allocation priority
1536 *      @fclone: if true allocate the copy of the skb from the fclone
1537 *      cache instead of the head cache; it is recommended to set this
1538 *      to true for the cases where the copy will likely be cloned
1539 *
1540 *      Make a copy of both an &sk_buff and part of its data, located
1541 *      in header. Fragmented data remain shared. This is used when
1542 *      the caller wishes to modify only header of &sk_buff and needs
1543 *      private copy of the header to alter. Returns %NULL on failure
1544 *      or the pointer to the buffer on success.
1545 *      The returned buffer has a reference count of 1.
1546 */
1547
1548struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
1549                                   gfp_t gfp_mask, bool fclone)
1550{
1551        unsigned int size = skb_headlen(skb) + headroom;
1552        int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
1553        struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);
1554
1555        if (!n)
1556                goto out;
1557
1558        /* Set the data pointer */
1559        skb_reserve(n, headroom);
1560        /* Set the tail pointer and length */
1561        skb_put(n, skb_headlen(skb));
1562        /* Copy the bytes */
1563        skb_copy_from_linear_data(skb, n->data, n->len);
1564
1565        n->truesize += skb->data_len;
1566        n->data_len  = skb->data_len;
1567        n->len       = skb->len;
1568
1569        if (skb_shinfo(skb)->nr_frags) {
1570                int i;
1571
1572                if (skb_orphan_frags(skb, gfp_mask) ||
1573                    skb_zerocopy_clone(n, skb, gfp_mask)) {
1574                        kfree_skb(n);
1575                        n = NULL;
1576                        goto out;
1577                }
1578                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1579                        skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
1580                        skb_frag_ref(skb, i);
1581                }
1582                skb_shinfo(n)->nr_frags = i;
1583        }
1584
1585        if (skb_has_frag_list(skb)) {
1586                skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
1587                skb_clone_fraglist(n);
1588        }
1589
1590        skb_copy_header(n, skb);
1591out:
1592        return n;
1593}
1594EXPORT_SYMBOL(__pskb_copy_fclone);
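
/* Illustrative sketch (not part of the original file): when only the header
 * needs changing, pskb_copy() (a wrapper around __pskb_copy_fclone() declared
 * in <linux/skbuff.h>) is the cheaper choice: the paged fragments stay shared
 * and only the linear header is duplicated.
 */
static __maybe_unused struct sk_buff *pskb_copy_example(struct sk_buff *skb)
{
        struct sk_buff *copy;

        copy = pskb_copy(skb, GFP_ATOMIC);
        if (!copy)
                return NULL;

        /* Safe: the linear header is private to us now. */
        if (skb_headlen(copy) >= ETH_HLEN)
                memset(copy->data, 0, ETH_HLEN);

        return copy;
}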
1595
1596/**
1597 *      pskb_expand_head - reallocate header of &sk_buff
1598 *      @skb: buffer to reallocate
1599 *      @nhead: room to add at head
1600 *      @ntail: room to add at tail
1601 *      @gfp_mask: allocation priority
1602 *
1603 *      Expands (or creates an identical copy, if @nhead and @ntail are zero)
1604 *      the header of @skb. The &sk_buff itself is not changed and MUST have
1605 *      a reference count of 1. Returns zero on success, or a negative error
1606 *      code if expansion failed. In the latter case, the &sk_buff is not changed.
1607 *
1608 *      All the pointers pointing into skb header may change and must be
1609 *      reloaded after call to this function.
1610 */
1611
1612int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
1613                     gfp_t gfp_mask)
1614{
1615        int i, osize = skb_end_offset(skb);
1616        int size = osize + nhead + ntail;
1617        long off;
1618        u8 *data;
1619
1620        BUG_ON(nhead < 0);
1621
1622        BUG_ON(skb_shared(skb));
1623
1624        size = SKB_DATA_ALIGN(size);
1625
1626        if (skb_pfmemalloc(skb))
1627                gfp_mask |= __GFP_MEMALLOC;
1628        data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
1629                               gfp_mask, NUMA_NO_NODE, NULL);
1630        if (!data)
1631                goto nodata;
1632        size = SKB_WITH_OVERHEAD(ksize(data));
1633
1634        /* Copy only real data... and, alas, header. This should be
1635         * optimized for the case where the header is empty.
1636         */
1637        memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
1638
1639        memcpy((struct skb_shared_info *)(data + size),
1640               skb_shinfo(skb),
1641               offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
1642
1643        /*
1644         * if shinfo is shared we must drop the old head gracefully, but if it
1645         * is not we can just drop the old head and let the existing refcount
1646         * be since all we did is relocate the values
1647         */
1648        if (skb_cloned(skb)) {
1649                if (skb_orphan_frags(skb, gfp_mask))
1650                        goto nofrags;
1651                if (skb_zcopy(skb))
1652                        refcount_inc(&skb_uarg(skb)->refcnt);
1653                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1654                        skb_frag_ref(skb, i);
1655
1656                if (skb_has_frag_list(skb))
1657                        skb_clone_fraglist(skb);
1658
1659                skb_release_data(skb);
1660        } else {
1661                skb_free_head(skb);
1662        }
1663        off = (data + nhead) - skb->head;
1664
1665        skb->head     = data;
1666        skb->head_frag = 0;
1667        skb->data    += off;
1668#ifdef NET_SKBUFF_DATA_USES_OFFSET
1669        skb->end      = size;
1670        off           = nhead;
1671#else
1672        skb->end      = skb->head + size;
1673#endif
1674        skb->tail             += off;
1675        skb_headers_offset_update(skb, nhead);
1676        skb->cloned   = 0;
1677        skb->hdr_len  = 0;
1678        skb->nohdr    = 0;
1679        atomic_set(&skb_shinfo(skb)->dataref, 1);
1680
1681        skb_metadata_clear(skb);
1682
1683        /* It is not generally safe to change skb->truesize.
1684         * For the moment, we only really care about the rx path, or
1685         * the case where the skb is orphaned (not attached to a socket).
1686         */
1687        if (!skb->sk || skb->destructor == sock_edemux)
1688                skb->truesize += size - osize;
1689
1690        return 0;
1691
1692nofrags:
1693        kfree(data);
1694nodata:
1695        return -ENOMEM;
1696}
1697EXPORT_SYMBOL(pskb_expand_head);
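
/* Illustrative sketch (not part of the original file): a typical caller grows
 * the headroom before pushing an encapsulation header and must reload any
 * cached header pointers afterwards, because the head may have moved. In
 * practice this check-and-expand step is usually done through a helper such
 * as skb_cow_head(); the 8-byte header length below is a hypothetical value.
 */
static __maybe_unused int pskb_expand_head_example(struct sk_buff *skb)
{
        unsigned int hdrlen = 8;        /* hypothetical encapsulation header */

        if (skb_headroom(skb) < hdrlen || skb_cloned(skb)) {
                if (pskb_expand_head(skb, SKB_DATA_ALIGN(hdrlen), 0,
                                     GFP_ATOMIC))
                        return -ENOMEM;
                /* skb->head, skb->data and skb->tail may all have moved. */
        }
        memset(skb_push(skb, hdrlen), 0, hdrlen);
        return 0;
}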
1698
1699/* Make private copy of skb with writable head and some headroom */
1700
1701struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
1702{
1703        struct sk_buff *skb2;
1704        int delta = headroom - skb_headroom(skb);
1705
1706        if (delta <= 0)
1707                skb2 = pskb_copy(skb, GFP_ATOMIC);
1708        else {
1709                skb2 = skb_clone(skb, GFP_ATOMIC);
1710                if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
1711                                             GFP_ATOMIC)) {
1712                        kfree_skb(skb2);
1713                        skb2 = NULL;
1714                }
1715        }
1716        return skb2;
1717}
1718EXPORT_SYMBOL(skb_realloc_headroom);
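
/* Illustrative sketch (not part of the original file): unlike
 * pskb_expand_head(), skb_realloc_headroom() leaves the original skb alone
 * and hands back a private copy, which suits paths that receive a possibly
 * shared skb but still need room for an extra header.
 */
static __maybe_unused struct sk_buff *realloc_headroom_example(struct sk_buff *skb,
                                                               unsigned int needed)
{
        struct sk_buff *nskb = skb_realloc_headroom(skb, needed);

        if (!nskb)
                return NULL;

        /* The original skb is untouched and remains the caller's to free. */
        return nskb;
}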
1719
1720/**
1721 *      skb_copy_expand -       copy and expand sk_buff
1722 *      @skb: buffer to copy
1723 *      @newheadroom: new free bytes at head
1724 *      @newtailroom: new free bytes at tail
1725 *      @gfp_mask: allocation priority
1726 *
1727 *      Make a copy of both an &sk_buff and its data and while doing so
1728 *      allocate additional space.
1729 *
1730 *      This is used when the caller wishes to modify the data and needs a
1731 *      private copy of the data to alter as well as more space for new fields.
1732 *      Returns %NULL on failure or the pointer to the buffer
1733 *      on success. The returned buffer has a reference count of 1.
1734 *
1735 *      You must pass %GFP_ATOMIC as the allocation priority if this function
1736 *      is called from an interrupt.
1737 */
1738struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
1739                                int newheadroom, int newtailroom,
1740                                gfp_t gfp_mask)
1741{
1742        /*
1743         *      Allocate the copy buffer
1744         */
1745        struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
1746                                        gfp_mask, skb_alloc_rx_flag(skb),
1747                                        NUMA_NO_NODE);
1748        int oldheadroom = skb_headroom(skb);
1749        int head_copy_len, head_copy_off;
1750
1751        if (!n)
1752                return NULL;
1753
1754        skb_reserve(n, newheadroom);
1755
1756        /* Set the tail pointer and length */
1757        skb_put(n, skb->len);
1758
1759        head_copy_len = oldheadroom;
1760        head_copy_off = 0;
1761        if (newheadroom <= head_copy_len)
1762                head_copy_len = newheadroom;
1763        else
1764                head_copy_off = newheadroom - head_copy_len;
1765
1766        /* Copy the linear header and data. */
1767        BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
1768                             skb->len + head_copy_len));
1769
1770        skb_copy_header(n, skb);
1771
1772        skb_headers_offset_update(n, newheadroom - oldheadroom);
1773
1774        return n;
1775}
1776EXPORT_SYMBOL(skb_copy_expand);
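
/* Illustrative sketch (not part of the original file): skb_copy_expand()
 * yields a private copy with extra room at both ends in one call, e.g. for a
 * tunnel that prepends an outer header and appends a trailer. The 16-byte
 * sizes are arbitrary placeholders.
 */
static __maybe_unused struct sk_buff *copy_expand_example(const struct sk_buff *skb)
{
        struct sk_buff *n;

        n = skb_copy_expand(skb, skb_headroom(skb) + 16, 16, GFP_ATOMIC);
        if (!n)
                return NULL;

        memset(skb_push(n, 16), 0, 16);         /* new outer header */
        memset(skb_put(n, 16), 0, 16);          /* new trailer */
        return n;
}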
1777
1778/**
1779 *      __skb_pad               -       zero pad the tail of an skb
1780 *      @skb: buffer to pad
1781 *      @pad: space to pad
1782 *      @free_on_error: free buffer on error
1783 *
1784 *      Ensure that a buffer is followed by a padding area that is zero
1785 *      filled. Used by network drivers which may DMA or transfer data
1786 *      beyond the buffer end onto the wire.
1787 *
1788 *      May return an error in out-of-memory cases. The skb is freed on error
1789 *      if @free_on_error is true.
1790 */
1791
1792int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
1793{
1794        int err;
1795        int ntail;
1796
1797        /* If the skbuff is non-linear, tailroom is always zero. */
1798        if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
1799                memset(skb->data+skb->len, 0, pad);
1800                return 0;
1801        }
1802
1803        ntail = skb->data_len + pad - (skb->end - skb->tail);
1804        if (likely(skb_cloned(skb) || ntail > 0)) {
1805                err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
1806                if (unlikely(err))
1807                        goto free_skb;
1808        }
1809
1810        /* FIXME: The use of this function with non-linear skb's really needs
1811         * to be audited.
1812         */
1813        err = skb_linearize(skb);
1814        if (unlikely(err))
1815                goto free_skb;
1816
1817        memset(skb->data + skb->len, 0, pad);
1818        return 0;
1819
1820free_skb:
1821        if (free_on_error)
1822                kfree_skb(skb);
1823        return err;
1824}
1825EXPORT_SYMBOL(__skb_pad);
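
/* Illustrative sketch (not part of the original file): drivers usually reach
 * __skb_pad() through wrappers such as skb_put_padto(), e.g. to make sure a
 * short Ethernet frame is zero-padded to the minimum length before it is
 * handed to hardware.
 */
static __maybe_unused int pad_example(struct sk_buff *skb)
{
        /* skb_put_padto() frees the skb on failure, so on error the caller
         * must not touch it again.
         */
        if (skb_put_padto(skb, ETH_ZLEN))
                return -ENOMEM;

        return 0;
}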
1826
1827/**
1828 *      pskb_put - add data to the tail of a potentially fragmented buffer
1829 *      @skb: start of the buffer to use
1830 *      @tail: tail fragment of the buffer to use
1831 *      @len: amount of data to add
1832 *
1833 *      This function extends the used data area of the potentially
1834 *      fragmented buffer. @tail must be the last fragment of @skb -- or
1835 *      @skb itself. If this would exceed the total buffer size the kernel
1836 *      will panic. A pointer to the first byte of the extra data is
1837 *      returned.
1838 */
1839
1840void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
1841{
1842        if (tail != skb) {
1843                skb->data_len += len;
1844                skb->len += len;
1845        }
1846        return skb_put(tail, len);
1847}
1848EXPORT_SYMBOL_GPL(pskb_put);
1849
1850/**
1851 *      skb_put - add data to a buffer
1852 *      @skb: buffer to use
1853 *      @len: amount of data to add
1854 *
1855 *      This function extends the used data area of the buffer. If this would
1856 *      exceed the total buffer size the kernel will panic. A pointer to the
1857 *      first byte of the extra data is returned.
1858 */
1859void *skb_put(struct sk_buff *skb, unsigned int len)
1860{
1861        void *tmp = skb_tail_pointer(skb);
1862        SKB_LINEAR_ASSERT(skb);
1863        skb->tail += len;
1864        skb->len  += len;
1865        if (unlikely(skb->tail > skb->end))
1866                skb_over_panic(skb, len, __builtin_return_address(0));
1867        return tmp;
1868}
1869EXPORT_SYMBOL(skb_put);
1870
1871/**
1872 *      skb_push - add data to the start of a buffer
1873 *      @skb: buffer to use
1874 *      @len: amount of data to add
1875 *
1876 *      This function extends the used data area of the buffer at the buffer
1877 *      start. If this would exceed the total buffer headroom the kernel will
1878 *      panic. A pointer to the first byte of the extra data is returned.
1879 */
1880void *skb_push(struct sk_buff *skb, unsigned int len)
1881{
1882        skb->data -= len;
1883        skb->len  += len;
1884        if (unlikely(skb->data < skb->head))
1885                skb_under_panic(skb, len, __builtin_return_address(0));
1886        return skb->data;
1887}
1888EXPORT_SYMBOL(skb_push);
1889
1890/**
1891 *      skb_pull - remove data from the start of a buffer
1892 *      @skb: buffer to use
1893 *      @len: amount of data to remove
1894 *
1895 *      This function removes data from the start of a buffer, returning
1896 *      the memory to the headroom. A pointer to the next data in the buffer
1897 *      is returned. Once the data has been pulled, future pushes will overwrite
1898 *      the old data.
1899 */
1900void *skb_pull(struct sk_buff *skb, unsigned int len)
1901{
1902        return skb_pull_inline(skb, len);
1903}
1904EXPORT_SYMBOL(skb_pull);
1905
1906/**
1907 *      skb_trim - remove end from a buffer
1908 *      @skb: buffer to alter
1909 *      @len: new length
1910 *
1911 *      Cut the length of a buffer down by removing data from the tail. If
1912 *      the buffer is already under the length specified it is not modified.
1913 *      The skb must be linear.
1914 */
1915void skb_trim(struct sk_buff *skb, unsigned int len)
1916{
1917        if (skb->len > len)
1918                __skb_trim(skb, len);
1919}
1920EXPORT_SYMBOL(skb_trim);
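
/* Illustrative sketch (not part of the original file): skb_put(), skb_push(),
 * skb_pull() and skb_trim() are typically used together when building or
 * parsing a linear packet. The 32/64/8-byte sizes are arbitrary.
 */
static __maybe_unused struct sk_buff *build_packet_example(void)
{
        struct sk_buff *skb = alloc_skb(128, GFP_KERNEL);

        if (!skb)
                return NULL;

        skb_reserve(skb, 32);                   /* leave headroom */
        memset(skb_put(skb, 64), 0, 64);        /* append payload */
        memset(skb_push(skb, 8), 0, 8);         /* prepend a header */
        skb_pull(skb, 8);                       /* ... and strip it again */
        skb_trim(skb, 32);                      /* keep the first 32 bytes */

        return skb;
}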
1921
1922/* Trims skb to length len. It can change skb pointers.
1923 */
1924
1925int ___pskb_trim(struct sk_buff *skb, unsigned int len)
1926{
1927        struct sk_buff **fragp;
1928        struct sk_buff *frag;
1929        int offset = skb_headlen(skb);
1930        int nfrags = skb_shinfo(skb)->nr_frags;
1931        int i;
1932        int err;
1933
1934        if (skb_cloned(skb) &&
1935            unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
1936                return err;
1937
1938        i = 0;
1939        if (offset >= len)
1940                goto drop_pages;
1941
1942        for (; i < nfrags; i++) {
1943                int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
1944
1945                if (end < len) {
1946                        offset = end;
1947                        continue;
1948                }
1949
1950                skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
1951
1952drop_pages:
1953                skb_shinfo(skb)->nr_frags = i;
1954
1955                for (; i < nfrags; i++)
1956                        skb_frag_unref(skb, i);
1957
1958                if (skb_has_frag_list(skb))
1959                        skb_drop_fraglist(skb);
1960                goto done;
1961        }
1962
1963        for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
1964             fragp = &frag->next) {
1965                int end = offset + frag->len;
1966
1967                if (skb_shared(frag)) {
1968                        struct sk_buff *nfrag;
1969
1970                        nfrag = skb_clone(frag, GFP_ATOMIC);
1971                        if (unlikely(!nfrag))
1972                                return -ENOMEM;
1973
1974                        nfrag->next = frag->next;
1975                        consume_skb(frag);
1976                        frag = nfrag;
1977                        *fragp = frag;
1978                }
1979
1980                if (end < len) {
1981                        offset = end;
1982                        continue;
1983                }
1984
1985                if (end > len &&
1986                    unlikely((err = pskb_trim(frag, len - offset))))
1987                        return err;
1988
1989                if (frag->next)
1990                        skb_drop_list(&frag->next);
1991                break;
1992        }
1993
1994done:
1995        if (len > skb_headlen(skb)) {
1996                skb->data_len -= skb->len - len;
1997                skb->len       = len;
1998        } else {
1999                skb->len       = len;
2000                skb->data_len  = 0;
2001                skb_set_tail_pointer(skb, len);
2002        }
2003
2004        if (!skb->sk || skb->destructor == sock_edemux)
2005                skb_condense(skb);
2006        return 0;
2007}
2008EXPORT_SYMBOL(___pskb_trim);
2009
2010/* Note: use pskb_trim_rcsum() instead of calling this directly
2011 */
2012int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
2013{
2014        if (skb->ip_summed == CHECKSUM_COMPLETE) {
2015                int delta = skb->len - len;
2016
2017                skb->csum = csum_block_sub(skb->csum,
2018                                           skb_checksum(skb, len, delta, 0),
2019                                           len);
2020        }
2021        return __pskb_trim(skb, len);
2022}
2023EXPORT_SYMBOL(pskb_trim_rcsum_slow);
2024
2025/**
2026 *      __pskb_pull_tail - advance tail of skb header
2027 *      @skb: buffer to reallocate
2028 *      @delta: number of bytes to advance tail
2029 *
2030 *      The function makes sense only on a fragmented &sk_buff:
2031 *      it expands the header, moving its tail forward and copying the
2032 *      necessary data from the fragmented part.
2033 *
2034 *      &sk_buff MUST have reference count of 1.
2035 *
2036 *      Returns %NULL (and the &sk_buff is not changed) if the pull failed,
2037 *      or the value of the new tail of the skb on success.
2038 *
2039 *      All the pointers pointing into skb header may change and must be
2040 *      reloaded after call to this function.
2041 */
2042
2043/* Moves tail of skb head forward, copying data from fragmented part,
2044 * when it is necessary.
2045 * 1. It may fail due to malloc failure.
2046 * 2. It may change skb pointers.
2047 *
2048 * It is pretty complicated. Luckily, it is called only in exceptional cases.
2049 */
2050void *__pskb_pull_tail(struct sk_buff *skb, int delta)
2051{
2052        /* If the skb does not have enough free space at the tail, get a new
2053         * one plus 128 bytes for future expansions. If we have enough room
2054         * at the tail, reallocate without expansion only if the skb is cloned.
2055         */
2056        int i, k, eat = (skb->tail + delta) - skb->end;
2057
2058        if (eat > 0 || skb_cloned(skb)) {
2059                if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
2060                                     GFP_ATOMIC))
2061                        return NULL;
2062        }
2063
2064        BUG_ON(skb_copy_bits(skb, skb_headlen(skb),
2065                             skb_tail_pointer(skb), delta));
2066
2067        /* Optimization: no fragments, no reason to pre-estimate the
2068         * size of the pulled pages. Superb.
2069         */
2070        if (!skb_has_frag_list(skb))
2071                goto pull_pages;
2072
2073        /* Estimate size of pulled pages. */
2074        eat = delta;
2075        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2076                int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2077
2078                if (size >= eat)
2079                        goto pull_pages;
2080                eat -= size;
2081        }
2082
2083        /* If we need to update the frag list, we are in trouble.
2084         * Certainly, it is possible to add an offset to the skb data,
2085         * but taking into account that pulling is expected to
2086         * be a very rare operation, it is worth fighting against
2087         * further bloating of the skb head and crucifying ourselves here instead.
2088         * Pure masochism, indeed. 8)8)
2089         */
2090        if (eat) {
2091                struct sk_buff *list = skb_shinfo(skb)->frag_list;
2092                struct sk_buff *clone = NULL;
2093                struct sk_buff *insp = NULL;
2094
2095                do {
2096                        if (list->len <= eat) {
2097                                /* Eaten as whole. */
2098                                eat -= list->len;
2099                                list = list->next;
2100                                insp = list;
2101                        } else {
2102                                /* Eaten partially. */
2103
2104                                if (skb_shared(list)) {
2105                                        /* Sucks! We need to fork list. :-( */
2106                                        clone = skb_clone(list, GFP_ATOMIC);
2107                                        if (!clone)
2108                                                return NULL;
2109                                        insp = list->next;
2110                                        list = clone;
2111                                } else {
2112                                        /* This may be pulled without
2113                                         * problems. */
2114                                        insp = list;
2115                                }
2116                                if (!pskb_pull(list, eat)) {
2117                                        kfree_skb(clone);
2118                                        return NULL;
2119                                }
2120                                break;
2121                        }
2122                } while (eat);
2123
2124                /* Free pulled out fragments. */
2125                while ((list = skb_shinfo(skb)->frag_list) != insp) {
2126                        skb_shinfo(skb)->frag_list = list->next;
2127                        kfree_skb(list);
2128                }
2129                /* And insert new clone at head. */
2130                if (clone) {
2131                        clone->next = list;
2132                        skb_shinfo(skb)->frag_list = clone;
2133                }
2134        }
2135        /* Success! Now we may commit changes to skb data. */
2136
2137pull_pages:
2138        eat = delta;
2139        k = 0;
2140        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2141                int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2142
2143                if (size <= eat) {
2144                        skb_frag_unref(skb, i);
2145                        eat -= size;
2146                } else {
2147                        skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2148
2149                        *frag = skb_shinfo(skb)->frags[i];
2150                        if (eat) {
2151                                skb_frag_off_add(frag, eat);
2152                                skb_frag_size_sub(frag, eat);
2153                                if (!i)
2154                                        goto end;
2155                                eat = 0;
2156                        }
2157                        k++;
2158                }
2159        }
2160        skb_shinfo(skb)->nr_frags = k;
2161
2162end:
2163        skb->tail     += delta;
2164        skb->data_len -= delta;
2165
2166        if (!skb->data_len)
2167                skb_zcopy_clear(skb, false);
2168
2169        return skb_tail_pointer(skb);
2170}
2171EXPORT_SYMBOL(__pskb_pull_tail);
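
/* Illustrative sketch (not part of the original file): most callers do not
 * invoke __pskb_pull_tail() directly but go through pskb_may_pull(), which
 * only falls back to it when the requested bytes are not already linear.
 * The 8-byte header length is a hypothetical value.
 */
static __maybe_unused int pull_header_example(struct sk_buff *skb)
{
        const unsigned int hdrlen = 8;  /* hypothetical protocol header size */

        if (!pskb_may_pull(skb, hdrlen))
                return -EINVAL;

        /* skb->data now covers at least hdrlen contiguous bytes, but any
         * previously cached header pointers must be reloaded.
         */
        return 0;
}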
2172
2173/**
2174 *      skb_copy_bits - copy bits from skb to kernel buffer
2175 *      @skb: source skb
2176 *      @offset: offset in source
2177 *      @to: destination buffer
2178 *      @len: number of bytes to copy
2179 *
2180 *      Copy the specified number of bytes from the source skb to the
2181 *      destination buffer.
2182 *
2183 *      CAUTION ! :
2184 *              If its prototype is ever changed,
2185 *              check arch/{*}/net/{*}.S files,
2186 *              since it is called from BPF assembly code.
2187 */
2188int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
2189{
2190        int start = skb_headlen(skb);
2191        struct sk_buff *frag_iter;
2192        int i, copy;
2193
2194        if (offset > (int)skb->len - len)
2195                goto fault;
2196
2197        /* Copy header. */
2198        if ((copy = start - offset) > 0) {
2199                if (copy > len)
2200                        copy = len;
2201                skb_copy_from_linear_data_offset(skb, offset, to, copy);
2202                if ((len -= copy) == 0)
2203                        return 0;
2204                offset += copy;
2205                to     += copy;
2206        }
2207
2208        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2209                int end;
2210                skb_frag_t *f = &skb_shinfo(skb)->frags[i];
2211
2212                WARN_ON(start > offset + len);
2213
2214                end = start + skb_frag_size(f);
2215                if ((copy = end - offset) > 0) {
2216                        u32 p_off, p_len, copied;
2217                        struct page *p;
2218                        u8 *vaddr;
2219
2220                        if (copy > len)
2221                                copy = len;
2222
2223                        skb_frag_foreach_page(f,
2224                                              skb_frag_off(f) + offset - start,
2225                                              copy, p, p_off, p_len, copied) {
2226                                vaddr = kmap_atomic(p);
2227                                memcpy(to + copied, vaddr + p_off, p_len);
2228                                kunmap_atomic(vaddr);
2229                        }
2230
2231                        if ((len -= copy) == 0)
2232                                return 0;
2233                        offset += copy;
2234                        to     += copy;
2235                }
2236                start = end;
2237        }
2238
2239        skb_walk_frags(skb, frag_iter) {
2240                int end;
2241
2242                WARN_ON(start > offset + len);
2243
2244                end = start + frag_iter->len;
2245                if ((copy = end - offset) > 0) {
2246                        if (copy > len)
2247                                copy = len;
2248                        if (skb_copy_bits(frag_iter, offset - start, to, copy))
2249                                goto fault;
2250                        if ((len -= copy) == 0)
2251                                return 0;
2252                        offset += copy;
2253                        to     += copy;
2254                }
2255                start = end;
2256        }
2257
2258        if (!len)
2259                return 0;
2260
2261fault:
2262        return -EFAULT;
2263}
2264EXPORT_SYMBOL(skb_copy_bits);
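
/* Illustrative sketch (not part of the original file): skb_copy_bits() is the
 * safe way to read bytes that may live in fragments or in the frag list, for
 * example to snapshot the first bytes of a packet into a local buffer.
 */
static __maybe_unused int peek_front_example(const struct sk_buff *skb)
{
        u8 hdr[16];

        if (skb->len < sizeof(hdr))
                return -EINVAL;

        /* Works whether or not the first 16 bytes are in the linear area. */
        return skb_copy_bits(skb, 0, hdr, sizeof(hdr));
}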
2265
2266/*
2267 * Callback from splice_to_pipe(), if we need to release some pages
2268 * at the end of the spd in case we error'ed out in filling the pipe.
2269 */
2270static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
2271{
2272        put_page(spd->pages[i]);
2273}
2274
2275static struct page *linear_to_page(struct page *page, unsigned int *len,
2276                                   unsigned int *offset,
2277                                   struct sock *sk)
2278{
2279        struct page_frag *pfrag = sk_page_frag(sk);
2280
2281        if (!sk_page_frag_refill(sk, pfrag))
2282                return NULL;
2283
2284        *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
2285
2286        memcpy(page_address(pfrag->page) + pfrag->offset,
2287               page_address(page) + *offset, *len);
2288        *offset = pfrag->offset;
2289        pfrag->offset += *len;
2290
2291        return pfrag->page;
2292}
2293
2294static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
2295                             struct page *page,
2296                             unsigned int offset)
2297{
2298        return  spd->nr_pages &&
2299                spd->pages[spd->nr_pages - 1] == page &&
2300                (spd->partial[spd->nr_pages - 1].offset +
2301                 spd->partial[spd->nr_pages - 1].len == offset);
2302}
2303
2304/*
2305 * Fill page/offset/length into spd, if it can hold more pages.
2306 */
2307static bool spd_fill_page(struct splice_pipe_desc *spd,
2308                          struct pipe_inode_info *pipe, struct page *page,
2309                          unsigned int *len, unsigned int offset,
2310                          bool linear,
2311                          struct sock *sk)
2312{
2313        if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
2314                return true;
2315
2316        if (linear) {
2317                page = linear_to_page(page, len, &offset, sk);
2318                if (!page)
2319                        return true;
2320        }
2321        if (spd_can_coalesce(spd, page, offset)) {
2322                spd->partial[spd->nr_pages - 1].len += *len;
2323                return false;
2324        }
2325        get_page(page);
2326        spd->pages[spd->nr_pages] = page;
2327        spd->partial[spd->nr_pages].len = *len;
2328        spd->partial[spd->nr_pages].offset = offset;
2329        spd->nr_pages++;
2330
2331        return false;
2332}
2333
2334static bool __splice_segment(struct page *page, unsigned int poff,
2335                             unsigned int plen, unsigned int *off,
2336                             unsigned int *len,
2337                             struct splice_pipe_desc *spd, bool linear,
2338                             struct sock *sk,
2339                             struct pipe_inode_info *pipe)
2340{
2341        if (!*len)
2342                return true;
2343
2344        /* skip this segment if already processed */
2345        if (*off >= plen) {
2346                *off -= plen;
2347                return false;
2348        }
2349
2350        /* ignore any bits we already processed */
2351        poff += *off;
2352        plen -= *off;
2353        *off = 0;
2354
2355        do {
2356                unsigned int flen = min(*len, plen);
2357
2358                if (spd_fill_page(spd, pipe, page, &flen, poff,
2359                                  linear, sk))
2360                        return true;
2361                poff += flen;
2362                plen -= flen;
2363                *len -= flen;
2364        } while (*len && plen);
2365
2366        return false;
2367}
2368
2369/*
2370 * Map linear and fragment data from the skb to spd. It reports true if the
2371 * pipe is full or if we already spliced the requested length.
2372 */
2373static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
2374                              unsigned int *offset, unsigned int *len,
2375                              struct splice_pipe_desc *spd, struct sock *sk)
2376{
2377        int seg;
2378        struct sk_buff *iter;
2379
2380        /* map the linear part :
2381         * If skb->head_frag is set, this 'linear' part is backed by a
2382         * fragment, and if the head is not shared with any clones then
2383         * we can avoid a copy since we own the head portion of this page.
2384         */
2385        if (__splice_segment(virt_to_page(skb->data),
2386                             (unsigned long) skb->data & (PAGE_SIZE - 1),
2387                             skb_headlen(skb),
2388                             offset, len, spd,
2389                             skb_head_is_locked(skb),
2390                             sk, pipe))
2391                return true;
2392
2393        /*
2394         * then map the fragments
2395         */
2396        for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
2397                const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
2398
2399                if (__splice_segment(skb_frag_page(f),
2400                                     skb_frag_off(f), skb_frag_size(f),
2401                                     offset, len, spd, false, sk, pipe))
2402                        return true;
2403        }
2404
2405        skb_walk_frags(skb, iter) {
2406                if (*offset >= iter->len) {
2407                        *offset -= iter->len;
2408                        continue;
2409                }
2410                /* __skb_splice_bits() only fails if the output has no room
2411                 * left, so no point in going over the frag_list for the error
2412                 * case.
2413                 */
2414                if (__skb_splice_bits(iter, pipe, offset, len, spd, sk))
2415                        return true;
2416        }
2417
2418        return false;
2419}
2420
2421/*
2422 * Map data from the skb to a pipe. Should handle both the linear part,
2423 * the fragments, and the frag list.
2424 */
2425int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
2426                    struct pipe_inode_info *pipe, unsigned int tlen,
2427                    unsigned int flags)
2428{
2429        struct partial_page partial[MAX_SKB_FRAGS];
2430        struct page *pages[MAX_SKB_FRAGS];
2431        struct splice_pipe_desc spd = {
2432                .pages = pages,
2433                .partial = partial,
2434                .nr_pages_max = MAX_SKB_FRAGS,
2435                .ops = &nosteal_pipe_buf_ops,
2436                .spd_release = sock_spd_release,
2437        };
2438        int ret = 0;
2439
2440        __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk);
2441
2442        if (spd.nr_pages)
2443                ret = splice_to_pipe(pipe, &spd);
2444
2445        return ret;
2446}
2447EXPORT_SYMBOL_GPL(skb_splice_bits);
2448
2449/* Send skb data on a socket. Socket must be locked. */
2450int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
2451                         int len)
2452{
2453        unsigned int orig_len = len;
2454        struct sk_buff *head = skb;
2455        unsigned short fragidx;
2456        int slen, ret;
2457
2458do_frag_list:
2459
2460        /* Deal with head data */
2461        while (offset < skb_headlen(skb) && len) {
2462                struct kvec kv;
2463                struct msghdr msg;
2464
2465                slen = min_t(int, len, skb_headlen(skb) - offset);
2466                kv.iov_base = skb->data + offset;
2467                kv.iov_len = slen;
2468                memset(&msg, 0, sizeof(msg));
2469                msg.msg_flags = MSG_DONTWAIT;
2470
2471                ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen);
2472                if (ret <= 0)
2473                        goto error;
2474
2475                offset += ret;
2476                len -= ret;
2477        }
2478
2479        /* All the data was skb head? */
2480        if (!len)
2481                goto out;
2482
2483        /* Make offset relative to start of frags */
2484        offset -= skb_headlen(skb);
2485
2486        /* Find where we are in frag list */
2487        for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
2488                skb_frag_t *frag  = &skb_shinfo(skb)->frags[fragidx];
2489
2490                if (offset < skb_frag_size(frag))
2491                        break;
2492
2493                offset -= skb_frag_size(frag);
2494        }
2495
2496        for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
2497                skb_frag_t *frag  = &skb_shinfo(skb)->frags[fragidx];
2498
2499                slen = min_t(size_t, len, skb_frag_size(frag) - offset);
2500
2501                while (slen) {
2502                        ret = kernel_sendpage_locked(sk, skb_frag_page(frag),
2503                                                     skb_frag_off(frag) + offset,
2504                                                     slen, MSG_DONTWAIT);
2505                        if (ret <= 0)
2506                                goto error;
2507
2508                        len -= ret;
2509                        offset += ret;
2510                        slen -= ret;
2511                }
2512
2513                offset = 0;
2514        }
2515
2516        if (len) {
2517                /* Process any frag lists */
2518
2519                if (skb == head) {
2520                        if (skb_has_frag_list(skb)) {
2521                                skb = skb_shinfo(skb)->frag_list;
2522                                goto do_frag_list;
2523                        }
2524                } else if (skb->next) {
2525                        skb = skb->next;
2526                        goto do_frag_list;
2527                }
2528        }
2529
2530out:
2531        return orig_len - len;
2532
2533error:
2534        return orig_len == len ? ret : orig_len - len;
2535}
2536EXPORT_SYMBOL_GPL(skb_send_sock_locked);
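
/* Illustrative sketch (not part of the original file): the caller owns the
 * socket lock around skb_send_sock_locked(); a partial send (return value
 * smaller than the requested length) is possible and left for the caller to
 * handle.
 */
static __maybe_unused int send_skb_example(struct sock *sk, struct sk_buff *skb)
{
        int sent;

        lock_sock(sk);
        sent = skb_send_sock_locked(sk, skb, 0, skb->len);
        release_sock(sk);

        return sent;
}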
2537
2538/**
2539 *      skb_store_bits - store bits from kernel buffer to skb
2540 *      @skb: destination buffer
2541 *      @offset: offset in destination
2542 *      @from: source buffer
2543 *      @len: number of bytes to copy
2544 *
2545 *      Copy the specified number of bytes from the source buffer to the
2546 *      destination skb.  This function handles all the messy bits of
2547 *      traversing fragment lists and such.
2548 */
2549
2550int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
2551{
2552        int start = skb_headlen(skb);
2553        struct sk_buff *frag_iter;
2554        int i, copy;
2555
2556        if (offset > (int)skb->len - len)
2557                goto fault;
2558
2559        if ((copy = start - offset) > 0) {
2560                if (copy > len)
2561                        copy = len;
2562                skb_copy_to_linear_data_offset(skb, offset, from, copy);
2563                if ((len -= copy) == 0)
2564                        return 0;
2565                offset += copy;
2566                from += copy;
2567        }
2568
2569        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2570                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2571                int end;
2572
2573                WARN_ON(start > offset + len);
2574
2575                end = start + skb_frag_size(frag);
2576                if ((copy = end - offset) > 0) {
2577                        u32 p_off, p_len, copied;
2578                        struct page *p;
2579                        u8 *vaddr;
2580
2581                        if (copy > len)
2582                                copy = len;
2583
2584                        skb_frag_foreach_page(frag,
2585                                              skb_frag_off(frag) + offset - start,
2586                                              copy, p, p_off, p_len, copied) {
2587                                vaddr = kmap_atomic(p);
2588                                memcpy(vaddr + p_off, from + copied, p_len);
2589                                kunmap_atomic(vaddr);
2590                        }
2591
2592                        if ((len -= copy) == 0)
2593                                return 0;
2594                        offset += copy;
2595                        from += copy;
2596                }
2597                start = end;
2598        }
2599
2600        skb_walk_frags(skb, frag_iter) {
2601                int end;
2602
2603                WARN_ON(start > offset + len);
2604
2605                end = start + frag_iter->len;
2606                if ((copy = end - offset) > 0) {
2607                        if (copy > len)
2608                                copy = len;
2609                        if (skb_store_bits(frag_iter, offset - start,
2610                                           from, copy))
2611                                goto fault;
2612                        if ((len -= copy) == 0)
2613                                return 0;
2614                        offset += copy;
2615                        from += copy;
2616                }
2617                start = end;
2618        }
2619        if (!len)
2620                return 0;
2621
2622fault:
2623        return -EFAULT;
2624}
2625EXPORT_SYMBOL(skb_store_bits);
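
/* Illustrative sketch (not part of the original file): skb_store_bits() is
 * the write-side counterpart of skb_copy_bits(); it patches bytes at a given
 * offset without the caller caring where they physically live. The skb is
 * assumed to already be private (e.g. after skb_cow()).
 */
static __maybe_unused int patch_bytes_example(struct sk_buff *skb, int offset)
{
        const u8 zeros[4] = { 0, 0, 0, 0 };

        if (offset < 0 || (unsigned int)offset + sizeof(zeros) > skb->len)
                return -EINVAL;

        return skb_store_bits(skb, offset, zeros, sizeof(zeros));
}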
2626
2627/* Checksum skb data. */
2628__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
2629                      __wsum csum, const struct skb_checksum_ops *ops)
2630{
2631        int start = skb_headlen(skb);
2632        int i, copy = start - offset;
2633        struct sk_buff *frag_iter;
2634        int pos = 0;
2635
2636        /* Checksum header. */
2637        if (copy > 0) {
2638                if (copy > len)
2639                        copy = len;
2640                csum = INDIRECT_CALL_1(ops->update, csum_partial_ext,
2641                                       skb->data + offset, copy, csum);
2642                if ((len -= copy) == 0)
2643                        return csum;
2644                offset += copy;
2645                pos     = copy;
2646        }
2647
2648        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2649                int end;
2650                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2651
2652                WARN_ON(start > offset + len);
2653
2654                end = start + skb_frag_size(frag);
2655                if ((copy = end - offset) > 0) {
2656                        u32 p_off, p_len, copied;
2657                        struct page *p;
2658                        __wsum csum2;
2659                        u8 *vaddr;
2660
2661                        if (copy > len)
2662                                copy = len;
2663
2664                        skb_frag_foreach_page(frag,
2665                                              skb_frag_off(frag) + offset - start,
2666                                              copy, p, p_off, p_len, copied) {
2667                                vaddr = kmap_atomic(p);
2668                                csum2 = INDIRECT_CALL_1(ops->update,
2669                                                        csum_partial_ext,
2670                                                        vaddr + p_off, p_len, 0);
2671                                kunmap_atomic(vaddr);
2672                                csum = INDIRECT_CALL_1(ops->combine,
2673                                                       csum_block_add_ext, csum,
2674                                                       csum2, pos, p_len);
2675                                pos += p_len;
2676                        }
2677
2678                        if (!(len -= copy))
2679                                return csum;
2680                        offset += copy;
2681                }
2682                start = end;
2683        }
2684
2685        skb_walk_frags(skb, frag_iter) {
2686                int end;
2687
2688                WARN_ON(start > offset + len);
2689
2690                end = start + frag_iter->len;
2691                if ((copy = end - offset) > 0) {
2692                        __wsum csum2;
2693                        if (copy > len)
2694                                copy = len;
2695                        csum2 = __skb_checksum(frag_iter, offset - start,
2696                                               copy, 0, ops);
2697                        csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext,
2698                                               csum, csum2, pos, copy);
2699                        if ((len -= copy) == 0)
2700                                return csum;
2701                        offset += copy;
2702                        pos    += copy;
2703                }
2704                start = end;
2705        }
2706        BUG_ON(len);
2707
2708        return csum;
2709}
2710EXPORT_SYMBOL(__skb_checksum);
2711
2712__wsum skb_checksum(const struct sk_buff *skb, int offset,
2713                    int len, __wsum csum)
2714{
2715        const struct skb_checksum_ops ops = {
2716                .update  = csum_partial_ext,
2717                .combine = csum_block_add_ext,
2718        };
2719
2720        return __skb_checksum(skb, offset, len, csum, &ops);
2721}
2722EXPORT_SYMBOL(skb_checksum);
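
/* Illustrative sketch (not part of the original file): skb_checksum() returns
 * a 32-bit partial sum; csum_fold() then turns it into the final 16-bit
 * Internet checksum. Here the sum is taken over the whole packet.
 */
static __maybe_unused __sum16 checksum_example(const struct sk_buff *skb)
{
        __wsum csum = skb_checksum(skb, 0, skb->len, 0);

        return csum_fold(csum);
}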
2723
2724/* Both of above in one bottle. */
2725
2726__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
2727                                    u8 *to, int len, __wsum csum)
2728{
2729        int start = skb_headlen(skb);
2730        int i, copy = start - offset;
2731        struct sk_buff *frag_iter;
2732        int pos = 0;
2733
2734        /* Copy header. */
2735        if (copy > 0) {
2736                if (copy > len)
2737                        copy = len;
2738                csum = csum_partial_copy_nocheck(skb->data + offset, to,
2739                                                 copy, csum);
2740                if ((len -= copy) == 0)
2741                        return csum;
2742                offset += copy;
2743                to     += copy;
2744                pos     = copy;
2745        }
2746
2747        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2748                int end;
2749
2750                WARN_ON(start > offset + len);
2751
2752                end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
2753                if ((copy = end - offset) > 0) {
2754                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2755                        u32 p_off, p_len, copied;
2756                        struct page *p;
2757                        __wsum csum2;
2758                        u8 *vaddr;
2759
2760                        if (copy > len)
2761                                copy = len;
2762
2763                        skb_frag_foreach_page(frag,
2764                                              skb_frag_off(frag) + offset - start,
2765                                              copy, p, p_off, p_len, copied) {
2766                                vaddr = kmap_atomic(p);
2767                                csum2 = csum_partial_copy_nocheck(vaddr + p_off,
2768                                                                  to + copied,
2769                                                                  p_len, 0);
2770                                kunmap_atomic(vaddr);
2771                                csum = csum_block_add(csum, csum2, pos);
2772                                pos += p_len;
2773                        }
2774
2775                        if (!(len -= copy))
2776                                return csum;
2777                        offset += copy;
2778                        to     += copy;
2779                }
2780                start = end;
2781        }
2782
2783        skb_walk_frags(skb, frag_iter) {
2784                __wsum csum2;
2785                int end;
2786
2787                WARN_ON(start > offset + len);
2788
2789                end = start + frag_iter->len;
2790                if ((copy = end - offset) > 0) {
2791                        if (copy > len)
2792                                copy = len;
2793                        csum2 = skb_copy_and_csum_bits(frag_iter,
2794                                                       offset - start,
2795                                                       to, copy, 0);
2796                        csum = csum_block_add(csum, csum2, pos);
2797                        if ((len -= copy) == 0)
2798                                return csum;
2799                        offset += copy;
2800                        to     += copy;
2801                        pos    += copy;
2802                }
2803                start = end;
2804        }
2805        BUG_ON(len);
2806        return csum;
2807}
2808EXPORT_SYMBOL(skb_copy_and_csum_bits);
2809
2810__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
2811{
2812        __sum16 sum;
2813
2814        sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
2815        /* See comments in __skb_checksum_complete(). */
2816        if (likely(!sum)) {
2817                if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
2818                    !skb->csum_complete_sw)
2819                        netdev_rx_csum_fault(skb->dev, skb);
2820        }
2821        if (!skb_shared(skb))
2822                skb->csum_valid = !sum;
2823        return sum;
2824}
2825EXPORT_SYMBOL(__skb_checksum_complete_head);
2826
2827/* This function assumes skb->csum already holds pseudo header's checksum,
2828 * which has been changed from the hardware checksum, for example, by
2829 * __skb_checksum_validate_complete(). And, the original skb->csum must
2830 * have been validated unsuccessfully for CHECKSUM_COMPLETE case.
2831 *
2832 * It returns non-zero if the recomputed checksum is still invalid, otherwise
2833 * zero. The new checksum is stored back into skb->csum unless the skb is
2834 * shared.
2835 */
2836__sum16 __skb_checksum_complete(struct sk_buff *skb)
2837{
2838        __wsum csum;
2839        __sum16 sum;
2840
2841        csum = skb_checksum(skb, 0, skb->len, 0);
2842
2843        sum = csum_fold(csum_add(skb->csum, csum));
2844        /* This check is inverted, because we already knew the hardware
2845         * checksum is invalid before calling this function. So, if the
2846         * re-computed checksum is valid instead, then we have a mismatch
2847         * between the original skb->csum and skb_checksum(). This means either
2848         * the original hardware checksum is incorrect or we screw up skb->csum
2849         * when moving skb->data around.
2850         */
2851        if (likely(!sum)) {
2852                if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
2853                    !skb->csum_complete_sw)
2854                        netdev_rx_csum_fault(skb->dev, skb);
2855        }
2856
2857        if (!skb_shared(skb)) {
2858                /* Save full packet checksum */
2859                skb->csum = csum;
2860                skb->ip_summed = CHECKSUM_COMPLETE;
2861                skb->csum_complete_sw = 1;
2862                skb->csum_valid = !sum;
2863        }
2864
2865        return sum;
2866}
2867EXPORT_SYMBOL(__skb_checksum_complete);
2868
2869static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum)
2870{
2871        net_warn_ratelimited(
2872                "%s: attempt to compute crc32c without libcrc32c.ko\n",
2873                __func__);
2874        return 0;
2875}
2876
2877static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2,
2878                                       int offset, int len)
2879{
2880        net_warn_ratelimited(
2881                "%s: attempt to compute crc32c without libcrc32c.ko\n",
2882                __func__);
2883        return 0;
2884}
2885
2886static const struct skb_checksum_ops default_crc32c_ops = {
2887        .update  = warn_crc32c_csum_update,
2888        .combine = warn_crc32c_csum_combine,
2889};
2890
2891const struct skb_checksum_ops *crc32c_csum_stub __read_mostly =
2892        &default_crc32c_ops;
2893EXPORT_SYMBOL(crc32c_csum_stub);
2894
2895 /**
2896 *      skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
2897 *      @from: source buffer
2898 *
2899 *      Calculates the amount of linear headroom needed in the 'to' skb passed
2900 *      into skb_zerocopy().
2901 */
2902unsigned int
2903skb_zerocopy_headlen(const struct sk_buff *from)
2904{
2905        unsigned int hlen = 0;
2906
2907        if (!from->head_frag ||
2908            skb_headlen(from) < L1_CACHE_BYTES ||
2909            skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
2910                hlen = skb_headlen(from);
2911
2912        if (skb_has_frag_list(from))
2913                hlen = from->len;
2914
2915        return hlen;
2916}
2917EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
2918
2919/**
2920 *      skb_zerocopy - Zero copy skb to skb
2921 *      @to: destination buffer
2922 *      @from: source buffer
2923 *      @len: number of bytes to copy from source buffer
2924 *      @hlen: size of linear headroom in destination buffer
2925 *
2926 *      Copies up to @len bytes from @from to @to by creating references
2927 *      to the frags in the source buffer.
2928 *
2929 *      @hlen, as calculated by skb_zerocopy_headlen(), specifies the
2930 *      headroom in the @to buffer.
2931 *
2932 *      Return value:
2933 *      0: everything is OK
2934 *      -ENOMEM: couldn't orphan frags of @from due to lack of memory
2935 *      -EFAULT: skb_copy_bits() found some problem with skb geometry
2936 */
2937int
2938skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
2939{
2940        int i, j = 0;
2941        int plen = 0; /* length of skb->head fragment */
2942        int ret;
2943        struct page *page;
2944        unsigned int offset;
2945
2946        BUG_ON(!from->head_frag && !hlen);
2947
2948        /* don't bother with small payloads */
2949        if (len <= skb_tailroom(to))
2950                return skb_copy_bits(from, 0, skb_put(to, len), len);
2951
2952        if (hlen) {
2953                ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
2954                if (unlikely(ret))
2955                        return ret;
2956                len -= hlen;
2957        } else {
2958                plen = min_t(int, skb_headlen(from), len);
2959                if (plen) {
2960                        page = virt_to_head_page(from->head);
2961                        offset = from->data - (unsigned char *)page_address(page);
2962                        __skb_fill_page_desc(to, 0, page, offset, plen);
2963                        get_page(page);
2964                        j = 1;
2965                        len -= plen;
2966                }
2967        }
2968
2969        to->truesize += len + plen;
2970        to->len += len + plen;
2971        to->data_len += len + plen;
2972
2973        if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
2974                skb_tx_error(from);
2975                return -ENOMEM;
2976        }
2977        skb_zerocopy_clone(to, from, GFP_ATOMIC);
2978
2979        for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
2980                int size;
2981
2982                if (!len)
2983                        break;
2984                skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
2985                size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]),
2986                                        len);
2987                skb_frag_size_set(&skb_shinfo(to)->frags[j], size);
2988                len -= size;
2989                skb_frag_ref(to, j);
2990                j++;
2991        }
2992        skb_shinfo(to)->nr_frags = j;
2993
2994        return 0;
2995}
2996EXPORT_SYMBOL_GPL(skb_zerocopy);
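
/* Illustrative sketch (not part of the original file): the usual pattern is
 * to size the destination's linear area with skb_zerocopy_headlen() and then
 * let skb_zerocopy() reference the source's fragments instead of copying
 * them.
 */
static __maybe_unused struct sk_buff *zerocopy_example(struct sk_buff *from)
{
        unsigned int hlen = skb_zerocopy_headlen(from);
        struct sk_buff *to;

        to = alloc_skb(hlen, GFP_ATOMIC);
        if (!to)
                return NULL;

        if (skb_zerocopy(to, from, from->len, hlen)) {
                kfree_skb(to);
                return NULL;
        }
        return to;
}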
2997
2998void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
2999{
3000        __wsum csum;
3001        long csstart;
3002
3003        if (skb->ip_summed == CHECKSUM_PARTIAL)
3004                csstart = skb_checksum_start_offset(skb);
3005        else
3006                csstart = skb_headlen(skb);
3007
3008        BUG_ON(csstart > skb_headlen(skb));
3009
3010        skb_copy_from_linear_data(skb, to, csstart);
3011
3012        csum = 0;
3013        if (csstart != skb->len)
3014                csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
3015                                              skb->len - csstart, 0);
3016
3017        if (skb->ip_summed == CHECKSUM_PARTIAL) {
3018                long csstuff = csstart + skb->csum_offset;
3019
3020                *((__sum16 *)(to + csstuff)) = csum_fold(csum);
3021        }
3022}
3023EXPORT_SYMBOL(skb_copy_and_csum_dev);
3024
3025/**
3026 *      skb_dequeue - remove from the head of the queue
3027 *      @list: list to dequeue from
3028 *
3029 *      Remove the head of the list. The list lock is taken so the function
3030 *      may be used safely with other locking list functions. The head item is
3031 *      returned or %NULL if the list is empty.
3032 */
3033
3034struct sk_buff *skb_dequeue(struct sk_buff_head *list)
3035{
3036        unsigned long flags;
3037        struct sk_buff *result;
3038
3039        spin_lock_irqsave(&list->lock, flags);
3040        result = __skb_dequeue(list);
3041        spin_unlock_irqrestore(&list->lock, flags);
3042        return result;
3043}
3044EXPORT_SYMBOL(skb_dequeue);
3045
3046/**
3047 *      skb_dequeue_tail - remove from the tail of the queue
3048 *      @list: list to dequeue from
3049 *
3050 *      Remove the tail of the list. The list lock is taken so the function
3051 *      may be used safely with other locking list functions. The tail item is
3052 *      returned or %NULL if the list is empty.
3053 */
3054struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
3055{
3056        unsigned long flags;
3057        struct sk_buff *result;
3058
3059        spin_lock_irqsave(&list->lock, flags);
3060        result = __skb_dequeue_tail(list);
3061        spin_unlock_irqrestore(&list->lock, flags);
3062        return result;
3063}
3064EXPORT_SYMBOL(skb_dequeue_tail);
3065
3066/**
3067 *      skb_queue_purge - empty a list
3068 *      @list: list to empty
3069 *
3070 *      Delete all buffers on an &sk_buff list. Each buffer is removed from
3071 *      the list and one reference dropped. This function takes the list
3072 *      lock and is atomic with respect to other list locking functions.
3073 */
3074void skb_queue_purge(struct sk_buff_head *list)
3075{
3076        struct sk_buff *skb;
3077        while ((skb = skb_dequeue(list)) != NULL)
3078                kfree_skb(skb);
3079}
3080EXPORT_SYMBOL(skb_queue_purge);
3081
3082/**
3083 *      skb_rbtree_purge - empty a skb rbtree
3084 *      @root: root of the rbtree to empty
3085 *      Return value: the sum of truesizes of all purged skbs.
3086 *
3087 *      Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
3088 *      the list and one reference dropped. This function does not take
3089 *      any lock. Synchronization should be handled by the caller (e.g., TCP
3090 *      out-of-order queue is protected by the socket lock).
3091 */
3092unsigned int skb_rbtree_purge(struct rb_root *root)
3093{
3094        struct rb_node *p = rb_first(root);
3095        unsigned int sum = 0;
3096
3097        while (p) {
3098                struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
3099
3100                p = rb_next(p);
3101                rb_erase(&skb->rbnode, root);
3102                sum += skb->truesize;
3103                kfree_skb(skb);
3104        }
3105        return sum;
3106}
3107
3108/**
3109 *      skb_queue_head - queue a buffer at the list head
3110 *      @list: list to use
3111 *      @newsk: buffer to queue
3112 *
3113 *      Queue a buffer at the start of the list. This function takes the
3114 *      list lock and can be used safely with other locking &sk_buff
3115 *      functions.
3116 *
3117 *      A buffer cannot be placed on two lists at the same time.
3118 */
3119void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
3120{
3121        unsigned long flags;
3122
3123        spin_lock_irqsave(&list->lock, flags);
3124        __skb_queue_head(list, newsk);
3125        spin_unlock_irqrestore(&list->lock, flags);
3126}
3127EXPORT_SYMBOL(skb_queue_head);
3128
3129/**
3130 *      skb_queue_tail - queue a buffer at the list tail
3131 *      @list: list to use
3132 *      @newsk: buffer to queue
3133 *
3134 *      Queue a buffer at the tail of the list. This function takes the
3135 *      list lock and can be used safely with other locking &sk_buff
3136 *      functions.
3137 *
3138 *      A buffer cannot be placed on two lists at the same time.
3139 */
3140void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
3141{
3142        unsigned long flags;
3143
3144        spin_lock_irqsave(&list->lock, flags);
3145        __skb_queue_tail(list, newsk);
3146        spin_unlock_irqrestore(&list->lock, flags);
3147}
3148EXPORT_SYMBOL(skb_queue_tail);
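/*
 * Usage sketch (illustrative): the producer side of such a queue.  Because
 * the list lock is taken with spin_lock_irqsave(), enqueueing is safe even
 * from a hard-IRQ handler.  "struct my_priv", "priv->rx_queue" and
 * "my_fetch_rx_skb()" are hypothetical names for the example only.
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev_id)
 *	{
 *		struct my_priv *priv = dev_id;
 *		struct sk_buff *skb = my_fetch_rx_skb(priv);
 *
 *		if (skb)
 *			skb_queue_tail(&priv->rx_queue, skb);
 *		return IRQ_HANDLED;
 *	}
 */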
3149
3150/**
3151 *      skb_unlink      -       remove a buffer from a list
3152 *      @skb: buffer to remove
3153 *      @list: list to use
3154 *
3155 *      Remove a packet from a list. The list locks are taken and this
3156 *      function is atomic with respect to other list locked calls.
3157 *
3158 *      You must know what list the SKB is on.
3159 */
3160void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
3161{
3162        unsigned long flags;
3163
3164        spin_lock_irqsave(&list->lock, flags);
3165        __skb_unlink(skb, list);
3166        spin_unlock_irqrestore(&list->lock, flags);
3167}
3168EXPORT_SYMBOL(skb_unlink);
3169
3170/**
3171 *      skb_append      -       append a buffer
3172 *      @old: buffer to insert after
3173 *      @newsk: buffer to insert
3174 *      @list: list to use
3175 *
3176 *      Place a packet after a given packet in a list. The list locks are taken
3177 *      and this function is atomic with respect to other list locked calls.
3178 *      A buffer cannot be placed on two lists at the same time.
3179 */
3180void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
3181{
3182        unsigned long flags;
3183
3184        spin_lock_irqsave(&list->lock, flags);
3185        __skb_queue_after(list, old, newsk);
3186        spin_unlock_irqrestore(&list->lock, flags);
3187}
3188EXPORT_SYMBOL(skb_append);
3189
3190static inline void skb_split_inside_header(struct sk_buff *skb,
3191                                           struct sk_buff* skb1,
3192                                           const u32 len, const int pos)
3193{
3194        int i;
3195
3196        skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
3197                                         pos - len);
3198        /* And move data appendix as is. */
3199        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
3200                skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
3201
3202        skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
3203        skb_shinfo(skb)->nr_frags  = 0;
3204        skb1->data_len             = skb->data_len;
3205        skb1->len                  += skb1->data_len;
3206        skb->data_len              = 0;
3207        skb->len                   = len;
3208        skb_set_tail_pointer(skb, len);
3209}
3210
3211static inline void skb_split_no_header(struct sk_buff *skb,
3212                                       struct sk_buff* skb1,
3213                                       const u32 len, int pos)
3214{
3215        int i, k = 0;
3216        const int nfrags = skb_shinfo(skb)->nr_frags;
3217
3218        skb_shinfo(skb)->nr_frags = 0;
3219        skb1->len                 = skb1->data_len = skb->len - len;
3220        skb->len                  = len;
3221        skb->data_len             = len - pos;
3222
3223        for (i = 0; i < nfrags; i++) {
3224                int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
3225
3226                if (pos + size > len) {
3227                        skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
3228
3229                        if (pos < len) {
3230                                /* Split frag.
3231                                 * We have two variants in this case:
3232                                 * 1. Move the whole frag to the second
3233                                 *    part, if possible. E.g. this
3234                                 *    approach is mandatory for TUX,
3235                                 *    where splitting is expensive.
3236                                 * 2. Split accurately. This is what we do.
3237                                 */
3238                                skb_frag_ref(skb, i);
3239                                skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos);
3240                                skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
3241                                skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
3242                                skb_shinfo(skb)->nr_frags++;
3243                        }
3244                        k++;
3245                } else
3246                        skb_shinfo(skb)->nr_frags++;
3247                pos += size;
3248        }
3249        skb_shinfo(skb1)->nr_frags = k;
3250}
3251
3252/**
3253 * skb_split - Split fragmented skb to two parts at length len.
3254 * @skb: the buffer to split
3255 * @skb1: the buffer to receive the second part
3256 * @len: new length for skb
3257 */
3258void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
3259{
3260        int pos = skb_headlen(skb);
3261
3262        skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags &
3263                                      SKBTX_SHARED_FRAG;
3264        skb_zerocopy_clone(skb1, skb, 0);
3265        if (len < pos)  /* Split line is inside header. */
3266                skb_split_inside_header(skb, skb1, len, pos);
3267        else            /* Second chunk has no header, nothing to copy. */
3268                skb_split_no_header(skb, skb1, len, pos);
3269}
3270EXPORT_SYMBOL(skb_split);
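/*
 * Usage sketch (illustrative): splitting a queued packet at "len" bytes,
 * similar in spirit to how TCP's tso_fragment() uses skb_split().  Here
 * "len" is assumed to be the caller's chosen split point; the second buffer
 * only needs tail room for whatever linear data crosses that point.
 *
 *	unsigned int nsize = skb_headlen(skb) > len ?
 *			     skb_headlen(skb) - len : 0;
 *	struct sk_buff *skb1 = alloc_skb(skb_headroom(skb) + nsize,
 *					 GFP_ATOMIC);
 *
 *	if (skb1) {
 *		skb_reserve(skb1, skb_headroom(skb));
 *		skb_split(skb, skb1, len);
 *	}
 */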
3271
3272/* Shifting from/to a cloned skb is a no-go.
3273 *
3274 * Caller cannot keep skb_shinfo related pointers past calling here!
3275 */
3276static int skb_prepare_for_shift(struct sk_buff *skb)
3277{
3278        return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3279}
3280
3281/**
3282 * skb_shift - Shifts paged data partially from skb to another
3283 * @tgt: buffer into which tail data gets added
3284 * @skb: buffer from which the paged data comes from
3285 * @shiftlen: shift up to this many bytes
3286 *
3287 * Attempts to shift up to shiftlen worth of bytes, which may be less than
3288 * the length of the skb, from skb to tgt. Returns the number of bytes shifted.
3289 * It is up to the caller to free skb if everything was shifted.
3290 *
3291 * If @tgt runs out of frags, the whole operation is aborted.
3292 *
3293 * The skb may contain nothing but paged data, while tgt is allowed
3294 * to have non-paged data as well.
3295 *
3296 * TODO: full sized shift could be optimized but that would need
3297 * specialized skb free'er to handle frags without up-to-date nr_frags.
3298 */
3299int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
3300{
3301        int from, to, merge, todo;
3302        skb_frag_t *fragfrom, *fragto;
3303
3304        BUG_ON(shiftlen > skb->len);
3305
3306        if (skb_headlen(skb))
3307                return 0;
3308        if (skb_zcopy(tgt) || skb_zcopy(skb))
3309                return 0;
3310
3311        todo = shiftlen;
3312        from = 0;
3313        to = skb_shinfo(tgt)->nr_frags;
3314        fragfrom = &skb_shinfo(skb)->frags[from];
3315
3316        /* Actual merge is delayed until the point when we know we can
3317         * commit all, so that we don't have to undo partial changes
3318         */
3319        if (!to ||
3320            !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
3321                              skb_frag_off(fragfrom))) {
3322                merge = -1;
3323        } else {
3324                merge = to - 1;
3325
3326                todo -= skb_frag_size(fragfrom);
3327                if (todo < 0) {
3328                        if (skb_prepare_for_shift(skb) ||
3329                            skb_prepare_for_shift(tgt))
3330                                return 0;
3331
3332                        /* All previous frag pointers might be stale! */
3333                        fragfrom = &skb_shinfo(skb)->frags[from];
3334                        fragto = &skb_shinfo(tgt)->frags[merge];
3335
3336                        skb_frag_size_add(fragto, shiftlen);
3337                        skb_frag_size_sub(fragfrom, shiftlen);
3338                        skb_frag_off_add(fragfrom, shiftlen);
3339
3340                        goto onlymerged;
3341                }
3342
3343                from++;
3344        }
3345
3346        /* Skip full, not-fitting skb to avoid expensive operations */
3347        if ((shiftlen == skb->len) &&
3348            (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
3349                return 0;
3350
3351        if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
3352                return 0;
3353
3354        while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
3355                if (to == MAX_SKB_FRAGS)
3356                        return 0;
3357
3358                fragfrom = &skb_shinfo(skb)->frags[from];
3359                fragto = &skb_shinfo(tgt)->frags[to];
3360
3361                if (todo >= skb_frag_size(fragfrom)) {
3362                        *fragto = *fragfrom;
3363                        todo -= skb_frag_size(fragfrom);
3364                        from++;
3365                        to++;
3366
3367                } else {
3368                        __skb_frag_ref(fragfrom);
3369                        skb_frag_page_copy(fragto, fragfrom);
3370                        skb_frag_off_copy(fragto, fragfrom);
3371                        skb_frag_size_set(fragto, todo);
3372
3373                        skb_frag_off_add(fragfrom, todo);
3374                        skb_frag_size_sub(fragfrom, todo);
3375                        todo = 0;
3376
3377                        to++;
3378                        break;
3379                }
3380        }
3381
3382        /* Ready to "commit" this state change to tgt */
3383        skb_shinfo(tgt)->nr_frags = to;
3384
3385        if (merge >= 0) {
3386                fragfrom = &skb_shinfo(skb)->frags[0];
3387                fragto = &skb_shinfo(tgt)->frags[merge];
3388
3389                skb_frag_size_add(fragto, skb_frag_size(fragfrom));
3390                __skb_frag_unref(fragfrom);
3391        }
3392
3393        /* Reposition in the original skb */
3394        to = 0;
3395        while (from < skb_shinfo(skb)->nr_frags)
3396                skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
3397        skb_shinfo(skb)->nr_frags = to;
3398
3399        BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
3400
3401onlymerged:
3402        /* Most likely the tgt won't ever need its checksum anymore; skb, on
3403         * the other hand, might need it if it has to be resent
3404         */
3405        tgt->ip_summed = CHECKSUM_PARTIAL;
3406        skb->ip_summed = CHECKSUM_PARTIAL;
3407
3408        /* Yak, is it really working this way? Some helper please? */
3409        skb->len -= shiftlen;
3410        skb->data_len -= shiftlen;
3411        skb->truesize -= shiftlen;
3412        tgt->len += shiftlen;
3413        tgt->data_len += shiftlen;
3414        tgt->truesize += shiftlen;
3415
3416        return shiftlen;
3417}
3418
3419/**
3420 * skb_prepare_seq_read - Prepare a sequential read of skb data
3421 * @skb: the buffer to read
3422 * @from: lower offset of data to be read
3423 * @to: upper offset of data to be read
3424 * @st: state variable
3425 *
3426 * Initializes the specified state variable. Must be called before
3427 * invoking skb_seq_read() for the first time.
3428 */
3429void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
3430                          unsigned int to, struct skb_seq_state *st)
3431{
3432        st->lower_offset = from;
3433        st->upper_offset = to;
3434        st->root_skb = st->cur_skb = skb;
3435        st->frag_idx = st->stepped_offset = 0;
3436        st->frag_data = NULL;
3437}
3438EXPORT_SYMBOL(skb_prepare_seq_read);
3439
3440/**
3441 * skb_seq_read - Sequentially read skb data
3442 * @consumed: number of bytes consumed by the caller so far
3443 * @data: destination pointer for data to be returned
3444 * @st: state variable
3445 *
3446 * Reads a block of skb data at @consumed relative to the
3447 * lower offset specified to skb_prepare_seq_read(). Assigns
3448 * the head of the data block to @data and returns the length
3449 * of the block or 0 if the end of the skb data or the upper
3450 * offset has been reached.
3451 *
3452 * The caller is not required to consume all of the data
3453 * returned, i.e. @consumed is typically set to the number
3454 * of bytes already consumed and the next call to
3455 * skb_seq_read() will return the remaining part of the block.
3456 *
3457 * Note 1: The size of each block of data returned can be arbitrary;
3458 *       this limitation is the cost of zerocopy sequential
3459 *       reads of potentially non-linear data.
3460 *
3461 * Note 2: Fragment lists within fragments are not implemented
3462 *       at the moment, state->root_skb could be replaced with
3463 *       a stack for this purpose.
3464 */
3465unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
3466                          struct skb_seq_state *st)
3467{
3468        unsigned int block_limit, abs_offset = consumed + st->lower_offset;
3469        skb_frag_t *frag;
3470
3471        if (unlikely(abs_offset >= st->upper_offset)) {
3472                if (st->frag_data) {
3473                        kunmap_atomic(st->frag_data);
3474                        st->frag_data = NULL;
3475                }
3476                return 0;
3477        }
3478
3479next_skb:
3480        block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
3481
3482        if (abs_offset < block_limit && !st->frag_data) {
3483                *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
3484                return block_limit - abs_offset;
3485        }
3486
3487        if (st->frag_idx == 0 && !st->frag_data)
3488                st->stepped_offset += skb_headlen(st->cur_skb);
3489
3490        while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
3491                frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
3492                block_limit = skb_frag_size(frag) + st->stepped_offset;
3493
3494                if (abs_offset < block_limit) {
3495                        if (!st->frag_data)
3496                                st->frag_data = kmap_atomic(skb_frag_page(frag));
3497
3498                        *data = (u8 *) st->frag_data + skb_frag_off(frag) +
3499                                (abs_offset - st->stepped_offset);
3500
3501                        return block_limit - abs_offset;
3502                }
3503
3504                if (st->frag_data) {
3505                        kunmap_atomic(st->frag_data);
3506                        st->frag_data = NULL;
3507                }
3508
3509                st->frag_idx++;
3510                st->stepped_offset += skb_frag_size(frag);
3511        }
3512
3513        if (st->frag_data) {
3514                kunmap_atomic(st->frag_data);
3515                st->frag_data = NULL;
3516        }
3517
3518        if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
3519                st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
3520                st->frag_idx = 0;
3521                goto next_skb;
3522        } else if (st->cur_skb->next) {
3523                st->cur_skb = st->cur_skb->next;
3524                st->frag_idx = 0;
3525                goto next_skb;
3526        }
3527
3528        return 0;
3529}
3530EXPORT_SYMBOL(skb_seq_read);
3531
3532/**
3533 * skb_abort_seq_read - Abort a sequential read of skb data
3534 * @st: state variable
3535 *
3536 * Must be called if the sequential read was abandoned before
3537 * skb_seq_read() returned 0.
3538 */
3539void skb_abort_seq_read(struct skb_seq_state *st)
3540{
3541        if (st->frag_data)
3542                kunmap_atomic(st->frag_data);
3543}
3544EXPORT_SYMBOL(skb_abort_seq_read);
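/*
 * Usage sketch (illustrative): a full sequential pass over an skb with the
 * three helpers above.  If the loop were abandoned before skb_seq_read()
 * returned 0, skb_abort_seq_read() would have to be called to drop any
 * frag mapping.  "my_inspect()" is a hypothetical consumer of the data.
 *
 *	struct skb_seq_state st;
 *	unsigned int consumed = 0, len;
 *	const u8 *data;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		my_inspect(data, len);
 *		consumed += len;
 *	}
 */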
3545
3546#define TS_SKB_CB(state)        ((struct skb_seq_state *) &((state)->cb))
3547
3548static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
3549                                          struct ts_config *conf,
3550                                          struct ts_state *state)
3551{
3552        return skb_seq_read(offset, text, TS_SKB_CB(state));
3553}
3554
3555static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
3556{
3557        skb_abort_seq_read(TS_SKB_CB(state));
3558}
3559
3560/**
3561 * skb_find_text - Find a text pattern in skb data
3562 * @skb: the buffer to look in
3563 * @from: search offset
3564 * @to: search limit
3565 * @config: textsearch configuration
3566 *
3567 * Finds a pattern in the skb data according to the specified
3568 * textsearch configuration. Use textsearch_next() to retrieve
3569 * subsequent occurrences of the pattern. Returns the offset
3570 * to the first occurrence or UINT_MAX if no match was found.
3571 */
3572unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
3573                           unsigned int to, struct ts_config *config)
3574{
3575        struct ts_state state;
3576        unsigned int ret;
3577
3578        config->get_next_block = skb_ts_get_next_block;
3579        config->finish = skb_ts_finish;
3580
3581        skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
3582
3583        ret = textsearch_find(config, &state);
3584        return (ret <= to - from ? ret : UINT_MAX);
3585}
3586EXPORT_SYMBOL(skb_find_text);
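/*
 * Usage sketch (illustrative): searching the whole skb for a fixed pattern
 * via the textsearch infrastructure.  "needle" is an arbitrary example
 * pattern and "my_handle_match()" a hypothetical callback; error handling
 * is reduced to the minimum.
 *
 *	struct ts_config *conf;
 *	unsigned int pos;
 *
 *	conf = textsearch_prepare("kmp", "needle", 6, GFP_KERNEL, TS_AUTOLOAD);
 *	if (!IS_ERR(conf)) {
 *		pos = skb_find_text(skb, 0, skb->len, conf);
 *		if (pos != UINT_MAX)
 *			my_handle_match(skb, pos);
 *		textsearch_destroy(conf);
 *	}
 */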
3587
3588int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
3589                         int offset, size_t size)
3590{
3591        int i = skb_shinfo(skb)->nr_frags;
3592
3593        if (skb_can_coalesce(skb, i, page, offset)) {
3594                skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
3595        } else if (i < MAX_SKB_FRAGS) {
3596                get_page(page);
3597                skb_fill_page_desc(skb, i, page, offset, size);
3598        } else {
3599                return -EMSGSIZE;
3600        }
3601
3602        return 0;
3603}
3604EXPORT_SYMBOL_GPL(skb_append_pagefrags);
3605
3606/**
3607 *      skb_pull_rcsum - pull skb and update receive checksum
3608 *      @skb: buffer to update
3609 *      @len: length of data pulled
3610 *
3611 *      This function performs an skb_pull on the packet and updates
3612 *      the CHECKSUM_COMPLETE checksum.  It should be used on
3613 *      receive path processing instead of skb_pull unless you know
3614 *      that the checksum difference is zero (e.g., a valid IP header)
3615 *      or you are setting ip_summed to CHECKSUM_NONE.
3616 */
3617void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
3618{
3619        unsigned char *data = skb->data;
3620
3621        BUG_ON(len > skb->len);
3622        __skb_pull(skb, len);
3623        skb_postpull_rcsum(skb, data, len);
3624        return skb->data;
3625}
3626EXPORT_SYMBOL_GPL(skb_pull_rcsum);
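/*
 * Usage sketch (illustrative): stripping a small tag in an rx handler while
 * keeping a CHECKSUM_COMPLETE value consistent.  "MY_TAG_LEN" is a
 * hypothetical constant used only for the example.
 *
 *	if (!pskb_may_pull(skb, MY_TAG_LEN))
 *		goto drop;
 *	skb_pull_rcsum(skb, MY_TAG_LEN);
 */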
3627
3628static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb)
3629{
3630        skb_frag_t head_frag;
3631        struct page *page;
3632
3633        page = virt_to_head_page(frag_skb->head);
3634        __skb_frag_set_page(&head_frag, page);
3635        skb_frag_off_set(&head_frag, frag_skb->data -
3636                         (unsigned char *)page_address(page));
3637        skb_frag_size_set(&head_frag, skb_headlen(frag_skb));
3638        return head_frag;
3639}
3640
3641/**
3642 *      skb_segment - Perform protocol segmentation on skb.
3643 *      @head_skb: buffer to segment
3644 *      @features: features for the output path (see dev->features)
3645 *
3646 *      This function performs segmentation on the given skb.  It returns
3647 *      a pointer to the first in a list of new skbs for the segments.
3648 *      In case of error it returns ERR_PTR(err).
3649 */
3650struct sk_buff *skb_segment(struct sk_buff *head_skb,
3651                            netdev_features_t features)
3652{
3653        struct sk_buff *segs = NULL;
3654        struct sk_buff *tail = NULL;
3655        struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
3656        skb_frag_t *frag = skb_shinfo(head_skb)->frags;
3657        unsigned int mss = skb_shinfo(head_skb)->gso_size;
3658        unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
3659        struct sk_buff *frag_skb = head_skb;
3660        unsigned int offset = doffset;
3661        unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
3662        unsigned int partial_segs = 0;
3663        unsigned int headroom;
3664        unsigned int len = head_skb->len;
3665        __be16 proto;
3666        bool csum, sg;
3667        int nfrags = skb_shinfo(head_skb)->nr_frags;
3668        int err = -ENOMEM;
3669        int i = 0;
3670        int pos;
3671        int dummy;
3672
3673        if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) &&
3674            (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) {
3675                /* gso_size is untrusted, and we have a frag_list with a linear
3676                 * non head_frag head.
3677                 *
3678                 * (we assume checking the first list_skb member suffices;
3679                 * i.e. if any of the list_skb members has a non-head_frag
3680                 * head, then the first one does too).
3681                 *
3682                 * If head_skb's headlen does not fit requested gso_size, it
3683                 * means that the frag_list members do NOT terminate on exact
3684                 * gso_size boundaries. Hence we cannot perform skb_frag_t page
3685                 * sharing. Therefore we must fall back to copying the frag_list
3686                 * skbs; we do so by disabling SG.
3687                 */
3688                if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb))
3689                        features &= ~NETIF_F_SG;
3690        }
3691
3692        __skb_push(head_skb, doffset);
3693        proto = skb_network_protocol(head_skb, &dummy);
3694        if (unlikely(!proto))
3695                return ERR_PTR(-EINVAL);
3696
3697        sg = !!(features & NETIF_F_SG);
3698        csum = !!can_checksum_protocol(features, proto);
3699
3700        if (sg && csum && (mss != GSO_BY_FRAGS))  {
3701                if (!(features & NETIF_F_GSO_PARTIAL)) {
3702                        struct sk_buff *iter;
3703                        unsigned int frag_len;
3704
3705                        if (!list_skb ||
3706                            !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
3707                                goto normal;
3708
3709                        /* If we get here then all the required
3710                         * GSO features except frag_list are supported.
3711                         * Try to split the SKB to multiple GSO SKBs
3712                         * with no frag_list.
3713                         * Currently we can do that only when the buffers don't
3714                         * have a linear part and all the buffers except
3715                         * the last are of the same length.
3716                         */
3717                        frag_len = list_skb->len;
3718                        skb_walk_frags(head_skb, iter) {
3719                                if (frag_len != iter->len && iter->next)
3720                                        goto normal;
3721                                if (skb_headlen(iter) && !iter->head_frag)
3722                                        goto normal;
3723
3724                                len -= iter->len;
3725                        }
3726
3727                        if (len != frag_len)
3728                                goto normal;
3729                }
3730
3731                /* GSO partial only requires that we trim off any excess that
3732                 * doesn't fit into an MSS sized block, so take care of that
3733                 * now.
3734                 */
3735                partial_segs = len / mss;
3736                if (partial_segs > 1)
3737                        mss *= partial_segs;
3738                else
3739                        partial_segs = 0;
3740        }
3741
3742normal:
3743        headroom = skb_headroom(head_skb);
3744        pos = skb_headlen(head_skb);
3745
3746        do {
3747                struct sk_buff *nskb;
3748                skb_frag_t *nskb_frag;
3749                int hsize;
3750                int size;
3751
3752                if (unlikely(mss == GSO_BY_FRAGS)) {
3753                        len = list_skb->len;
3754                } else {
3755                        len = head_skb->len - offset;
3756                        if (len > mss)
3757                                len = mss;
3758                }
3759
3760                hsize = skb_headlen(head_skb) - offset;
3761                if (hsize < 0)
3762                        hsize = 0;
3763                if (hsize > len || !sg)
3764                        hsize = len;
3765
3766                if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
3767                    (skb_headlen(list_skb) == len || sg)) {
3768                        BUG_ON(skb_headlen(list_skb) > len);
3769
3770                        i = 0;
3771                        nfrags = skb_shinfo(list_skb)->nr_frags;
3772                        frag = skb_shinfo(list_skb)->frags;
3773                        frag_skb = list_skb;
3774                        pos += skb_headlen(list_skb);
3775
3776                        while (pos < offset + len) {
3777                                BUG_ON(i >= nfrags);
3778
3779                                size = skb_frag_size(frag);
3780                                if (pos + size > offset + len)
3781                                        break;
3782
3783                                i++;
3784                                pos += size;
3785                                frag++;
3786                        }
3787
3788                        nskb = skb_clone(list_skb, GFP_ATOMIC);
3789                        list_skb = list_skb->next;
3790
3791                        if (unlikely(!nskb))
3792                                goto err;
3793
3794                        if (unlikely(pskb_trim(nskb, len))) {
3795                                kfree_skb(nskb);
3796                                goto err;
3797                        }
3798
3799                        hsize = skb_end_offset(nskb);
3800                        if (skb_cow_head(nskb, doffset + headroom)) {
3801                                kfree_skb(nskb);
3802                                goto err;
3803                        }
3804
3805                        nskb->truesize += skb_end_offset(nskb) - hsize;
3806                        skb_release_head_state(nskb);
3807                        __skb_push(nskb, doffset);
3808                } else {
3809                        nskb = __alloc_skb(hsize + doffset + headroom,
3810                                           GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
3811                                           NUMA_NO_NODE);
3812
3813                        if (unlikely(!nskb))
3814                                goto err;
3815
3816                        skb_reserve(nskb, headroom);
3817                        __skb_put(nskb, doffset);
3818                }
3819
3820                if (segs)
3821                        tail->next = nskb;
3822                else
3823                        segs = nskb;
3824                tail = nskb;
3825
3826                __copy_skb_header(nskb, head_skb);
3827
3828                skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
3829                skb_reset_mac_len(nskb);
3830
3831                skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
3832                                                 nskb->data - tnl_hlen,
3833                                                 doffset + tnl_hlen);
3834
3835                if (nskb->len == len + doffset)
3836                        goto perform_csum_check;
3837
3838                if (!sg) {
3839                        if (!nskb->remcsum_offload)
3840                                nskb->ip_summed = CHECKSUM_NONE;
3841                        SKB_GSO_CB(nskb)->csum =
3842                                skb_copy_and_csum_bits(head_skb, offset,
3843                                                       skb_put(nskb, len),
3844                                                       len, 0);
3845                        SKB_GSO_CB(nskb)->csum_start =
3846                                skb_headroom(nskb) + doffset;
3847                        continue;
3848                }
3849
3850                nskb_frag = skb_shinfo(nskb)->frags;
3851
3852                skb_copy_from_linear_data_offset(head_skb, offset,
3853                                                 skb_put(nskb, hsize), hsize);
3854
3855                skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
3856                                              SKBTX_SHARED_FRAG;
3857
3858                if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
3859                    skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
3860                        goto err;
3861
3862                while (pos < offset + len) {
3863                        if (i >= nfrags) {
3864                                i = 0;
3865                                nfrags = skb_shinfo(list_skb)->nr_frags;
3866                                frag = skb_shinfo(list_skb)->frags;
3867                                frag_skb = list_skb;
3868                                if (!skb_headlen(list_skb)) {
3869                                        BUG_ON(!nfrags);
3870                                } else {
3871                                        BUG_ON(!list_skb->head_frag);
3872
3873                                        /* to make room for head_frag. */
3874                                        i--;
3875                                        frag--;
3876                                }
3877                                if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
3878                                    skb_zerocopy_clone(nskb, frag_skb,
3879                                                       GFP_ATOMIC))
3880                                        goto err;
3881
3882                                list_skb = list_skb->next;
3883                        }
3884
3885                        if (unlikely(skb_shinfo(nskb)->nr_frags >=
3886                                     MAX_SKB_FRAGS)) {
3887                                net_warn_ratelimited(
3888                                        "skb_segment: too many frags: %u %u\n",
3889                                        pos, mss);
3890                                err = -EINVAL;
3891                                goto err;
3892                        }
3893
3894                        *nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag;
3895                        __skb_frag_ref(nskb_frag);
3896                        size = skb_frag_size(nskb_frag);
3897
3898                        if (pos < offset) {
3899                                skb_frag_off_add(nskb_frag, offset - pos);
3900                                skb_frag_size_sub(nskb_frag, offset - pos);
3901                        }
3902
3903                        skb_shinfo(nskb)->nr_frags++;
3904
3905                        if (pos + size <= offset + len) {
3906                                i++;
3907                                frag++;
3908                                pos += size;
3909                        } else {
3910                                skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
3911                                goto skip_fraglist;
3912                        }
3913
3914                        nskb_frag++;
3915                }
3916
3917skip_fraglist:
3918                nskb->data_len = len - hsize;
3919                nskb->len += nskb->data_len;
3920                nskb->truesize += nskb->data_len;
3921
3922perform_csum_check:
3923                if (!csum) {
3924                        if (skb_has_shared_frag(nskb) &&
3925                            __skb_linearize(nskb))
3926                                goto err;
3927
3928                        if (!nskb->remcsum_offload)
3929                                nskb->ip_summed = CHECKSUM_NONE;
3930                        SKB_GSO_CB(nskb)->csum =
3931                                skb_checksum(nskb, doffset,
3932                                             nskb->len - doffset, 0);
3933                        SKB_GSO_CB(nskb)->csum_start =
3934                                skb_headroom(nskb) + doffset;
3935                }
3936        } while ((offset += len) < head_skb->len);
3937
3938        /* Some callers want to get the end of the list.
3939         * Put it in segs->prev to avoid walking the list.
3940         * (see validate_xmit_skb_list() for example)
3941         */
3942        segs->prev = tail;
3943
3944        if (partial_segs) {
3945                struct sk_buff *iter;
3946                int type = skb_shinfo(head_skb)->gso_type;
3947                unsigned short gso_size = skb_shinfo(head_skb)->gso_size;
3948
3949                /* Update type to add partial and then remove dodgy if set */
3950                type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL;
3951                type &= ~SKB_GSO_DODGY;
3952
3953                /* Update GSO info and prepare to start updating headers on
3954                 * our way back down the stack of protocols.
3955                 */
3956                for (iter = segs; iter; iter = iter->next) {
3957                        skb_shinfo(iter)->gso_size = gso_size;
3958                        skb_shinfo(iter)->gso_segs = partial_segs;
3959                        skb_shinfo(iter)->gso_type = type;
3960                        SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset;
3961                }
3962
3963                if (tail->len - doffset <= gso_size)
3964                        skb_shinfo(tail)->gso_size = 0;
3965                else if (tail != segs)
3966                        skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size);
3967        }
3968
3969        /* The following permits correct backpressure for protocols
3970         * using skb_set_owner_w().
3971         * The idea is to transfer ownership from head_skb to the last segment.
3972         */
3973        if (head_skb->destructor == sock_wfree) {
3974                swap(tail->truesize, head_skb->truesize);
3975                swap(tail->destructor, head_skb->destructor);
3976                swap(tail->sk, head_skb->sk);
3977        }
3978        return segs;
3979
3980err:
3981        kfree_skb_list(segs);
3982        return ERR_PTR(err);
3983}
3984EXPORT_SYMBOL_GPL(skb_segment);
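/*
 * Usage sketch (illustrative): the general shape of how a protocol's
 * gso_segment() callback might invoke skb_segment() and then touch up each
 * resulting segment.  "skb" and "features" are assumed to be the callback's
 * arguments, "my_fixup_headers()" is a hypothetical stand-in for the
 * per-segment header adjustments; this is not taken verbatim from any
 * in-tree caller.
 *
 *	struct sk_buff *segs, *seg;
 *
 *	segs = skb_segment(skb, features);
 *	if (IS_ERR(segs))
 *		return segs;
 *
 *	for (seg = segs; seg; seg = seg->next)
 *		my_fixup_headers(seg);
 *
 *	return segs;
 */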
3985
3986int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
3987{
3988        struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
3989        unsigned int offset = skb_gro_offset(skb);
3990        unsigned int headlen = skb_headlen(skb);
3991        unsigned int len = skb_gro_len(skb);
3992        unsigned int delta_truesize;
3993        struct sk_buff *lp;
3994
3995        if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
3996                return -E2BIG;
3997
3998        lp = NAPI_GRO_CB(p)->last;
3999        pinfo = skb_shinfo(lp);
4000
4001        if (headlen <= offset) {
4002                skb_frag_t *frag;
4003                skb_frag_t *frag2;
4004                int i = skbinfo->nr_frags;
4005                int nr_frags = pinfo->nr_frags + i;
4006
4007                if (nr_frags > MAX_SKB_FRAGS)
4008                        goto merge;
4009
4010                offset -= headlen;
4011                pinfo->nr_frags = nr_frags;
4012                skbinfo->nr_frags = 0;
4013
4014                frag = pinfo->frags + nr_frags;
4015                frag2 = skbinfo->frags + i;
4016                do {
4017                        *--frag = *--frag2;
4018                } while (--i);
4019
4020                skb_frag_off_add(frag, offset);
4021                skb_frag_size_sub(frag, offset);
4022
4023                /* all fragments' truesize: remove (head size + sk_buff) */
4024                delta_truesize = skb->truesize -
4025                                 SKB_TRUESIZE(skb_end_offset(skb));
4026
4027                skb->truesize -= skb->data_len;
4028                skb->len -= skb->data_len;
4029                skb->data_len = 0;
4030
4031                NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
4032                goto done;
4033        } else if (skb->head_frag) {
4034                int nr_frags = pinfo->nr_frags;
4035                skb_frag_t *frag = pinfo->frags + nr_frags;
4036                struct page *page = virt_to_head_page(skb->head);
4037                unsigned int first_size = headlen - offset;
4038                unsigned int first_offset;
4039
4040                if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
4041                        goto merge;
4042
4043                first_offset = skb->data -
4044                               (unsigned char *)page_address(page) +
4045                               offset;
4046
4047                pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
4048
4049                __skb_frag_set_page(frag, page);
4050                skb_frag_off_set(frag, first_offset);
4051                skb_frag_size_set(frag, first_size);
4052
4053                memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
4054                /* We don't need to clear skbinfo->nr_frags here */
4055
4056                delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
4057                NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
4058                goto done;
4059        }
4060
4061merge:
4062        delta_truesize = skb->truesize;
4063        if (offset > headlen) {
4064                unsigned int eat = offset - headlen;
4065
4066                skb_frag_off_add(&skbinfo->frags[0], eat);
4067                skb_frag_size_sub(&skbinfo->frags[0], eat);
4068                skb->data_len -= eat;
4069                skb->len -= eat;
4070                offset = headlen;
4071        }
4072
4073        __skb_pull(skb, offset);
4074
4075        if (NAPI_GRO_CB(p)->last == p)
4076                skb_shinfo(p)->frag_list = skb;
4077        else
4078                NAPI_GRO_CB(p)->last->next = skb;
4079        NAPI_GRO_CB(p)->last = skb;
4080        __skb_header_release(skb);
4081        lp = p;
4082
4083done:
4084        NAPI_GRO_CB(p)->count++;
4085        p->data_len += len;
4086        p->truesize += delta_truesize;
4087        p->len += len;
4088        if (lp != p) {
4089                lp->data_len += len;
4090                lp->truesize += delta_truesize;
4091                lp->len += len;
4092        }
4093        NAPI_GRO_CB(skb)->same_flow = 1;
4094        return 0;
4095}
4096EXPORT_SYMBOL_GPL(skb_gro_receive);
4097
4098#ifdef CONFIG_SKB_EXTENSIONS
4099#define SKB_EXT_ALIGN_VALUE     8
4100#define SKB_EXT_CHUNKSIZEOF(x)  (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE)
4101
4102static const u8 skb_ext_type_len[] = {
4103#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
4104        [SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info),
4105#endif
4106#ifdef CONFIG_XFRM
4107        [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path),
4108#endif
4109#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
4110        [TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext),
4111#endif
4112};
4113
4114static __always_inline unsigned int skb_ext_total_length(void)
4115{
4116        return SKB_EXT_CHUNKSIZEOF(struct skb_ext) +
4117#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
4118                skb_ext_type_len[SKB_EXT_BRIDGE_NF] +
4119#endif
4120#ifdef CONFIG_XFRM
4121                skb_ext_type_len[SKB_EXT_SEC_PATH] +
4122#endif
4123#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
4124                skb_ext_type_len[TC_SKB_EXT] +
4125#endif
4126                0;
4127}
4128
4129static void skb_extensions_init(void)
4130{
4131        BUILD_BUG_ON(SKB_EXT_NUM >= 8);
4132        BUILD_BUG_ON(skb_ext_total_length() > 255);
4133
4134        skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache",
4135                                             SKB_EXT_ALIGN_VALUE * skb_ext_total_length(),
4136                                             0,
4137                                             SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4138                                             NULL);
4139}
4140#else
4141static void skb_extensions_init(void) {}
4142#endif
4143
4144void __init skb_init(void)
4145{
4146        skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache",
4147                                              sizeof(struct sk_buff),
4148                                              0,
4149                                              SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4150                                              offsetof(struct sk_buff, cb),
4151                                              sizeof_field(struct sk_buff, cb),
4152                                              NULL);
4153        skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
4154                                                sizeof(struct sk_buff_fclones),
4155                                                0,
4156                                                SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4157                                                NULL);
4158        skb_extensions_init();
4159}
4160
4161static int
4162__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
4163               unsigned int recursion_level)
4164{
4165        int start = skb_headlen(skb);
4166        int i, copy = start - offset;
4167        struct sk_buff *frag_iter;
4168        int elt = 0;
4169
4170        if (unlikely(recursion_level >= 24))
4171                return -EMSGSIZE;
4172
4173        if (copy > 0) {
4174                if (copy > len)
4175                        copy = len;
4176                sg_set_buf(sg, skb->data + offset, copy);
4177                elt++;
4178                if ((len -= copy) == 0)
4179                        return elt;
4180                offset += copy;
4181        }
4182
4183        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4184                int end;
4185
4186                WARN_ON(start > offset + len);
4187
4188                end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
4189                if ((copy = end - offset) > 0) {
4190                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4191                        if (unlikely(elt && sg_is_last(&sg[elt - 1])))
4192                                return -EMSGSIZE;
4193
4194                        if (copy > len)
4195                                copy = len;
4196                        sg_set_page(&sg[elt], skb_frag_page(frag), copy,
4197                                    skb_frag_off(frag) + offset - start);
4198                        elt++;
4199                        if (!(len -= copy))
4200                                return elt;
4201                        offset += copy;
4202                }
4203                start = end;
4204        }
4205
4206        skb_walk_frags(skb, frag_iter) {
4207                int end, ret;
4208
4209                WARN_ON(start > offset + len);
4210
4211                end = start + frag_iter->len;
4212                if ((copy = end - offset) > 0) {
4213                        if (unlikely(elt && sg_is_last(&sg[elt - 1])))
4214                                return -EMSGSIZE;
4215
4216                        if (copy > len)
4217                                copy = len;
4218                        ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
4219                                              copy, recursion_level + 1);
4220                        if (unlikely(ret < 0))
4221                                return ret;
4222                        elt += ret;
4223                        if ((len -= copy) == 0)
4224                                return elt;
4225                        offset += copy;
4226                }
4227                start = end;
4228        }
4229        BUG_ON(len);
4230        return elt;
4231}
4232
4233/**
4234 *      skb_to_sgvec - Fill a scatter-gather list from a socket buffer
4235 *      @skb: Socket buffer containing the buffers to be mapped
4236 *      @sg: The scatter-gather list to map into
4237 *      @offset: The offset into the buffer's contents to start mapping
4238 *      @len: Length of buffer space to be mapped
4239 *
4240 *      Fill the specified scatter-gather list with mappings/pointers into a
4241 *      region of the buffer space attached to a socket buffer. Returns either
4242 *      the number of scatterlist items used, or -EMSGSIZE if the contents
4243 *      could not fit.
4244 */
4245int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
4246{
4247        int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
4248
4249        if (nsg <= 0)
4250                return nsg;
4251
4252        sg_mark_end(&sg[nsg - 1]);
4253
4254        return nsg;
4255}
4256EXPORT_SYMBOL_GPL(skb_to_sgvec);
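/*
 * Usage sketch (illustrative): mapping an entire skb into a fixed-size
 * scatterlist.  For an skb without a frag_list, MAX_SKB_FRAGS + 1 entries
 * (linear part plus every page fragment) is a sufficient upper bound;
 * callers that may see frag_lists typically size the table from
 * skb_cow_data() instead (see below).
 *
 *	struct scatterlist sg[MAX_SKB_FRAGS + 1];
 *	int nsg;
 *
 *	sg_init_table(sg, ARRAY_SIZE(sg));
 *	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
 *	if (nsg < 0)
 *		return nsg;
 */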
4257
4258/* As compared with skb_to_sgvec, skb_to_sgvec_nomark only map skb to given
4259 * sglist without mark the sg which contain last skb data as the end.
4260 * So the caller can mannipulate sg list as will when padding new data after
4261 * the first call without calling sg_unmark_end to expend sg list.
4262 *
4263 * Scenario to use skb_to_sgvec_nomark:
4264 * 1. sg_init_table
4265 * 2. skb_to_sgvec_nomark(payload1)
4266 * 3. skb_to_sgvec_nomark(payload2)
4267 *
4268 * This is equivalent to:
4269 * 1. sg_init_table
4270 * 2. skb_to_sgvec(payload1)
4271 * 3. sg_unmark_end
4272 * 4. skb_to_sgvec(payload2)
4273 *
4274 * When mapping multiple payloads conditionally, skb_to_sgvec_nomark
4275 * is preferable.
4276 */
4277int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
4278                        int offset, int len)
4279{
4280        return __skb_to_sgvec(skb, sg, offset, len, 0);
4281}
4282EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
4283
4284
4285
4286/**
4287 *      skb_cow_data - Check that a socket buffer's data buffers are writable
4288 *      @skb: The socket buffer to check.
4289 *      @tailbits: Amount of trailing space to be added
4290 *      @trailer: Returned pointer to the skb where the @tailbits space begins
4291 *
4292 *      Make sure that the data buffers attached to a socket buffer are
4293 *      writable. If they are not, private copies are made of the data buffers
4294 *      and the socket buffer is set to use these instead.
4295 *
4296 *      If @tailbits is given, make sure that there is space to write @tailbits
4297 *      bytes of data beyond current end of socket buffer.  @trailer will be
4298 *      set to point to the skb in which this space begins.
4299 *
4300 *      The number of scatterlist elements required to completely map the
4301 *      COW'd and extended socket buffer will be returned.
4302 */
4303int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
4304{
4305        int copyflag;
4306        int elt;
4307        struct sk_buff *skb1, **skb_p;
4308
4309        /* If skb is cloned or its head is paged, reallocate
4310         * head pulling out all the pages (pages are considered not writable
4311         * at the moment even if they are anonymous).
4312         */
4313        if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
4314            __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
4315                return -ENOMEM;
4316
4317        /* Easy case. Most of packets will go this way. */
4318        if (!skb_has_frag_list(skb)) {
4319                /* A little trouble: not enough space for the trailer.
4320                 * This should not happen when the stack is tuned to generate
4321                 * good frames. OK, on a miss we reallocate and reserve even more
4322                 * space; 128 bytes is fair. */
4323
4324                if (skb_tailroom(skb) < tailbits &&
4325                    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
4326                        return -ENOMEM;
4327
4328                /* Voila! */
4329                *trailer = skb;
4330                return 1;
4331        }
4332
4333        /* Misery. We are in trouble; time to mince the fragments... */
4334
4335        elt = 1;
4336        skb_p = &skb_shinfo(skb)->frag_list;
4337        copyflag = 0;
4338
4339        while ((skb1 = *skb_p) != NULL) {
4340                int ntail = 0;
4341
4342                /* The fragment is partially pulled by someone,
4343                 * this can happen on input. Copy it and everything
4344                 * after it. */
4345
4346                if (skb_shared(skb1))
4347                        copyflag = 1;
4348
4349                /* If the skb is the last, worry about trailer. */
4350
4351                if (skb1->next == NULL && tailbits) {
4352                        if (skb_shinfo(skb1)->nr_frags ||
4353                            skb_has_frag_list(skb1) ||
4354                            skb_tailroom(skb1) < tailbits)
4355                                ntail = tailbits + 128;
4356                }
4357
4358                if (copyflag ||
4359                    skb_cloned(skb1) ||
4360                    ntail ||
4361                    skb_shinfo(skb1)->nr_frags ||
4362                    skb_has_frag_list(skb1)) {
4363                        struct sk_buff *skb2;
4364
4365                        /* Fuck, we are miserable poor guys... */
4366                        if (ntail == 0)
4367                                skb2 = skb_copy(skb1, GFP_ATOMIC);
4368                        else
4369                                skb2 = skb_copy_expand(skb1,
4370                                                       skb_headroom(skb1),
4371                                                       ntail,
4372                                                       GFP_ATOMIC);
4373                        if (unlikely(skb2 == NULL))
4374                                return -ENOMEM;
4375
4376                        if (skb1->sk)
4377                                skb_set_owner_w(skb2, skb1->sk);
4378
4379                        /* Looking around. Are we still alive?
4380                         * OK, link new skb, drop old one */
4381
4382                        skb2->next = skb1->next;
4383                        *skb_p = skb2;
4384                        kfree_skb(skb1);
4385                        skb1 = skb2;
4386                }
4387                elt++;
4388                *trailer = skb1;
4389                skb_p = &skb1->next;
4390        }
4391
4392        return elt;
4393}
4394EXPORT_SYMBOL_GPL(skb_cow_data);
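/*
 * Usage sketch (illustrative): the IPsec-style pattern of making the buffer
 * writable and reserving trailer space before mapping it for crypto.
 * "tailbits" is assumed to be the required trailer length in the caller;
 * the returned nfrags bounds the scatterlist size that skb_to_sgvec() will
 * need.
 *
 *	struct sk_buff *trailer;
 *	int nfrags;
 *
 *	nfrags = skb_cow_data(skb, tailbits, &trailer);
 *	if (nfrags < 0)
 *		return nfrags;
 *	pskb_put(skb, trailer, tailbits);
 */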
4395
4396static void sock_rmem_free(struct sk_buff *skb)
4397{
4398        struct sock *sk = skb->sk;
4399
4400        atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
4401}
4402
4403static void skb_set_err_queue(struct sk_buff *skb)
4404{
4405        /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
4406         * So, it is safe to (mis)use it to mark skbs on the error queue.
4407         */
4408        skb->pkt_type = PACKET_OUTGOING;
4409        BUILD_BUG_ON(PACKET_OUTGOING == 0);
4410}
4411
4412/*
4413 * Note: We don't mem charge error packets (no sk_forward_alloc changes)
4414 */
4415int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
4416{
4417        if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
4418            (unsigned int)READ_ONCE(sk->sk_rcvbuf))
4419                return -ENOMEM;
4420
4421        skb_orphan(skb);
4422        skb->sk = sk;
4423        skb->destructor = sock_rmem_free;
4424        atomic_add(skb->truesize, &sk->sk_rmem_alloc);
4425        skb_set_err_queue(skb);
4426
4427        /* before exiting rcu section, make sure dst is refcounted */
4428        skb_dst_force(skb);
4429
4430        skb_queue_tail(&sk->sk_error_queue, skb);
4431        if (!sock_flag(sk, SOCK_DEAD))
4432                sk->sk_error_report(sk);
4433        return 0;
4434}
4435EXPORT_SYMBOL(sock_queue_err_skb);
4436
4437static bool is_icmp_err_skb(const struct sk_buff *skb)
4438{
4439        return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
4440                       SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6);
4441}
4442
4443struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
4444{
4445        struct sk_buff_head *q = &sk->sk_error_queue;
4446        struct sk_buff *skb, *skb_next = NULL;
4447        bool icmp_next = false;
4448        unsigned long flags;
4449
4450        spin_lock_irqsave(&q->lock, flags);
4451        skb = __skb_dequeue(q);
4452        if (skb && (skb_next = skb_peek(q))) {
4453                icmp_next = is_icmp_err_skb(skb_next);
4454                if (icmp_next)
4455                        sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin;
4456        }
4457        spin_unlock_irqrestore(&q->lock, flags);
4458
4459        if (is_icmp_err_skb(skb) && !icmp_next)
4460                sk->sk_err = 0;
4461
4462        if (skb_next)
4463                sk->sk_error_report(sk);
4464
4465        return skb;
4466}
4467EXPORT_SYMBOL(sock_dequeue_err_skb);
4468
4469/**
4470 * skb_clone_sk - create clone of skb, and take reference to socket
4471 * @skb: the skb to clone
4472 *
4473 * This function creates a clone of a buffer that holds a reference on
4474 * sk_refcnt.  Buffers created via this function are meant to be
4475 * returned using sock_queue_err_skb, or freed via kfree_skb.
4476 *
4477 * When passing buffers allocated with this function to sock_queue_err_skb
4478 * it is necessary to wrap the call with sock_hold/sock_put in order to
4479 * prevent the socket from being released prior to being enqueued on
4480 * the sk_error_queue.
4481 */
4482struct sk_buff *skb_clone_sk(struct sk_buff *skb)
4483{
4484        struct sock *sk = skb->sk;
4485        struct sk_buff *clone;
4486
4487        if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
4488                return NULL;
4489
4490        clone = skb_clone(skb, GFP_ATOMIC);
4491        if (!clone) {
4492                sock_put(sk);
4493                return NULL;
4494        }
4495
4496        clone->sk = sk;
4497        clone->destructor = sock_efree;
4498
4499        return clone;
4500}
4501EXPORT_SYMBOL(skb_clone_sk);
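/*
 * Usage sketch (illustrative): one way to follow the guidance in the
 * comment above, wrapping the enqueue with sock_hold()/sock_put() so the
 * clone's destructor cannot drop the last socket reference before the skb
 * is on the error queue.
 *
 *	struct sk_buff *clone = skb_clone_sk(skb);
 *
 *	if (clone) {
 *		struct sock *sk = clone->sk;
 *
 *		sock_hold(sk);
 *		if (sock_queue_err_skb(sk, clone))
 *			kfree_skb(clone);
 *		sock_put(sk);
 *	}
 */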
4502
4503static void __skb_complete_tx_timestamp(struct sk_buff *skb,
4504                                        struct sock *sk,
4505                                        int tstype,
4506                                        bool opt_stats)
4507{
4508        struct sock_exterr_skb *serr;
4509        int err;
4510
4511        BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
4512
4513        serr = SKB_EXT_ERR(skb);
4514        memset(serr, 0, sizeof(*serr));
4515        serr->ee.ee_errno = ENOMSG;
4516        serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
4517        serr->ee.ee_info = tstype;
4518        serr->opt_stats = opt_stats;
4519        serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
4520        if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
4521                serr->ee.ee_data = skb_shinfo(skb)->tskey;
4522                if (sk->sk_protocol == IPPROTO_TCP &&
4523                    sk->sk_type == SOCK_STREAM)
4524                        serr->ee.ee_data -= sk->sk_tskey;
4525        }
4526
4527        err = sock_queue_err_skb(sk, skb);
4528
4529        if (err)
4530                kfree_skb(skb);
4531}
4532
4533static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
4534{
4535        bool ret;
4536
4537        if (likely(sysctl_tstamp_allow_data || tsonly))
4538                return true;
4539
4540        read_lock_bh(&sk->sk_callback_lock);
4541        ret = sk->sk_socket && sk->sk_socket->file &&
4542              file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW);
4543        read_unlock_bh(&sk->sk_callback_lock);
4544        return ret;
4545}
4546
4547void skb_complete_tx_timestamp(struct sk_buff *skb,
4548                               struct skb_shared_hwtstamps *hwtstamps)
4549{
4550        struct sock *sk = skb->sk;
4551
4552        if (!skb_may_tx_timestamp(sk, false))
4553                goto err;
4554
4555        /* Take a reference to prevent skb_orphan() from freeing the socket,
4556         * but only if the socket refcount is not zero.
4557         */
4558        if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
4559                *skb_hwtstamps(skb) = *hwtstamps;
4560                __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
4561                sock_put(sk);
4562                return;
4563        }
4564
4565err:
4566        kfree_skb(skb);
4567}
4568EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
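
/* Illustrative sketch (not part of the original source): how a driver's TX
 * completion path might report a hardware timestamp for a clone it stashed
 * earlier (typically one created with skb_clone_sk()).  The function name
 * and "my_hw_ns" are hypothetical.
 */
static inline void example_report_hw_tx_tstamp(struct sk_buff *clone, u64 my_hw_ns)
{
        struct skb_shared_hwtstamps hwts = { };

        hwts.hwtstamp = ns_to_ktime(my_hw_ns);

        /* Consumes the clone whether or not it can be queued. */
        skb_complete_tx_timestamp(clone, &hwts);
}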
4569
4570void __skb_tstamp_tx(struct sk_buff *orig_skb,
4571                     struct skb_shared_hwtstamps *hwtstamps,
4572                     struct sock *sk, int tstype)
4573{
4574        struct sk_buff *skb;
4575        bool tsonly, opt_stats = false;
4576
4577        if (!sk)
4578                return;
4579
4580        if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
4581            skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS)
4582                return;
4583
4584        tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
4585        if (!skb_may_tx_timestamp(sk, tsonly))
4586                return;
4587
4588        if (tsonly) {
4589#ifdef CONFIG_INET
4590                if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
4591                    sk->sk_protocol == IPPROTO_TCP &&
4592                    sk->sk_type == SOCK_STREAM) {
4593                        skb = tcp_get_timestamping_opt_stats(sk);
4594                        opt_stats = true;
4595                } else
4596#endif
4597                        skb = alloc_skb(0, GFP_ATOMIC);
4598        } else {
4599                skb = skb_clone(orig_skb, GFP_ATOMIC);
4600        }
4601        if (!skb)
4602                return;
4603
4604        if (tsonly) {
4605                skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags &
4606                                             SKBTX_ANY_TSTAMP;
4607                skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
4608        }
4609
4610        if (hwtstamps)
4611                *skb_hwtstamps(skb) = *hwtstamps;
4612        else
4613                skb->tstamp = ktime_get_real();
4614
4615        __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
4616}
4617EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
4618
4619void skb_tstamp_tx(struct sk_buff *orig_skb,
4620                   struct skb_shared_hwtstamps *hwtstamps)
4621{
4622        return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk,
4623                               SCM_TSTAMP_SND);
4624}
4625EXPORT_SYMBOL_GPL(skb_tstamp_tx);
4626
4627void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
4628{
4629        struct sock *sk = skb->sk;
4630        struct sock_exterr_skb *serr;
4631        int err = 1;
4632
4633        skb->wifi_acked_valid = 1;
4634        skb->wifi_acked = acked;
4635
4636        serr = SKB_EXT_ERR(skb);
4637        memset(serr, 0, sizeof(*serr));
4638        serr->ee.ee_errno = ENOMSG;
4639        serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
4640
4641        /* Take a reference to prevent skb_orphan() from freeing the socket,
4642         * but only if the socket refcount is not zero.
4643         */
4644        if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
4645                err = sock_queue_err_skb(sk, skb);
4646                sock_put(sk);
4647        }
4648        if (err)
4649                kfree_skb(skb);
4650}
4651EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
4652
4653/**
4654 * skb_partial_csum_set - set up and verify partial csum values for packet
4655 * @skb: the skb to set
4656 * @start: the number of bytes after skb->data to start checksumming.
4657 * @off: the offset from start to place the checksum.
4658 *
4659 * For untrusted partially-checksummed packets, we need to make sure the values
4660 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
4661 *
4662 * This function checks and sets those values and skb->ip_summed: if this
4663 * returns false you should drop the packet.
4664 */
4665bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
4666{
4667        u32 csum_end = (u32)start + (u32)off + sizeof(__sum16);
4668        u32 csum_start = skb_headroom(skb) + (u32)start;
4669
4670        if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) {
4671                net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n",
4672                                     start, off, skb_headroom(skb), skb_headlen(skb));
4673                return false;
4674        }
4675        skb->ip_summed = CHECKSUM_PARTIAL;
4676        skb->csum_start = csum_start;
4677        skb->csum_offset = off;
4678        skb_set_transport_header(skb, start);
4679        return true;
4680}
4681EXPORT_SYMBOL_GPL(skb_partial_csum_set);
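
/* Illustrative sketch (not part of the original source): validating an
 * untrusted, partially checksummed UDP packet whose skb->data points at the
 * IP header.  The function name and "ip_hlen" are hypothetical.
 */
static inline bool example_setup_udp_partial_csum(struct sk_buff *skb,
                                                  u16 ip_hlen)
{
        /* Checksumming starts at the UDP header; the result is written to
         * the "check" field inside it.
         */
        return skb_partial_csum_set(skb, ip_hlen,
                                    offsetof(struct udphdr, check));
}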
4682
4683static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
4684                               unsigned int max)
4685{
4686        if (skb_headlen(skb) >= len)
4687                return 0;
4688
4689        /* If we need to pullup then pullup to the max, so we
4690         * won't need to do it again.
4691         */
4692        if (max > skb->len)
4693                max = skb->len;
4694
4695        if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
4696                return -ENOMEM;
4697
4698        if (skb_headlen(skb) < len)
4699                return -EPROTO;
4700
4701        return 0;
4702}
4703
4704#define MAX_TCP_HDR_LEN (15 * 4)
4705
4706static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
4707                                      typeof(IPPROTO_IP) proto,
4708                                      unsigned int off)
4709{
4710        switch (proto) {
4711                int err;
4712
4713        case IPPROTO_TCP:
4714                err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
4715                                          off + MAX_TCP_HDR_LEN);
4716                if (!err && !skb_partial_csum_set(skb, off,
4717                                                  offsetof(struct tcphdr,
4718                                                           check)))
4719                        err = -EPROTO;
4720                return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;
4721
4722        case IPPROTO_UDP:
4723                err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
4724                                          off + sizeof(struct udphdr));
4725                if (!err && !skb_partial_csum_set(skb, off,
4726                                                  offsetof(struct udphdr,
4727                                                           check)))
4728                        err = -EPROTO;
4729                return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
4730        }
4731
4732        return ERR_PTR(-EPROTO);
4733}
4734
4735/* This value should be large enough to cover a tagged ethernet header plus
4736 * maximally sized IP and TCP or UDP headers.
4737 */
4738#define MAX_IP_HDR_LEN 128
4739
4740static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
4741{
4742        unsigned int off;
4743        bool fragment;
4744        __sum16 *csum;
4745        int err;
4746
4747        fragment = false;
4748
4749        err = skb_maybe_pull_tail(skb,
4750                                  sizeof(struct iphdr),
4751                                  MAX_IP_HDR_LEN);
4752        if (err < 0)
4753                goto out;
4754
4755        if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
4756                fragment = true;
4757
4758        off = ip_hdrlen(skb);
4759
4760        err = -EPROTO;
4761
4762        if (fragment)
4763                goto out;
4764
4765        csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
4766        if (IS_ERR(csum))
4767                return PTR_ERR(csum);
4768
4769        if (recalculate)
4770                *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
4771                                           ip_hdr(skb)->daddr,
4772                                           skb->len - off,
4773                                           ip_hdr(skb)->protocol, 0);
4774        err = 0;
4775
4776out:
4777        return err;
4778}
4779
4780/* This value should be large enough to cover a tagged ethernet header plus
4781 * an IPv6 header, all options, and a maximal TCP or UDP header.
4782 */
4783#define MAX_IPV6_HDR_LEN 256
4784
4785#define OPT_HDR(type, skb, off) \
4786        (type *)(skb_network_header(skb) + (off))
4787
4788static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
4789{
4790        int err;
4791        u8 nexthdr;
4792        unsigned int off;
4793        unsigned int len;
4794        bool fragment;
4795        bool done;
4796        __sum16 *csum;
4797
4798        fragment = false;
4799        done = false;
4800
4801        off = sizeof(struct ipv6hdr);
4802
4803        err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
4804        if (err < 0)
4805                goto out;
4806
4807        nexthdr = ipv6_hdr(skb)->nexthdr;
4808
4809        len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
4810        while (off <= len && !done) {
4811                switch (nexthdr) {
4812                case IPPROTO_DSTOPTS:
4813                case IPPROTO_HOPOPTS:
4814                case IPPROTO_ROUTING: {
4815                        struct ipv6_opt_hdr *hp;
4816
4817                        err = skb_maybe_pull_tail(skb,
4818                                                  off +
4819                                                  sizeof(struct ipv6_opt_hdr),
4820                                                  MAX_IPV6_HDR_LEN);
4821                        if (err < 0)
4822                                goto out;
4823
4824                        hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
4825                        nexthdr = hp->nexthdr;
4826                        off += ipv6_optlen(hp);
4827                        break;
4828                }
4829                case IPPROTO_AH: {
4830                        struct ip_auth_hdr *hp;
4831
4832                        err = skb_maybe_pull_tail(skb,
4833                                                  off +
4834                                                  sizeof(struct ip_auth_hdr),
4835                                                  MAX_IPV6_HDR_LEN);
4836                        if (err < 0)
4837                                goto out;
4838
4839                        hp = OPT_HDR(struct ip_auth_hdr, skb, off);
4840                        nexthdr = hp->nexthdr;
4841                        off += ipv6_authlen(hp);
4842                        break;
4843                }
4844                case IPPROTO_FRAGMENT: {
4845                        struct frag_hdr *hp;
4846
4847                        err = skb_maybe_pull_tail(skb,
4848                                                  off +
4849                                                  sizeof(struct frag_hdr),
4850                                                  MAX_IPV6_HDR_LEN);
4851                        if (err < 0)
4852                                goto out;
4853
4854                        hp = OPT_HDR(struct frag_hdr, skb, off);
4855
4856                        if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
4857                                fragment = true;
4858
4859                        nexthdr = hp->nexthdr;
4860                        off += sizeof(struct frag_hdr);
4861                        break;
4862                }
4863                default:
4864                        done = true;
4865                        break;
4866                }
4867        }
4868
4869        err = -EPROTO;
4870
4871        if (!done || fragment)
4872                goto out;
4873
4874        csum = skb_checksum_setup_ip(skb, nexthdr, off);
4875        if (IS_ERR(csum))
4876                return PTR_ERR(csum);
4877
4878        if (recalculate)
4879                *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4880                                         &ipv6_hdr(skb)->daddr,
4881                                         skb->len - off, nexthdr, 0);
4882        err = 0;
4883
4884out:
4885        return err;
4886}
4887
4888/**
4889 * skb_checksum_setup - set up partial checksum offset
4890 * @skb: the skb to set up
4891 * @recalculate: if true the pseudo-header checksum will be recalculated
4892 */
4893int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
4894{
4895        int err;
4896
4897        switch (skb->protocol) {
4898        case htons(ETH_P_IP):
4899                err = skb_checksum_setup_ipv4(skb, recalculate);
4900                break;
4901
4902        case htons(ETH_P_IPV6):
4903                err = skb_checksum_setup_ipv6(skb, recalculate);
4904                break;
4905
4906        default:
4907                err = -EPROTO;
4908                break;
4909        }
4910
4911        return err;
4912}
4913EXPORT_SYMBOL(skb_checksum_setup);
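
/* Illustrative sketch (not part of the original source): a virtual device
 * receive path that cannot trust the checksum metadata supplied by its peer
 * might validate it (and recalculate the pseudo-header checksum) before
 * handing the skb to the stack.  The function name is hypothetical.
 */
static inline int example_rx_fixup_untrusted_csum(struct sk_buff *skb)
{
        int err = skb_checksum_setup(skb, true);

        if (err) {
                /* Bad csum_start/csum_offset: drop the packet. */
                kfree_skb(skb);
                return err;
        }

        netif_rx(skb);
        return 0;
}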
4914
4915/**
4916 * skb_checksum_maybe_trim - maybe trims the given skb
4917 * @skb: the skb to check
4918 * @transport_len: the data length beyond the network header
4919 *
4920 * Checks whether the given skb has data beyond the given transport length.
4921 * If so, returns a cloned skb trimmed to this transport length.
4922 * Otherwise returns the provided skb. Returns NULL in error cases
4923 * (e.g. transport_len exceeds skb length or out-of-memory).
4924 *
4925 * Caller needs to set the skb transport header and free any returned skb if it
4926 * differs from the provided skb.
4927 */
4928static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
4929                                               unsigned int transport_len)
4930{
4931        struct sk_buff *skb_chk;
4932        unsigned int len = skb_transport_offset(skb) + transport_len;
4933        int ret;
4934
4935        if (skb->len < len)
4936                return NULL;
4937        else if (skb->len == len)
4938                return skb;
4939
4940        skb_chk = skb_clone(skb, GFP_ATOMIC);
4941        if (!skb_chk)
4942                return NULL;
4943
4944        ret = pskb_trim_rcsum(skb_chk, len);
4945        if (ret) {
4946                kfree_skb(skb_chk);
4947                return NULL;
4948        }
4949
4950        return skb_chk;
4951}
4952
4953/**
4954 * skb_checksum_trimmed - validate checksum of an skb
4955 * @skb: the skb to check
4956 * @transport_len: the data length beyond the network header
4957 * @skb_chkf: checksum function to use
4958 *
4959 * Applies the given checksum function skb_chkf to the provided skb.
4960 * Returns a checked and maybe trimmed skb. Returns NULL on error.
4961 *
4962 * If the skb has data beyond the given transport length, then a
4963 * trimmed & cloned skb is checked and returned.
4964 *
4965 * Caller needs to set the skb transport header and free any returned skb if it
4966 * differs from the provided skb.
4967 */
4968struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
4969                                     unsigned int transport_len,
4970                                     __sum16(*skb_chkf)(struct sk_buff *skb))
4971{
4972        struct sk_buff *skb_chk;
4973        unsigned int offset = skb_transport_offset(skb);
4974        __sum16 ret;
4975
4976        skb_chk = skb_checksum_maybe_trim(skb, transport_len);
4977        if (!skb_chk)
4978                goto err;
4979
4980        if (!pskb_may_pull(skb_chk, offset))
4981                goto err;
4982
4983        skb_pull_rcsum(skb_chk, offset);
4984        ret = skb_chkf(skb_chk);
4985        skb_push_rcsum(skb_chk, offset);
4986
4987        if (ret)
4988                goto err;
4989
4990        return skb_chk;
4991
4992err:
4993        if (skb_chk && skb_chk != skb)
4994                kfree_skb(skb_chk);
4995
4996        return NULL;
4997
4998}
4999EXPORT_SYMBOL(skb_checksum_trimmed);
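
/* Illustrative sketch (not part of the original source): verifying a
 * transport checksum with a caller-supplied routine, as described above.
 * The transport header must already be set, and a returned clone (if any)
 * must be freed by the caller.  The function name is hypothetical.
 */
static inline bool example_transport_csum_ok(struct sk_buff *skb,
                                             unsigned int transport_len,
                                             __sum16 (*chkf)(struct sk_buff *skb))
{
        struct sk_buff *skb_chk;

        skb_chk = skb_checksum_trimmed(skb, transport_len, chkf);
        if (!skb_chk)
                return false;

        /* Only a trimmed clone may be freed, never the original skb. */
        if (skb_chk != skb)
                kfree_skb(skb_chk);

        return true;
}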
5000
5001void __skb_warn_lro_forwarding(const struct sk_buff *skb)
5002{
5003        net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
5004                             skb->dev->name);
5005}
5006EXPORT_SYMBOL(__skb_warn_lro_forwarding);
5007
5008void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
5009{
5010        if (head_stolen) {
5011                skb_release_head_state(skb);
5012                kmem_cache_free(skbuff_head_cache, skb);
5013        } else {
5014                __kfree_skb(skb);
5015        }
5016}
5017EXPORT_SYMBOL(kfree_skb_partial);
5018
5019/**
5020 * skb_try_coalesce - try to merge skb to prior one
5021 * @to: prior buffer
5022 * @from: buffer to add
5023 * @fragstolen: pointer to boolean
5024 * @delta_truesize: how much more was allocated than was requested
5025 */
5026bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
5027                      bool *fragstolen, int *delta_truesize)
5028{
5029        struct skb_shared_info *to_shinfo, *from_shinfo;
5030        int i, delta, len = from->len;
5031
5032        *fragstolen = false;
5033
5034        if (skb_cloned(to))
5035                return false;
5036
5037        if (len <= skb_tailroom(to)) {
5038                if (len)
5039                        BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
5040                *delta_truesize = 0;
5041                return true;
5042        }
5043
5044        to_shinfo = skb_shinfo(to);
5045        from_shinfo = skb_shinfo(from);
5046        if (to_shinfo->frag_list || from_shinfo->frag_list)
5047                return false;
5048        if (skb_zcopy(to) || skb_zcopy(from))
5049                return false;
5050
5051        if (skb_headlen(from) != 0) {
5052                struct page *page;
5053                unsigned int offset;
5054
5055                if (to_shinfo->nr_frags +
5056                    from_shinfo->nr_frags >= MAX_SKB_FRAGS)
5057                        return false;
5058
5059                if (skb_head_is_locked(from))
5060                        return false;
5061
5062                delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
5063
5064                page = virt_to_head_page(from->head);
5065                offset = from->data - (unsigned char *)page_address(page);
5066
5067                skb_fill_page_desc(to, to_shinfo->nr_frags,
5068                                   page, offset, skb_headlen(from));
5069                *fragstolen = true;
5070        } else {
5071                if (to_shinfo->nr_frags +
5072                    from_shinfo->nr_frags > MAX_SKB_FRAGS)
5073                        return false;
5074
5075                delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
5076        }
5077
5078        WARN_ON_ONCE(delta < len);
5079
5080        memcpy(to_shinfo->frags + to_shinfo->nr_frags,
5081               from_shinfo->frags,
5082               from_shinfo->nr_frags * sizeof(skb_frag_t));
5083        to_shinfo->nr_frags += from_shinfo->nr_frags;
5084
5085        if (!skb_cloned(from))
5086                from_shinfo->nr_frags = 0;
5087
5088        /* if the skb is not cloned this does nothing
5089         * since we set nr_frags to 0.
5090         */
5091        for (i = 0; i < from_shinfo->nr_frags; i++)
5092                __skb_frag_ref(&from_shinfo->frags[i]);
5093
5094        to->truesize += delta;
5095        to->len += len;
5096        to->data_len += len;
5097
5098        *delta_truesize = delta;
5099        return true;
5100}
5101EXPORT_SYMBOL(skb_try_coalesce);
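
/* Illustrative sketch (not part of the original source): a receive queue
 * merging a new skb into the queue tail, similar in spirit to the TCP
 * coalescing callers.  On success the donor skb is released with
 * kfree_skb_partial() and only the absorbed truesize is charged.  The
 * function name is hypothetical.
 */
static inline bool example_rx_queue_coalesce(struct sock *sk,
                                             struct sk_buff *tail,
                                             struct sk_buff *skb)
{
        bool fragstolen;
        int delta;

        if (!tail || !skb_try_coalesce(tail, skb, &fragstolen, &delta))
                return false;

        kfree_skb_partial(skb, fragstolen);
        atomic_add(delta, &sk->sk_rmem_alloc);
        sk_mem_charge(sk, delta);
        return true;
}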
5102
5103/**
5104 * skb_scrub_packet - scrub an skb
5105 *
5106 * @skb: buffer to clean
5107 * @xnet: packet is crossing netns
5108 *
5109 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
5110 * into/from a tunnel. Some information has to be cleared during these
5111 * operations.
5112 * skb_scrub_packet can also be used to clean a skb before injecting it in
5113 * another namespace (@xnet == true). We have to clear all information in the
5114 * skb that could impact namespace isolation.
5115 */
5116void skb_scrub_packet(struct sk_buff *skb, bool xnet)
5117{
5118        skb->pkt_type = PACKET_HOST;
5119        skb->skb_iif = 0;
5120        skb->ignore_df = 0;
5121        skb_dst_drop(skb);
5122        skb_ext_reset(skb);
5123        nf_reset_ct(skb);
5124        nf_reset_trace(skb);
5125
5126#ifdef CONFIG_NET_SWITCHDEV
5127        skb->offload_fwd_mark = 0;
5128        skb->offload_l3_fwd_mark = 0;
5129#endif
5130
5131        if (!xnet)
5132                return;
5133
5134        ipvs_reset(skb);
5135        skb->mark = 0;
5136        skb->tstamp = 0;
5137}
5138EXPORT_SYMBOL_GPL(skb_scrub_packet);
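
/* Illustrative sketch (not part of the original source): a tunnel receive
 * path handing a decapsulated packet to a device that may live in another
 * network namespace.  The function name and device parameters are
 * hypothetical.
 */
static inline void example_tunnel_rx_scrub(struct sk_buff *skb,
                                           struct net_device *rx_dev,
                                           struct net_device *fwd_dev)
{
        /* Clear everything when crossing namespaces, otherwise only the
         * tunnel-related state.
         */
        skb_scrub_packet(skb, !net_eq(dev_net(rx_dev), dev_net(fwd_dev)));
}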
5139
5140/**
5141 * skb_gso_transport_seglen - Return length of individual segments of a gso packet
5142 *
5143 * @skb: GSO skb
5144 *
5145 * skb_gso_transport_seglen is used to determine the real size of the
5146 * individual segments, including Layer4 headers (TCP/UDP).
5147 *
5148 * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
5149 */
5150static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
5151{
5152        const struct skb_shared_info *shinfo = skb_shinfo(skb);
5153        unsigned int thlen = 0;
5154
5155        if (skb->encapsulation) {
5156                thlen = skb_inner_transport_header(skb) -
5157                        skb_transport_header(skb);
5158
5159                if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
5160                        thlen += inner_tcp_hdrlen(skb);
5161        } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
5162                thlen = tcp_hdrlen(skb);
5163        } else if (unlikely(skb_is_gso_sctp(skb))) {
5164                thlen = sizeof(struct sctphdr);
5165        } else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
5166                thlen = sizeof(struct udphdr);
5167        }
5168        /* UFO sets gso_size to the size of the fragmentation
5169         * payload, i.e. the size of the L4 (UDP) header is already
5170         * accounted for.
5171         */
5172        return thlen + shinfo->gso_size;
5173}
5174
5175/**
5176 * skb_gso_network_seglen - Return length of individual segments of a gso packet
5177 *
5178 * @skb: GSO skb
5179 *
5180 * skb_gso_network_seglen is used to determine the real size of the
5181 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
5182 *
5183 * The MAC/L2 header is not accounted for.
5184 */
5185static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
5186{
5187        unsigned int hdr_len = skb_transport_header(skb) -
5188                               skb_network_header(skb);
5189
5190        return hdr_len + skb_gso_transport_seglen(skb);
5191}
5192
5193/**
5194 * skb_gso_mac_seglen - Return length of individual segments of a gso packet
5195 *
5196 * @skb: GSO skb
5197 *
5198 * skb_gso_mac_seglen is used to determine the real size of the
5199 * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
5200 * headers (TCP/UDP).
5201 */
5202static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
5203{
5204        unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
5205
5206        return hdr_len + skb_gso_transport_seglen(skb);
5207}
5208
5209/**
5210 * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
5211 *
5212 * There are a couple of instances where we have a GSO skb, and we
5213 * want to determine what size it would be after it is segmented.
5214 *
5215 * We might want to check:
5216 * -    L3+L4+payload size (e.g. IP forwarding)
5217 * - L2+L3+L4+payload size (e.g. sanity check before passing to driver)
5218 *
5219 * This is a helper to do that correctly considering GSO_BY_FRAGS.
5220 *
5221 * @skb: GSO skb
5222 *
5223 * @seg_len: The segmented length (from skb_gso_*_seglen). In the
5224 *           GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
5225 *
5226 * @max_len: The maximum permissible length.
5227 *
5228 * Returns true if the segmented length <= max length.
5229 */
5230static inline bool skb_gso_size_check(const struct sk_buff *skb,
5231                                      unsigned int seg_len,
5232                                      unsigned int max_len) {
5233        const struct skb_shared_info *shinfo = skb_shinfo(skb);
5234        const struct sk_buff *iter;
5235
5236        if (shinfo->gso_size != GSO_BY_FRAGS)
5237                return seg_len <= max_len;
5238
5239        /* Undo this so we can re-use header sizes */
5240        seg_len -= GSO_BY_FRAGS;
5241
5242        skb_walk_frags(skb, iter) {
5243                if (seg_len + skb_headlen(iter) > max_len)
5244                        return false;
5245        }
5246
5247        return true;
5248}
5249
5250/**
5251 * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU?
5252 *
5253 * @skb: GSO skb
5254 * @mtu: MTU to validate against
5255 *
5256 * skb_gso_validate_network_len validates if a given skb will fit a
5257 * wanted MTU once split. It considers L3 headers, L4 headers, and the
5258 * payload.
5259 */
5260bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)
5261{
5262        return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
5263}
5264EXPORT_SYMBOL_GPL(skb_gso_validate_network_len);
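
/* Illustrative sketch (not part of the original source): an IP-forwarding
 * style MTU check, where a GSO skb is acceptable as long as each resulting
 * segment (L3 + L4 + payload) fits the egress MTU.  The function name is
 * hypothetical.
 */
static inline bool example_skb_fits_mtu(const struct sk_buff *skb,
                                        unsigned int mtu)
{
        if (skb_is_gso(skb))
                return skb_gso_validate_network_len(skb, mtu);

        /* Non-GSO: skb->len is assumed to cover the network header onwards. */
        return skb->len <= mtu;
}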
5265
5266/**
5267 * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
5268 *
5269 * @skb: GSO skb
5270 * @len: length to validate against
5271 *
5272 * skb_gso_validate_mac_len validates if a given skb will fit a wanted
5273 * length once split, including L2, L3 and L4 headers and the payload.
5274 */
5275bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)
5276{
5277        return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len);
5278}
5279EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
5280
5281static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
5282{
5283        int mac_len, meta_len;
5284        void *meta;
5285
5286        if (skb_cow(skb, skb_headroom(skb)) < 0) {
5287                kfree_skb(skb);
5288                return NULL;
5289        }
5290
5291        mac_len = skb->data - skb_mac_header(skb);
5292        if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) {
5293                memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
5294                        mac_len - VLAN_HLEN - ETH_TLEN);
5295        }
5296
5297        meta_len = skb_metadata_len(skb);
5298        if (meta_len) {
5299                meta = skb_metadata_end(skb) - meta_len;
5300                memmove(meta + VLAN_HLEN, meta, meta_len);
5301        }
5302
5303        skb->mac_header += VLAN_HLEN;
5304        return skb;
5305}
5306
5307struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
5308{
5309        struct vlan_hdr *vhdr;
5310        u16 vlan_tci;
5311
5312        if (unlikely(skb_vlan_tag_present(skb))) {
5313                /* vlan_tci is already set-up so leave this for another time */
5314                return skb;
5315        }
5316
5317        skb = skb_share_check(skb, GFP_ATOMIC);
5318        if (unlikely(!skb))
5319                goto err_free;
5320
5321        if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
5322                goto err_free;
5323
5324        vhdr = (struct vlan_hdr *)skb->data;
5325        vlan_tci = ntohs(vhdr->h_vlan_TCI);
5326        __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
5327
5328        skb_pull_rcsum(skb, VLAN_HLEN);
5329        vlan_set_encap_proto(skb, vhdr);
5330
5331        skb = skb_reorder_vlan_header(skb);
5332        if (unlikely(!skb))
5333                goto err_free;
5334
5335        skb_reset_network_header(skb);
5336        skb_reset_transport_header(skb);
5337        skb_reset_mac_len(skb);
5338
5339        return skb;
5340
5341err_free:
5342        kfree_skb(skb);
5343        return NULL;
5344}
5345EXPORT_SYMBOL(skb_vlan_untag);
5346
5347int skb_ensure_writable(struct sk_buff *skb, int write_len)
5348{
5349        if (!pskb_may_pull(skb, write_len))
5350                return -ENOMEM;
5351
5352        if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
5353                return 0;
5354
5355        return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
5356}
5357EXPORT_SYMBOL(skb_ensure_writable);
5358
5359/* remove VLAN header from packet and update csum accordingly.
5360 * expects a non-skb_vlan_tag_present skb with a vlan tag in its payload
5361 */
5362int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
5363{
5364        struct vlan_hdr *vhdr;
5365        int offset = skb->data - skb_mac_header(skb);
5366        int err;
5367
5368        if (WARN_ONCE(offset,
5369                      "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n",
5370                      offset)) {
5371                return -EINVAL;
5372        }
5373
5374        err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
5375        if (unlikely(err))
5376                return err;
5377
5378        skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
5379
5380        vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
5381        *vlan_tci = ntohs(vhdr->h_vlan_TCI);
5382
5383        memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
5384        __skb_pull(skb, VLAN_HLEN);
5385
5386        vlan_set_encap_proto(skb, vhdr);
5387        skb->mac_header += VLAN_HLEN;
5388
5389        if (skb_network_offset(skb) < ETH_HLEN)
5390                skb_set_network_header(skb, ETH_HLEN);
5391
5392        skb_reset_mac_len(skb);
5393
5394        return err;
5395}
5396EXPORT_SYMBOL(__skb_vlan_pop);
5397
5398/* Pop a vlan tag either from hwaccel or from payload.
5399 * Expects skb->data at mac header.
5400 */
5401int skb_vlan_pop(struct sk_buff *skb)
5402{
5403        u16 vlan_tci;
5404        __be16 vlan_proto;
5405        int err;
5406
5407        if (likely(skb_vlan_tag_present(skb))) {
5408                __vlan_hwaccel_clear_tag(skb);
5409        } else {
5410                if (unlikely(!eth_type_vlan(skb->protocol)))
5411                        return 0;
5412
5413                err = __skb_vlan_pop(skb, &vlan_tci);
5414                if (err)
5415                        return err;
5416        }
5417        /* move next vlan tag to hw accel tag */
5418        if (likely(!eth_type_vlan(skb->protocol)))
5419                return 0;
5420
5421        vlan_proto = skb->protocol;
5422        err = __skb_vlan_pop(skb, &vlan_tci);
5423        if (unlikely(err))
5424                return err;
5425
5426        __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
5427        return 0;
5428}
5429EXPORT_SYMBOL(skb_vlan_pop);
5430
5431/* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present).
5432 * Expects skb->data at mac header.
5433 */
5434int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
5435{
5436        if (skb_vlan_tag_present(skb)) {
5437                int offset = skb->data - skb_mac_header(skb);
5438                int err;
5439
5440                if (WARN_ONCE(offset,
5441                              "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n",
5442                              offset)) {
5443                        return -EINVAL;
5444                }
5445
5446                err = __vlan_insert_tag(skb, skb->vlan_proto,
5447                                        skb_vlan_tag_get(skb));
5448                if (err)
5449                        return err;
5450
5451                skb->protocol = skb->vlan_proto;
5452                skb->mac_len += VLAN_HLEN;
5453
5454                skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
5455        }
5456        __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
5457        return 0;
5458}
5459EXPORT_SYMBOL(skb_vlan_push);
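
/* Illustrative sketch (not part of the original source): tagging a frame
 * with an 802.1Q VLAN id before transmission.  Any tag already present in
 * hwaccel metadata is pushed into the payload first, as described above.
 * The function name and "vid" are hypothetical.
 */
static inline int example_tag_frame(struct sk_buff *skb, u16 vid)
{
        /* skb->data must be at the mac header. */
        return skb_vlan_push(skb, htons(ETH_P_8021Q), vid);
}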
5460
5461/* Update the ethertype of hdr and the skb csum value if required. */
5462static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr,
5463                             __be16 ethertype)
5464{
5465        if (skb->ip_summed == CHECKSUM_COMPLETE) {
5466                __be16 diff[] = { ~hdr->h_proto, ethertype };
5467
5468                skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
5469        }
5470
5471        hdr->h_proto = ethertype;
5472}
5473
5474/**
5475 * skb_mpls_push() - push a new MPLS header after the mac header
5476 *
5477 * @skb: buffer
5478 * @mpls_lse: MPLS label stack entry to push
5479 * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848)
5480 * @mac_len: length of the MAC header
5481 *
5482 * Expects skb->data at mac header.
5483 *
5484 * Returns 0 on success, -errno otherwise.
5485 */
5486int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
5487                  int mac_len)
5488{
5489        struct mpls_shim_hdr *lse;
5490        int err;
5491
5492        if (unlikely(!eth_p_mpls(mpls_proto)))
5493                return -EINVAL;
5494
5495        /* Networking stack does not allow simultaneous Tunnel and MPLS GSO. */
5496        if (skb->encapsulation)
5497                return -EINVAL;
5498
5499        err = skb_cow_head(skb, MPLS_HLEN);
5500        if (unlikely(err))
5501                return err;
5502
5503        if (!skb->inner_protocol) {
5504                skb_set_inner_network_header(skb, mac_len);
5505                skb_set_inner_protocol(skb, skb->protocol);
5506        }
5507
5508        skb_push(skb, MPLS_HLEN);
5509        memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
5510                mac_len);
5511        skb_reset_mac_header(skb);
5512        skb_set_network_header(skb, mac_len);
5513
5514        lse = mpls_hdr(skb);
5515        lse->label_stack_entry = mpls_lse;
5516        skb_postpush_rcsum(skb, lse, MPLS_HLEN);
5517
5518        if (skb->dev && skb->dev->type == ARPHRD_ETHER)
5519                skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto);
5520        skb->protocol = mpls_proto;
5521
5522        return 0;
5523}
5524EXPORT_SYMBOL_GPL(skb_mpls_push);
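
/* Illustrative sketch (not part of the original source): building a
 * bottom-of-stack label stack entry by hand and pushing it as a unicast
 * MPLS header.  The function name, "label" and "ttl" are hypothetical.
 */
static inline int example_push_mpls(struct sk_buff *skb, u32 label, u8 ttl)
{
        u32 lse = (label << MPLS_LS_LABEL_SHIFT) |
                  (ttl << MPLS_LS_TTL_SHIFT) |
                  MPLS_LS_S_MASK;

        return skb_mpls_push(skb, cpu_to_be32(lse), htons(ETH_P_MPLS_UC),
                             skb->mac_len);
}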
5525
5526/**
5527 * skb_mpls_pop() - pop the outermost MPLS header
5528 *
5529 * @skb: buffer
5530 * @next_proto: ethertype of header after popped MPLS header
5531 * @mac_len: length of the MAC header
5532 *
5533 * Expects skb->data at mac header.
5534 *
5535 * Returns 0 on success, -errno otherwise.
5536 */
5537int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len)
5538{
5539        int err;
5540
5541        if (unlikely(!eth_p_mpls(skb->protocol)))
5542                return 0;
5543
5544        err = skb_ensure_writable(skb, mac_len + MPLS_HLEN);
5545        if (unlikely(err))
5546                return err;
5547
5548        skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);
5549        memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
5550                mac_len);
5551
5552        __skb_pull(skb, MPLS_HLEN);
5553        skb_reset_mac_header(skb);
5554        skb_set_network_header(skb, mac_len);
5555
5556        if (skb->dev && skb->dev->type == ARPHRD_ETHER) {
5557                struct ethhdr *hdr;
5558
5559                /* use mpls_hdr() to get ethertype to account for VLANs. */
5560                hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
5561                skb_mod_eth_type(skb, hdr, next_proto);
5562        }
5563        skb->protocol = next_proto;
5564
5565        return 0;
5566}
5567EXPORT_SYMBOL_GPL(skb_mpls_pop);
5568
5569/**
5570 * skb_mpls_update_lse() - modify outermost MPLS header and update csum
5571 *
5572 * @skb: buffer
5573 * @mpls_lse: new MPLS label stack entry to update to
5574 *
5575 * Expects skb->data at mac header.
5576 *
5577 * Returns 0 on success, -errno otherwise.
5578 */
5579int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse)
5580{
5581        int err;
5582
5583        if (unlikely(!eth_p_mpls(skb->protocol)))
5584                return -EINVAL;
5585
5586        err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
5587        if (unlikely(err))
5588                return err;
5589
5590        if (skb->ip_summed == CHECKSUM_COMPLETE) {
5591                __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse };
5592
5593                skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
5594        }
5595
5596        mpls_hdr(skb)->label_stack_entry = mpls_lse;
5597
5598        return 0;
5599}
5600EXPORT_SYMBOL_GPL(skb_mpls_update_lse);
5601
5602/**
5603 * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header
5604 *
5605 * @skb: buffer
5606 *
5607 * Expects skb->data at mac header.
5608 *
5609 * Returns 0 on success, -errno otherwise.
5610 */
5611int skb_mpls_dec_ttl(struct sk_buff *skb)
5612{
5613        u32 lse;
5614        u8 ttl;
5615
5616        if (unlikely(!eth_p_mpls(skb->protocol)))
5617                return -EINVAL;
5618
5619        lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);
5620        ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
5621        if (!--ttl)
5622                return -EINVAL;
5623
5624        lse &= ~MPLS_LS_TTL_MASK;
5625        lse |= ttl << MPLS_LS_TTL_SHIFT;
5626
5627        return skb_mpls_update_lse(skb, cpu_to_be32(lse));
5628}
5629EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl);
5630
5631/**
5632 * alloc_skb_with_frags - allocate skb with page frags
5633 *
5634 * @header_len: size of linear part
5635 * @data_len: needed length in frags
5636 * @max_page_order: max page order desired.
5637 * @errcode: pointer to error code if any
5638 * @gfp_mask: allocation mask
5639 *
5640 * This can be used to allocate a paged skb, given a maximal order for frags.
5641 */
5642struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
5643                                     unsigned long data_len,
5644                                     int max_page_order,
5645                                     int *errcode,
5646                                     gfp_t gfp_mask)
5647{
5648        int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
5649        unsigned long chunk;
5650        struct sk_buff *skb;
5651        struct page *page;
5652        int i;
5653
5654        *errcode = -EMSGSIZE;
5655        /* Note this test could be relaxed, if we succeed in allocating
5656         * high order pages...
5657         */
5658        if (npages > MAX_SKB_FRAGS)
5659                return NULL;
5660
5661        *errcode = -ENOBUFS;
5662        skb = alloc_skb(header_len, gfp_mask);
5663        if (!skb)
5664                return NULL;
5665
5666        skb->truesize += npages << PAGE_SHIFT;
5667
5668        for (i = 0; npages > 0; i++) {
5669                int order = max_page_order;
5670
5671                while (order) {
5672                        if (npages >= 1 << order) {
5673                                page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
5674                                                   __GFP_COMP |
5675                                                   __GFP_NOWARN,
5676                                                   order);
5677                                if (page)
5678                                        goto fill_page;
5679                                /* Do not retry other high order allocations */
5680                                order = 1;
5681                                max_page_order = 0;
5682                        }
5683                        order--;
5684                }
5685                page = alloc_page(gfp_mask);
5686                if (!page)
5687                        goto failure;
5688fill_page:
5689                chunk = min_t(unsigned long, data_len,
5690                              PAGE_SIZE << order);
5691                skb_fill_page_desc(skb, i, page, 0, chunk);
5692                data_len -= chunk;
5693                npages -= 1 << order;
5694        }
5695        return skb;
5696
5697failure:
5698        kfree_skb(skb);
5699        return NULL;
5700}
5701EXPORT_SYMBOL(alloc_skb_with_frags);
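
/* Illustrative sketch (not part of the original source): allocating a mostly
 * paged skb with a small linear area reserved for headers, roughly how
 * sock_alloc_send_pskb() uses this helper.  The function name is
 * hypothetical.
 */
static inline struct sk_buff *example_alloc_paged_skb(unsigned long header_len,
                                                      unsigned long data_len)
{
        struct sk_buff *skb;
        int errcode;

        skb = alloc_skb_with_frags(header_len, data_len,
                                   PAGE_ALLOC_COSTLY_ORDER, &errcode,
                                   GFP_KERNEL);
        if (!skb)
                return ERR_PTR(errcode);

        /* Keep the linear part for protocol headers only; the payload
         * lives in the page frags.
         */
        skb_reserve(skb, header_len);
        return skb;
}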
5702
5703/* carve out the first off bytes from skb when off < headlen */
5704static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
5705                                    const int headlen, gfp_t gfp_mask)
5706{
5707        int i;
5708        int size = skb_end_offset(skb);
5709        int new_hlen = headlen - off;
5710        u8 *data;
5711
5712        size = SKB_DATA_ALIGN(size);
5713
5714        if (skb_pfmemalloc(skb))
5715                gfp_mask |= __GFP_MEMALLOC;
5716        data = kmalloc_reserve(size +
5717                               SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
5718                               gfp_mask, NUMA_NO_NODE, NULL);
5719        if (!data)
5720                return -ENOMEM;
5721
5722        size = SKB_WITH_OVERHEAD(ksize(data));
5723
5724        /* Copy real data, and all frags */
5725        skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
5726        skb->len -= off;
5727
5728        memcpy((struct skb_shared_info *)(data + size),
5729               skb_shinfo(skb),
5730               offsetof(struct skb_shared_info,
5731                        frags[skb_shinfo(skb)->nr_frags]));
5732        if (skb_cloned(skb)) {
5733                /* drop the old head gracefully */
5734                if (skb_orphan_frags(skb, gfp_mask)) {
5735                        kfree(data);
5736                        return -ENOMEM;
5737                }
5738                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
5739                        skb_frag_ref(skb, i);
5740                if (skb_has_frag_list(skb))
5741                        skb_clone_fraglist(skb);
5742                skb_release_data(skb);
5743        } else {
5744                /* we can reuse the existing refcount - all we did was
5745                 * relocate values
5746                 */
5747                skb_free_head(skb);
5748        }
5749
5750        skb->head = data;
5751        skb->data = data;
5752        skb->head_frag = 0;
5753#ifdef NET_SKBUFF_DATA_USES_OFFSET
5754        skb->end = size;
5755#else
5756        skb->end = skb->head + size;
5757#endif
5758        skb_set_tail_pointer(skb, skb_headlen(skb));
5759        skb_headers_offset_update(skb, 0);
5760        skb->cloned = 0;
5761        skb->hdr_len = 0;
5762        skb->nohdr = 0;
5763        atomic_set(&skb_shinfo(skb)->dataref, 1);
5764
5765        return 0;
5766}
5767
5768static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
5769
5770/* carve out the first eat bytes from skb's frag_list. May recurse into
5771 * pskb_carve()
5772 */
5773static int pskb_carve_frag_list(struct sk_buff *skb,
5774                                struct skb_shared_info *shinfo, int eat,
5775                                gfp_t gfp_mask)
5776{
5777        struct sk_buff *list = shinfo->frag_list;
5778        struct sk_buff *clone = NULL;
5779        struct sk_buff *insp = NULL;
5780
5781        do {
5782                if (!list) {
5783                        pr_err("Not enough bytes to eat. Want %d\n", eat);
5784                        return -EFAULT;
5785                }
5786                if (list->len <= eat) {
5787                        /* Eaten as whole. */
5788                        eat -= list->len;
5789                        list = list->next;
5790                        insp = list;
5791                } else {
5792                        /* Eaten partially. */
5793                        if (skb_shared(list)) {
5794                                clone = skb_clone(list, gfp_mask);
5795                                if (!clone)
5796                                        return -ENOMEM;
5797                                insp = list->next;
5798                                list = clone;
5799                        } else {
5800                                /* This may be pulled without problems. */
5801                                insp = list;
5802                        }
5803                        if (pskb_carve(list, eat, gfp_mask) < 0) {
5804                                kfree_skb(clone);
5805                                return -ENOMEM;
5806                        }
5807                        break;
5808                }
5809        } while (eat);
5810
5811        /* Free pulled out fragments. */
5812        while ((list = shinfo->frag_list) != insp) {
5813                shinfo->frag_list = list->next;
5814                kfree_skb(list);
5815        }
5816        /* And insert new clone at head. */
5817        if (clone) {
5818                clone->next = list;
5819                shinfo->frag_list = clone;
5820        }
5821        return 0;
5822}
5823
5824/* carve out the first off bytes from skb. Split line (off) is in the
5825 * non-linear part of skb
5826 */
5827static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
5828                                       int pos, gfp_t gfp_mask)
5829{
5830        int i, k = 0;
5831        int size = skb_end_offset(skb);
5832        u8 *data;
5833        const int nfrags = skb_shinfo(skb)->nr_frags;
5834        struct skb_shared_info *shinfo;
5835
5836        size = SKB_DATA_ALIGN(size);
5837
5838        if (skb_pfmemalloc(skb))
5839                gfp_mask |= __GFP_MEMALLOC;
5840        data = kmalloc_reserve(size +
5841                               SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
5842                               gfp_mask, NUMA_NO_NODE, NULL);
5843        if (!data)
5844                return -ENOMEM;
5845
5846        size = SKB_WITH_OVERHEAD(ksize(data));
5847
5848        memcpy((struct skb_shared_info *)(data + size),
5849               skb_shinfo(skb), offsetof(struct skb_shared_info,
5850                                         frags[skb_shinfo(skb)->nr_frags]));
5851        if (skb_orphan_frags(skb, gfp_mask)) {
5852                kfree(data);
5853                return -ENOMEM;
5854        }
5855        shinfo = (struct skb_shared_info *)(data + size);
5856        for (i = 0; i < nfrags; i++) {
5857                int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);
5858
5859                if (pos + fsize > off) {
5860                        shinfo->frags[k] = skb_shinfo(skb)->frags[i];
5861
5862                        if (pos < off) {
5863                                /* Split frag.
5864                                 * We have two variants in this case:
5865                                 * 1. Move the whole frag to the second
5866                                 *    part, if it is possible. F.e.
5867                                 *    this approach is mandatory for TUX,
5868                                 *    where splitting is expensive.
5869                                 * 2. Split accurately. This is what we do here.
5870                                 */
5871                                skb_frag_off_add(&shinfo->frags[0], off - pos);
5872                                skb_frag_size_sub(&shinfo->frags[0], off - pos);
5873                        }
5874                        skb_frag_ref(skb, i);
5875                        k++;
5876                }
5877                pos += fsize;
5878        }
5879        shinfo->nr_frags = k;
5880        if (skb_has_frag_list(skb))
5881                skb_clone_fraglist(skb);
5882
5883        if (k == 0) {
5884                /* split line is in frag list */
5885                if (pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) {
                            /* No frag references were taken above since
                             * shinfo->nr_frags is 0 (k == 0).  Drop the
                             * references taken by skb_clone_fraglist() and
                             * bail out without touching the original skb.
                             */
                            if (skb_has_frag_list(skb))
                                    kfree_skb_list(skb_shinfo(skb)->frag_list);
                            kfree(data);
                            return -ENOMEM;
                    }
5886        }
5887        skb_release_data(skb);
5888
5889        skb->head = data;
5890        skb->head_frag = 0;
5891        skb->data = data;
5892#ifdef NET_SKBUFF_DATA_USES_OFFSET
5893        skb->end = size;
5894#else
5895        skb->end = skb->head + size;
5896#endif
5897        skb_reset_tail_pointer(skb);
5898        skb_headers_offset_update(skb, 0);
5899        skb->cloned   = 0;
5900        skb->hdr_len  = 0;
5901        skb->nohdr    = 0;
5902        skb->len -= off;
5903        skb->data_len = skb->len;
5904        atomic_set(&skb_shinfo(skb)->dataref, 1);
5905        return 0;
5906}
5907
5908/* remove len bytes from the beginning of the skb */
5909static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
5910{
5911        int headlen = skb_headlen(skb);
5912
5913        if (len < headlen)
5914                return pskb_carve_inside_header(skb, len, headlen, gfp);
5915        else
5916                return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
5917}
5918
5919/* Extract to_copy bytes starting at off from skb, and return this in
5920 * a new skb
5921 */
5922struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
5923                             int to_copy, gfp_t gfp)
5924{
5925        struct sk_buff  *clone = skb_clone(skb, gfp);
5926
5927        if (!clone)
5928                return NULL;
5929
5930        if (pskb_carve(clone, off, gfp) < 0 ||
5931            pskb_trim(clone, to_copy)) {
5932                kfree_skb(clone);
5933                return NULL;
5934        }
5935        return clone;
5936}
5937EXPORT_SYMBOL(pskb_extract);
5938
5939/**
5940 * skb_condense - try to get rid of fragments/frag_list if possible
5941 * @skb: buffer
5942 *
5943 * Can be used to save memory before skb is added to a busy queue.
5944 * If packet has bytes in frags and enough tail room in skb->head,
5945 * pull all of them, so that we can free the frags right now and adjust
5946 * truesize.
5947 * Notes:
5948 *      We do not reallocate skb->head, thus this cannot fail.
5949 *      Caller must re-evaluate skb->truesize if needed.
5950 */
5951void skb_condense(struct sk_buff *skb)
5952{
5953        if (skb->data_len) {
5954                if (skb->data_len > skb->end - skb->tail ||
5955                    skb_cloned(skb))
5956                        return;
5957
5958                /* Nice, we can free page frag(s) right now */
5959                __pskb_pull_tail(skb, skb->data_len);
5960        }
5961        /* At this point, skb->truesize might be overestimated,
5962         * because the skb had fragments, and fragments do not report
5963         * their truesize.
5964         * When we pulled their content into skb->head, the fragments
5965         * were freed, but __pskb_pull_tail() could not possibly
5966         * adjust skb->truesize, not knowing the frag truesizes.
5967         */
5968        skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
5969}
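
/* Illustrative sketch (not part of the original source): calling
 * skb_condense() and reporting how much truesize was released, since the
 * caller is responsible for re-evaluating skb->truesize.  The function name
 * is hypothetical.
 */
static inline unsigned int example_condense(struct sk_buff *skb)
{
        unsigned int before = skb->truesize;

        skb_condense(skb);

        /* Amount the caller can subtract from its memory accounting. */
        return before - skb->truesize;
}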
5970
5971#ifdef CONFIG_SKB_EXTENSIONS
5972static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
5973{
5974        return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE);
5975}
5976
5977static struct skb_ext *skb_ext_alloc(void)
5978{
5979        struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);
5980
5981        if (new) {
5982                memset(new->offset, 0, sizeof(new->offset));
5983                refcount_set(&new->refcnt, 1);
5984        }
5985
5986        return new;
5987}
5988
5989static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
5990                                         unsigned int old_active)
5991{
5992        struct skb_ext *new;
5993
5994        if (refcount_read(&old->refcnt) == 1)
5995                return old;
5996
5997        new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);
5998        if (!new)
5999                return NULL;
6000
6001        memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE);
6002        refcount_set(&new->refcnt, 1);
6003
6004#ifdef CONFIG_XFRM
6005        if (old_active & (1 << SKB_EXT_SEC_PATH)) {
6006                struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH);
6007                unsigned int i;
6008
6009                for (i = 0; i < sp->len; i++)
6010                        xfrm_state_hold(sp->xvec[i]);
6011        }
6012#endif
6013        __skb_ext_put(old);
6014        return new;
6015}
6016
6017/**
6018 * skb_ext_add - allocate space for given extension, COW if needed
6019 * @skb: buffer
6020 * @id: extension to allocate space for
6021 *
6022 * Allocates enough space for the given extension.
6023 * If the extension is already present, a pointer to that extension
6024 * is returned.
6025 *
6026 * If the skb was cloned, COW applies and the returned memory can be
6027 * modified without changing the extension space of cloned buffers.
6028 *
6029 * Returns pointer to the extension or NULL on allocation failure.
6030 */
6031void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id)
6032{
6033        struct skb_ext *new, *old = NULL;
6034        unsigned int newlen, newoff;
6035
6036        if (skb->active_extensions) {
6037                old = skb->extensions;
6038
6039                new = skb_ext_maybe_cow(old, skb->active_extensions);
6040                if (!new)
6041                        return NULL;
6042
6043                if (__skb_ext_exist(new, id))
6044                        goto set_active;
6045
6046                newoff = new->chunks;
6047        } else {
6048                newoff = SKB_EXT_CHUNKSIZEOF(*new);
6049
6050                new = skb_ext_alloc();
6051                if (!new)
6052                        return NULL;
6053        }
6054
6055        newlen = newoff + skb_ext_type_len[id];
6056        new->chunks = newlen;
6057        new->offset[id] = newoff;
6058set_active:
6059        skb->extensions = new;
6060        skb->active_extensions |= 1 << id;
6061        return skb_ext_get_ptr(new, id);
6062}
6063EXPORT_SYMBOL(skb_ext_add);
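
#ifdef CONFIG_XFRM
/* Illustrative sketch (not part of the original source): attaching a secpath
 * extension, in the style of secpath_set().  The function name is
 * hypothetical.
 */
static inline struct sec_path *example_attach_sec_path(struct sk_buff *skb)
{
        struct sec_path *sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);

        if (!sp)
                return NULL;

        /* If the extension was already present its contents are kept
         * (possibly COW'd); a freshly added extension is not zeroed and
         * must be initialized by the caller.
         */
        return sp;
}
#endif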
6064
6065#ifdef CONFIG_XFRM
6066static void skb_ext_put_sp(struct sec_path *sp)
6067{
6068        unsigned int i;
6069
6070        for (i = 0; i < sp->len; i++)
6071                xfrm_state_put(sp->xvec[i]);
6072}
6073#endif
6074
6075void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
6076{
6077        struct skb_ext *ext = skb->extensions;
6078
6079        skb->active_extensions &= ~(1 << id);
6080        if (skb->active_extensions == 0) {
6081                skb->extensions = NULL;
6082                __skb_ext_put(ext);
6083#ifdef CONFIG_XFRM
6084        } else if (id == SKB_EXT_SEC_PATH &&
6085                   refcount_read(&ext->refcnt) == 1) {
6086                struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH);
6087
6088                skb_ext_put_sp(sp);
6089                sp->len = 0;
6090#endif
6091        }
6092}
6093EXPORT_SYMBOL(__skb_ext_del);
6094
6095void __skb_ext_put(struct skb_ext *ext)
6096{
6097        /* If this is the last clone, nothing can increment
6098         * it after the check passes.  Avoids one atomic op.
6099         */
6100        if (refcount_read(&ext->refcnt) == 1)
6101                goto free_now;
6102
6103        if (!refcount_dec_and_test(&ext->refcnt))
6104                return;
6105free_now:
6106#ifdef CONFIG_XFRM
6107        if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH))
6108                skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH));
6109#endif
6110
6111        kmem_cache_free(skbuff_ext_cache, ext);
6112}
6113EXPORT_SYMBOL(__skb_ext_put);
6114#endif /* CONFIG_SKB_EXTENSIONS */
6115