linux/net/core/skbuff.c
   1/*
   2 *      Routines having to do with the 'struct sk_buff' memory handlers.
   3 *
   4 *      Authors:        Alan Cox <alan@lxorguk.ukuu.org.uk>
   5 *                      Florian La Roche <rzsfl@rz.uni-sb.de>
   6 *
   7 *      Fixes:
   8 *              Alan Cox        :       Fixed the worst of the load
   9 *                                      balancer bugs.
  10 *              Dave Platt      :       Interrupt stacking fix.
  11 *      Richard Kooijman        :       Timestamp fixes.
  12 *              Alan Cox        :       Changed buffer format.
  13 *              Alan Cox        :       destructor hook for AF_UNIX etc.
  14 *              Linus Torvalds  :       Better skb_clone.
  15 *              Alan Cox        :       Added skb_copy.
  16 *              Alan Cox        :       Added all the changed routines Linus
  17 *                                      only put in the headers
  18 *              Ray VanTassle   :       Fixed --skb->lock in free
  19 *              Alan Cox        :       skb_copy copy arp field
  20 *              Andi Kleen      :       slabified it.
  21 *              Robert Olsson   :       Removed skb_head_pool
  22 *
  23 *      NOTE:
  24 *              The __skb_ routines should be called with interrupts
  25 *      disabled, or you better be *real* sure that the operation is atomic
  26 *      with respect to whatever list is being frobbed (e.g. via lock_sock()
  27 *      or via disabling bottom half handlers, etc).
  28 *
  29 *      This program is free software; you can redistribute it and/or
  30 *      modify it under the terms of the GNU General Public License
  31 *      as published by the Free Software Foundation; either version
  32 *      2 of the License, or (at your option) any later version.
  33 */
  34
  35/*
  36 *      The functions in this file will not compile correctly with gcc 2.4.x
  37 */
  38
  39#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  40
  41#include <linux/module.h>
  42#include <linux/types.h>
  43#include <linux/kernel.h>
  44#include <linux/mm.h>
  45#include <linux/interrupt.h>
  46#include <linux/in.h>
  47#include <linux/inet.h>
  48#include <linux/slab.h>
  49#include <linux/tcp.h>
  50#include <linux/udp.h>
  51#include <linux/sctp.h>
  52#include <linux/netdevice.h>
  53#ifdef CONFIG_NET_CLS_ACT
  54#include <net/pkt_sched.h>
  55#endif
  56#include <linux/string.h>
  57#include <linux/skbuff.h>
  58#include <linux/splice.h>
  59#include <linux/cache.h>
  60#include <linux/rtnetlink.h>
  61#include <linux/init.h>
  62#include <linux/scatterlist.h>
  63#include <linux/errqueue.h>
  64#include <linux/prefetch.h>
  65#include <linux/if_vlan.h>
  66
  67#include <net/protocol.h>
  68#include <net/dst.h>
  69#include <net/sock.h>
  70#include <net/checksum.h>
  71#include <net/ip6_checksum.h>
  72#include <net/xfrm.h>
  73
  74#include <linux/uaccess.h>
  75#include <trace/events/skb.h>
  76#include <linux/highmem.h>
  77#include <linux/capability.h>
  78#include <linux/user_namespace.h>
  79
  80struct kmem_cache *skbuff_head_cache __ro_after_init;
  81static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
  82int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
  83EXPORT_SYMBOL(sysctl_max_skb_frags);
  84
  85/**
  86 *      skb_panic - private function for out-of-line support
  87 *      @skb:   buffer
  88 *      @sz:    size
  89 *      @addr:  address
  90 *      @msg:   skb_over_panic or skb_under_panic
  91 *
  92 *      Out-of-line support for skb_put() and skb_push().
  93 *      Called via the wrapper skb_over_panic() or skb_under_panic().
  94 *      Keep out of line to prevent kernel bloat.
  95 *      __builtin_return_address is not used because it is not always reliable.
  96 */
  97static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
  98                      const char msg[])
  99{
 100        pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
 101                 msg, addr, skb->len, sz, skb->head, skb->data,
 102                 (unsigned long)skb->tail, (unsigned long)skb->end,
 103                 skb->dev ? skb->dev->name : "<NULL>");
 104        BUG();
 105}
 106
 107static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
 108{
 109        skb_panic(skb, sz, addr, __func__);
 110}
 111
 112static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
 113{
 114        skb_panic(skb, sz, addr, __func__);
 115}
 116
 117/*
 118 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 119 * the caller if emergency pfmemalloc reserves are being used. If it is and
 120 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 121 * may be used. Otherwise, the packet data may be discarded until enough
 122 * memory is free
 123 */
 124#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
 125         __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)
 126
 127static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
 128                               unsigned long ip, bool *pfmemalloc)
 129{
 130        void *obj;
 131        bool ret_pfmemalloc = false;
 132
 133        /*
 134         * Try a regular allocation, when that fails and we're not entitled
 135         * to the reserves, fail.
 136         */
 137        obj = kmalloc_node_track_caller(size,
 138                                        flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
 139                                        node);
 140        if (obj || !(gfp_pfmemalloc_allowed(flags)))
 141                goto out;
 142
 143        /* Try again but now we are using pfmemalloc reserves */
 144        ret_pfmemalloc = true;
 145        obj = kmalloc_node_track_caller(size, flags, node);
 146
 147out:
 148        if (pfmemalloc)
 149                *pfmemalloc = ret_pfmemalloc;
 150
 151        return obj;
 152}
 153
 154/*      Allocate a new skbuff. We do this ourselves so we can fill in a few
 155 *      'private' fields and also do memory statistics to find all the
 156 *      [BEEP] leaks.
 157 *
 158 */
 159
 160/**
 161 *      __alloc_skb     -       allocate a network buffer
 162 *      @size: size to allocate
 163 *      @gfp_mask: allocation mask
 164 *      @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 165 *              instead of head cache and allocate a cloned (child) skb.
 166 *              If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 167 *              allocations in case the data is required for writeback
 168 *      @node: numa node to allocate memory on
 169 *
 170 *      Allocate a new &sk_buff. The returned buffer has no headroom and a
 171 *      tail room of at least size bytes. The object has a reference count
 172 *      of one. The return is the buffer. On a failure the return is %NULL.
 173 *
 174 *      Buffers may only be allocated from interrupts using a @gfp_mask of
 175 *      %GFP_ATOMIC.
 176 */
 177struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 178                            int flags, int node)
 179{
 180        struct kmem_cache *cache;
 181        struct skb_shared_info *shinfo;
 182        struct sk_buff *skb;
 183        u8 *data;
 184        bool pfmemalloc;
 185
 186        cache = (flags & SKB_ALLOC_FCLONE)
 187                ? skbuff_fclone_cache : skbuff_head_cache;
 188
 189        if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
 190                gfp_mask |= __GFP_MEMALLOC;
 191
 192        /* Get the HEAD */
 193        skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
 194        if (!skb)
 195                goto out;
 196        prefetchw(skb);
 197
 198        /* We do our best to align skb_shared_info on a separate cache
 199         * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
 200         * aligned memory blocks, unless SLUB/SLAB debug is enabled.
 201         * Both skb->head and skb_shared_info are cache line aligned.
 202         */
 203        size = SKB_DATA_ALIGN(size);
 204        size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 205        data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
 206        if (!data)
 207                goto nodata;
 208        /* kmalloc(size) might give us more room than requested.
 209         * Put skb_shared_info exactly at the end of allocated zone,
 210         * to allow max possible filling before reallocation.
 211         */
 212        size = SKB_WITH_OVERHEAD(ksize(data));
 213        prefetchw(data + size);
 214
 215        /*
 216         * Only clear those fields we need to clear, not those that we will
 217         * actually initialise below. Hence, don't put any more fields after
 218         * the tail pointer in struct sk_buff!
 219         */
 220        memset(skb, 0, offsetof(struct sk_buff, tail));
 221        /* Account for allocated memory : skb + skb->head */
 222        skb->truesize = SKB_TRUESIZE(size);
 223        skb->pfmemalloc = pfmemalloc;
 224        refcount_set(&skb->users, 1);
 225        skb->head = data;
 226        skb->data = data;
 227        skb_reset_tail_pointer(skb);
 228        skb->end = skb->tail + size;
 229        skb->mac_header = (typeof(skb->mac_header))~0U;
 230        skb->transport_header = (typeof(skb->transport_header))~0U;
 231
 232        /* make sure we initialize shinfo sequentially */
 233        shinfo = skb_shinfo(skb);
 234        memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
 235        atomic_set(&shinfo->dataref, 1);
 236
 237        if (flags & SKB_ALLOC_FCLONE) {
 238                struct sk_buff_fclones *fclones;
 239
 240                fclones = container_of(skb, struct sk_buff_fclones, skb1);
 241
 242                skb->fclone = SKB_FCLONE_ORIG;
 243                refcount_set(&fclones->fclone_ref, 1);
 244
 245                fclones->skb2.fclone = SKB_FCLONE_CLONE;
 246        }
 247out:
 248        return skb;
 249nodata:
 250        kmem_cache_free(cache, skb);
 251        skb = NULL;
 252        goto out;
 253}
 254EXPORT_SYMBOL(__alloc_skb);
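/* Usage sketch (illustrative only): a typical caller pairs alloc_skb() -- the
 * common wrapper around __alloc_skb() -- with skb_reserve() for headroom and
 * skb_put() for payload. The MAX_HEADER headroom and the memcpy()'d payload
 * are assumptions for the example, not requirements of this API.
 */
static struct sk_buff *example_alloc_and_fill(const void *payload,
                                              unsigned int len)
{
        struct sk_buff *skb;

        /* room for the deepest header stack we may push later, plus the data */
        skb = alloc_skb(MAX_HEADER + len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        skb_reserve(skb, MAX_HEADER);            /* headroom, not counted in skb->len */
        memcpy(skb_put(skb, len), payload, len); /* extend tail, then copy the data   */

        return skb;
}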
 255
 256/**
 257 * __build_skb - build a network buffer
 258 * @data: data buffer provided by caller
 259 * @frag_size: size of data, or 0 if head was kmalloced
 260 *
 261 * Allocate a new &sk_buff. Caller provides space holding head and
 262 * skb_shared_info. @data must have been allocated by kmalloc() only if
 263 * @frag_size is 0, otherwise data should come from the page allocator
 264 *  or vmalloc()
 265 * The return is the new skb buffer.
 266 * On a failure the return is %NULL, and @data is not freed.
 267 * Notes :
  268 *  Before IO, the driver allocates only the data buffer where the NIC will
  269 *  put the incoming frame. The driver should add room at head (NET_SKB_PAD)
  270 *  and MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)).
  271 *  After IO, the driver calls build_skb() to allocate the sk_buff and
  272 *  populate it before giving the packet to the stack.
  273 *  RX rings only contain data buffers, not full skbs.
 274 */
 275struct sk_buff *__build_skb(void *data, unsigned int frag_size)
 276{
 277        struct skb_shared_info *shinfo;
 278        struct sk_buff *skb;
 279        unsigned int size = frag_size ? : ksize(data);
 280
 281        skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
 282        if (!skb)
 283                return NULL;
 284
 285        size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 286
 287        memset(skb, 0, offsetof(struct sk_buff, tail));
 288        skb->truesize = SKB_TRUESIZE(size);
 289        refcount_set(&skb->users, 1);
 290        skb->head = data;
 291        skb->data = data;
 292        skb_reset_tail_pointer(skb);
 293        skb->end = skb->tail + size;
 294        skb->mac_header = (typeof(skb->mac_header))~0U;
 295        skb->transport_header = (typeof(skb->transport_header))~0U;
 296
 297        /* make sure we initialize shinfo sequentially */
 298        shinfo = skb_shinfo(skb);
 299        memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
 300        atomic_set(&shinfo->dataref, 1);
 301
 302        return skb;
 303}
 304
 305/* build_skb() is wrapper over __build_skb(), that specifically
 306 * takes care of skb->head and skb->pfmemalloc
 307 * This means that if @frag_size is not zero, then @data must be backed
 308 * by a page fragment, not kmalloc() or vmalloc()
 309 */
 310struct sk_buff *build_skb(void *data, unsigned int frag_size)
 311{
 312        struct sk_buff *skb = __build_skb(data, frag_size);
 313
 314        if (skb && frag_size) {
 315                skb->head_frag = 1;
 316                if (page_is_pfmemalloc(virt_to_head_page(data)))
 317                        skb->pfmemalloc = 1;
 318        }
 319        return skb;
 320}
 321EXPORT_SYMBOL(build_skb);
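/* Usage sketch (illustrative only): the RX flow described in the notes above.
 * A hypothetical driver sizes the buffer with NET_SKB_PAD headroom and
 * skb_shared_info tailroom before DMA, then wraps it with build_skb() once the
 * frame has landed. "buf", "bufsz" and "pkt_len" stand in for driver state.
 */
static struct sk_buff *example_rx_build_skb(void *buf, unsigned int bufsz,
                                            unsigned int pkt_len)
{
        struct sk_buff *skb;

        /* bufsz was computed by the driver as
         *   SKB_DATA_ALIGN(NET_SKB_PAD + mtu) +
         *   SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
         * and buf came from a page fragment (e.g. netdev_alloc_frag()).
         */
        skb = build_skb(buf, bufsz);
        if (unlikely(!skb))
                return NULL;

        skb_reserve(skb, NET_SKB_PAD);  /* skip the reserved headroom    */
        skb_put(skb, pkt_len);          /* mark the DMA'ed bytes as data */

        return skb;
}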
 322
 323#define NAPI_SKB_CACHE_SIZE     64
 324
 325struct napi_alloc_cache {
 326        struct page_frag_cache page;
 327        unsigned int skb_count;
 328        void *skb_cache[NAPI_SKB_CACHE_SIZE];
 329};
 330
 331static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
 332static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
 333
 334static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 335{
 336        struct page_frag_cache *nc;
 337        unsigned long flags;
 338        void *data;
 339
 340        local_irq_save(flags);
 341        nc = this_cpu_ptr(&netdev_alloc_cache);
 342        data = page_frag_alloc(nc, fragsz, gfp_mask);
 343        local_irq_restore(flags);
 344        return data;
 345}
 346
 347/**
 348 * netdev_alloc_frag - allocate a page fragment
 349 * @fragsz: fragment size
 350 *
 351 * Allocates a frag from a page for receive buffer.
 352 * Uses GFP_ATOMIC allocations.
 353 */
 354void *netdev_alloc_frag(unsigned int fragsz)
 355{
 356        return __netdev_alloc_frag(fragsz, GFP_ATOMIC);
 357}
 358EXPORT_SYMBOL(netdev_alloc_frag);
 359
 360static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 361{
 362        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 363
 364        return page_frag_alloc(&nc->page, fragsz, gfp_mask);
 365}
 366
 367void *napi_alloc_frag(unsigned int fragsz)
 368{
 369        return __napi_alloc_frag(fragsz, GFP_ATOMIC);
 370}
 371EXPORT_SYMBOL(napi_alloc_frag);
 372
 373/**
 374 *      __netdev_alloc_skb - allocate an skbuff for rx on a specific device
 375 *      @dev: network device to receive on
 376 *      @len: length to allocate
 377 *      @gfp_mask: get_free_pages mask, passed to alloc_skb
 378 *
 379 *      Allocate a new &sk_buff and assign it a usage count of one. The
 380 *      buffer has NET_SKB_PAD headroom built in. Users should allocate
 381 *      the headroom they think they need without accounting for the
 382 *      built in space. The built in space is used for optimisations.
 383 *
 384 *      %NULL is returned if there is no free memory.
 385 */
 386struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
 387                                   gfp_t gfp_mask)
 388{
 389        struct page_frag_cache *nc;
 390        unsigned long flags;
 391        struct sk_buff *skb;
 392        bool pfmemalloc;
 393        void *data;
 394
 395        len += NET_SKB_PAD;
 396
 397        if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
 398            (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
 399                skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
 400                if (!skb)
 401                        goto skb_fail;
 402                goto skb_success;
 403        }
 404
 405        len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 406        len = SKB_DATA_ALIGN(len);
 407
 408        if (sk_memalloc_socks())
 409                gfp_mask |= __GFP_MEMALLOC;
 410
 411        local_irq_save(flags);
 412
 413        nc = this_cpu_ptr(&netdev_alloc_cache);
 414        data = page_frag_alloc(nc, len, gfp_mask);
 415        pfmemalloc = nc->pfmemalloc;
 416
 417        local_irq_restore(flags);
 418
 419        if (unlikely(!data))
 420                return NULL;
 421
 422        skb = __build_skb(data, len);
 423        if (unlikely(!skb)) {
 424                skb_free_frag(data);
 425                return NULL;
 426        }
 427
 428        /* use OR instead of assignment to avoid clearing of bits in mask */
 429        if (pfmemalloc)
 430                skb->pfmemalloc = 1;
 431        skb->head_frag = 1;
 432
 433skb_success:
 434        skb_reserve(skb, NET_SKB_PAD);
 435        skb->dev = dev;
 436
 437skb_fail:
 438        return skb;
 439}
 440EXPORT_SYMBOL(__netdev_alloc_skb);
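/* Usage sketch (illustrative only): netdev_alloc_skb() is the usual front end
 * to __netdev_alloc_skb() for drivers that copy small received frames
 * ("copy-break"). The "dev", "data" and "len" parameters are stand-ins for
 * driver RX state.
 */
static struct sk_buff *example_netdev_rx_copy(struct net_device *dev,
                                              const void *data,
                                              unsigned int len)
{
        struct sk_buff *skb;

        skb = netdev_alloc_skb(dev, len);       /* NET_SKB_PAD headroom built in */
        if (unlikely(!skb))
                return NULL;

        memcpy(skb_put(skb, len), data, len);
        return skb;
}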
 441
 442/**
 443 *      __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 444 *      @napi: napi instance this buffer was allocated for
 445 *      @len: length to allocate
 446 *      @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 447 *
 448 *      Allocate a new sk_buff for use in NAPI receive.  This buffer will
 449 *      attempt to allocate the head from a special reserved region used
 450 *      only for NAPI Rx allocation.  By doing this we can save several
 451 *      CPU cycles by avoiding having to disable and re-enable IRQs.
 452 *
 453 *      %NULL is returned if there is no free memory.
 454 */
 455struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
 456                                 gfp_t gfp_mask)
 457{
 458        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 459        struct sk_buff *skb;
 460        void *data;
 461
 462        len += NET_SKB_PAD + NET_IP_ALIGN;
 463
 464        if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
 465            (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
 466                skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
 467                if (!skb)
 468                        goto skb_fail;
 469                goto skb_success;
 470        }
 471
 472        len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 473        len = SKB_DATA_ALIGN(len);
 474
 475        if (sk_memalloc_socks())
 476                gfp_mask |= __GFP_MEMALLOC;
 477
 478        data = page_frag_alloc(&nc->page, len, gfp_mask);
 479        if (unlikely(!data))
 480                return NULL;
 481
 482        skb = __build_skb(data, len);
 483        if (unlikely(!skb)) {
 484                skb_free_frag(data);
 485                return NULL;
 486        }
 487
 488        /* use OR instead of assignment to avoid clearing of bits in mask */
 489        if (nc->page.pfmemalloc)
 490                skb->pfmemalloc = 1;
 491        skb->head_frag = 1;
 492
 493skb_success:
 494        skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
 495        skb->dev = napi->dev;
 496
 497skb_fail:
 498        return skb;
 499}
 500EXPORT_SYMBOL(__napi_alloc_skb);
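/* Usage sketch (illustrative only): from NAPI poll context, napi_alloc_skb()
 * (the wrapper around __napi_alloc_skb()) avoids the IRQ save/restore that the
 * netdev variant needs. The surrounding poll loop and "len" are assumptions.
 */
static struct sk_buff *example_napi_rx_alloc(struct napi_struct *napi,
                                             unsigned int len)
{
        struct sk_buff *skb;

        skb = napi_alloc_skb(napi, len);        /* NET_SKB_PAD + NET_IP_ALIGN headroom */
        if (unlikely(!skb))
                return NULL;

        /* ... fill the linear area or attach frags, then hand the skb to
         * napi_gro_receive() from the same poll loop ...
         */
        return skb;
}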
 501
 502void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
 503                     int size, unsigned int truesize)
 504{
 505        skb_fill_page_desc(skb, i, page, off, size);
 506        skb->len += size;
 507        skb->data_len += size;
 508        skb->truesize += truesize;
 509}
 510EXPORT_SYMBOL(skb_add_rx_frag);
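/* Usage sketch (illustrative only): attaching a received page fragment to an
 * skb with skb_add_rx_frag() instead of copying. Passing
 * skb_shinfo(skb)->nr_frags as the index appends the fragment; using
 * SKB_DATA_ALIGN(size) as truesize is an assumption standing in for the real
 * buffer allocation size.
 */
static void example_append_rx_frag(struct sk_buff *skb, struct page *page,
                                   int off, int size)
{
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, off, size,
                        SKB_DATA_ALIGN(size));
}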
 511
 512void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
 513                          unsigned int truesize)
 514{
 515        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 516
 517        skb_frag_size_add(frag, size);
 518        skb->len += size;
 519        skb->data_len += size;
 520        skb->truesize += truesize;
 521}
 522EXPORT_SYMBOL(skb_coalesce_rx_frag);
 523
 524static void skb_drop_list(struct sk_buff **listp)
 525{
 526        kfree_skb_list(*listp);
 527        *listp = NULL;
 528}
 529
 530static inline void skb_drop_fraglist(struct sk_buff *skb)
 531{
 532        skb_drop_list(&skb_shinfo(skb)->frag_list);
 533}
 534
 535static void skb_clone_fraglist(struct sk_buff *skb)
 536{
 537        struct sk_buff *list;
 538
 539        skb_walk_frags(skb, list)
 540                skb_get(list);
 541}
 542
 543static void skb_free_head(struct sk_buff *skb)
 544{
 545        unsigned char *head = skb->head;
 546
 547        if (skb->head_frag)
 548                skb_free_frag(head);
 549        else
 550                kfree(head);
 551}
 552
 553static void skb_release_data(struct sk_buff *skb)
 554{
 555        struct skb_shared_info *shinfo = skb_shinfo(skb);
 556        int i;
 557
 558        if (skb->cloned &&
 559            atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
 560                              &shinfo->dataref))
 561                return;
 562
 563        for (i = 0; i < shinfo->nr_frags; i++)
 564                __skb_frag_unref(&shinfo->frags[i]);
 565
 566        if (shinfo->frag_list)
 567                kfree_skb_list(shinfo->frag_list);
 568
 569        skb_zcopy_clear(skb, true);
 570        skb_free_head(skb);
 571}
 572
 573/*
 574 *      Free an skbuff by memory without cleaning the state.
 575 */
 576static void kfree_skbmem(struct sk_buff *skb)
 577{
 578        struct sk_buff_fclones *fclones;
 579
 580        switch (skb->fclone) {
 581        case SKB_FCLONE_UNAVAILABLE:
 582                kmem_cache_free(skbuff_head_cache, skb);
 583                return;
 584
 585        case SKB_FCLONE_ORIG:
 586                fclones = container_of(skb, struct sk_buff_fclones, skb1);
 587
  588                /* We usually free the clone (TX completion) before the original skb.
 589                 * This test would have no chance to be true for the clone,
 590                 * while here, branch prediction will be good.
 591                 */
 592                if (refcount_read(&fclones->fclone_ref) == 1)
 593                        goto fastpath;
 594                break;
 595
 596        default: /* SKB_FCLONE_CLONE */
 597                fclones = container_of(skb, struct sk_buff_fclones, skb2);
 598                break;
 599        }
 600        if (!refcount_dec_and_test(&fclones->fclone_ref))
 601                return;
 602fastpath:
 603        kmem_cache_free(skbuff_fclone_cache, fclones);
 604}
 605
 606void skb_release_head_state(struct sk_buff *skb)
 607{
 608        skb_dst_drop(skb);
 609        secpath_reset(skb);
 610        if (skb->destructor) {
 611                WARN_ON(in_irq());
 612                skb->destructor(skb);
 613        }
 614#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 615        nf_conntrack_put(skb_nfct(skb));
 616#endif
 617#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 618        nf_bridge_put(skb->nf_bridge);
 619#endif
 620}
 621
 622/* Free everything but the sk_buff shell. */
 623static void skb_release_all(struct sk_buff *skb)
 624{
 625        skb_release_head_state(skb);
 626        if (likely(skb->head))
 627                skb_release_data(skb);
 628}
 629
 630/**
 631 *      __kfree_skb - private function
 632 *      @skb: buffer
 633 *
 634 *      Free an sk_buff. Release anything attached to the buffer.
 635 *      Clean the state. This is an internal helper function. Users should
 636 *      always call kfree_skb
 637 */
 638
 639void __kfree_skb(struct sk_buff *skb)
 640{
 641        skb_release_all(skb);
 642        kfree_skbmem(skb);
 643}
 644EXPORT_SYMBOL(__kfree_skb);
 645
 646/**
 647 *      kfree_skb - free an sk_buff
 648 *      @skb: buffer to free
 649 *
 650 *      Drop a reference to the buffer and free it if the usage count has
 651 *      hit zero.
 652 */
 653void kfree_skb(struct sk_buff *skb)
 654{
 655        if (!skb_unref(skb))
 656                return;
 657
 658        trace_kfree_skb(skb, __builtin_return_address(0));
 659        __kfree_skb(skb);
 660}
 661EXPORT_SYMBOL(kfree_skb);
 662
 663void kfree_skb_list(struct sk_buff *segs)
 664{
 665        while (segs) {
 666                struct sk_buff *next = segs->next;
 667
 668                kfree_skb(segs);
 669                segs = next;
 670        }
 671}
 672EXPORT_SYMBOL(kfree_skb_list);
 673
 674/**
 675 *      skb_tx_error - report an sk_buff xmit error
 676 *      @skb: buffer that triggered an error
 677 *
 678 *      Report xmit error if a device callback is tracking this skb.
 679 *      skb must be freed afterwards.
 680 */
 681void skb_tx_error(struct sk_buff *skb)
 682{
 683        skb_zcopy_clear(skb, true);
 684}
 685EXPORT_SYMBOL(skb_tx_error);
 686
 687/**
 688 *      consume_skb - free an skbuff
 689 *      @skb: buffer to free
 690 *
  691 *      Drop a reference to the buffer and free it if the usage count has
  692 *      hit zero. Functions identically to kfree_skb(), but kfree_skb()
  693 *      assumes that the frame is being dropped after a failure and notes that.
 694 */
 695void consume_skb(struct sk_buff *skb)
 696{
 697        if (!skb_unref(skb))
 698                return;
 699
 700        trace_consume_skb(skb);
 701        __kfree_skb(skb);
 702}
 703EXPORT_SYMBOL(consume_skb);
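/* Usage sketch (illustrative only): the kfree_skb()/consume_skb() split only
 * changes which tracepoint fires -- dropped frames go through kfree_skb(),
 * successfully handled ones through consume_skb(). The "delivered" flag is an
 * assumption standing in for the caller's real outcome.
 */
static void example_drop_or_consume(struct sk_buff *skb, bool delivered)
{
        if (delivered)
                consume_skb(skb);       /* normal end of life, trace_consume_skb() */
        else
                kfree_skb(skb);         /* drop, trace_kfree_skb() with caller IP  */
}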
 704
 705/**
  706 *      __consume_stateless_skb - free an skbuff, assuming it is stateless
 707 *      @skb: buffer to free
 708 *
  709 *      Like consume_skb(), but this variant assumes that this is the last
  710 *      skb reference and all the head states have already been dropped
 711 */
 712void __consume_stateless_skb(struct sk_buff *skb)
 713{
 714        trace_consume_skb(skb);
 715        skb_release_data(skb);
 716        kfree_skbmem(skb);
 717}
 718
 719void __kfree_skb_flush(void)
 720{
 721        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 722
  723        /* flush skb_cache if it contains objects */
 724        if (nc->skb_count) {
 725                kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
 726                                     nc->skb_cache);
 727                nc->skb_count = 0;
 728        }
 729}
 730
 731static inline void _kfree_skb_defer(struct sk_buff *skb)
 732{
 733        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 734
 735        /* drop skb->head and call any destructors for packet */
 736        skb_release_all(skb);
 737
 738        /* record skb to CPU local list */
 739        nc->skb_cache[nc->skb_count++] = skb;
 740
 741#ifdef CONFIG_SLUB
 742        /* SLUB writes into objects when freeing */
 743        prefetchw(skb);
 744#endif
 745
 746        /* flush skb_cache if it is filled */
 747        if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
 748                kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
 749                                     nc->skb_cache);
 750                nc->skb_count = 0;
 751        }
 752}
 753void __kfree_skb_defer(struct sk_buff *skb)
 754{
 755        _kfree_skb_defer(skb);
 756}
 757
 758void napi_consume_skb(struct sk_buff *skb, int budget)
 759{
 760        if (unlikely(!skb))
 761                return;
 762
  763        /* Zero budget indicates a non-NAPI context called us, like netpoll */
 764        if (unlikely(!budget)) {
 765                dev_consume_skb_any(skb);
 766                return;
 767        }
 768
 769        if (!skb_unref(skb))
 770                return;
 771
 772        /* if reaching here SKB is ready to free */
 773        trace_consume_skb(skb);
 774
 775        /* if SKB is a clone, don't handle this case */
 776        if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
 777                __kfree_skb(skb);
 778                return;
 779        }
 780
 781        _kfree_skb_defer(skb);
 782}
 783EXPORT_SYMBOL(napi_consume_skb);
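/* Usage sketch (illustrative only): napi_consume_skb() is meant for
 * TX-completion processing inside a driver's poll callback, where "budget" is
 * the NAPI budget passed to that callback. The completed-skb array is a
 * hypothetical stand-in for a real TX ring.
 */
static void example_tx_clean(struct sk_buff **completed, int count, int budget)
{
        int i;

        for (i = 0; i < count; i++)
                napi_consume_skb(completed[i], budget); /* bulk-frees via skb_cache */
}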
 784
 785/* Make sure a field is enclosed inside headers_start/headers_end section */
 786#define CHECK_SKB_FIELD(field) \
 787        BUILD_BUG_ON(offsetof(struct sk_buff, field) <          \
 788                     offsetof(struct sk_buff, headers_start));  \
 789        BUILD_BUG_ON(offsetof(struct sk_buff, field) >          \
 790                     offsetof(struct sk_buff, headers_end));    \
 791
 792static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 793{
 794        new->tstamp             = old->tstamp;
 795        /* We do not copy old->sk */
 796        new->dev                = old->dev;
 797        memcpy(new->cb, old->cb, sizeof(old->cb));
 798        skb_dst_copy(new, old);
 799#ifdef CONFIG_XFRM
 800        new->sp                 = secpath_get(old->sp);
 801#endif
 802        __nf_copy(new, old, false);
 803
 804        /* Note : this field could be in headers_start/headers_end section
 805         * It is not yet because we do not want to have a 16 bit hole
 806         */
 807        new->queue_mapping = old->queue_mapping;
 808
 809        memcpy(&new->headers_start, &old->headers_start,
 810               offsetof(struct sk_buff, headers_end) -
 811               offsetof(struct sk_buff, headers_start));
 812        CHECK_SKB_FIELD(protocol);
 813        CHECK_SKB_FIELD(csum);
 814        CHECK_SKB_FIELD(hash);
 815        CHECK_SKB_FIELD(priority);
 816        CHECK_SKB_FIELD(skb_iif);
 817        CHECK_SKB_FIELD(vlan_proto);
 818        CHECK_SKB_FIELD(vlan_tci);
 819        CHECK_SKB_FIELD(transport_header);
 820        CHECK_SKB_FIELD(network_header);
 821        CHECK_SKB_FIELD(mac_header);
 822        CHECK_SKB_FIELD(inner_protocol);
 823        CHECK_SKB_FIELD(inner_transport_header);
 824        CHECK_SKB_FIELD(inner_network_header);
 825        CHECK_SKB_FIELD(inner_mac_header);
 826        CHECK_SKB_FIELD(mark);
 827#ifdef CONFIG_NETWORK_SECMARK
 828        CHECK_SKB_FIELD(secmark);
 829#endif
 830#ifdef CONFIG_NET_RX_BUSY_POLL
 831        CHECK_SKB_FIELD(napi_id);
 832#endif
 833#ifdef CONFIG_XPS
 834        CHECK_SKB_FIELD(sender_cpu);
 835#endif
 836#ifdef CONFIG_NET_SCHED
 837        CHECK_SKB_FIELD(tc_index);
 838#endif
 839
 840}
 841
 842/*
 843 * You should not add any new code to this function.  Add it to
 844 * __copy_skb_header above instead.
 845 */
 846static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
 847{
 848#define C(x) n->x = skb->x
 849
 850        n->next = n->prev = NULL;
 851        n->sk = NULL;
 852        __copy_skb_header(n, skb);
 853
 854        C(len);
 855        C(data_len);
 856        C(mac_len);
 857        n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
 858        n->cloned = 1;
 859        n->nohdr = 0;
 860        n->peeked = 0;
 861        C(pfmemalloc);
 862        n->destructor = NULL;
 863        C(tail);
 864        C(end);
 865        C(head);
 866        C(head_frag);
 867        C(data);
 868        C(truesize);
 869        refcount_set(&n->users, 1);
 870
 871        atomic_inc(&(skb_shinfo(skb)->dataref));
 872        skb->cloned = 1;
 873
 874        return n;
 875#undef C
 876}
 877
 878/**
 879 *      skb_morph       -       morph one skb into another
 880 *      @dst: the skb to receive the contents
 881 *      @src: the skb to supply the contents
 882 *
 883 *      This is identical to skb_clone except that the target skb is
 884 *      supplied by the user.
 885 *
 886 *      The target skb is returned upon exit.
 887 */
 888struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
 889{
 890        skb_release_all(dst);
 891        return __skb_clone(dst, src);
 892}
 893EXPORT_SYMBOL_GPL(skb_morph);
 894
 895int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
 896{
 897        unsigned long max_pg, num_pg, new_pg, old_pg;
 898        struct user_struct *user;
 899
 900        if (capable(CAP_IPC_LOCK) || !size)
 901                return 0;
 902
 903        num_pg = (size >> PAGE_SHIFT) + 2;      /* worst case */
 904        max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 905        user = mmp->user ? : current_user();
 906
 907        do {
 908                old_pg = atomic_long_read(&user->locked_vm);
 909                new_pg = old_pg + num_pg;
 910                if (new_pg > max_pg)
 911                        return -ENOBUFS;
 912        } while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) !=
 913                 old_pg);
 914
 915        if (!mmp->user) {
 916                mmp->user = get_uid(user);
 917                mmp->num_pg = num_pg;
 918        } else {
 919                mmp->num_pg += num_pg;
 920        }
 921
 922        return 0;
 923}
 924EXPORT_SYMBOL_GPL(mm_account_pinned_pages);
 925
 926void mm_unaccount_pinned_pages(struct mmpin *mmp)
 927{
 928        if (mmp->user) {
 929                atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
 930                free_uid(mmp->user);
 931        }
 932}
 933EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);
 934
 935struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size)
 936{
 937        struct ubuf_info *uarg;
 938        struct sk_buff *skb;
 939
 940        WARN_ON_ONCE(!in_task());
 941
 942        skb = sock_omalloc(sk, 0, GFP_KERNEL);
 943        if (!skb)
 944                return NULL;
 945
 946        BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
 947        uarg = (void *)skb->cb;
 948        uarg->mmp.user = NULL;
 949
 950        if (mm_account_pinned_pages(&uarg->mmp, size)) {
 951                kfree_skb(skb);
 952                return NULL;
 953        }
 954
 955        uarg->callback = sock_zerocopy_callback;
 956        uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
 957        uarg->len = 1;
 958        uarg->bytelen = size;
 959        uarg->zerocopy = 1;
 960        refcount_set(&uarg->refcnt, 1);
 961        sock_hold(sk);
 962
 963        return uarg;
 964}
 965EXPORT_SYMBOL_GPL(sock_zerocopy_alloc);
 966
 967static inline struct sk_buff *skb_from_uarg(struct ubuf_info *uarg)
 968{
 969        return container_of((void *)uarg, struct sk_buff, cb);
 970}
 971
 972struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
 973                                        struct ubuf_info *uarg)
 974{
 975        if (uarg) {
 976                const u32 byte_limit = 1 << 19;         /* limit to a few TSO */
 977                u32 bytelen, next;
 978
 979                /* realloc only when socket is locked (TCP, UDP cork),
 980                 * so uarg->len and sk_zckey access is serialized
 981                 */
 982                if (!sock_owned_by_user(sk)) {
 983                        WARN_ON_ONCE(1);
 984                        return NULL;
 985                }
 986
 987                bytelen = uarg->bytelen + size;
 988                if (uarg->len == USHRT_MAX - 1 || bytelen > byte_limit) {
 989                        /* TCP can create new skb to attach new uarg */
 990                        if (sk->sk_type == SOCK_STREAM)
 991                                goto new_alloc;
 992                        return NULL;
 993                }
 994
 995                next = (u32)atomic_read(&sk->sk_zckey);
 996                if ((u32)(uarg->id + uarg->len) == next) {
 997                        if (mm_account_pinned_pages(&uarg->mmp, size))
 998                                return NULL;
 999                        uarg->len++;
1000                        uarg->bytelen = bytelen;
1001                        atomic_set(&sk->sk_zckey, ++next);
1002                        sock_zerocopy_get(uarg);
1003                        return uarg;
1004                }
1005        }
1006
1007new_alloc:
1008        return sock_zerocopy_alloc(sk, size);
1009}
1010EXPORT_SYMBOL_GPL(sock_zerocopy_realloc);
1011
1012static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
1013{
1014        struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
1015        u32 old_lo, old_hi;
1016        u64 sum_len;
1017
1018        old_lo = serr->ee.ee_info;
1019        old_hi = serr->ee.ee_data;
1020        sum_len = old_hi - old_lo + 1ULL + len;
1021
1022        if (sum_len >= (1ULL << 32))
1023                return false;
1024
1025        if (lo != old_hi + 1)
1026                return false;
1027
1028        serr->ee.ee_data += len;
1029        return true;
1030}
1031
1032void sock_zerocopy_callback(struct ubuf_info *uarg, bool success)
1033{
1034        struct sk_buff *tail, *skb = skb_from_uarg(uarg);
1035        struct sock_exterr_skb *serr;
1036        struct sock *sk = skb->sk;
1037        struct sk_buff_head *q;
1038        unsigned long flags;
1039        u32 lo, hi;
1040        u16 len;
1041
1042        mm_unaccount_pinned_pages(&uarg->mmp);
1043
1044        /* if !len, there was only 1 call, and it was aborted
1045         * so do not queue a completion notification
1046         */
1047        if (!uarg->len || sock_flag(sk, SOCK_DEAD))
1048                goto release;
1049
1050        len = uarg->len;
1051        lo = uarg->id;
1052        hi = uarg->id + len - 1;
1053
1054        serr = SKB_EXT_ERR(skb);
1055        memset(serr, 0, sizeof(*serr));
1056        serr->ee.ee_errno = 0;
1057        serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
1058        serr->ee.ee_data = hi;
1059        serr->ee.ee_info = lo;
1060        if (!success)
1061                serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;
1062
1063        q = &sk->sk_error_queue;
1064        spin_lock_irqsave(&q->lock, flags);
1065        tail = skb_peek_tail(q);
1066        if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY ||
1067            !skb_zerocopy_notify_extend(tail, lo, len)) {
1068                __skb_queue_tail(q, skb);
1069                skb = NULL;
1070        }
1071        spin_unlock_irqrestore(&q->lock, flags);
1072
1073        sk->sk_error_report(sk);
1074
1075release:
1076        consume_skb(skb);
1077        sock_put(sk);
1078}
1079EXPORT_SYMBOL_GPL(sock_zerocopy_callback);
1080
1081void sock_zerocopy_put(struct ubuf_info *uarg)
1082{
1083        if (uarg && refcount_dec_and_test(&uarg->refcnt)) {
1084                if (uarg->callback)
1085                        uarg->callback(uarg, uarg->zerocopy);
1086                else
1087                        consume_skb(skb_from_uarg(uarg));
1088        }
1089}
1090EXPORT_SYMBOL_GPL(sock_zerocopy_put);
1091
1092void sock_zerocopy_put_abort(struct ubuf_info *uarg)
1093{
1094        if (uarg) {
1095                struct sock *sk = skb_from_uarg(uarg)->sk;
1096
1097                atomic_dec(&sk->sk_zckey);
1098                uarg->len--;
1099
1100                sock_zerocopy_put(uarg);
1101        }
1102}
1103EXPORT_SYMBOL_GPL(sock_zerocopy_put_abort);
1104
1105extern int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
1106                                   struct iov_iter *from, size_t length);
1107
1108int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
1109                             struct msghdr *msg, int len,
1110                             struct ubuf_info *uarg)
1111{
1112        struct ubuf_info *orig_uarg = skb_zcopy(skb);
1113        struct iov_iter orig_iter = msg->msg_iter;
1114        int err, orig_len = skb->len;
1115
1116        /* An skb can only point to one uarg. This edge case happens when
1117         * TCP appends to an skb, but zerocopy_realloc triggered a new alloc.
1118         */
1119        if (orig_uarg && uarg != orig_uarg)
1120                return -EEXIST;
1121
1122        err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
1123        if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
1124                struct sock *save_sk = skb->sk;
1125
1126                /* Streams do not free skb on error. Reset to prev state. */
1127                msg->msg_iter = orig_iter;
1128                skb->sk = sk;
1129                ___pskb_trim(skb, orig_len);
1130                skb->sk = save_sk;
1131                return err;
1132        }
1133
1134        skb_zcopy_set(skb, uarg);
1135        return skb->len - orig_len;
1136}
1137EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);
1138
1139static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
1140                              gfp_t gfp_mask)
1141{
1142        if (skb_zcopy(orig)) {
1143                if (skb_zcopy(nskb)) {
1144                        /* !gfp_mask callers are verified to !skb_zcopy(nskb) */
1145                        if (!gfp_mask) {
1146                                WARN_ON_ONCE(1);
1147                                return -ENOMEM;
1148                        }
1149                        if (skb_uarg(nskb) == skb_uarg(orig))
1150                                return 0;
1151                        if (skb_copy_ubufs(nskb, GFP_ATOMIC))
1152                                return -EIO;
1153                }
1154                skb_zcopy_set(nskb, skb_uarg(orig));
1155        }
1156        return 0;
1157}
1158
1159/**
1160 *      skb_copy_ubufs  -       copy userspace skb frags buffers to kernel
1161 *      @skb: the skb to modify
1162 *      @gfp_mask: allocation priority
1163 *
1164 *      This must be called on SKBTX_DEV_ZEROCOPY skb.
1165 *      It will copy all frags into kernel and drop the reference
1166 *      to userspace pages.
1167 *
1168 *      If this function is called from an interrupt gfp_mask() must be
1169 *      %GFP_ATOMIC.
1170 *
1171 *      Returns 0 on success or a negative error code on failure
1172 *      to allocate kernel memory to copy to.
1173 */
1174int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
1175{
1176        int num_frags = skb_shinfo(skb)->nr_frags;
1177        struct page *page, *head = NULL;
1178        int i, new_frags;
1179        u32 d_off;
1180
1181        if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
1182                return -EINVAL;
1183
1184        if (!num_frags)
1185                goto release;
1186
1187        new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
1188        for (i = 0; i < new_frags; i++) {
1189                page = alloc_page(gfp_mask);
1190                if (!page) {
1191                        while (head) {
1192                                struct page *next = (struct page *)page_private(head);
1193                                put_page(head);
1194                                head = next;
1195                        }
1196                        return -ENOMEM;
1197                }
1198                set_page_private(page, (unsigned long)head);
1199                head = page;
1200        }
1201
1202        page = head;
1203        d_off = 0;
1204        for (i = 0; i < num_frags; i++) {
1205                skb_frag_t *f = &skb_shinfo(skb)->frags[i];
1206                u32 p_off, p_len, copied;
1207                struct page *p;
1208                u8 *vaddr;
1209
1210                skb_frag_foreach_page(f, f->page_offset, skb_frag_size(f),
1211                                      p, p_off, p_len, copied) {
1212                        u32 copy, done = 0;
1213                        vaddr = kmap_atomic(p);
1214
1215                        while (done < p_len) {
1216                                if (d_off == PAGE_SIZE) {
1217                                        d_off = 0;
1218                                        page = (struct page *)page_private(page);
1219                                }
1220                                copy = min_t(u32, PAGE_SIZE - d_off, p_len - done);
1221                                memcpy(page_address(page) + d_off,
1222                                       vaddr + p_off + done, copy);
1223                                done += copy;
1224                                d_off += copy;
1225                        }
1226                        kunmap_atomic(vaddr);
1227                }
1228        }
1229
1230        /* skb frags release userspace buffers */
1231        for (i = 0; i < num_frags; i++)
1232                skb_frag_unref(skb, i);
1233
1234        /* skb frags point to kernel buffers */
1235        for (i = 0; i < new_frags - 1; i++) {
1236                __skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE);
1237                head = (struct page *)page_private(head);
1238        }
1239        __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
1240        skb_shinfo(skb)->nr_frags = new_frags;
1241
1242release:
1243        skb_zcopy_clear(skb, false);
1244        return 0;
1245}
1246EXPORT_SYMBOL_GPL(skb_copy_ubufs);
1247
1248/**
1249 *      skb_clone       -       duplicate an sk_buff
1250 *      @skb: buffer to clone
1251 *      @gfp_mask: allocation priority
1252 *
1253 *      Duplicate an &sk_buff. The new one is not owned by a socket. Both
1254 *      copies share the same packet data but not structure. The new
1255 *      buffer has a reference count of 1. If the allocation fails the
1256 *      function returns %NULL otherwise the new buffer is returned.
1257 *
1258 *      If this function is called from an interrupt gfp_mask() must be
1259 *      %GFP_ATOMIC.
1260 */
1261
1262struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
1263{
1264        struct sk_buff_fclones *fclones = container_of(skb,
1265                                                       struct sk_buff_fclones,
1266                                                       skb1);
1267        struct sk_buff *n;
1268
1269        if (skb_orphan_frags(skb, gfp_mask))
1270                return NULL;
1271
1272        if (skb->fclone == SKB_FCLONE_ORIG &&
1273            refcount_read(&fclones->fclone_ref) == 1) {
1274                n = &fclones->skb2;
1275                refcount_set(&fclones->fclone_ref, 2);
1276        } else {
1277                if (skb_pfmemalloc(skb))
1278                        gfp_mask |= __GFP_MEMALLOC;
1279
1280                n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
1281                if (!n)
1282                        return NULL;
1283
1284                n->fclone = SKB_FCLONE_UNAVAILABLE;
1285        }
1286
1287        return __skb_clone(n, skb);
1288}
1289EXPORT_SYMBOL(skb_clone);
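/* Usage sketch (illustrative only): cloning lets two paths hold the same
 * payload while each owns its own struct sk_buff, e.g. handing a copy to a
 * packet tap while the original continues down the stack.
 */
static struct sk_buff *example_clone_for_tap(struct sk_buff *skb)
{
        /* The clone shares the payload: neither path may write to the data
         * without first making it private (pskb_copy(), skb_cow(), ...).
         */
        return skb_clone(skb, GFP_ATOMIC);
}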
1290
1291void skb_headers_offset_update(struct sk_buff *skb, int off)
1292{
1293        /* Only adjust this if it actually is csum_start rather than csum */
1294        if (skb->ip_summed == CHECKSUM_PARTIAL)
1295                skb->csum_start += off;
1296        /* {transport,network,mac}_header and tail are relative to skb->head */
1297        skb->transport_header += off;
1298        skb->network_header   += off;
1299        if (skb_mac_header_was_set(skb))
1300                skb->mac_header += off;
1301        skb->inner_transport_header += off;
1302        skb->inner_network_header += off;
1303        skb->inner_mac_header += off;
1304}
1305EXPORT_SYMBOL(skb_headers_offset_update);
1306
1307void skb_copy_header(struct sk_buff *new, const struct sk_buff *old)
1308{
1309        __copy_skb_header(new, old);
1310
1311        skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
1312        skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
1313        skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
1314}
1315EXPORT_SYMBOL(skb_copy_header);
1316
1317static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
1318{
1319        if (skb_pfmemalloc(skb))
1320                return SKB_ALLOC_RX;
1321        return 0;
1322}
1323
1324/**
1325 *      skb_copy        -       create private copy of an sk_buff
1326 *      @skb: buffer to copy
1327 *      @gfp_mask: allocation priority
1328 *
1329 *      Make a copy of both an &sk_buff and its data. This is used when the
1330 *      caller wishes to modify the data and needs a private copy of the
1331 *      data to alter. Returns %NULL on failure or the pointer to the buffer
1332 *      on success. The returned buffer has a reference count of 1.
1333 *
1334 *      As by-product this function converts non-linear &sk_buff to linear
1335 *      one, so that &sk_buff becomes completely private and caller is allowed
1336 *      to modify all the data of returned buffer. This means that this
1337 *      function is not recommended for use in circumstances when only
1338 *      header is going to be modified. Use pskb_copy() instead.
1339 */
1340
1341struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
1342{
1343        int headerlen = skb_headroom(skb);
1344        unsigned int size = skb_end_offset(skb) + skb->data_len;
1345        struct sk_buff *n = __alloc_skb(size, gfp_mask,
1346                                        skb_alloc_rx_flag(skb), NUMA_NO_NODE);
1347
1348        if (!n)
1349                return NULL;
1350
1351        /* Set the data pointer */
1352        skb_reserve(n, headerlen);
1353        /* Set the tail pointer and length */
1354        skb_put(n, skb->len);
1355
1356        BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
1357
1358        skb_copy_header(n, skb);
1359        return n;
1360}
1361EXPORT_SYMBOL(skb_copy);
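/* Usage sketch (illustrative only): skb_copy() is the heavyweight option for
 * when the payload itself must be edited; the result is linear and fully
 * private. The byte flip below is an arbitrary placeholder modification.
 */
static struct sk_buff *example_private_copy(const struct sk_buff *skb)
{
        struct sk_buff *copy = skb_copy(skb, GFP_ATOMIC);

        if (!copy)
                return NULL;

        if (copy->len)
                copy->data[0] ^= 0xff;  /* safe: no other user sees this buffer */

        return copy;
}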
1362
1363/**
1364 *      __pskb_copy_fclone      -  create copy of an sk_buff with private head.
1365 *      @skb: buffer to copy
1366 *      @headroom: headroom of new skb
1367 *      @gfp_mask: allocation priority
1368 *      @fclone: if true allocate the copy of the skb from the fclone
1369 *      cache instead of the head cache; it is recommended to set this
1370 *      to true for the cases where the copy will likely be cloned
1371 *
1372 *      Make a copy of both an &sk_buff and part of its data, located
1373 *      in header. Fragmented data remain shared. This is used when
1374 *      the caller wishes to modify only header of &sk_buff and needs
1375 *      private copy of the header to alter. Returns %NULL on failure
1376 *      or the pointer to the buffer on success.
1377 *      The returned buffer has a reference count of 1.
1378 */
1379
1380struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
1381                                   gfp_t gfp_mask, bool fclone)
1382{
1383        unsigned int size = skb_headlen(skb) + headroom;
1384        int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
1385        struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);
1386
1387        if (!n)
1388                goto out;
1389
1390        /* Set the data pointer */
1391        skb_reserve(n, headroom);
1392        /* Set the tail pointer and length */
1393        skb_put(n, skb_headlen(skb));
1394        /* Copy the bytes */
1395        skb_copy_from_linear_data(skb, n->data, n->len);
1396
1397        n->truesize += skb->data_len;
1398        n->data_len  = skb->data_len;
1399        n->len       = skb->len;
1400
1401        if (skb_shinfo(skb)->nr_frags) {
1402                int i;
1403
1404                if (skb_orphan_frags(skb, gfp_mask) ||
1405                    skb_zerocopy_clone(n, skb, gfp_mask)) {
1406                        kfree_skb(n);
1407                        n = NULL;
1408                        goto out;
1409                }
1410                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1411                        skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
1412                        skb_frag_ref(skb, i);
1413                }
1414                skb_shinfo(n)->nr_frags = i;
1415        }
1416
1417        if (skb_has_frag_list(skb)) {
1418                skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
1419                skb_clone_fraglist(n);
1420        }
1421
1422        skb_copy_header(n, skb);
1423out:
1424        return n;
1425}
1426EXPORT_SYMBOL(__pskb_copy_fclone);
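/* Usage sketch (illustrative only): pskb_copy() (a wrapper around
 * __pskb_copy_fclone()) copies only the linear header while the frag pages
 * stay shared, which is enough when just the headers will be rewritten.
 */
static struct sk_buff *example_copy_headers(struct sk_buff *skb)
{
        struct sk_buff *copy = pskb_copy(skb, GFP_ATOMIC);

        /* on success the header area of "copy" is private; frag pages are
         * still shared with the original
         */
        return copy;
}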
1427
1428/**
1429 *      pskb_expand_head - reallocate header of &sk_buff
1430 *      @skb: buffer to reallocate
1431 *      @nhead: room to add at head
1432 *      @ntail: room to add at tail
1433 *      @gfp_mask: allocation priority
1434 *
1435 *      Expands (or creates identical copy, if @nhead and @ntail are zero)
1436 *      header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
1437 *      reference count of 1. Returns zero in the case of success or error,
1438 *      if expansion failed. In the last case, &sk_buff is not changed.
1439 *
1440 *      All the pointers pointing into skb header may change and must be
1441 *      reloaded after call to this function.
1442 */
1443
1444int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
1445                     gfp_t gfp_mask)
1446{
1447        int i, osize = skb_end_offset(skb);
1448        int size = osize + nhead + ntail;
1449        long off;
1450        u8 *data;
1451
1452        BUG_ON(nhead < 0);
1453
1454        BUG_ON(skb_shared(skb));
1455
1456        size = SKB_DATA_ALIGN(size);
1457
1458        if (skb_pfmemalloc(skb))
1459                gfp_mask |= __GFP_MEMALLOC;
1460        data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
1461                               gfp_mask, NUMA_NO_NODE, NULL);
1462        if (!data)
1463                goto nodata;
1464        size = SKB_WITH_OVERHEAD(ksize(data));
1465
1466        /* Copy only real data... and, alas, header. This should be
1467         * optimized for the cases when header is void.
1468         */
1469        memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
1470
1471        memcpy((struct skb_shared_info *)(data + size),
1472               skb_shinfo(skb),
1473               offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
1474
1475        /*
1476         * if shinfo is shared we must drop the old head gracefully, but if it
1477         * is not we can just drop the old head and let the existing refcount
1478         * be since all we did is relocate the values
1479         */
1480        if (skb_cloned(skb)) {
1481                if (skb_orphan_frags(skb, gfp_mask))
1482                        goto nofrags;
1483                if (skb_zcopy(skb))
1484                        refcount_inc(&skb_uarg(skb)->refcnt);
1485                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1486                        skb_frag_ref(skb, i);
1487
1488                if (skb_has_frag_list(skb))
1489                        skb_clone_fraglist(skb);
1490
1491                skb_release_data(skb);
1492        } else {
1493                skb_free_head(skb);
1494        }
1495        off = (data + nhead) - skb->head;
1496
1497        skb->head     = data;
1498        skb->head_frag = 0;
1499        skb->data    += off;
1500#ifdef NET_SKBUFF_DATA_USES_OFFSET
1501        skb->end      = size;
1502        off           = nhead;
1503#else
1504        skb->end      = skb->head + size;
1505#endif
1506        skb->tail             += off;
1507        skb_headers_offset_update(skb, nhead);
1508        skb->cloned   = 0;
1509        skb->hdr_len  = 0;
1510        skb->nohdr    = 0;
1511        atomic_set(&skb_shinfo(skb)->dataref, 1);
1512
1513        skb_metadata_clear(skb);
1514
1515        /* It is not generally safe to change skb->truesize.
 1516         * For the moment, we really only care about the rx path, or
1517         * when skb is orphaned (not attached to a socket).
1518         */
1519        if (!skb->sk || skb->destructor == sock_edemux)
1520                skb->truesize += size - osize;
1521
1522        return 0;
1523
1524nofrags:
1525        kfree(data);
1526nodata:
1527        return -ENOMEM;
1528}
1529EXPORT_SYMBOL(pskb_expand_head);
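/* Usage sketch (illustrative only): growing headroom before pushing an extra
 * header. Real callers usually go through helpers such as skb_cow_head(),
 * which only fall back to pskb_expand_head() when needed; the zeroed
 * placeholder header below is an assumption for the example.
 */
static int example_push_header(struct sk_buff *skb, unsigned int hdr_len)
{
        if (skb_headroom(skb) < hdr_len || skb_header_cloned(skb)) {
                int nhead = hdr_len > skb_headroom(skb) ?
                            hdr_len - skb_headroom(skb) : 0;

                /* every cached pointer into skb->head is stale after this */
                if (pskb_expand_head(skb, SKB_DATA_ALIGN(nhead), 0, GFP_ATOMIC))
                        return -ENOMEM;
        }

        memset(skb_push(skb, hdr_len), 0, hdr_len);
        return 0;
}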
1530
1531/* Make private copy of skb with writable head and some headroom */
1532
1533struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
1534{
1535        struct sk_buff *skb2;
1536        int delta = headroom - skb_headroom(skb);
1537
1538        if (delta <= 0)
1539                skb2 = pskb_copy(skb, GFP_ATOMIC);
1540        else {
1541                skb2 = skb_clone(skb, GFP_ATOMIC);
1542                if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
1543                                             GFP_ATOMIC)) {
1544                        kfree_skb(skb2);
1545                        skb2 = NULL;
1546                }
1547        }
1548        return skb2;
1549}
1550EXPORT_SYMBOL(skb_realloc_headroom);
1551
1552/**
1553 *      skb_copy_expand -       copy and expand sk_buff
1554 *      @skb: buffer to copy
1555 *      @newheadroom: new free bytes at head
1556 *      @newtailroom: new free bytes at tail
1557 *      @gfp_mask: allocation priority
1558 *
1559 *      Make a copy of both an &sk_buff and its data and while doing so
1560 *      allocate additional space.
1561 *
1562 *      This is used when the caller wishes to modify the data and needs a
1563 *      private copy of the data to alter as well as more space for new fields.
1564 *      Returns %NULL on failure or the pointer to the buffer
1565 *      on success. The returned buffer has a reference count of 1.
1566 *
1567 *      You must pass %GFP_ATOMIC as the allocation priority if this function
1568 *      is called from an interrupt.
1569 */
1570struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
1571                                int newheadroom, int newtailroom,
1572                                gfp_t gfp_mask)
1573{
1574        /*
1575         *      Allocate the copy buffer
1576         */
1577        struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
1578                                        gfp_mask, skb_alloc_rx_flag(skb),
1579                                        NUMA_NO_NODE);
1580        int oldheadroom = skb_headroom(skb);
1581        int head_copy_len, head_copy_off;
1582
1583        if (!n)
1584                return NULL;
1585
1586        skb_reserve(n, newheadroom);
1587
1588        /* Set the tail pointer and length */
1589        skb_put(n, skb->len);
1590
1591        head_copy_len = oldheadroom;
1592        head_copy_off = 0;
1593        if (newheadroom <= head_copy_len)
1594                head_copy_len = newheadroom;
1595        else
1596                head_copy_off = newheadroom - head_copy_len;
1597
1598        /* Copy the linear header and data. */
1599        BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
1600                             skb->len + head_copy_len));
1601
1602        skb_copy_header(n, skb);
1603
1604        skb_headers_offset_update(n, newheadroom - oldheadroom);
1605
1606        return n;
1607}
1608EXPORT_SYMBOL(skb_copy_expand);
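
/* Illustrative sketch (editorial, not part of the original file): copying a
 * frame into a new buffer with extra room for both a new header and a
 * trailer, e.g. when re-encapsulating ("HDR_LEN" and "TRL_LEN" are
 * hypothetical constants):
 *
 *	struct sk_buff *nskb;
 *
 *	nskb = skb_copy_expand(skb, HDR_LEN, TRL_LEN, GFP_ATOMIC);
 *	if (!nskb)
 *		goto drop;
 *	consume_skb(skb);
 *	hdr = skb_push(nskb, HDR_LEN);
 *	trl = skb_put(nskb, TRL_LEN);
 */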
1609
1610/**
1611 *      __skb_pad               -       zero pad the tail of an skb
1612 *      @skb: buffer to pad
1613 *      @pad: space to pad
1614 *      @free_on_error: free buffer on error
1615 *
1616 *      Ensure that a buffer is followed by a padding area that is zero
1617 *      filled. Used by network drivers which may DMA or transfer data
1618 *      beyond the buffer end onto the wire.
1619 *
1620 *      May return error in out of memory cases. The skb is freed on error
1621 *      if @free_on_error is true.
1622 */
1623
1624int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
1625{
1626        int err;
1627        int ntail;
1628
1629        /* If the skbuff is non-linear, tailroom is always zero. */
1630        if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
1631                memset(skb->data+skb->len, 0, pad);
1632                return 0;
1633        }
1634
1635        ntail = skb->data_len + pad - (skb->end - skb->tail);
1636        if (likely(skb_cloned(skb) || ntail > 0)) {
1637                err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
1638                if (unlikely(err))
1639                        goto free_skb;
1640        }
1641
1642        /* FIXME: The use of this function with non-linear skb's really needs
1643         * to be audited.
1644         */
1645        err = skb_linearize(skb);
1646        if (unlikely(err))
1647                goto free_skb;
1648
1649        memset(skb->data + skb->len, 0, pad);
1650        return 0;
1651
1652free_skb:
1653        if (free_on_error)
1654                kfree_skb(skb);
1655        return err;
1656}
1657EXPORT_SYMBOL(__skb_pad);
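
/* Illustrative sketch (editorial, not part of the original file): drivers
 * normally reach this through helpers such as skb_padto() or skb_put_padto()
 * to guarantee a minimum frame length before handing the buffer to hardware:
 *
 *	if (skb_put_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;
 *
 * On failure skb_put_padto() has already freed the skb; on success it has
 * zero-padded the frame and extended skb->len, whereas skb_padto() pads
 * without touching skb->len.
 */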
1658
1659/**
1660 *      pskb_put - add data to the tail of a potentially fragmented buffer
1661 *      @skb: start of the buffer to use
1662 *      @tail: tail fragment of the buffer to use
1663 *      @len: amount of data to add
1664 *
1665 *      This function extends the used data area of the potentially
1666 *      fragmented buffer. @tail must be the last fragment of @skb -- or
1667 *      @skb itself. If this would exceed the total buffer size the kernel
1668 *      will panic. A pointer to the first byte of the extra data is
1669 *      returned.
1670 */
1671
1672void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
1673{
1674        if (tail != skb) {
1675                skb->data_len += len;
1676                skb->len += len;
1677        }
1678        return skb_put(tail, len);
1679}
1680EXPORT_SYMBOL_GPL(pskb_put);
1681
1682/**
1683 *      skb_put - add data to a buffer
1684 *      @skb: buffer to use
1685 *      @len: amount of data to add
1686 *
1687 *      This function extends the used data area of the buffer. If this would
1688 *      exceed the total buffer size the kernel will panic. A pointer to the
1689 *      first byte of the extra data is returned.
1690 */
1691void *skb_put(struct sk_buff *skb, unsigned int len)
1692{
1693        void *tmp = skb_tail_pointer(skb);
1694        SKB_LINEAR_ASSERT(skb);
1695        skb->tail += len;
1696        skb->len  += len;
1697        if (unlikely(skb->tail > skb->end))
1698                skb_over_panic(skb, len, __builtin_return_address(0));
1699        return tmp;
1700}
1701EXPORT_SYMBOL(skb_put);
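
/* Illustrative sketch (editorial, not part of the original file): the
 * canonical way to build an outgoing packet is to allocate with room for the
 * headers, reserve that headroom, then skb_put() the payload and skb_push()
 * each header in turn ("hlen", "plen" and "data" are hypothetical):
 *
 *	skb = alloc_skb(hlen + plen, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);
 *	payload = skb_put(skb, plen);
 *	memcpy(payload, data, plen);
 *
 * skb_put() only advances skb->tail and skb->len; it does not initialize the
 * returned area (skb_put_zero() and skb_put_data() do).
 */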
1702
1703/**
1704 *      skb_push - add data to the start of a buffer
1705 *      @skb: buffer to use
1706 *      @len: amount of data to add
1707 *
1708 *      This function extends the used data area of the buffer at the buffer
1709 *      start. If this would exceed the total buffer headroom the kernel will
1710 *      panic. A pointer to the first byte of the extra data is returned.
1711 */
1712void *skb_push(struct sk_buff *skb, unsigned int len)
1713{
1714        skb->data -= len;
1715        skb->len  += len;
1716        if (unlikely(skb->data < skb->head))
1717                skb_under_panic(skb, len, __builtin_return_address(0));
1718        return skb->data;
1719}
1720EXPORT_SYMBOL(skb_push);
1721
1722/**
1723 *      skb_pull - remove data from the start of a buffer
1724 *      @skb: buffer to use
1725 *      @len: amount of data to remove
1726 *
1727 *      This function removes data from the start of a buffer, returning
1728 *      the memory to the headroom. A pointer to the next data in the buffer
1729 *      is returned. Once the data has been pulled future pushes will overwrite
1730 *      the old data.
1731 */
1732void *skb_pull(struct sk_buff *skb, unsigned int len)
1733{
1734        return skb_pull_inline(skb, len);
1735}
1736EXPORT_SYMBOL(skb_pull);
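
/* Illustrative sketch (editorial, not part of the original file): a receive
 * handler typically reads its header at skb->data and then pulls it so the
 * next layer only sees the payload ("struct myproto_hdr" and "drop" are
 * hypothetical):
 *
 *	struct myproto_hdr *hdr;
 *
 *	if (!pskb_may_pull(skb, sizeof(*hdr)))
 *		goto drop;
 *	hdr = (struct myproto_hdr *)skb->data;
 *	skb_pull(skb, sizeof(*hdr));
 *
 * pskb_may_pull() guarantees the header bytes are in the linear area before
 * they are dereferenced; plain skb_pull() assumes that is already the case.
 */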
1737
1738/**
1739 *      skb_trim - remove end from a buffer
1740 *      @skb: buffer to alter
1741 *      @len: new length
1742 *
1743 *      Cut the length of a buffer down by removing data from the tail. If
1744 *      the buffer is already under the length specified it is not modified.
1745 *      The skb must be linear.
1746 */
1747void skb_trim(struct sk_buff *skb, unsigned int len)
1748{
1749        if (skb->len > len)
1750                __skb_trim(skb, len);
1751}
1752EXPORT_SYMBOL(skb_trim);
1753
1754/* Trims skb to length len. It can change skb pointers.
1755 */
1756
1757int ___pskb_trim(struct sk_buff *skb, unsigned int len)
1758{
1759        struct sk_buff **fragp;
1760        struct sk_buff *frag;
1761        int offset = skb_headlen(skb);
1762        int nfrags = skb_shinfo(skb)->nr_frags;
1763        int i;
1764        int err;
1765
1766        if (skb_cloned(skb) &&
1767            unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
1768                return err;
1769
1770        i = 0;
1771        if (offset >= len)
1772                goto drop_pages;
1773
1774        for (; i < nfrags; i++) {
1775                int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
1776
1777                if (end < len) {
1778                        offset = end;
1779                        continue;
1780                }
1781
1782                skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
1783
1784drop_pages:
1785                skb_shinfo(skb)->nr_frags = i;
1786
1787                for (; i < nfrags; i++)
1788                        skb_frag_unref(skb, i);
1789
1790                if (skb_has_frag_list(skb))
1791                        skb_drop_fraglist(skb);
1792                goto done;
1793        }
1794
1795        for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
1796             fragp = &frag->next) {
1797                int end = offset + frag->len;
1798
1799                if (skb_shared(frag)) {
1800                        struct sk_buff *nfrag;
1801
1802                        nfrag = skb_clone(frag, GFP_ATOMIC);
1803                        if (unlikely(!nfrag))
1804                                return -ENOMEM;
1805
1806                        nfrag->next = frag->next;
1807                        consume_skb(frag);
1808                        frag = nfrag;
1809                        *fragp = frag;
1810                }
1811
1812                if (end < len) {
1813                        offset = end;
1814                        continue;
1815                }
1816
1817                if (end > len &&
1818                    unlikely((err = pskb_trim(frag, len - offset))))
1819                        return err;
1820
1821                if (frag->next)
1822                        skb_drop_list(&frag->next);
1823                break;
1824        }
1825
1826done:
1827        if (len > skb_headlen(skb)) {
1828                skb->data_len -= skb->len - len;
1829                skb->len       = len;
1830        } else {
1831                skb->len       = len;
1832                skb->data_len  = 0;
1833                skb_set_tail_pointer(skb, len);
1834        }
1835
1836        if (!skb->sk || skb->destructor == sock_edemux)
1837                skb_condense(skb);
1838        return 0;
1839}
1840EXPORT_SYMBOL(___pskb_trim);
1841
1842/* Note : use pskb_trim_rcsum() instead of calling this directly
1843 */
1844int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
1845{
1846        if (skb->ip_summed == CHECKSUM_COMPLETE) {
1847                int delta = skb->len - len;
1848
1849                skb->csum = csum_block_sub(skb->csum,
1850                                           skb_checksum(skb, len, delta, 0),
1851                                           len);
1852        }
1853        return __pskb_trim(skb, len);
1854}
1855EXPORT_SYMBOL(pskb_trim_rcsum_slow);
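
/* Illustrative sketch (editorial, not part of the original file): receive
 * paths normally go through pskb_trim_rcsum(), which only falls back to the
 * slow path above when skb->ip_summed is CHECKSUM_COMPLETE. For example,
 * trimming link-layer padding off a packet whose real length "tot_len"
 * (hypothetical) was read from the network header:
 *
 *	if (pskb_trim_rcsum(skb, tot_len))
 *		goto drop;
 *
 * This keeps skb->csum consistent with the shortened data, so the complete
 * checksum remains usable by the transport layer.
 */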
1856
1857/**
1858 *      __pskb_pull_tail - advance tail of skb header
1859 *      @skb: buffer to reallocate
1860 *      @delta: number of bytes to advance tail
1861 *
1862 *      This function only makes sense on a fragmented &sk_buff:
1863 *      it expands the header, moving its tail forward and copying the
1864 *      necessary data from the fragmented part.
1865 *
1866 *      The &sk_buff MUST have a reference count of 1.
1867 *
1868 *      Returns %NULL (and the &sk_buff does not change) if the pull failed,
1869 *      or the value of the new tail of the skb on success.
1870 *
1871 *      All pointers pointing into the skb header may change and must be
1872 *      reloaded after a call to this function.
1873 */
1874
1875/* Moves tail of skb head forward, copying data from fragmented part,
1876 * when it is necessary.
1877 * 1. It may fail due to malloc failure.
1878 * 2. It may change skb pointers.
1879 *
1880 * It is pretty complicated. Luckily, it is called only in exceptional cases.
1881 */
1882void *__pskb_pull_tail(struct sk_buff *skb, int delta)
1883{
1884        /* If the skb does not have enough free space at the tail, get a new
1885         * one plus 128 bytes for future expansions. If we have enough room
1886         * at the tail, reallocate without expansion only if the skb is cloned.
1887         */
1888        int i, k, eat = (skb->tail + delta) - skb->end;
1889
1890        if (eat > 0 || skb_cloned(skb)) {
1891                if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
1892                                     GFP_ATOMIC))
1893                        return NULL;
1894        }
1895
1896        BUG_ON(skb_copy_bits(skb, skb_headlen(skb),
1897                             skb_tail_pointer(skb), delta));
1898
1899        /* Optimization: no fragments, so no reason to pre-estimate the
1900         * size of pulled pages. Superb.
1901         */
1902        if (!skb_has_frag_list(skb))
1903                goto pull_pages;
1904
1905        /* Estimate size of pulled pages. */
1906        eat = delta;
1907        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1908                int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
1909
1910                if (size >= eat)
1911                        goto pull_pages;
1912                eat -= size;
1913        }
1914
1915        /* If we need to update the frag list, we are in trouble.
1916         * Certainly, it is possible to add an offset to the skb data,
1917         * but taking into account that pulling is expected to
1918         * be a very rare operation, it is worth fighting against
1919         * further bloating of the skb head and crucifying ourselves here instead.
1920         * Pure masochism, indeed. 8)8)
1921         */
1922        if (eat) {
1923                struct sk_buff *list = skb_shinfo(skb)->frag_list;
1924                struct sk_buff *clone = NULL;
1925                struct sk_buff *insp = NULL;
1926
1927                do {
1928                        BUG_ON(!list);
1929
1930                        if (list->len <= eat) {
1931                                /* Eaten as whole. */
1932                                eat -= list->len;
1933                                list = list->next;
1934                                insp = list;
1935                        } else {
1936                                /* Eaten partially. */
1937
1938                                if (skb_shared(list)) {
1939                                        /* Sucks! We need to fork list. :-( */
1940                                        clone = skb_clone(list, GFP_ATOMIC);
1941                                        if (!clone)
1942                                                return NULL;
1943                                        insp = list->next;
1944                                        list = clone;
1945                                } else {
1946                                        /* This may be pulled without
1947                                         * problems. */
1948                                        insp = list;
1949                                }
1950                                if (!pskb_pull(list, eat)) {
1951                                        kfree_skb(clone);
1952                                        return NULL;
1953                                }
1954                                break;
1955                        }
1956                } while (eat);
1957
1958                /* Free pulled out fragments. */
1959                while ((list = skb_shinfo(skb)->frag_list) != insp) {
1960                        skb_shinfo(skb)->frag_list = list->next;
1961                        kfree_skb(list);
1962                }
1963                /* And insert new clone at head. */
1964                if (clone) {
1965                        clone->next = list;
1966                        skb_shinfo(skb)->frag_list = clone;
1967                }
1968        }
1969        /* Success! Now we may commit changes to skb data. */
1970
1971pull_pages:
1972        eat = delta;
1973        k = 0;
1974        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1975                int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
1976
1977                if (size <= eat) {
1978                        skb_frag_unref(skb, i);
1979                        eat -= size;
1980                } else {
1981                        skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
1982                        if (eat) {
1983                                skb_shinfo(skb)->frags[k].page_offset += eat;
1984                                skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
1985                                if (!i)
1986                                        goto end;
1987                                eat = 0;
1988                        }
1989                        k++;
1990                }
1991        }
1992        skb_shinfo(skb)->nr_frags = k;
1993
1994end:
1995        skb->tail     += delta;
1996        skb->data_len -= delta;
1997
1998        if (!skb->data_len)
1999                skb_zcopy_clear(skb, false);
2000
2001        return skb_tail_pointer(skb);
2002}
2003EXPORT_SYMBOL(__pskb_pull_tail);
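
/* Illustrative sketch (editorial, not part of the original file): callers
 * rarely use __pskb_pull_tail() directly; it is the slow path behind
 * pskb_may_pull() when the requested bytes are not yet linear. Assuming the
 * transport header offset has been set:
 *
 *	struct udphdr *uh;
 *
 *	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
 *		goto drop;
 *	uh = udp_hdr(skb);
 *
 * After a successful pull, cached header pointers must be re-read, since the
 * head may have been reallocated by pskb_expand_head().
 */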
2004
2005/**
2006 *      skb_copy_bits - copy bits from skb to kernel buffer
2007 *      @skb: source skb
2008 *      @offset: offset in source
2009 *      @to: destination buffer
2010 *      @len: number of bytes to copy
2011 *
2012 *      Copy the specified number of bytes from the source skb to the
2013 *      destination buffer.
2014 *
2015 *      CAUTION ! :
2016 *              If its prototype is ever changed,
2017 *              check arch/{*}/net/{*}.S files,
2018 *              since it is called from BPF assembly code.
2019 */
2020int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
2021{
2022        int start = skb_headlen(skb);
2023        struct sk_buff *frag_iter;
2024        int i, copy;
2025
2026        if (offset > (int)skb->len - len)
2027                goto fault;
2028
2029        /* Copy header. */
2030        if ((copy = start - offset) > 0) {
2031                if (copy > len)
2032                        copy = len;
2033                skb_copy_from_linear_data_offset(skb, offset, to, copy);
2034                if ((len -= copy) == 0)
2035                        return 0;
2036                offset += copy;
2037                to     += copy;
2038        }
2039
2040        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2041                int end;
2042                skb_frag_t *f = &skb_shinfo(skb)->frags[i];
2043
2044                WARN_ON(start > offset + len);
2045
2046                end = start + skb_frag_size(f);
2047                if ((copy = end - offset) > 0) {
2048                        u32 p_off, p_len, copied;
2049                        struct page *p;
2050                        u8 *vaddr;
2051
2052                        if (copy > len)
2053                                copy = len;
2054
2055                        skb_frag_foreach_page(f,
2056                                              f->page_offset + offset - start,
2057                                              copy, p, p_off, p_len, copied) {
2058                                vaddr = kmap_atomic(p);
2059                                memcpy(to + copied, vaddr + p_off, p_len);
2060                                kunmap_atomic(vaddr);
2061                        }
2062
2063                        if ((len -= copy) == 0)
2064                                return 0;
2065                        offset += copy;
2066                        to     += copy;
2067                }
2068                start = end;
2069        }
2070
2071        skb_walk_frags(skb, frag_iter) {
2072                int end;
2073
2074                WARN_ON(start > offset + len);
2075
2076                end = start + frag_iter->len;
2077                if ((copy = end - offset) > 0) {
2078                        if (copy > len)
2079                                copy = len;
2080                        if (skb_copy_bits(frag_iter, offset - start, to, copy))
2081                                goto fault;
2082                        if ((len -= copy) == 0)
2083                                return 0;
2084                        offset += copy;
2085                        to     += copy;
2086                }
2087                start = end;
2088        }
2089
2090        if (!len)
2091                return 0;
2092
2093fault:
2094        return -EFAULT;
2095}
2096EXPORT_SYMBOL(skb_copy_bits);
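
/* Illustrative sketch (editorial, not part of the original file):
 * skb_copy_bits() is handy when a header may live partly in fragments and
 * forcing a pull is undesirable, e.g. copying it into a stack buffer
 * ("off" and "drop" are hypothetical):
 *
 *	struct tcphdr th;
 *
 *	if (skb_copy_bits(skb, off, &th, sizeof(th)) < 0)
 *		goto drop;
 *
 * skb_header_pointer() wraps this pattern and skips the copy entirely when
 * the requested bytes are already linear.
 */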
2097
2098/*
2099 * Callback from splice_to_pipe(), if we need to release some pages
2100 * at the end of the spd in case we errored out while filling the pipe.
2101 */
2102static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
2103{
2104        put_page(spd->pages[i]);
2105}
2106
2107static struct page *linear_to_page(struct page *page, unsigned int *len,
2108                                   unsigned int *offset,
2109                                   struct sock *sk)
2110{
2111        struct page_frag *pfrag = sk_page_frag(sk);
2112
2113        if (!sk_page_frag_refill(sk, pfrag))
2114                return NULL;
2115
2116        *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
2117
2118        memcpy(page_address(pfrag->page) + pfrag->offset,
2119               page_address(page) + *offset, *len);
2120        *offset = pfrag->offset;
2121        pfrag->offset += *len;
2122
2123        return pfrag->page;
2124}
2125
2126static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
2127                             struct page *page,
2128                             unsigned int offset)
2129{
2130        return  spd->nr_pages &&
2131                spd->pages[spd->nr_pages - 1] == page &&
2132                (spd->partial[spd->nr_pages - 1].offset +
2133                 spd->partial[spd->nr_pages - 1].len == offset);
2134}
2135
2136/*
2137 * Fill page/offset/length into spd, if it can hold more pages.
2138 */
2139static bool spd_fill_page(struct splice_pipe_desc *spd,
2140                          struct pipe_inode_info *pipe, struct page *page,
2141                          unsigned int *len, unsigned int offset,
2142                          bool linear,
2143                          struct sock *sk)
2144{
2145        if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
2146                return true;
2147
2148        if (linear) {
2149                page = linear_to_page(page, len, &offset, sk);
2150                if (!page)
2151                        return true;
2152        }
2153        if (spd_can_coalesce(spd, page, offset)) {
2154                spd->partial[spd->nr_pages - 1].len += *len;
2155                return false;
2156        }
2157        get_page(page);
2158        spd->pages[spd->nr_pages] = page;
2159        spd->partial[spd->nr_pages].len = *len;
2160        spd->partial[spd->nr_pages].offset = offset;
2161        spd->nr_pages++;
2162
2163        return false;
2164}
2165
2166static bool __splice_segment(struct page *page, unsigned int poff,
2167                             unsigned int plen, unsigned int *off,
2168                             unsigned int *len,
2169                             struct splice_pipe_desc *spd, bool linear,
2170                             struct sock *sk,
2171                             struct pipe_inode_info *pipe)
2172{
2173        if (!*len)
2174                return true;
2175
2176        /* skip this segment if already processed */
2177        if (*off >= plen) {
2178                *off -= plen;
2179                return false;
2180        }
2181
2182        /* ignore any bits we already processed */
2183        poff += *off;
2184        plen -= *off;
2185        *off = 0;
2186
2187        do {
2188                unsigned int flen = min(*len, plen);
2189
2190                if (spd_fill_page(spd, pipe, page, &flen, poff,
2191                                  linear, sk))
2192                        return true;
2193                poff += flen;
2194                plen -= flen;
2195                *len -= flen;
2196        } while (*len && plen);
2197
2198        return false;
2199}
2200
2201/*
2202 * Map linear and fragment data from the skb to spd. It reports true if the
2203 * pipe is full or if we already spliced the requested length.
2204 */
2205static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
2206                              unsigned int *offset, unsigned int *len,
2207                              struct splice_pipe_desc *spd, struct sock *sk)
2208{
2209        int seg;
2210        struct sk_buff *iter;
2211
2212        /* map the linear part :
2213         * If skb->head_frag is set, this 'linear' part is backed by a
2214         * fragment, and if the head is not shared with any clones then
2215         * we can avoid a copy since we own the head portion of this page.
2216         */
2217        if (__splice_segment(virt_to_page(skb->data),
2218                             (unsigned long) skb->data & (PAGE_SIZE - 1),
2219                             skb_headlen(skb),
2220                             offset, len, spd,
2221                             skb_head_is_locked(skb),
2222                             sk, pipe))
2223                return true;
2224
2225        /*
2226         * then map the fragments
2227         */
2228        for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
2229                const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
2230
2231                if (__splice_segment(skb_frag_page(f),
2232                                     f->page_offset, skb_frag_size(f),
2233                                     offset, len, spd, false, sk, pipe))
2234                        return true;
2235        }
2236
2237        skb_walk_frags(skb, iter) {
2238                if (*offset >= iter->len) {
2239                        *offset -= iter->len;
2240                        continue;
2241                }
2242                /* __skb_splice_bits() only fails if the output has no room
2243                 * left, so no point in going over the frag_list for the error
2244                 * case.
2245                 */
2246                if (__skb_splice_bits(iter, pipe, offset, len, spd, sk))
2247                        return true;
2248        }
2249
2250        return false;
2251}
2252
2253/*
2254 * Map data from the skb to a pipe. Should handle both the linear part,
2255 * the fragments, and the frag list.
2256 */
2257int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
2258                    struct pipe_inode_info *pipe, unsigned int tlen,
2259                    unsigned int flags)
2260{
2261        struct partial_page partial[MAX_SKB_FRAGS];
2262        struct page *pages[MAX_SKB_FRAGS];
2263        struct splice_pipe_desc spd = {
2264                .pages = pages,
2265                .partial = partial,
2266                .nr_pages_max = MAX_SKB_FRAGS,
2267                .ops = &nosteal_pipe_buf_ops,
2268                .spd_release = sock_spd_release,
2269        };
2270        int ret = 0;
2271
2272        __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk);
2273
2274        if (spd.nr_pages)
2275                ret = splice_to_pipe(pipe, &spd);
2276
2277        return ret;
2278}
2279EXPORT_SYMBOL_GPL(skb_splice_bits);
2280
2281/* Send skb data on a socket. Socket must be locked. */
2282int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
2283                         int len)
2284{
2285        unsigned int orig_len = len;
2286        struct sk_buff *head = skb;
2287        unsigned short fragidx;
2288        int slen, ret;
2289
2290do_frag_list:
2291
2292        /* Deal with head data */
2293        while (offset < skb_headlen(skb) && len) {
2294                struct kvec kv;
2295                struct msghdr msg;
2296
2297                slen = min_t(int, len, skb_headlen(skb) - offset);
2298                kv.iov_base = skb->data + offset;
2299                kv.iov_len = slen;
2300                memset(&msg, 0, sizeof(msg));
2301
2302                ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen);
2303                if (ret <= 0)
2304                        goto error;
2305
2306                offset += ret;
2307                len -= ret;
2308        }
2309
2310        /* All the data was skb head? */
2311        if (!len)
2312                goto out;
2313
2314        /* Make offset relative to start of frags */
2315        offset -= skb_headlen(skb);
2316
2317        /* Find where we are in frag list */
2318        for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
2319                skb_frag_t *frag  = &skb_shinfo(skb)->frags[fragidx];
2320
2321                if (offset < frag->size)
2322                        break;
2323
2324                offset -= frag->size;
2325        }
2326
2327        for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
2328                skb_frag_t *frag  = &skb_shinfo(skb)->frags[fragidx];
2329
2330                slen = min_t(size_t, len, frag->size - offset);
2331
2332                while (slen) {
2333                        ret = kernel_sendpage_locked(sk, frag->page.p,
2334                                                     frag->page_offset + offset,
2335                                                     slen, MSG_DONTWAIT);
2336                        if (ret <= 0)
2337                                goto error;
2338
2339                        len -= ret;
2340                        offset += ret;
2341                        slen -= ret;
2342                }
2343
2344                offset = 0;
2345        }
2346
2347        if (len) {
2348                /* Process any frag lists */
2349
2350                if (skb == head) {
2351                        if (skb_has_frag_list(skb)) {
2352                                skb = skb_shinfo(skb)->frag_list;
2353                                goto do_frag_list;
2354                        }
2355                } else if (skb->next) {
2356                        skb = skb->next;
2357                        goto do_frag_list;
2358                }
2359        }
2360
2361out:
2362        return orig_len - len;
2363
2364error:
2365        return orig_len == len ? ret : orig_len - len;
2366}
2367EXPORT_SYMBOL_GPL(skb_send_sock_locked);
2368
2369/* Send skb data on a socket. */
2370int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len)
2371{
2372        int ret = 0;
2373
2374        lock_sock(sk);
2375        ret = skb_send_sock_locked(sk, skb, offset, len);
2376        release_sock(sk);
2377
2378        return ret;
2379}
2380EXPORT_SYMBOL_GPL(skb_send_sock);
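
/* Illustrative sketch (editorial, not part of the original file): an
 * in-kernel user that owns a connected socket can push an skb's contents out
 * without first flattening it into a contiguous buffer ("sk" is a
 * hypothetical kernel-side struct sock *):
 *
 *	ret = skb_send_sock(sk, skb, 0, skb->len);
 *	if (ret < 0)
 *		goto error;
 *
 * The head is sent with kernel_sendmsg_locked() and the frags with
 * kernel_sendpage_locked(), so paged data is handed to the protocol by
 * reference where possible.
 */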
2381
2382/**
2383 *      skb_store_bits - store bits from kernel buffer to skb
2384 *      @skb: destination buffer
2385 *      @offset: offset in destination
2386 *      @from: source buffer
2387 *      @len: number of bytes to copy
2388 *
2389 *      Copy the specified number of bytes from the source buffer to the
2390 *      destination skb.  This function handles all the messy bits of
2391 *      traversing fragment lists and such.
2392 */
2393
2394int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
2395{
2396        int start = skb_headlen(skb);
2397        struct sk_buff *frag_iter;
2398        int i, copy;
2399
2400        if (offset > (int)skb->len - len)
2401                goto fault;
2402
2403        if ((copy = start - offset) > 0) {
2404                if (copy > len)
2405                        copy = len;
2406                skb_copy_to_linear_data_offset(skb, offset, from, copy);
2407                if ((len -= copy) == 0)
2408                        return 0;
2409                offset += copy;
2410                from += copy;
2411        }
2412
2413        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2414                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2415                int end;
2416
2417                WARN_ON(start > offset + len);
2418
2419                end = start + skb_frag_size(frag);
2420                if ((copy = end - offset) > 0) {
2421                        u32 p_off, p_len, copied;
2422                        struct page *p;
2423                        u8 *vaddr;
2424
2425                        if (copy > len)
2426                                copy = len;
2427
2428                        skb_frag_foreach_page(frag,
2429                                              frag->page_offset + offset - start,
2430                                              copy, p, p_off, p_len, copied) {
2431                                vaddr = kmap_atomic(p);
2432                                memcpy(vaddr + p_off, from + copied, p_len);
2433                                kunmap_atomic(vaddr);
2434                        }
2435
2436                        if ((len -= copy) == 0)
2437                                return 0;
2438                        offset += copy;
2439                        from += copy;
2440                }
2441                start = end;
2442        }
2443
2444        skb_walk_frags(skb, frag_iter) {
2445                int end;
2446
2447                WARN_ON(start > offset + len);
2448
2449                end = start + frag_iter->len;
2450                if ((copy = end - offset) > 0) {
2451                        if (copy > len)
2452                                copy = len;
2453                        if (skb_store_bits(frag_iter, offset - start,
2454                                           from, copy))
2455                                goto fault;
2456                        if ((len -= copy) == 0)
2457                                return 0;
2458                        offset += copy;
2459                        from += copy;
2460                }
2461                start = end;
2462        }
2463        if (!len)
2464                return 0;
2465
2466fault:
2467        return -EFAULT;
2468}
2469EXPORT_SYMBOL(skb_store_bits);
2470
2471/* Checksum skb data. */
2472__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
2473                      __wsum csum, const struct skb_checksum_ops *ops)
2474{
2475        int start = skb_headlen(skb);
2476        int i, copy = start - offset;
2477        struct sk_buff *frag_iter;
2478        int pos = 0;
2479
2480        /* Checksum header. */
2481        if (copy > 0) {
2482                if (copy > len)
2483                        copy = len;
2484                csum = ops->update(skb->data + offset, copy, csum);
2485                if ((len -= copy) == 0)
2486                        return csum;
2487                offset += copy;
2488                pos     = copy;
2489        }
2490
2491        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2492                int end;
2493                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2494
2495                WARN_ON(start > offset + len);
2496
2497                end = start + skb_frag_size(frag);
2498                if ((copy = end - offset) > 0) {
2499                        u32 p_off, p_len, copied;
2500                        struct page *p;
2501                        __wsum csum2;
2502                        u8 *vaddr;
2503
2504                        if (copy > len)
2505                                copy = len;
2506
2507                        skb_frag_foreach_page(frag,
2508                                              frag->page_offset + offset - start,
2509                                              copy, p, p_off, p_len, copied) {
2510                                vaddr = kmap_atomic(p);
2511                                csum2 = ops->update(vaddr + p_off, p_len, 0);
2512                                kunmap_atomic(vaddr);
2513                                csum = ops->combine(csum, csum2, pos, p_len);
2514                                pos += p_len;
2515                        }
2516
2517                        if (!(len -= copy))
2518                                return csum;
2519                        offset += copy;
2520                }
2521                start = end;
2522        }
2523
2524        skb_walk_frags(skb, frag_iter) {
2525                int end;
2526
2527                WARN_ON(start > offset + len);
2528
2529                end = start + frag_iter->len;
2530                if ((copy = end - offset) > 0) {
2531                        __wsum csum2;
2532                        if (copy > len)
2533                                copy = len;
2534                        csum2 = __skb_checksum(frag_iter, offset - start,
2535                                               copy, 0, ops);
2536                        csum = ops->combine(csum, csum2, pos, copy);
2537                        if ((len -= copy) == 0)
2538                                return csum;
2539                        offset += copy;
2540                        pos    += copy;
2541                }
2542                start = end;
2543        }
2544        BUG_ON(len);
2545
2546        return csum;
2547}
2548EXPORT_SYMBOL(__skb_checksum);
2549
2550__wsum skb_checksum(const struct sk_buff *skb, int offset,
2551                    int len, __wsum csum)
2552{
2553        const struct skb_checksum_ops ops = {
2554                .update  = csum_partial_ext,
2555                .combine = csum_block_add_ext,
2556        };
2557
2558        return __skb_checksum(skb, offset, len, csum, &ops);
2559}
2560EXPORT_SYMBOL(skb_checksum);
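
/* Illustrative sketch (editorial, not part of the original file): a typical
 * use is folding the checksum of the transport payload into a pseudo-header
 * check, roughly what the UDP/TCP receive paths do ("off", "saddr", "daddr"
 * and "proto" are hypothetical):
 *
 *	__wsum csum;
 *
 *	csum = skb_checksum(skb, off, skb->len - off, 0);
 *	if (csum_tcpudp_magic(saddr, daddr, skb->len - off, proto, csum))
 *		goto csum_error;
 *
 * csum_tcpudp_magic() returns zero when the folded result checks out.
 */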
2561
2562/* Both of above in one bottle. */
2563
2564__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
2565                                    u8 *to, int len, __wsum csum)
2566{
2567        int start = skb_headlen(skb);
2568        int i, copy = start - offset;
2569        struct sk_buff *frag_iter;
2570        int pos = 0;
2571
2572        /* Copy header. */
2573        if (copy > 0) {
2574                if (copy > len)
2575                        copy = len;
2576                csum = csum_partial_copy_nocheck(skb->data + offset, to,
2577                                                 copy, csum);
2578                if ((len -= copy) == 0)
2579                        return csum;
2580                offset += copy;
2581                to     += copy;
2582                pos     = copy;
2583        }
2584
2585        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2586                int end;
2587
2588                WARN_ON(start > offset + len);
2589
2590                end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
2591                if ((copy = end - offset) > 0) {
2592                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2593                        u32 p_off, p_len, copied;
2594                        struct page *p;
2595                        __wsum csum2;
2596                        u8 *vaddr;
2597
2598                        if (copy > len)
2599                                copy = len;
2600
2601                        skb_frag_foreach_page(frag,
2602                                              frag->page_offset + offset - start,
2603                                              copy, p, p_off, p_len, copied) {
2604                                vaddr = kmap_atomic(p);
2605                                csum2 = csum_partial_copy_nocheck(vaddr + p_off,
2606                                                                  to + copied,
2607                                                                  p_len, 0);
2608                                kunmap_atomic(vaddr);
2609                                csum = csum_block_add(csum, csum2, pos);
2610                                pos += p_len;
2611                        }
2612
2613                        if (!(len -= copy))
2614                                return csum;
2615                        offset += copy;
2616                        to     += copy;
2617                }
2618                start = end;
2619        }
2620
2621        skb_walk_frags(skb, frag_iter) {
2622                __wsum csum2;
2623                int end;
2624
2625                WARN_ON(start > offset + len);
2626
2627                end = start + frag_iter->len;
2628                if ((copy = end - offset) > 0) {
2629                        if (copy > len)
2630                                copy = len;
2631                        csum2 = skb_copy_and_csum_bits(frag_iter,
2632                                                       offset - start,
2633                                                       to, copy, 0);
2634                        csum = csum_block_add(csum, csum2, pos);
2635                        if ((len -= copy) == 0)
2636                                return csum;
2637                        offset += copy;
2638                        to     += copy;
2639                        pos    += copy;
2640                }
2641                start = end;
2642        }
2643        BUG_ON(len);
2644        return csum;
2645}
2646EXPORT_SYMBOL(skb_copy_and_csum_bits);
2647
2648static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum)
2649{
2650        net_warn_ratelimited(
2651                "%s: attempt to compute crc32c without libcrc32c.ko\n",
2652                __func__);
2653        return 0;
2654}
2655
2656static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2,
2657                                       int offset, int len)
2658{
2659        net_warn_ratelimited(
2660                "%s: attempt to compute crc32c without libcrc32c.ko\n",
2661                __func__);
2662        return 0;
2663}
2664
2665static const struct skb_checksum_ops default_crc32c_ops = {
2666        .update  = warn_crc32c_csum_update,
2667        .combine = warn_crc32c_csum_combine,
2668};
2669
2670const struct skb_checksum_ops *crc32c_csum_stub __read_mostly =
2671        &default_crc32c_ops;
2672EXPORT_SYMBOL(crc32c_csum_stub);
2673
2674/**
2675 *      skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
2676 *      @from: source buffer
2677 *
2678 *      Calculates the amount of linear headroom needed in the 'to' skb passed
2679 *      into skb_zerocopy().
2680 */
2681unsigned int
2682skb_zerocopy_headlen(const struct sk_buff *from)
2683{
2684        unsigned int hlen = 0;
2685
2686        if (!from->head_frag ||
2687            skb_headlen(from) < L1_CACHE_BYTES ||
2688            skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
2689                hlen = skb_headlen(from);
2690
2691        if (skb_has_frag_list(from))
2692                hlen = from->len;
2693
2694        return hlen;
2695}
2696EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
2697
2698/**
2699 *      skb_zerocopy - Zero copy skb to skb
2700 *      @to: destination buffer
2701 *      @from: source buffer
2702 *      @len: number of bytes to copy from source buffer
2703 *      @hlen: size of linear headroom in destination buffer
2704 *
2705 *      Copies up to `len` bytes from `from` to `to` by creating references
2706 *      to the frags in the source buffer.
2707 *
2708 *      The `hlen` as calculated by skb_zerocopy_headlen() specifies the
2709 *      headroom in the `to` buffer.
2710 *
2711 *      Return value:
2712 *      0: everything is OK
2713 *      -ENOMEM: couldn't orphan frags of @from due to lack of memory
2714 *      -EFAULT: skb_copy_bits() found some problem with skb geometry
2715 */
2716int
2717skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
2718{
2719        int i, j = 0;
2720        int plen = 0; /* length of skb->head fragment */
2721        int ret;
2722        struct page *page;
2723        unsigned int offset;
2724
2725        BUG_ON(!from->head_frag && !hlen);
2726
2727        /* don't bother with small payloads */
2728        if (len <= skb_tailroom(to))
2729                return skb_copy_bits(from, 0, skb_put(to, len), len);
2730
2731        if (hlen) {
2732                ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
2733                if (unlikely(ret))
2734                        return ret;
2735                len -= hlen;
2736        } else {
2737                plen = min_t(int, skb_headlen(from), len);
2738                if (plen) {
2739                        page = virt_to_head_page(from->head);
2740                        offset = from->data - (unsigned char *)page_address(page);
2741                        __skb_fill_page_desc(to, 0, page, offset, plen);
2742                        get_page(page);
2743                        j = 1;
2744                        len -= plen;
2745                }
2746        }
2747
2748        to->truesize += len + plen;
2749        to->len += len + plen;
2750        to->data_len += len + plen;
2751
2752        if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
2753                skb_tx_error(from);
2754                return -ENOMEM;
2755        }
2756        skb_zerocopy_clone(to, from, GFP_ATOMIC);
2757
2758        for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
2759                if (!len)
2760                        break;
2761                skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
2762                skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
2763                len -= skb_shinfo(to)->frags[j].size;
2764                skb_frag_ref(to, j);
2765                j++;
2766        }
2767        skb_shinfo(to)->nr_frags = j;
2768
2769        return 0;
2770}
2771EXPORT_SYMBOL_GPL(skb_zerocopy);
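
/* Illustrative sketch (editorial, not part of the original file): the usual
 * pairing with skb_zerocopy_headlen(), similar to what nfnetlink_queue does
 * when handing a packet to userspace ("copy_len" and "drop" are hypothetical):
 *
 *	unsigned int hlen = skb_zerocopy_headlen(from);
 *	struct sk_buff *to = alloc_skb(hlen, GFP_ATOMIC);
 *
 *	if (!to)
 *		goto drop;
 *	if (skb_zerocopy(to, from, copy_len, hlen)) {
 *		kfree_skb(to);
 *		goto drop;
 *	}
 *
 * At most hlen bytes are actually copied; the remainder is shared by taking
 * references on the source frags.
 */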
2772
2773void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
2774{
2775        __wsum csum;
2776        long csstart;
2777
2778        if (skb->ip_summed == CHECKSUM_PARTIAL)
2779                csstart = skb_checksum_start_offset(skb);
2780        else
2781                csstart = skb_headlen(skb);
2782
2783        BUG_ON(csstart > skb_headlen(skb));
2784
2785        skb_copy_from_linear_data(skb, to, csstart);
2786
2787        csum = 0;
2788        if (csstart != skb->len)
2789                csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
2790                                              skb->len - csstart, 0);
2791
2792        if (skb->ip_summed == CHECKSUM_PARTIAL) {
2793                long csstuff = csstart + skb->csum_offset;
2794
2795                *((__sum16 *)(to + csstuff)) = csum_fold(csum);
2796        }
2797}
2798EXPORT_SYMBOL(skb_copy_and_csum_dev);
2799
2800/**
2801 *      skb_dequeue - remove from the head of the queue
2802 *      @list: list to dequeue from
2803 *
2804 *      Remove the head of the list. The list lock is taken so the function
2805 *      may be used safely with other locking list functions. The head item is
2806 *      returned or %NULL if the list is empty.
2807 */
2808
2809struct sk_buff *skb_dequeue(struct sk_buff_head *list)
2810{
2811        unsigned long flags;
2812        struct sk_buff *result;
2813
2814        spin_lock_irqsave(&list->lock, flags);
2815        result = __skb_dequeue(list);
2816        spin_unlock_irqrestore(&list->lock, flags);
2817        return result;
2818}
2819EXPORT_SYMBOL(skb_dequeue);
2820
2821/**
2822 *      skb_dequeue_tail - remove from the tail of the queue
2823 *      @list: list to dequeue from
2824 *
2825 *      Remove the tail of the list. The list lock is taken so the function
2826 *      may be used safely with other locking list functions. The tail item is
2827 *      returned or %NULL if the list is empty.
2828 */
2829struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
2830{
2831        unsigned long flags;
2832        struct sk_buff *result;
2833
2834        spin_lock_irqsave(&list->lock, flags);
2835        result = __skb_dequeue_tail(list);
2836        spin_unlock_irqrestore(&list->lock, flags);
2837        return result;
2838}
2839EXPORT_SYMBOL(skb_dequeue_tail);
2840
2841/**
2842 *      skb_queue_purge - empty a list
2843 *      @list: list to empty
2844 *
2845 *      Delete all buffers on an &sk_buff list. Each buffer is removed from
2846 *      the list and one reference dropped. This function takes the list
2847 *      lock and is atomic with respect to other list locking functions.
2848 */
2849void skb_queue_purge(struct sk_buff_head *list)
2850{
2851        struct sk_buff *skb;
2852        while ((skb = skb_dequeue(list)) != NULL)
2853                kfree_skb(skb);
2854}
2855EXPORT_SYMBOL(skb_queue_purge);
2856
2857/**
2858 *      skb_rbtree_purge - empty a skb rbtree
2859 *      @root: root of the rbtree to empty
2860 *      Return value: the sum of truesizes of all purged skbs.
2861 *
2862 *      Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
2863 *      the rbtree and one reference dropped. This function does not take
2864 *      any lock. Synchronization should be handled by the caller (e.g., TCP
2865 *      out-of-order queue is protected by the socket lock).
2866 */
2867unsigned int skb_rbtree_purge(struct rb_root *root)
2868{
2869        struct rb_node *p = rb_first(root);
2870        unsigned int sum = 0;
2871
2872        while (p) {
2873                struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
2874
2875                p = rb_next(p);
2876                rb_erase(&skb->rbnode, root);
2877                sum += skb->truesize;
2878                kfree_skb(skb);
2879        }
2880        return sum;
2881}
2882
2883/**
2884 *      skb_queue_head - queue a buffer at the list head
2885 *      @list: list to use
2886 *      @newsk: buffer to queue
2887 *
2888 *      Queue a buffer at the start of the list. This function takes the
2889 *      list lock and can be used safely with other locking &sk_buff
2890 *      functions.
2891 *
2892 *      A buffer cannot be placed on two lists at the same time.
2893 */
2894void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
2895{
2896        unsigned long flags;
2897
2898        spin_lock_irqsave(&list->lock, flags);
2899        __skb_queue_head(list, newsk);
2900        spin_unlock_irqrestore(&list->lock, flags);
2901}
2902EXPORT_SYMBOL(skb_queue_head);
2903
2904/**
2905 *      skb_queue_tail - queue a buffer at the list tail
2906 *      @list: list to use
2907 *      @newsk: buffer to queue
2908 *
2909 *      Queue a buffer at the tail of the list. This function takes the
2910 *      list lock and can be used safely with other locking &sk_buff
2911 *      functions.
2912 *
2913 *      A buffer cannot be placed on two lists at the same time.
2914 */
2915void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
2916{
2917        unsigned long flags;
2918
2919        spin_lock_irqsave(&list->lock, flags);
2920        __skb_queue_tail(list, newsk);
2921        spin_unlock_irqrestore(&list->lock, flags);
2922}
2923EXPORT_SYMBOL(skb_queue_tail);
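
/* Illustrative sketch (editorial, not part of the original file):
 * skb_queue_tail() and skb_dequeue() form the usual producer/consumer pair
 * around a backlog queue ("rxq" and process() are hypothetical):
 *
 *	skb_queue_head_init(&rxq);
 *	...
 *	skb_queue_tail(&rxq, skb);
 *	...
 *	while ((skb = skb_dequeue(&rxq)) != NULL)
 *		process(skb);
 *
 * Both helpers take the queue lock with interrupts disabled, so no extra
 * locking is needed as long as every access goes through the locked API.
 */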
2924
2925/**
2926 *      skb_unlink      -       remove a buffer from a list
2927 *      @skb: buffer to remove
2928 *      @list: list to use
2929 *
2930 *      Remove a packet from a list. The list locks are taken and this
2931 *      function is atomic with respect to other list-locked calls.
2932 *
2933 *      You must know what list the SKB is on.
2934 */
2935void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
2936{
2937        unsigned long flags;
2938
2939        spin_lock_irqsave(&list->lock, flags);
2940        __skb_unlink(skb, list);
2941        spin_unlock_irqrestore(&list->lock, flags);
2942}
2943EXPORT_SYMBOL(skb_unlink);
2944
2945/**
2946 *      skb_append      -       append a buffer
2947 *      @old: buffer to insert after
2948 *      @newsk: buffer to insert
2949 *      @list: list to use
2950 *
2951 *      Place a packet after a given packet in a list. The list locks are taken
2952 *      and this function is atomic with respect to other list locked calls.
2953 *      A buffer cannot be placed on two lists at the same time.
2954 */
2955void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2956{
2957        unsigned long flags;
2958
2959        spin_lock_irqsave(&list->lock, flags);
2960        __skb_queue_after(list, old, newsk);
2961        spin_unlock_irqrestore(&list->lock, flags);
2962}
2963EXPORT_SYMBOL(skb_append);
2964
2965/**
2966 *      skb_insert      -       insert a buffer
2967 *      @old: buffer to insert before
2968 *      @newsk: buffer to insert
2969 *      @list: list to use
2970 *
2971 *      Place a packet before a given packet in a list. The list locks are
2972 *      taken and this function is atomic with respect to other list locked
2973 *      calls.
2974 *
2975 *      A buffer cannot be placed on two lists at the same time.
2976 */
2977void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2978{
2979        unsigned long flags;
2980
2981        spin_lock_irqsave(&list->lock, flags);
2982        __skb_insert(newsk, old->prev, old, list);
2983        spin_unlock_irqrestore(&list->lock, flags);
2984}
2985EXPORT_SYMBOL(skb_insert);
2986
2987static inline void skb_split_inside_header(struct sk_buff *skb,
2988                                           struct sk_buff* skb1,
2989                                           const u32 len, const int pos)
2990{
2991        int i;
2992
2993        skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
2994                                         pos - len);
2995        /* And move data appendix as is. */
2996        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
2997                skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
2998
2999        skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
3000        skb_shinfo(skb)->nr_frags  = 0;
3001        skb1->data_len             = skb->data_len;
3002        skb1->len                  += skb1->data_len;
3003        skb->data_len              = 0;
3004        skb->len                   = len;
3005        skb_set_tail_pointer(skb, len);
3006}
3007
3008static inline void skb_split_no_header(struct sk_buff *skb,
3009                                       struct sk_buff* skb1,
3010                                       const u32 len, int pos)
3011{
3012        int i, k = 0;
3013        const int nfrags = skb_shinfo(skb)->nr_frags;
3014
3015        skb_shinfo(skb)->nr_frags = 0;
3016        skb1->len                 = skb1->data_len = skb->len - len;
3017        skb->len                  = len;
3018        skb->data_len             = len - pos;
3019
3020        for (i = 0; i < nfrags; i++) {
3021                int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
3022
3023                if (pos + size > len) {
3024                        skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
3025
3026                        if (pos < len) {
3027                                /* Split frag.
3028                                 * We have two variants in this case:
3029                                 * 1. Move the whole frag to the second
3030                                 *    part, if it is possible. E.g.
3031                                 *    this approach is mandatory for TUX,
3032                                 *    where splitting is expensive.
3033                                 * 2. Split accurately. This is what we do.
3034                                 */
3035                                skb_frag_ref(skb, i);
3036                                skb_shinfo(skb1)->frags[0].page_offset += len - pos;
3037                                skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
3038                                skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
3039                                skb_shinfo(skb)->nr_frags++;
3040                        }
3041                        k++;
3042                } else
3043                        skb_shinfo(skb)->nr_frags++;
3044                pos += size;
3045        }
3046        skb_shinfo(skb1)->nr_frags = k;
3047}
3048
3049/**
3050 * skb_split - Split fragmented skb to two parts at length len.
3051 * @skb: the buffer to split
3052 * @skb1: the buffer to receive the second part
3053 * @len: new length for skb
3054 */
3055void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
3056{
3057        int pos = skb_headlen(skb);
3058
3059        skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags &
3060                                      SKBTX_SHARED_FRAG;
3061        skb_zerocopy_clone(skb1, skb, 0);
3062        if (len < pos)  /* Split line is inside header. */
3063                skb_split_inside_header(skb, skb1, len, pos);
3064        else            /* Second chunk has no header, nothing to copy. */
3065                skb_split_no_header(skb, skb1, len, pos);
3066}
3067EXPORT_SYMBOL(skb_split);
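
/* Illustrative sketch (editorial, not part of the original file): TCP uses
 * this when it must (re)transmit only the first part of a queued skb; a
 * freshly allocated buffer receives everything past "len" (the allocation
 * mirrors tcp_fragment() and the names are hypothetical):
 *
 *	nsize = max_t(int, skb_headlen(skb) - len, 0);
 *	buff = alloc_skb(nsize, GFP_ATOMIC);
 *	if (!buff)
 *		return -ENOMEM;
 *	skb_split(skb, buff, len);
 *
 * Afterwards skb holds the first len bytes and buff holds the rest, with
 * page fragments re-referenced rather than copied.
 */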
3068
3069/* Shifting from/to a cloned skb is a no-go.
3070 *
3071 * Caller cannot keep skb_shinfo related pointers past calling here!
3072 */
3073static int skb_prepare_for_shift(struct sk_buff *skb)
3074{
3075        return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3076}
3077
3078/**
3079 * skb_shift - Shifts paged data partially from skb to another
3080 * @tgt: buffer into which tail data gets added
3081 * @skb: buffer from which the paged data comes from
3082 * @shiftlen: shift up to this many bytes
3083 *
3084 * Attempts to shift up to shiftlen worth of bytes, which may be less than
3085 * the length of the skb, from skb to tgt. Returns the number of bytes shifted.
3086 * It's up to the caller to free skb if everything was shifted.
3087 *
3088 * If @tgt runs out of frags, the whole operation is aborted.
3089 *
3090 * The skb cannot contain anything other than paged data, while tgt is
3091 * allowed to have non-paged data as well.
3092 *
3093 * TODO: full sized shift could be optimized but that would need
3094 * specialized skb free'er to handle frags without up-to-date nr_frags.
3095 */
3096int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
3097{
3098        int from, to, merge, todo;
3099        struct skb_frag_struct *fragfrom, *fragto;
3100
3101        BUG_ON(shiftlen > skb->len);
3102
3103        if (skb_headlen(skb))
3104                return 0;
3105        if (skb_zcopy(tgt) || skb_zcopy(skb))
3106                return 0;
3107
3108        todo = shiftlen;
3109        from = 0;
3110        to = skb_shinfo(tgt)->nr_frags;
3111        fragfrom = &skb_shinfo(skb)->frags[from];
3112
3113        /* Actual merge is delayed until the point when we know we can
3114         * commit all, so that we don't have to undo partial changes
3115         */
3116        if (!to ||
3117            !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
3118                              fragfrom->page_offset)) {
3119                merge = -1;
3120        } else {
3121                merge = to - 1;
3122
3123                todo -= skb_frag_size(fragfrom);
3124                if (todo < 0) {
3125                        if (skb_prepare_for_shift(skb) ||
3126                            skb_prepare_for_shift(tgt))
3127                                return 0;
3128
3129                        /* All previous frag pointers might be stale! */
3130                        fragfrom = &skb_shinfo(skb)->frags[from];
3131                        fragto = &skb_shinfo(tgt)->frags[merge];
3132
3133                        skb_frag_size_add(fragto, shiftlen);
3134                        skb_frag_size_sub(fragfrom, shiftlen);
3135                        fragfrom->page_offset += shiftlen;
3136
3137                        goto onlymerged;
3138                }
3139
3140                from++;
3141        }
3142
3143        /* Skip full, not-fitting skb to avoid expensive operations */
3144        if ((shiftlen == skb->len) &&
3145            (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
3146                return 0;
3147
3148        if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
3149                return 0;
3150
3151        while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
3152                if (to == MAX_SKB_FRAGS)
3153                        return 0;
3154
3155                fragfrom = &skb_shinfo(skb)->frags[from];
3156                fragto = &skb_shinfo(tgt)->frags[to];
3157
3158                if (todo >= skb_frag_size(fragfrom)) {
3159                        *fragto = *fragfrom;
3160                        todo -= skb_frag_size(fragfrom);
3161                        from++;
3162                        to++;
3163
3164                } else {
3165                        __skb_frag_ref(fragfrom);
3166                        fragto->page = fragfrom->page;
3167                        fragto->page_offset = fragfrom->page_offset;
3168                        skb_frag_size_set(fragto, todo);
3169
3170                        fragfrom->page_offset += todo;
3171                        skb_frag_size_sub(fragfrom, todo);
3172                        todo = 0;
3173
3174                        to++;
3175                        break;
3176                }
3177        }
3178
3179        /* Ready to "commit" this state change to tgt */
3180        skb_shinfo(tgt)->nr_frags = to;
3181
3182        if (merge >= 0) {
3183                fragfrom = &skb_shinfo(skb)->frags[0];
3184                fragto = &skb_shinfo(tgt)->frags[merge];
3185
3186                skb_frag_size_add(fragto, skb_frag_size(fragfrom));
3187                __skb_frag_unref(fragfrom);
3188        }
3189
3190        /* Reposition in the original skb */
3191        to = 0;
3192        while (from < skb_shinfo(skb)->nr_frags)
3193                skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
3194        skb_shinfo(skb)->nr_frags = to;
3195
3196        BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
3197
3198onlymerged:
3199        /* Most likely the tgt won't ever need its checksum anymore; skb, on
3200         * the other hand, might need it if it has to be resent.
3201         */
3202        tgt->ip_summed = CHECKSUM_PARTIAL;
3203        skb->ip_summed = CHECKSUM_PARTIAL;
3204
3205        /* Yak, is it really working this way? Some helper please? */
3206        skb->len -= shiftlen;
3207        skb->data_len -= shiftlen;
3208        skb->truesize -= shiftlen;
3209        tgt->len += shiftlen;
3210        tgt->data_len += shiftlen;
3211        tgt->truesize += shiftlen;
3212
3213        return shiftlen;
3214}
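
/* Illustrative usage sketch (not part of this file): a caller coalescing two
 * adjacent, purely paged skbs (skb_headlen(skb) must be zero, as required
 * above) might use skb_shift() roughly as below.  'prev' and 'queue' are
 * hypothetical; the in-tree user is the TCP SACK shifting code.
 *
 *        unsigned int len = skb->len;
 *
 *        if (skb_shift(prev, skb, len) == len) {
 *                __skb_unlink(skb, queue);
 *                kfree_skb(skb);
 *        }
 */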
3215
3216/**
3217 * skb_prepare_seq_read - Prepare a sequential read of skb data
3218 * @skb: the buffer to read
3219 * @from: lower offset of data to be read
3220 * @to: upper offset of data to be read
3221 * @st: state variable
3222 *
3223 * Initializes the specified state variable. Must be called before
3224 * invoking skb_seq_read() for the first time.
3225 */
3226void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
3227                          unsigned int to, struct skb_seq_state *st)
3228{
3229        st->lower_offset = from;
3230        st->upper_offset = to;
3231        st->root_skb = st->cur_skb = skb;
3232        st->frag_idx = st->stepped_offset = 0;
3233        st->frag_data = NULL;
3234}
3235EXPORT_SYMBOL(skb_prepare_seq_read);
3236
3237/**
3238 * skb_seq_read - Sequentially read skb data
3239 * @consumed: number of bytes consumed by the caller so far
3240 * @data: destination pointer for data to be returned
3241 * @st: state variable
3242 *
3243 * Reads a block of skb data at @consumed relative to the
3244 * lower offset specified to skb_prepare_seq_read(). Assigns
3245 * the head of the data block to @data and returns the length
3246 * of the block or 0 if the end of the skb data or the upper
3247 * offset has been reached.
3248 *
3249 * The caller is not required to consume all of the data
3250 * returned, i.e. @consumed is typically set to the number
3251 * of bytes already consumed and the next call to
3252 * skb_seq_read() will return the remaining part of the block.
3253 *
3254 * Note 1: The size of each block of data returned can be arbitrary;
3255 *       this limitation is the cost of zerocopy sequential
3256 *       reads of potentially non-linear data.
3257 *
3258 * Note 2: Fragment lists within fragments are not implemented
3259 *       at the moment; state->root_skb could be replaced with
3260 *       a stack for this purpose.
3261 */
3262unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
3263                          struct skb_seq_state *st)
3264{
3265        unsigned int block_limit, abs_offset = consumed + st->lower_offset;
3266        skb_frag_t *frag;
3267
3268        if (unlikely(abs_offset >= st->upper_offset)) {
3269                if (st->frag_data) {
3270                        kunmap_atomic(st->frag_data);
3271                        st->frag_data = NULL;
3272                }
3273                return 0;
3274        }
3275
3276next_skb:
3277        block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
3278
3279        if (abs_offset < block_limit && !st->frag_data) {
3280                *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
3281                return block_limit - abs_offset;
3282        }
3283
3284        if (st->frag_idx == 0 && !st->frag_data)
3285                st->stepped_offset += skb_headlen(st->cur_skb);
3286
3287        while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
3288                frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
3289                block_limit = skb_frag_size(frag) + st->stepped_offset;
3290
3291                if (abs_offset < block_limit) {
3292                        if (!st->frag_data)
3293                                st->frag_data = kmap_atomic(skb_frag_page(frag));
3294
3295                        *data = (u8 *) st->frag_data + frag->page_offset +
3296                                (abs_offset - st->stepped_offset);
3297
3298                        return block_limit - abs_offset;
3299                }
3300
3301                if (st->frag_data) {
3302                        kunmap_atomic(st->frag_data);
3303                        st->frag_data = NULL;
3304                }
3305
3306                st->frag_idx++;
3307                st->stepped_offset += skb_frag_size(frag);
3308        }
3309
3310        if (st->frag_data) {
3311                kunmap_atomic(st->frag_data);
3312                st->frag_data = NULL;
3313        }
3314
3315        if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
3316                st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
3317                st->frag_idx = 0;
3318                goto next_skb;
3319        } else if (st->cur_skb->next) {
3320                st->cur_skb = st->cur_skb->next;
3321                st->frag_idx = 0;
3322                goto next_skb;
3323        }
3324
3325        return 0;
3326}
3327EXPORT_SYMBOL(skb_seq_read);
3328
3329/**
3330 * skb_abort_seq_read - Abort a sequential read of skb data
3331 * @st: state variable
3332 *
3333 * Must be called if the sequential read is abandoned before
3334 * skb_seq_read() has returned 0.
3335 */
3336void skb_abort_seq_read(struct skb_seq_state *st)
3337{
3338        if (st->frag_data)
3339                kunmap_atomic(st->frag_data);
3340}
3341EXPORT_SYMBOL(skb_abort_seq_read);
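
/* Illustrative usage sketch (not part of this file): the three helpers above
 * are normally used together.  'process()' is a hypothetical consumer that
 * returns how many of the supplied bytes it used.  Calling
 * skb_abort_seq_read() after skb_seq_read() has already returned 0 is
 * harmless (frag_data is NULL by then), so the abort can be unconditional.
 *
 *        struct skb_seq_state st;
 *        const u8 *data;
 *        unsigned int consumed = 0, len;
 *
 *        skb_prepare_seq_read(skb, 0, skb->len, &st);
 *        while ((len = skb_seq_read(consumed, &data, &st)) != 0)
 *                consumed += process(data, len);
 *        skb_abort_seq_read(&st);
 */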
3342
3343#define TS_SKB_CB(state)        ((struct skb_seq_state *) &((state)->cb))
3344
3345static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
3346                                          struct ts_config *conf,
3347                                          struct ts_state *state)
3348{
3349        return skb_seq_read(offset, text, TS_SKB_CB(state));
3350}
3351
3352static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
3353{
3354        skb_abort_seq_read(TS_SKB_CB(state));
3355}
3356
3357/**
3358 * skb_find_text - Find a text pattern in skb data
3359 * @skb: the buffer to look in
3360 * @from: search offset
3361 * @to: search limit
3362 * @config: textsearch configuration
3363 *
3364 * Finds a pattern in the skb data according to the specified
3365 * textsearch configuration. Use textsearch_next() to retrieve
3366 * subsequent occurrences of the pattern. Returns the offset
3367 * to the first occurrence or UINT_MAX if no match was found.
3368 */
3369unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
3370                           unsigned int to, struct ts_config *config)
3371{
3372        struct ts_state state;
3373        unsigned int ret;
3374
3375        config->get_next_block = skb_ts_get_next_block;
3376        config->finish = skb_ts_finish;
3377
3378        skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
3379
3380        ret = textsearch_find(config, &state);
3381        return (ret <= to - from ? ret : UINT_MAX);
3382}
3383EXPORT_SYMBOL(skb_find_text);
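
/* Illustrative usage sketch (not part of this file): skb_find_text() is meant
 * to be paired with the lib/textsearch API.  The algorithm ("kmp") and the
 * pattern are arbitrary examples.
 *
 *        struct ts_config *conf;
 *        unsigned int pos;
 *
 *        conf = textsearch_prepare("kmp", "needle", 6, GFP_KERNEL, TS_AUTOLOAD);
 *        if (IS_ERR(conf))
 *                return PTR_ERR(conf);
 *        pos = skb_find_text(skb, 0, skb->len, conf);
 *        if (pos != UINT_MAX)
 *                pr_debug("pattern found at offset %u\n", pos);
 *        textsearch_destroy(conf);
 */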
3384
3385/**
3386 * skb_append_datato_frags - append the user data to a skb
3387 * @sk: sock structure
3388 * @skb: skb structure to which the user data is appended
3389 * @getfrag: callback function used to fetch the user data
3390 * @from: pointer to user message iov
3391 * @length: length of the iov message
3392 *
3393 * Description: This procedure appends the user data to the fragment part
3394 * of the skb. If any page allocation fails, this procedure returns -ENOMEM.
3395 */
3396int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
3397                        int (*getfrag)(void *from, char *to, int offset,
3398                                        int len, int odd, struct sk_buff *skb),
3399                        void *from, int length)
3400{
3401        int frg_cnt = skb_shinfo(skb)->nr_frags;
3402        int copy;
3403        int offset = 0;
3404        int ret;
3405        struct page_frag *pfrag = &current->task_frag;
3406
3407        do {
3408                /* Return error if we don't have space for new frag */
3409                if (frg_cnt >= MAX_SKB_FRAGS)
3410                        return -EMSGSIZE;
3411
3412                if (!sk_page_frag_refill(sk, pfrag))
3413                        return -ENOMEM;
3414
3415                /* copy the user data to page */
3416                copy = min_t(int, length, pfrag->size - pfrag->offset);
3417
3418                ret = getfrag(from, page_address(pfrag->page) + pfrag->offset,
3419                              offset, copy, 0, skb);
3420                if (ret < 0)
3421                        return -EFAULT;
3422
3423                /* copy was successful so update the size parameters */
3424                skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset,
3425                                   copy);
3426                frg_cnt++;
3427                pfrag->offset += copy;
3428                get_page(pfrag->page);
3429
3430                skb->truesize += copy;
3431                refcount_add(copy, &sk->sk_wmem_alloc);
3432                skb->len += copy;
3433                skb->data_len += copy;
3434                offset += copy;
3435                length -= copy;
3436
3437        } while (length > 0);
3438
3439        return 0;
3440}
3441EXPORT_SYMBOL(skb_append_datato_frags);
3442
3443int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
3444                         int offset, size_t size)
3445{
3446        int i = skb_shinfo(skb)->nr_frags;
3447
3448        if (skb_can_coalesce(skb, i, page, offset)) {
3449                skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
3450        } else if (i < MAX_SKB_FRAGS) {
3451                get_page(page);
3452                skb_fill_page_desc(skb, i, page, offset, size);
3453        } else {
3454                return -EMSGSIZE;
3455        }
3456
3457        return 0;
3458}
3459EXPORT_SYMBOL_GPL(skb_append_pagefrags);
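
/* Illustrative usage sketch (not part of this file): skb_append_pagefrags()
 * only installs (or coalesces) the page fragment; the caller is expected to
 * account for the added bytes itself.  How much to add to truesize is a
 * protocol decision; plain 'size' is used here for simplicity.
 *
 *        err = skb_append_pagefrags(skb, page, offset, size);
 *        if (err)
 *                return err;
 *        skb->len += size;
 *        skb->data_len += size;
 *        skb->truesize += size;
 */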
3460
3461/**
3462 *      skb_pull_rcsum - pull skb and update receive checksum
3463 *      @skb: buffer to update
3464 *      @len: length of data pulled
3465 *
3466 *      This function performs an skb_pull on the packet and updates
3467 *      the CHECKSUM_COMPLETE checksum.  It should be used on
3468 *      receive path processing instead of skb_pull unless you know
3469 *      that the checksum difference is zero (e.g., a valid IP header)
3470 *      or you are setting ip_summed to CHECKSUM_NONE.
3471 */
3472void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
3473{
3474        unsigned char *data = skb->data;
3475
3476        BUG_ON(len > skb->len);
3477        __skb_pull(skb, len);
3478        skb_postpull_rcsum(skb, data, len);
3479        return skb->data;
3480}
3481EXPORT_SYMBOL_GPL(skb_pull_rcsum);
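
/* Illustrative usage sketch (not part of this file): a receive handler
 * stripping a hypothetical 4-byte tag while keeping a CHECKSUM_COMPLETE
 * value consistent:
 *
 *        if (!pskb_may_pull(skb, 4))
 *                goto drop;
 *        skb_pull_rcsum(skb, 4);
 */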
3482
3483static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb)
3484{
3485        skb_frag_t head_frag;
3486        struct page *page;
3487
3488        page = virt_to_head_page(frag_skb->head);
3489        head_frag.page.p = page;
3490        head_frag.page_offset = frag_skb->data -
3491                (unsigned char *)page_address(page);
3492        head_frag.size = skb_headlen(frag_skb);
3493        return head_frag;
3494}
3495
3496/**
3497 *      skb_segment - Perform protocol segmentation on skb.
3498 *      @head_skb: buffer to segment
3499 *      @features: features for the output path (see dev->features)
3500 *
3501 *      This function performs segmentation on the given skb.  It returns
3502 *      a pointer to the first in a list of new skbs for the segments.
3503 *      In case of error it returns ERR_PTR(err).
3504 */
3505struct sk_buff *skb_segment(struct sk_buff *head_skb,
3506                            netdev_features_t features)
3507{
3508        struct sk_buff *segs = NULL;
3509        struct sk_buff *tail = NULL;
3510        struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
3511        skb_frag_t *frag = skb_shinfo(head_skb)->frags;
3512        unsigned int mss = skb_shinfo(head_skb)->gso_size;
3513        unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
3514        struct sk_buff *frag_skb = head_skb;
3515        unsigned int offset = doffset;
3516        unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
3517        unsigned int partial_segs = 0;
3518        unsigned int headroom;
3519        unsigned int len = head_skb->len;
3520        __be16 proto;
3521        bool csum, sg;
3522        int nfrags = skb_shinfo(head_skb)->nr_frags;
3523        int err = -ENOMEM;
3524        int i = 0;
3525        int pos;
3526        int dummy;
3527
3528        __skb_push(head_skb, doffset);
3529        proto = skb_network_protocol(head_skb, &dummy);
3530        if (unlikely(!proto))
3531                return ERR_PTR(-EINVAL);
3532
3533        sg = !!(features & NETIF_F_SG);
3534        csum = !!can_checksum_protocol(features, proto);
3535
3536        if (sg && csum && (mss != GSO_BY_FRAGS))  {
3537                if (!(features & NETIF_F_GSO_PARTIAL)) {
3538                        struct sk_buff *iter;
3539                        unsigned int frag_len;
3540
3541                        if (!list_skb ||
3542                            !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
3543                                goto normal;
3544
3545                        /* If we get here then all the required
3546                         * GSO features except frag_list are supported.
3547                         * Try to split the SKB into multiple GSO SKBs
3548                         * with no frag_list.
3549                         * Currently we can do that only when the buffers don't
3550                         * have a linear part and all the buffers except
3551                         * the last are of the same length.
3552                         */
3553                        frag_len = list_skb->len;
3554                        skb_walk_frags(head_skb, iter) {
3555                                if (frag_len != iter->len && iter->next)
3556                                        goto normal;
3557                                if (skb_headlen(iter) && !iter->head_frag)
3558                                        goto normal;
3559
3560                                len -= iter->len;
3561                        }
3562
3563                        if (len != frag_len)
3564                                goto normal;
3565                }
3566
3567                /* GSO partial only requires that we trim off any excess that
3568                 * doesn't fit into an MSS sized block, so take care of that
3569                 * now.
3570                 */
3571                partial_segs = len / mss;
3572                if (partial_segs > 1)
3573                        mss *= partial_segs;
3574                else
3575                        partial_segs = 0;
3576        }
3577
3578normal:
3579        headroom = skb_headroom(head_skb);
3580        pos = skb_headlen(head_skb);
3581
3582        do {
3583                struct sk_buff *nskb;
3584                skb_frag_t *nskb_frag;
3585                int hsize;
3586                int size;
3587
3588                if (unlikely(mss == GSO_BY_FRAGS)) {
3589                        len = list_skb->len;
3590                } else {
3591                        len = head_skb->len - offset;
3592                        if (len > mss)
3593                                len = mss;
3594                }
3595
3596                hsize = skb_headlen(head_skb) - offset;
3597                if (hsize < 0)
3598                        hsize = 0;
3599                if (hsize > len || !sg)
3600                        hsize = len;
3601
3602                if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
3603                    (skb_headlen(list_skb) == len || sg)) {
3604                        BUG_ON(skb_headlen(list_skb) > len);
3605
3606                        i = 0;
3607                        nfrags = skb_shinfo(list_skb)->nr_frags;
3608                        frag = skb_shinfo(list_skb)->frags;
3609                        frag_skb = list_skb;
3610                        pos += skb_headlen(list_skb);
3611
3612                        while (pos < offset + len) {
3613                                BUG_ON(i >= nfrags);
3614
3615                                size = skb_frag_size(frag);
3616                                if (pos + size > offset + len)
3617                                        break;
3618
3619                                i++;
3620                                pos += size;
3621                                frag++;
3622                        }
3623
3624                        nskb = skb_clone(list_skb, GFP_ATOMIC);
3625                        list_skb = list_skb->next;
3626
3627                        if (unlikely(!nskb))
3628                                goto err;
3629
3630                        if (unlikely(pskb_trim(nskb, len))) {
3631                                kfree_skb(nskb);
3632                                goto err;
3633                        }
3634
3635                        hsize = skb_end_offset(nskb);
3636                        if (skb_cow_head(nskb, doffset + headroom)) {
3637                                kfree_skb(nskb);
3638                                goto err;
3639                        }
3640
3641                        nskb->truesize += skb_end_offset(nskb) - hsize;
3642                        skb_release_head_state(nskb);
3643                        __skb_push(nskb, doffset);
3644                } else {
3645                        nskb = __alloc_skb(hsize + doffset + headroom,
3646                                           GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
3647                                           NUMA_NO_NODE);
3648
3649                        if (unlikely(!nskb))
3650                                goto err;
3651
3652                        skb_reserve(nskb, headroom);
3653                        __skb_put(nskb, doffset);
3654                }
3655
3656                if (segs)
3657                        tail->next = nskb;
3658                else
3659                        segs = nskb;
3660                tail = nskb;
3661
3662                __copy_skb_header(nskb, head_skb);
3663
3664                skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
3665                skb_reset_mac_len(nskb);
3666
3667                skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
3668                                                 nskb->data - tnl_hlen,
3669                                                 doffset + tnl_hlen);
3670
3671                if (nskb->len == len + doffset)
3672                        goto perform_csum_check;
3673
3674                if (!sg) {
3675                        if (!nskb->remcsum_offload)
3676                                nskb->ip_summed = CHECKSUM_NONE;
3677                        SKB_GSO_CB(nskb)->csum =
3678                                skb_copy_and_csum_bits(head_skb, offset,
3679                                                       skb_put(nskb, len),
3680                                                       len, 0);
3681                        SKB_GSO_CB(nskb)->csum_start =
3682                                skb_headroom(nskb) + doffset;
3683                        continue;
3684                }
3685
3686                nskb_frag = skb_shinfo(nskb)->frags;
3687
3688                skb_copy_from_linear_data_offset(head_skb, offset,
3689                                                 skb_put(nskb, hsize), hsize);
3690
3691                skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
3692                                              SKBTX_SHARED_FRAG;
3693
3694                if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
3695                    skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
3696                        goto err;
3697
3698                while (pos < offset + len) {
3699                        if (i >= nfrags) {
3700                                i = 0;
3701                                nfrags = skb_shinfo(list_skb)->nr_frags;
3702                                frag = skb_shinfo(list_skb)->frags;
3703                                frag_skb = list_skb;
3704                                if (!skb_headlen(list_skb)) {
3705                                        BUG_ON(!nfrags);
3706                                } else {
3707                                        BUG_ON(!list_skb->head_frag);
3708
3709                                        /* to make room for head_frag. */
3710                                        i--;
3711                                        frag--;
3712                                }
3713                                if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
3714                                    skb_zerocopy_clone(nskb, frag_skb,
3715                                                       GFP_ATOMIC))
3716                                        goto err;
3717
3718                                list_skb = list_skb->next;
3719                        }
3720
3721                        if (unlikely(skb_shinfo(nskb)->nr_frags >=
3722                                     MAX_SKB_FRAGS)) {
3723                                net_warn_ratelimited(
3724                                        "skb_segment: too many frags: %u %u\n",
3725                                        pos, mss);
3726                                err = -EINVAL;
3727                                goto err;
3728                        }
3729
3730                        *nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag;
3731                        __skb_frag_ref(nskb_frag);
3732                        size = skb_frag_size(nskb_frag);
3733
3734                        if (pos < offset) {
3735                                nskb_frag->page_offset += offset - pos;
3736                                skb_frag_size_sub(nskb_frag, offset - pos);
3737                        }
3738
3739                        skb_shinfo(nskb)->nr_frags++;
3740
3741                        if (pos + size <= offset + len) {
3742                                i++;
3743                                frag++;
3744                                pos += size;
3745                        } else {
3746                                skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
3747                                goto skip_fraglist;
3748                        }
3749
3750                        nskb_frag++;
3751                }
3752
3753skip_fraglist:
3754                nskb->data_len = len - hsize;
3755                nskb->len += nskb->data_len;
3756                nskb->truesize += nskb->data_len;
3757
3758perform_csum_check:
3759                if (!csum) {
3760                        if (skb_has_shared_frag(nskb) &&
3761                            __skb_linearize(nskb))
3762                                goto err;
3763
3764                        if (!nskb->remcsum_offload)
3765                                nskb->ip_summed = CHECKSUM_NONE;
3766                        SKB_GSO_CB(nskb)->csum =
3767                                skb_checksum(nskb, doffset,
3768                                             nskb->len - doffset, 0);
3769                        SKB_GSO_CB(nskb)->csum_start =
3770                                skb_headroom(nskb) + doffset;
3771                }
3772        } while ((offset += len) < head_skb->len);
3773
3774        /* Some callers want to get the end of the list.
3775         * Put it in segs->prev to avoid walking the list.
3776         * (see validate_xmit_skb_list() for example)
3777         */
3778        segs->prev = tail;
3779
3780        if (partial_segs) {
3781                struct sk_buff *iter;
3782                int type = skb_shinfo(head_skb)->gso_type;
3783                unsigned short gso_size = skb_shinfo(head_skb)->gso_size;
3784
3785                /* Update type to add partial and then remove dodgy if set */
3786                type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL;
3787                type &= ~SKB_GSO_DODGY;
3788
3789                /* Update GSO info and prepare to start updating headers on
3790                 * our way back down the stack of protocols.
3791                 */
3792                for (iter = segs; iter; iter = iter->next) {
3793                        skb_shinfo(iter)->gso_size = gso_size;
3794                        skb_shinfo(iter)->gso_segs = partial_segs;
3795                        skb_shinfo(iter)->gso_type = type;
3796                        SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset;
3797                }
3798
3799                if (tail->len - doffset <= gso_size)
3800                        skb_shinfo(tail)->gso_size = 0;
3801                else if (tail != segs)
3802                        skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size);
3803        }
3804
3805        /* The following permits correct backpressure for protocols
3806         * using skb_set_owner_w().
3807         * The idea is to transfer ownership from head_skb to the last segment.
3808         */
3809        if (head_skb->destructor == sock_wfree) {
3810                swap(tail->truesize, head_skb->truesize);
3811                swap(tail->destructor, head_skb->destructor);
3812                swap(tail->sk, head_skb->sk);
3813        }
3814        return segs;
3815
3816err:
3817        kfree_skb_list(segs);
3818        return ERR_PTR(err);
3819}
3820EXPORT_SYMBOL_GPL(skb_segment);
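
/* Illustrative usage sketch (not part of this file): callers normally reach
 * skb_segment() through skb_gso_segment() and then walk the returned list.
 * 'my_xmit()' and the drop label are hypothetical; compare
 * validate_xmit_skb() for the real transmit-path handling.
 *
 *        struct sk_buff *segs, *nskb;
 *
 *        segs = skb_gso_segment(skb, features);
 *        if (IS_ERR(segs))
 *                goto drop;
 *        if (segs) {
 *                consume_skb(skb);
 *                skb = segs;
 *        }
 *        do {
 *                nskb = skb->next;
 *                skb->next = NULL;
 *                my_xmit(skb);
 *                skb = nskb;
 *        } while (skb);
 */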
3821
3822int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
3823{
3824        struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
3825        unsigned int offset = skb_gro_offset(skb);
3826        unsigned int headlen = skb_headlen(skb);
3827        unsigned int len = skb_gro_len(skb);
3828        unsigned int delta_truesize;
3829        struct sk_buff *lp;
3830
3831        if (unlikely(p->len + len >= 65536))
3832                return -E2BIG;
3833
3834        lp = NAPI_GRO_CB(p)->last;
3835        pinfo = skb_shinfo(lp);
3836
3837        if (headlen <= offset) {
3838                skb_frag_t *frag;
3839                skb_frag_t *frag2;
3840                int i = skbinfo->nr_frags;
3841                int nr_frags = pinfo->nr_frags + i;
3842
3843                if (nr_frags > MAX_SKB_FRAGS)
3844                        goto merge;
3845
3846                offset -= headlen;
3847                pinfo->nr_frags = nr_frags;
3848                skbinfo->nr_frags = 0;
3849
3850                frag = pinfo->frags + nr_frags;
3851                frag2 = skbinfo->frags + i;
3852                do {
3853                        *--frag = *--frag2;
3854                } while (--i);
3855
3856                frag->page_offset += offset;
3857                skb_frag_size_sub(frag, offset);
3858
3859                /* all fragments' truesize: remove (head size + sk_buff) */
3860                delta_truesize = skb->truesize -
3861                                 SKB_TRUESIZE(skb_end_offset(skb));
3862
3863                skb->truesize -= skb->data_len;
3864                skb->len -= skb->data_len;
3865                skb->data_len = 0;
3866
3867                NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
3868                goto done;
3869        } else if (skb->head_frag) {
3870                int nr_frags = pinfo->nr_frags;
3871                skb_frag_t *frag = pinfo->frags + nr_frags;
3872                struct page *page = virt_to_head_page(skb->head);
3873                unsigned int first_size = headlen - offset;
3874                unsigned int first_offset;
3875
3876                if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
3877                        goto merge;
3878
3879                first_offset = skb->data -
3880                               (unsigned char *)page_address(page) +
3881                               offset;
3882
3883                pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
3884
3885                frag->page.p      = page;
3886                frag->page_offset = first_offset;
3887                skb_frag_size_set(frag, first_size);
3888
3889                memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
3890                /* We don't need to clear skbinfo->nr_frags here */
3891
3892                delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
3893                NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
3894                goto done;
3895        }
3896
3897merge:
3898        delta_truesize = skb->truesize;
3899        if (offset > headlen) {
3900                unsigned int eat = offset - headlen;
3901
3902                skbinfo->frags[0].page_offset += eat;
3903                skb_frag_size_sub(&skbinfo->frags[0], eat);
3904                skb->data_len -= eat;
3905                skb->len -= eat;
3906                offset = headlen;
3907        }
3908
3909        __skb_pull(skb, offset);
3910
3911        if (NAPI_GRO_CB(p)->last == p)
3912                skb_shinfo(p)->frag_list = skb;
3913        else
3914                NAPI_GRO_CB(p)->last->next = skb;
3915        NAPI_GRO_CB(p)->last = skb;
3916        __skb_header_release(skb);
3917        lp = p;
3918
3919done:
3920        NAPI_GRO_CB(p)->count++;
3921        p->data_len += len;
3922        p->truesize += delta_truesize;
3923        p->len += len;
3924        if (lp != p) {
3925                lp->data_len += len;
3926                lp->truesize += delta_truesize;
3927                lp->len += len;
3928        }
3929        NAPI_GRO_CB(skb)->same_flow = 1;
3930        return 0;
3931}
3932EXPORT_SYMBOL_GPL(skb_gro_receive);
3933
3934void __init skb_init(void)
3935{
3936        skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache",
3937                                              sizeof(struct sk_buff),
3938                                              0,
3939                                              SLAB_HWCACHE_ALIGN|SLAB_PANIC,
3940                                              offsetof(struct sk_buff, cb),
3941                                              sizeof_field(struct sk_buff, cb),
3942                                              NULL);
3943        skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
3944                                                sizeof(struct sk_buff_fclones),
3945                                                0,
3946                                                SLAB_HWCACHE_ALIGN|SLAB_PANIC,
3947                                                NULL);
3948}
3949
3950static int
3951__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
3952               unsigned int recursion_level)
3953{
3954        int start = skb_headlen(skb);
3955        int i, copy = start - offset;
3956        struct sk_buff *frag_iter;
3957        int elt = 0;
3958
3959        if (unlikely(recursion_level >= 24))
3960                return -EMSGSIZE;
3961
3962        if (copy > 0) {
3963                if (copy > len)
3964                        copy = len;
3965                sg_set_buf(sg, skb->data + offset, copy);
3966                elt++;
3967                if ((len -= copy) == 0)
3968                        return elt;
3969                offset += copy;
3970        }
3971
3972        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3973                int end;
3974
3975                WARN_ON(start > offset + len);
3976
3977                end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
3978                if ((copy = end - offset) > 0) {
3979                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3980                        if (unlikely(elt && sg_is_last(&sg[elt - 1])))
3981                                return -EMSGSIZE;
3982
3983                        if (copy > len)
3984                                copy = len;
3985                        sg_set_page(&sg[elt], skb_frag_page(frag), copy,
3986                                        frag->page_offset+offset-start);
3987                        elt++;
3988                        if (!(len -= copy))
3989                                return elt;
3990                        offset += copy;
3991                }
3992                start = end;
3993        }
3994
3995        skb_walk_frags(skb, frag_iter) {
3996                int end, ret;
3997
3998                WARN_ON(start > offset + len);
3999
4000                end = start + frag_iter->len;
4001                if ((copy = end - offset) > 0) {
4002                        if (unlikely(elt && sg_is_last(&sg[elt - 1])))
4003                                return -EMSGSIZE;
4004
4005                        if (copy > len)
4006                                copy = len;
4007                        ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
4008                                              copy, recursion_level + 1);
4009                        if (unlikely(ret < 0))
4010                                return ret;
4011                        elt += ret;
4012                        if ((len -= copy) == 0)
4013                                return elt;
4014                        offset += copy;
4015                }
4016                start = end;
4017        }
4018        BUG_ON(len);
4019        return elt;
4020}
4021
4022/**
4023 *      skb_to_sgvec - Fill a scatter-gather list from a socket buffer
4024 *      @skb: Socket buffer containing the buffers to be mapped
4025 *      @sg: The scatter-gather list to map into
4026 *      @offset: The offset into the buffer's contents to start mapping
4027 *      @len: Length of buffer space to be mapped
4028 *
4029 *      Fill the specified scatter-gather list with mappings/pointers into a
4030 *      region of the buffer space attached to a socket buffer. Returns either
4031 *      the number of scatterlist items used, or -EMSGSIZE if the contents
4032 *      could not fit.
4033 */
4034int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
4035{
4036        int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
4037
4038        if (nsg <= 0)
4039                return nsg;
4040
4041        sg_mark_end(&sg[nsg - 1]);
4042
4043        return nsg;
4044}
4045EXPORT_SYMBOL_GPL(skb_to_sgvec);
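
/* Illustrative usage sketch (not part of this file): mapping a whole skb for
 * a crypto or DMA operation.  MAX_SG_ENTRIES is a hypothetical bound chosen
 * by the caller (it must cover the value returned by skb_cow_data() or a
 * similar estimate).
 *
 *        struct scatterlist sg[MAX_SG_ENTRIES];
 *        int nsg;
 *
 *        sg_init_table(sg, ARRAY_SIZE(sg));
 *        nsg = skb_to_sgvec(skb, sg, 0, skb->len);
 *        if (nsg < 0)
 *                return nsg;
 */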
4046
4047/* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the
4048 * given sglist without marking the sg entry containing the last skb data as
4049 * the end.  So the caller can manipulate the sg list at will when appending
4050 * new data after the first call, without calling sg_unmark_end to extend it.
4051 *
4052 * Scenario to use skb_to_sgvec_nomark:
4053 * 1. sg_init_table
4054 * 2. skb_to_sgvec_nomark(payload1)
4055 * 3. skb_to_sgvec_nomark(payload2)
4056 *
4057 * This is equivalent to:
4058 * 1. sg_init_table
4059 * 2. skb_to_sgvec(payload1)
4060 * 3. sg_unmark_end
4061 * 4. skb_to_sgvec(payload2)
4062 *
4063 * When mapping multiple payloads conditionally, skb_to_sgvec_nomark is
4064 * preferable; see the illustrative sketch below skb_to_sgvec_nomark().
4065 */
4066int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
4067                        int offset, int len)
4068{
4069        return __skb_to_sgvec(skb, sg, offset, len, 0);
4070}
4071EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
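
/* Illustrative sketch (not part of this file) of the scenario described in
 * the comment above: two payloads mapped into a single sg table.  skb1, skb2
 * and the table size are hypothetical, and error handling is omitted.
 *
 *        struct scatterlist sg[MAX_SG_ENTRIES];
 *        int n1, n2;
 *
 *        sg_init_table(sg, ARRAY_SIZE(sg));
 *        n1 = skb_to_sgvec_nomark(skb1, sg, 0, skb1->len);
 *        n2 = skb_to_sgvec_nomark(skb2, sg + n1, 0, skb2->len);
 *        sg_mark_end(&sg[n1 + n2 - 1]);
 */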
4072
4073
4074
4075/**
4076 *      skb_cow_data - Check that a socket buffer's data buffers are writable
4077 *      @skb: The socket buffer to check.
4078 *      @tailbits: Amount of trailing space to be added
4079 *      @trailer: Returned pointer to the skb where the @tailbits space begins
4080 *
4081 *      Make sure that the data buffers attached to a socket buffer are
4082 *      writable. If they are not, private copies are made of the data buffers
4083 *      and the socket buffer is set to use these instead.
4084 *
4085 *      If @tailbits is given, make sure that there is space to write @tailbits
4086 *      bytes of data beyond current end of socket buffer.  @trailer will be
4087 *      set to point to the skb in which this space begins.
4088 *
4089 *      The number of scatterlist elements required to completely map the
4090 *      COW'd and extended socket buffer will be returned.
4091 */
4092int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
4093{
4094        int copyflag;
4095        int elt;
4096        struct sk_buff *skb1, **skb_p;
4097
4098        /* If skb is cloned or its head is paged, reallocate
4099         * head pulling out all the pages (pages are considered not writable
4100         * at the moment even if they are anonymous).
4101         */
4102        if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
4103            __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
4104                return -ENOMEM;
4105
4106        /* Easy case. Most packets will go this way. */
4107        if (!skb_has_frag_list(skb)) {
4108                /* A little trouble: not enough space for the trailer.
4109                 * This should not happen when the stack is tuned to generate
4110                 * good frames. OK, on a miss we reallocate and reserve even more
4111                 * space; 128 bytes is fair. */
4112
4113                if (skb_tailroom(skb) < tailbits &&
4114                    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
4115                        return -ENOMEM;
4116
4117                /* Voila! */
4118                *trailer = skb;
4119                return 1;
4120        }
4121
4122        /* Misery. We are in trouble; going to mince the fragments... */
4123
4124        elt = 1;
4125        skb_p = &skb_shinfo(skb)->frag_list;
4126        copyflag = 0;
4127
4128        while ((skb1 = *skb_p) != NULL) {
4129                int ntail = 0;
4130
4131                /* The fragment is partially pulled by someone;
4132                 * this can happen on input. Copy it and everything
4133                 * after it. */
4134
4135                if (skb_shared(skb1))
4136                        copyflag = 1;
4137
4138                /* If the skb is the last, worry about trailer. */
4139
4140                if (skb1->next == NULL && tailbits) {
4141                        if (skb_shinfo(skb1)->nr_frags ||
4142                            skb_has_frag_list(skb1) ||
4143                            skb_tailroom(skb1) < tailbits)
4144                                ntail = tailbits + 128;
4145                }
4146
4147                if (copyflag ||
4148                    skb_cloned(skb1) ||
4149                    ntail ||
4150                    skb_shinfo(skb1)->nr_frags ||
4151                    skb_has_frag_list(skb1)) {
4152                        struct sk_buff *skb2;
4153
4154                        /* We are miserable, poor guys... */
4155                        if (ntail == 0)
4156                                skb2 = skb_copy(skb1, GFP_ATOMIC);
4157                        else
4158                                skb2 = skb_copy_expand(skb1,
4159                                                       skb_headroom(skb1),
4160                                                       ntail,
4161                                                       GFP_ATOMIC);
4162                        if (unlikely(skb2 == NULL))
4163                                return -ENOMEM;
4164
4165                        if (skb1->sk)
4166                                skb_set_owner_w(skb2, skb1->sk);
4167
4168                        /* Looking around. Are we still alive?
4169                         * OK, link new skb, drop old one */
4170
4171                        skb2->next = skb1->next;
4172                        *skb_p = skb2;
4173                        kfree_skb(skb1);
4174                        skb1 = skb2;
4175                }
4176                elt++;
4177                *trailer = skb1;
4178                skb_p = &skb1->next;
4179        }
4180
4181        return elt;
4182}
4183EXPORT_SYMBOL_GPL(skb_cow_data);
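
/* Illustrative usage sketch (not part of this file): an IPsec-style transform
 * that needs a writable buffer plus room for a trailer.  'trailer_len' is a
 * hypothetical amount; compare the ESP output path for real usage.
 *
 *        struct sk_buff *trailer;
 *        int nfrags;
 *
 *        nfrags = skb_cow_data(skb, trailer_len, &trailer);
 *        if (nfrags < 0)
 *                return nfrags;
 *        pskb_put(skb, trailer, trailer_len);
 *
 * An sg table of nfrags entries can then be built with skb_to_sgvec().
 */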
4184
4185static void sock_rmem_free(struct sk_buff *skb)
4186{
4187        struct sock *sk = skb->sk;
4188
4189        atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
4190}
4191
4192static void skb_set_err_queue(struct sk_buff *skb)
4193{
4194        /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
4195         * So, it is safe to (mis)use it to mark skbs on the error queue.
4196         */
4197        skb->pkt_type = PACKET_OUTGOING;
4198        BUILD_BUG_ON(PACKET_OUTGOING == 0);
4199}
4200
4201/*
4202 * Note: We don't mem-charge error packets (no sk_forward_alloc changes)
4203 */
4204int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
4205{
4206        if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
4207            (unsigned int)sk->sk_rcvbuf)
4208                return -ENOMEM;
4209
4210        skb_orphan(skb);
4211        skb->sk = sk;
4212        skb->destructor = sock_rmem_free;
4213        atomic_add(skb->truesize, &sk->sk_rmem_alloc);
4214        skb_set_err_queue(skb);
4215
4216        /* before exiting rcu section, make sure dst is refcounted */
4217        skb_dst_force(skb);
4218
4219        skb_queue_tail(&sk->sk_error_queue, skb);
4220        if (!sock_flag(sk, SOCK_DEAD))
4221                sk->sk_error_report(sk);
4222        return 0;
4223}
4224EXPORT_SYMBOL(sock_queue_err_skb);
4225
4226static bool is_icmp_err_skb(const struct sk_buff *skb)
4227{
4228        return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
4229                       SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6);
4230}
4231
4232struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
4233{
4234        struct sk_buff_head *q = &sk->sk_error_queue;
4235        struct sk_buff *skb, *skb_next = NULL;
4236        bool icmp_next = false;
4237        unsigned long flags;
4238
4239        spin_lock_irqsave(&q->lock, flags);
4240        skb = __skb_dequeue(q);
4241        if (skb && (skb_next = skb_peek(q))) {
4242                icmp_next = is_icmp_err_skb(skb_next);
4243                if (icmp_next)
4244                        sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin;
4245        }
4246        spin_unlock_irqrestore(&q->lock, flags);
4247
4248        if (is_icmp_err_skb(skb) && !icmp_next)
4249                sk->sk_err = 0;
4250
4251        if (skb_next)
4252                sk->sk_error_report(sk);
4253
4254        return skb;
4255}
4256EXPORT_SYMBOL(sock_dequeue_err_skb);
4257
4258/**
4259 * skb_clone_sk - create clone of skb, and take reference to socket
4260 * @skb: the skb to clone
4261 *
4262 * This function creates a clone of a buffer that holds a reference on
4263 * sk_refcnt.  Buffers created via this function are meant to be
4264 * returned using sock_queue_err_skb, or free via kfree_skb.
4265 *
4266 * When passing buffers allocated with this function to sock_queue_err_skb
4267 * it is necessary to wrap the call with sock_hold/sock_put in order to
4268 * prevent the socket from being released prior to being enqueued on
4269 * the sk_error_queue.
4270 */
4271struct sk_buff *skb_clone_sk(struct sk_buff *skb)
4272{
4273        struct sock *sk = skb->sk;
4274        struct sk_buff *clone;
4275
4276        if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
4277                return NULL;
4278
4279        clone = skb_clone(skb, GFP_ATOMIC);
4280        if (!clone) {
4281                sock_put(sk);
4282                return NULL;
4283        }
4284
4285        clone->sk = sk;
4286        clone->destructor = sock_efree;
4287
4288        return clone;
4289}
4290EXPORT_SYMBOL(skb_clone_sk);
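
/* Illustrative sketch (not part of this file) of the sock_hold()/sock_put()
 * pattern described above when feeding the clone back through
 * sock_queue_err_skb():
 *
 *        struct sk_buff *clone = skb_clone_sk(skb);
 *        struct sock *sk;
 *
 *        if (!clone)
 *                return;
 *        sk = clone->sk;
 *        sock_hold(sk);
 *        if (sock_queue_err_skb(sk, clone))
 *                kfree_skb(clone);
 *        sock_put(sk);
 */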
4291
4292static void __skb_complete_tx_timestamp(struct sk_buff *skb,
4293                                        struct sock *sk,
4294                                        int tstype,
4295                                        bool opt_stats)
4296{
4297        struct sock_exterr_skb *serr;
4298        int err;
4299
4300        BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
4301
4302        serr = SKB_EXT_ERR(skb);
4303        memset(serr, 0, sizeof(*serr));
4304        serr->ee.ee_errno = ENOMSG;
4305        serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
4306        serr->ee.ee_info = tstype;
4307        serr->opt_stats = opt_stats;
4308        serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
4309        if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
4310                serr->ee.ee_data = skb_shinfo(skb)->tskey;
4311                if (sk->sk_protocol == IPPROTO_TCP &&
4312                    sk->sk_type == SOCK_STREAM)
4313                        serr->ee.ee_data -= sk->sk_tskey;
4314        }
4315
4316        err = sock_queue_err_skb(sk, skb);
4317
4318        if (err)
4319                kfree_skb(skb);
4320}
4321
4322static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
4323{
4324        bool ret;
4325
4326        if (likely(sysctl_tstamp_allow_data || tsonly))
4327                return true;
4328
4329        read_lock_bh(&sk->sk_callback_lock);
4330        ret = sk->sk_socket && sk->sk_socket->file &&
4331              file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW);
4332        read_unlock_bh(&sk->sk_callback_lock);
4333        return ret;
4334}
4335
4336void skb_complete_tx_timestamp(struct sk_buff *skb,
4337                               struct skb_shared_hwtstamps *hwtstamps)
4338{
4339        struct sock *sk = skb->sk;
4340
4341        if (!skb_may_tx_timestamp(sk, false))
4342                goto err;
4343
4344        /* Take a reference to prevent skb_orphan() from freeing the socket,
4345         * but only if the socket refcount is not zero.
4346         */
4347        if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
4348                *skb_hwtstamps(skb) = *hwtstamps;
4349                __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
4350                sock_put(sk);
4351                return;
4352        }
4353
4354err:
4355        kfree_skb(skb);
4356}
4357EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
4358
4359void __skb_tstamp_tx(struct sk_buff *orig_skb,
4360                     struct skb_shared_hwtstamps *hwtstamps,
4361                     struct sock *sk, int tstype)
4362{
4363        struct sk_buff *skb;
4364        bool tsonly, opt_stats = false;
4365
4366        if (!sk)
4367                return;
4368
4369        if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
4370            skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS)
4371                return;
4372
4373        tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
4374        if (!skb_may_tx_timestamp(sk, tsonly))
4375                return;
4376
4377        if (tsonly) {
4378#ifdef CONFIG_INET
4379                if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
4380                    sk->sk_protocol == IPPROTO_TCP &&
4381                    sk->sk_type == SOCK_STREAM) {
4382                        skb = tcp_get_timestamping_opt_stats(sk);
4383                        opt_stats = true;
4384                } else
4385#endif
4386                        skb = alloc_skb(0, GFP_ATOMIC);
4387        } else {
4388                skb = skb_clone(orig_skb, GFP_ATOMIC);
4389        }
4390        if (!skb)
4391                return;
4392
4393        if (tsonly) {
4394                skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags &
4395                                             SKBTX_ANY_TSTAMP;
4396                skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
4397        }
4398
4399        if (hwtstamps)
4400                *skb_hwtstamps(skb) = *hwtstamps;
4401        else
4402                skb->tstamp = ktime_get_real();
4403
4404        __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
4405}
4406EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
4407
4408void skb_tstamp_tx(struct sk_buff *orig_skb,
4409                   struct skb_shared_hwtstamps *hwtstamps)
4410{
4411        return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk,
4412                               SCM_TSTAMP_SND);
4413}
4414EXPORT_SYMBOL_GPL(skb_tstamp_tx);
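
/* Illustrative usage sketch (not part of this file): a driver reporting a
 * hardware TX timestamp from its completion handler.  dev_cycles_to_ns() is
 * a hypothetical device-specific conversion.
 *
 *        struct skb_shared_hwtstamps hwts;
 *
 *        memset(&hwts, 0, sizeof(hwts));
 *        hwts.hwtstamp = ns_to_ktime(dev_cycles_to_ns(cycles));
 *        skb_tstamp_tx(skb, &hwts);
 */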
4415
4416void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
4417{
4418        struct sock *sk = skb->sk;
4419        struct sock_exterr_skb *serr;
4420        int err = 1;
4421
4422        skb->wifi_acked_valid = 1;
4423        skb->wifi_acked = acked;
4424
4425        serr = SKB_EXT_ERR(skb);
4426        memset(serr, 0, sizeof(*serr));
4427        serr->ee.ee_errno = ENOMSG;
4428        serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
4429
4430        /* Take a reference to prevent skb_orphan() from freeing the socket,
4431         * but only if the socket refcount is not zero.
4432         */
4433        if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
4434                err = sock_queue_err_skb(sk, skb);
4435                sock_put(sk);
4436        }
4437        if (err)
4438                kfree_skb(skb);
4439}
4440EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
4441
4442/**
4443 * skb_partial_csum_set - set up and verify partial csum values for packet
4444 * @skb: the skb to set
4445 * @start: the number of bytes after skb->data to start checksumming.
4446 * @off: the offset from start to place the checksum.
4447 *
4448 * For untrusted partially-checksummed packets, we need to make sure the values
4449 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
4450 *
4451 * This function checks and sets those values and skb->ip_summed: if this
4452 * returns false you should drop the packet.
4453 */
4454bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
4455{
4456        u32 csum_end = (u32)start + (u32)off + sizeof(__sum16);
4457        u32 csum_start = skb_headroom(skb) + (u32)start;
4458
4459        if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) {
4460                net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n",
4461                                     start, off, skb_headroom(skb), skb_headlen(skb));
4462                return false;
4463        }
4464        skb->ip_summed = CHECKSUM_PARTIAL;
4465        skb->csum_start = csum_start;
4466        skb->csum_offset = off;
4467        skb_set_transport_header(skb, start);
4468        return true;
4469}
4470EXPORT_SYMBOL_GPL(skb_partial_csum_set);
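
/* Illustrative usage sketch (not part of this file): validating checksum
 * metadata received from an untrusted source (virtio-style), where
 * 'csum_start' and 'csum_off' come from a guest-supplied header:
 *
 *        if (!skb_partial_csum_set(skb, csum_start, csum_off)) {
 *                kfree_skb(skb);
 *                return -EINVAL;
 *        }
 */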
4471
4472static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
4473                               unsigned int max)
4474{
4475        if (skb_headlen(skb) >= len)
4476                return 0;
4477
4478        /* If we need to pull up then pull up to the max, so we
4479         * won't need to do it again.
4480         */
4481        if (max > skb->len)
4482                max = skb->len;
4483
4484        if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
4485                return -ENOMEM;
4486
4487        if (skb_headlen(skb) < len)
4488                return -EPROTO;
4489
4490        return 0;
4491}
4492
4493#define MAX_TCP_HDR_LEN (15 * 4)
4494
4495static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
4496                                      typeof(IPPROTO_IP) proto,
4497                                      unsigned int off)
4498{
4499        switch (proto) {
4500                int err;
4501
4502        case IPPROTO_TCP:
4503                err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
4504                                          off + MAX_TCP_HDR_LEN);
4505                if (!err && !skb_partial_csum_set(skb, off,
4506                                                  offsetof(struct tcphdr,
4507                                                           check)))
4508                        err = -EPROTO;
4509                return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;
4510
4511        case IPPROTO_UDP:
4512                err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
4513                                          off + sizeof(struct udphdr));
4514                if (!err && !skb_partial_csum_set(skb, off,
4515                                                  offsetof(struct udphdr,
4516                                                           check)))
4517                        err = -EPROTO;
4518                return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
4519        }
4520
4521        return ERR_PTR(-EPROTO);
4522}
4523
4524/* This value should be large enough to cover a tagged ethernet header plus
4525 * maximally sized IP and TCP or UDP headers.
4526 */
4527#define MAX_IP_HDR_LEN 128
4528
4529static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
4530{
4531        unsigned int off;
4532        bool fragment;
4533        __sum16 *csum;
4534        int err;
4535
4536        fragment = false;
4537
4538        err = skb_maybe_pull_tail(skb,
4539                                  sizeof(struct iphdr),
4540                                  MAX_IP_HDR_LEN);
4541        if (err < 0)
4542                goto out;
4543
4544        if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
4545                fragment = true;
4546
4547        off = ip_hdrlen(skb);
4548
4549        err = -EPROTO;
4550
4551        if (fragment)
4552                goto out;
4553
4554        csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
4555        if (IS_ERR(csum))
4556                return PTR_ERR(csum);
4557
4558        if (recalculate)
4559                *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
4560                                           ip_hdr(skb)->daddr,
4561                                           skb->len - off,
4562                                           ip_hdr(skb)->protocol, 0);
4563        err = 0;
4564
4565out:
4566        return err;
4567}
4568
4569/* This value should be large enough to cover a tagged ethernet header plus
4570 * an IPv6 header, all options, and a maximal TCP or UDP header.
4571 */
4572#define MAX_IPV6_HDR_LEN 256
4573
4574#define OPT_HDR(type, skb, off) \
4575        (type *)(skb_network_header(skb) + (off))
4576
4577static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
4578{
4579        int err;
4580        u8 nexthdr;
4581        unsigned int off;
4582        unsigned int len;
4583        bool fragment;
4584        bool done;
4585        __sum16 *csum;
4586
4587        fragment = false;
4588        done = false;
4589
4590        off = sizeof(struct ipv6hdr);
4591
4592        err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
4593        if (err < 0)
4594                goto out;
4595
4596        nexthdr = ipv6_hdr(skb)->nexthdr;
4597
4598        len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
4599        while (off <= len && !done) {
4600                switch (nexthdr) {
4601                case IPPROTO_DSTOPTS:
4602                case IPPROTO_HOPOPTS:
4603                case IPPROTO_ROUTING: {
4604                        struct ipv6_opt_hdr *hp;
4605
4606                        err = skb_maybe_pull_tail(skb,
4607                                                  off +
4608                                                  sizeof(struct ipv6_opt_hdr),
4609                                                  MAX_IPV6_HDR_LEN);
4610                        if (err < 0)
4611                                goto out;
4612
4613                        hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
4614                        nexthdr = hp->nexthdr;
4615                        off += ipv6_optlen(hp);
4616                        break;
4617                }
4618                case IPPROTO_AH: {
4619                        struct ip_auth_hdr *hp;
4620
4621                        err = skb_maybe_pull_tail(skb,
4622                                                  off +
4623                                                  sizeof(struct ip_auth_hdr),
4624                                                  MAX_IPV6_HDR_LEN);
4625                        if (err < 0)
4626                                goto out;
4627
4628                        hp = OPT_HDR(struct ip_auth_hdr, skb, off);
4629                        nexthdr = hp->nexthdr;
4630                        off += ipv6_authlen(hp);
4631                        break;
4632                }
4633                case IPPROTO_FRAGMENT: {
4634                        struct frag_hdr *hp;
4635
4636                        err = skb_maybe_pull_tail(skb,
4637                                                  off +
4638                                                  sizeof(struct frag_hdr),
4639                                                  MAX_IPV6_HDR_LEN);
4640                        if (err < 0)
4641                                goto out;
4642
4643                        hp = OPT_HDR(struct frag_hdr, skb, off);
4644
4645                        if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
4646                                fragment = true;
4647
4648                        nexthdr = hp->nexthdr;
4649                        off += sizeof(struct frag_hdr);
4650                        break;
4651                }
4652                default:
4653                        done = true;
4654                        break;
4655                }
4656        }
4657
4658        err = -EPROTO;
4659
4660        if (!done || fragment)
4661                goto out;
4662
4663        csum = skb_checksum_setup_ip(skb, nexthdr, off);
4664        if (IS_ERR(csum))
4665                return PTR_ERR(csum);
4666
4667        if (recalculate)
4668                *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4669                                         &ipv6_hdr(skb)->daddr,
4670                                         skb->len - off, nexthdr, 0);
4671        err = 0;
4672
4673out:
4674        return err;
4675}
4676
4677/**
4678 * skb_checksum_setup - set up partial checksum offset
4679 * @skb: the skb to set up
4680 * @recalculate: if true the pseudo-header checksum will be recalculated
4681 */
4682int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
4683{
4684        int err;
4685
4686        switch (skb->protocol) {
4687        case htons(ETH_P_IP):
4688                err = skb_checksum_setup_ipv4(skb, recalculate);
4689                break;
4690
4691        case htons(ETH_P_IPV6):
4692                err = skb_checksum_setup_ipv6(skb, recalculate);
4693                break;
4694
4695        default:
4696                err = -EPROTO;
4697                break;
4698        }
4699
4700        return err;
4701}
4702EXPORT_SYMBOL(skb_checksum_setup);
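
/* Illustrative usage sketch: a paravirtual-style receive path whose metadata
 * says the packet carries only a partial checksum could rebuild the offload
 * state with skb_checksum_setup().  The wrapper and the needs_csum flag are
 * hypothetical; only skb_checksum_setup() is taken from this file.
 */
static int example_rx_checksum_setup(struct sk_buff *skb, bool needs_csum)
{
        if (!needs_csum)
                return 0;

        /* Recompute the pseudo-header checksum as well, since the sender
         * may not have filled it in; the caller drops the skb on error.
         */
        return skb_checksum_setup(skb, true);
}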
4703
4704/**
4705 * skb_checksum_maybe_trim - maybe trims the given skb
4706 * @skb: the skb to check
4707 * @transport_len: the data length beyond the network header
4708 *
4709 * Checks whether the given skb has data beyond the given transport length.
4710 * If so, returns a cloned skb trimmed to this transport length.
4711 * Otherwise returns the provided skb. Returns NULL in error cases
4712 * (e.g. transport_len exceeds skb length or out-of-memory).
4713 *
4714 * Caller needs to set the skb transport header and free any returned skb if it
4715 * differs from the provided skb.
4716 */
4717static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
4718                                               unsigned int transport_len)
4719{
4720        struct sk_buff *skb_chk;
4721        unsigned int len = skb_transport_offset(skb) + transport_len;
4722        int ret;
4723
4724        if (skb->len < len)
4725                return NULL;
4726        else if (skb->len == len)
4727                return skb;
4728
4729        skb_chk = skb_clone(skb, GFP_ATOMIC);
4730        if (!skb_chk)
4731                return NULL;
4732
4733        ret = pskb_trim_rcsum(skb_chk, len);
4734        if (ret) {
4735                kfree_skb(skb_chk);
4736                return NULL;
4737        }
4738
4739        return skb_chk;
4740}
4741
4742/**
4743 * skb_checksum_trimmed - validate checksum of an skb
4744 * @skb: the skb to check
4745 * @transport_len: the data length beyond the network header
4746 * @skb_chkf: checksum function to use
4747 *
4748 * Applies the given checksum function skb_chkf to the provided skb.
4749 * Returns a checked and maybe trimmed skb. Returns NULL on error.
4750 *
4751 * If the skb has data beyond the given transport length, then a
4752 * trimmed & cloned skb is checked and returned.
4753 *
4754 * Caller needs to set the skb transport header and free any returned skb if it
4755 * differs from the provided skb.
4756 */
4757struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
4758                                     unsigned int transport_len,
4759                                     __sum16(*skb_chkf)(struct sk_buff *skb))
4760{
4761        struct sk_buff *skb_chk;
4762        unsigned int offset = skb_transport_offset(skb);
4763        __sum16 ret;
4764
4765        skb_chk = skb_checksum_maybe_trim(skb, transport_len);
4766        if (!skb_chk)
4767                goto err;
4768
4769        if (!pskb_may_pull(skb_chk, offset))
4770                goto err;
4771
4772        skb_pull_rcsum(skb_chk, offset);
4773        ret = skb_chkf(skb_chk);
4774        skb_push_rcsum(skb_chk, offset);
4775
4776        if (ret)
4777                goto err;
4778
4779        return skb_chk;
4780
4781err:
4782        if (skb_chk && skb_chk != skb)
4783                kfree_skb(skb_chk);
4784
4785        return NULL;
4786
4787}
4788EXPORT_SYMBOL(skb_checksum_trimmed);
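
/* Illustrative usage sketch: validating a checksum over @transport_len bytes
 * of data beyond the network header, in the spirit of the IGMP/MLD receive
 * checks.  The two example_ helpers are hypothetical;
 * skb_checksum_simple_validate() and skb_checksum_trimmed() are real.
 */
static __sum16 example_simple_validate(struct sk_buff *skb)
{
        return skb_checksum_simple_validate(skb);
}

static bool example_checksum_ok(struct sk_buff *skb, unsigned int transport_len)
{
        struct sk_buff *skb_chk;

        /* skb_checksum_trimmed() expects the transport header to be set. */
        skb_chk = skb_checksum_trimmed(skb, transport_len,
                                       example_simple_validate);
        if (!skb_chk)
                return false;

        /* A trimmed clone may have been returned; the original skb is kept. */
        if (skb_chk != skb)
                kfree_skb(skb_chk);

        return true;
}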
4789
4790void __skb_warn_lro_forwarding(const struct sk_buff *skb)
4791{
4792        net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
4793                             skb->dev->name);
4794}
4795EXPORT_SYMBOL(__skb_warn_lro_forwarding);
4796
4797void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
4798{
4799        if (head_stolen) {
4800                skb_release_head_state(skb);
4801                kmem_cache_free(skbuff_head_cache, skb);
4802        } else {
4803                __kfree_skb(skb);
4804        }
4805}
4806EXPORT_SYMBOL(kfree_skb_partial);
4807
4808/**
4809 * skb_try_coalesce - try to merge skb to prior one
4810 * @to: prior buffer
4811 * @from: buffer to add
4812 * @fragstolen: pointer to boolean
4813 * @delta_truesize: how much more was allocated than was requested
4814 */
4815bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
4816                      bool *fragstolen, int *delta_truesize)
4817{
4818        struct skb_shared_info *to_shinfo, *from_shinfo;
4819        int i, delta, len = from->len;
4820
4821        *fragstolen = false;
4822
4823        if (skb_cloned(to))
4824                return false;
4825
4826        if (len <= skb_tailroom(to)) {
4827                if (len)
4828                        BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
4829                *delta_truesize = 0;
4830                return true;
4831        }
4832
4833        to_shinfo = skb_shinfo(to);
4834        from_shinfo = skb_shinfo(from);
4835        if (to_shinfo->frag_list || from_shinfo->frag_list)
4836                return false;
4837        if (skb_zcopy(to) || skb_zcopy(from))
4838                return false;
4839
4840        if (skb_headlen(from) != 0) {
4841                struct page *page;
4842                unsigned int offset;
4843
4844                if (to_shinfo->nr_frags +
4845                    from_shinfo->nr_frags >= MAX_SKB_FRAGS)
4846                        return false;
4847
4848                if (skb_head_is_locked(from))
4849                        return false;
4850
4851                delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
4852
4853                page = virt_to_head_page(from->head);
4854                offset = from->data - (unsigned char *)page_address(page);
4855
4856                skb_fill_page_desc(to, to_shinfo->nr_frags,
4857                                   page, offset, skb_headlen(from));
4858                *fragstolen = true;
4859        } else {
4860                if (to_shinfo->nr_frags +
4861                    from_shinfo->nr_frags > MAX_SKB_FRAGS)
4862                        return false;
4863
4864                delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
4865        }
4866
4867        WARN_ON_ONCE(delta < len);
4868
4869        memcpy(to_shinfo->frags + to_shinfo->nr_frags,
4870               from_shinfo->frags,
4871               from_shinfo->nr_frags * sizeof(skb_frag_t));
4872        to_shinfo->nr_frags += from_shinfo->nr_frags;
4873
4874        if (!skb_cloned(from))
4875                from_shinfo->nr_frags = 0;
4876
4877        /* if the skb is not cloned this does nothing
4878         * since we set nr_frags to 0.
4879         */
4880        for (i = 0; i < from_shinfo->nr_frags; i++)
4881                __skb_frag_ref(&from_shinfo->frags[i]);
4882
4883        to->truesize += delta;
4884        to->len += len;
4885        to->data_len += len;
4886
4887        *delta_truesize = delta;
4888        return true;
4889}
4890EXPORT_SYMBOL(skb_try_coalesce);
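
/* Illustrative usage sketch: merging a freshly received buffer into the tail
 * of a receive queue, as TCP-style paths do.  On success the source skb's
 * head may have been stolen, so it must be released with kfree_skb_partial()
 * rather than kfree_skb().  The queue handling and any memory accounting of
 * delta would be caller specific.
 */
static bool example_queue_coalesce(struct sk_buff *tail, struct sk_buff *skb)
{
        bool fragstolen;
        int delta;

        if (!skb_try_coalesce(tail, skb, &fragstolen, &delta))
                return false;

        /* delta is the extra truesize now charged to the tail skb. */
        kfree_skb_partial(skb, fragstolen);
        return true;
}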
4891
4892/**
4893 * skb_scrub_packet - scrub an skb
4894 *
4895 * @skb: buffer to clean
4896 * @xnet: packet is crossing netns
4897 *
4898 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
4899 * into/from a tunnel. Some information has to be cleared during these
4900 * operations.
4901 * skb_scrub_packet can also be used to clean a skb before injecting it into
4902 * another namespace (@xnet == true). We have to clear all information in the
4903 * skb that could impact namespace isolation.
4904 */
4905void skb_scrub_packet(struct sk_buff *skb, bool xnet)
4906{
4907        skb->pkt_type = PACKET_HOST;
4908        skb->skb_iif = 0;
4909        skb->ignore_df = 0;
4910        skb_dst_drop(skb);
4911        secpath_reset(skb);
4912        nf_reset(skb);
4913        nf_reset_trace(skb);
4914
4915        if (!xnet)
4916                return;
4917
4918        ipvs_reset(skb);
4919        skb->mark = 0;
4920        skb->tstamp = 0;
4921}
4922EXPORT_SYMBOL_GPL(skb_scrub_packet);
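
/* Illustrative usage sketch: handing a decapsulated packet to a device that
 * may sit in a different network namespace, the way tunnel and forwarding
 * paths do.  The namespace comparison drives @xnet, so mark, timestamp and
 * other isolation-sensitive state are only wiped when the packet actually
 * crosses a netns boundary.
 */
static void example_tunnel_deliver(struct sk_buff *skb, struct net_device *dev)
{
        bool xnet = !net_eq(dev_net(skb->dev), dev_net(dev));

        skb_scrub_packet(skb, xnet);
        skb->dev = dev;
        netif_rx(skb);
}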
4923
4924/**
4925 * skb_gso_transport_seglen - Return length of individual segments of a gso packet
4926 *
4927 * @skb: GSO skb
4928 *
4929 * skb_gso_transport_seglen is used to determine the real size of the
4930 * individual segments, including Layer4 headers (TCP/UDP).
4931 *
4932 * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
4933 */
4934static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
4935{
4936        const struct skb_shared_info *shinfo = skb_shinfo(skb);
4937        unsigned int thlen = 0;
4938
4939        if (skb->encapsulation) {
4940                thlen = skb_inner_transport_header(skb) -
4941                        skb_transport_header(skb);
4942
4943                if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
4944                        thlen += inner_tcp_hdrlen(skb);
4945        } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
4946                thlen = tcp_hdrlen(skb);
4947        } else if (unlikely(skb_is_gso_sctp(skb))) {
4948                thlen = sizeof(struct sctphdr);
4949        } else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
4950                thlen = sizeof(struct udphdr);
4951        }
4952        /* UFO sets gso_size to the size of the fragmentation
4953         * payload, i.e. the size of the L4 (UDP) header is already
4954         * accounted for.
4955         */
4956        return thlen + shinfo->gso_size;
4957}
4958
4959/**
4960 * skb_gso_network_seglen - Return length of individual segments of a gso packet
4961 *
4962 * @skb: GSO skb
4963 *
4964 * skb_gso_network_seglen is used to determine the real size of the
4965 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
4966 *
4967 * The MAC/L2 header is not accounted for.
4968 */
4969static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
4970{
4971        unsigned int hdr_len = skb_transport_header(skb) -
4972                               skb_network_header(skb);
4973
4974        return hdr_len + skb_gso_transport_seglen(skb);
4975}
4976
4977/**
4978 * skb_gso_mac_seglen - Return length of individual segments of a gso packet
4979 *
4980 * @skb: GSO skb
4981 *
4982 * skb_gso_mac_seglen is used to determine the real size of the
4983 * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
4984 * headers (TCP/UDP).
4985 */
4986static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
4987{
4988        unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
4989
4990        return hdr_len + skb_gso_transport_seglen(skb);
4991}
4992
4993/**
4994 * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
4995 *
4996 * There are a couple of instances where we have a GSO skb, and we
4997 * want to determine what size it would be after it is segmented.
4998 *
4999 * We might want to check:
5000 * -    L3+L4+payload size (e.g. IP forwarding)
5001 * - L2+L3+L4+payload size (e.g. sanity check before passing to driver)
5002 *
5003 * This is a helper to do that correctly considering GSO_BY_FRAGS.
5004 *
5005 * @seg_len: The segmented length (from skb_gso_*_seglen). In the
5006 *           GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
5007 *
5008 * @max_len: The maximum permissible length.
5009 *
5010 * Returns true if the segmented length <= max length.
5011 */
5012static inline bool skb_gso_size_check(const struct sk_buff *skb,
5013                                      unsigned int seg_len,
5014                                      unsigned int max_len) {
5015        const struct skb_shared_info *shinfo = skb_shinfo(skb);
5016        const struct sk_buff *iter;
5017
5018        if (shinfo->gso_size != GSO_BY_FRAGS)
5019                return seg_len <= max_len;
5020
5021        /* Undo this so we can re-use header sizes */
5022        seg_len -= GSO_BY_FRAGS;
5023
5024        skb_walk_frags(skb, iter) {
5025                if (seg_len + skb_headlen(iter) > max_len)
5026                        return false;
5027        }
5028
5029        return true;
5030}
5031
5032/**
5033 * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU?
5034 *
5035 * @skb: GSO skb
5036 * @mtu: MTU to validate against
5037 *
5038 * skb_gso_validate_network_len validates if a given skb will fit a
5039 * wanted MTU once split. It considers L3 headers, L4 headers, and the
5040 * payload.
5041 */
5042bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)
5043{
5044        return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
5045}
5046EXPORT_SYMBOL_GPL(skb_gso_validate_network_len);
5047
5048/**
5049 * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
5050 *
5051 * @skb: GSO skb
5052 * @len: length to validate against
5053 *
5054 * skb_gso_validate_mac_len validates if a given skb will fit a wanted
5055 * length once split, including L2, L3 and L4 headers and the payload.
5056 */
5057bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)
5058{
5059        return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len);
5060}
5061EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
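
/* Illustrative usage sketch: checking whether a (possibly GSO) packet will
 * fit a path MTU before forwarding it, similar in spirit to the IPv4/IPv6
 * forwarding checks.  A link-layer frame-size limit would use
 * skb_gso_validate_mac_len() in the same way.
 */
static bool example_fits_mtu(const struct sk_buff *skb, unsigned int mtu)
{
        if (!skb_is_gso(skb))
                return skb->len <= mtu;

        /* Each resulting segment, L3 headers included, must fit the MTU. */
        return skb_gso_validate_network_len(skb, mtu);
}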
5062
5063static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
5064{
5065        int mac_len;
5066
5067        if (skb_cow(skb, skb_headroom(skb)) < 0) {
5068                kfree_skb(skb);
5069                return NULL;
5070        }
5071
5072        mac_len = skb->data - skb_mac_header(skb);
5073        if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) {
5074                memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
5075                        mac_len - VLAN_HLEN - ETH_TLEN);
5076        }
5077        skb->mac_header += VLAN_HLEN;
5078        return skb;
5079}
5080
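/* Pull an inline 802.1Q header out of the packet data and move its tag into
 * the skb's hwaccel VLAN fields, so the rest of the stack sees an untagged
 * frame with skb->protocol set to the encapsulated protocol.
 */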
5081struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
5082{
5083        struct vlan_hdr *vhdr;
5084        u16 vlan_tci;
5085
5086        if (unlikely(skb_vlan_tag_present(skb))) {
5087                /* vlan_tci is already set-up so leave this for another time */
5088                return skb;
5089        }
5090
5091        skb = skb_share_check(skb, GFP_ATOMIC);
5092        if (unlikely(!skb))
5093                goto err_free;
5094
5095        if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
5096                goto err_free;
5097
5098        vhdr = (struct vlan_hdr *)skb->data;
5099        vlan_tci = ntohs(vhdr->h_vlan_TCI);
5100        __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
5101
5102        skb_pull_rcsum(skb, VLAN_HLEN);
5103        vlan_set_encap_proto(skb, vhdr);
5104
5105        skb = skb_reorder_vlan_header(skb);
5106        if (unlikely(!skb))
5107                goto err_free;
5108
5109        skb_reset_network_header(skb);
5110        skb_reset_transport_header(skb);
5111        skb_reset_mac_len(skb);
5112
5113        return skb;
5114
5115err_free:
5116        kfree_skb(skb);
5117        return NULL;
5118}
5119EXPORT_SYMBOL(skb_vlan_untag);
5120
5121int skb_ensure_writable(struct sk_buff *skb, int write_len)
5122{
5123        if (!pskb_may_pull(skb, write_len))
5124                return -ENOMEM;
5125
5126        if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
5127                return 0;
5128
5129        return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
5130}
5131EXPORT_SYMBOL(skb_ensure_writable);
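
/* Illustrative usage sketch: making the Ethernet header private before
 * rewriting it, as packet-mangling hooks do.  This assumes skb->data sits at
 * the mac header; the replacement address is a hypothetical input.
 */
static int example_set_dst_mac(struct sk_buff *skb, const u8 *addr)
{
        int err;

        err = skb_ensure_writable(skb, ETH_HLEN);
        if (unlikely(err))
                return err;

        ether_addr_copy(eth_hdr(skb)->h_dest, addr);
        return 0;
}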
5132
5133/* Remove the VLAN header from the packet and update the csum accordingly.
5134 * Expects a non-skb_vlan_tag_present skb with the VLAN tag in the payload.
5135 */
5136int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
5137{
5138        struct vlan_hdr *vhdr;
5139        int offset = skb->data - skb_mac_header(skb);
5140        int err;
5141
5142        if (WARN_ONCE(offset,
5143                      "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n",
5144                      offset)) {
5145                return -EINVAL;
5146        }
5147
5148        err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
5149        if (unlikely(err))
5150                return err;
5151
5152        skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
5153
5154        vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
5155        *vlan_tci = ntohs(vhdr->h_vlan_TCI);
5156
5157        memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
5158        __skb_pull(skb, VLAN_HLEN);
5159
5160        vlan_set_encap_proto(skb, vhdr);
5161        skb->mac_header += VLAN_HLEN;
5162
5163        if (skb_network_offset(skb) < ETH_HLEN)
5164                skb_set_network_header(skb, ETH_HLEN);
5165
5166        skb_reset_mac_len(skb);
5167
5168        return err;
5169}
5170EXPORT_SYMBOL(__skb_vlan_pop);
5171
5172/* Pop a vlan tag either from hwaccel or from payload.
5173 * Expects skb->data at mac header.
5174 */
5175int skb_vlan_pop(struct sk_buff *skb)
5176{
5177        u16 vlan_tci;
5178        __be16 vlan_proto;
5179        int err;
5180
5181        if (likely(skb_vlan_tag_present(skb))) {
5182                skb->vlan_tci = 0;
5183        } else {
5184                if (unlikely(!eth_type_vlan(skb->protocol)))
5185                        return 0;
5186
5187                err = __skb_vlan_pop(skb, &vlan_tci);
5188                if (err)
5189                        return err;
5190        }
5191        /* move next vlan tag to hw accel tag */
5192        if (likely(!eth_type_vlan(skb->protocol)))
5193                return 0;
5194
5195        vlan_proto = skb->protocol;
5196        err = __skb_vlan_pop(skb, &vlan_tci);
5197        if (unlikely(err))
5198                return err;
5199
5200        __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
5201        return 0;
5202}
5203EXPORT_SYMBOL(skb_vlan_pop);
5204
5205/* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present).
5206 * Expects skb->data at mac header.
5207 */
5208int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
5209{
5210        if (skb_vlan_tag_present(skb)) {
5211                int offset = skb->data - skb_mac_header(skb);
5212                int err;
5213
5214                if (WARN_ONCE(offset,
5215                              "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n",
5216                              offset)) {
5217                        return -EINVAL;
5218                }
5219
5220                err = __vlan_insert_tag(skb, skb->vlan_proto,
5221                                        skb_vlan_tag_get(skb));
5222                if (err)
5223                        return err;
5224
5225                skb->protocol = skb->vlan_proto;
5226                skb->mac_len += VLAN_HLEN;
5227
5228                skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
5229        }
5230        __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
5231        return 0;
5232}
5233EXPORT_SYMBOL(skb_vlan_push);
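
/* Illustrative usage sketch: retagging a frame by popping the outer 802.1Q
 * tag and pushing a fresh one, as a traffic-control style action might.
 * Both helpers expect skb->data at the mac header; the new VLAN id is a
 * hypothetical input.
 */
static int example_vlan_retag(struct sk_buff *skb, u16 new_vid)
{
        int err;

        err = skb_vlan_pop(skb);
        if (err)
                return err;

        return skb_vlan_push(skb, htons(ETH_P_8021Q), new_vid);
}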
5234
5235/**
5236 * alloc_skb_with_frags - allocate skb with page frags
5237 *
5238 * @header_len: size of linear part
5239 * @data_len: needed length in frags
5240 * @max_page_order: max page order desired.
5241 * @errcode: pointer to error code if any
5242 * @gfp_mask: allocation mask
5243 *
5244 * This can be used to allocate a paged skb, given a maximal order for frags.
5245 */
5246struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
5247                                     unsigned long data_len,
5248                                     int max_page_order,
5249                                     int *errcode,
5250                                     gfp_t gfp_mask)
5251{
5252        int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
5253        unsigned long chunk;
5254        struct sk_buff *skb;
5255        struct page *page;
5256        gfp_t gfp_head;
5257        int i;
5258
5259        *errcode = -EMSGSIZE;
5260        /* Note this test could be relaxed, if we succeed in allocating
5261         * high order pages...
5262         */
5263        if (npages > MAX_SKB_FRAGS)
5264                return NULL;
5265
5266        gfp_head = gfp_mask;
5267        if (gfp_head & __GFP_DIRECT_RECLAIM)
5268                gfp_head |= __GFP_RETRY_MAYFAIL;
5269
5270        *errcode = -ENOBUFS;
5271        skb = alloc_skb(header_len, gfp_head);
5272        if (!skb)
5273                return NULL;
5274
5275        skb->truesize += npages << PAGE_SHIFT;
5276
5277        for (i = 0; npages > 0; i++) {
5278                int order = max_page_order;
5279
5280                while (order) {
5281                        if (npages >= 1 << order) {
5282                                page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
5283                                                   __GFP_COMP |
5284                                                   __GFP_NOWARN,
5285                                                   order);
5286                                if (page)
5287                                        goto fill_page;
5288                                /* Do not retry other high order allocations */
5289                                order = 1;
5290                                max_page_order = 0;
5291                        }
5292                        order--;
5293                }
5294                page = alloc_page(gfp_mask);
5295                if (!page)
5296                        goto failure;
5297fill_page:
5298                chunk = min_t(unsigned long, data_len,
5299                              PAGE_SIZE << order);
5300                skb_fill_page_desc(skb, i, page, 0, chunk);
5301                data_len -= chunk;
5302                npages -= 1 << order;
5303        }
5304        return skb;
5305
5306failure:
5307        kfree_skb(skb);
5308        return NULL;
5309}
5310EXPORT_SYMBOL(alloc_skb_with_frags);
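
/* Illustrative usage sketch: building a large, mostly paged skb with a
 * bounded linear part, along the lines of what the socket send path does
 * with this helper.  The size split below is an arbitrary choice.
 */
static struct sk_buff *example_alloc_paged_skb(unsigned long size, int *err)
{
        unsigned long linear = min_t(unsigned long, size, SKB_MAX_HEAD(0));

        return alloc_skb_with_frags(linear, size - linear,
                                    PAGE_ALLOC_COSTLY_ORDER, err, GFP_KERNEL);
}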
5311
5312/* carve out the first off bytes from skb when off < headlen */
5313static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
5314                                    const int headlen, gfp_t gfp_mask)
5315{
5316        int i;
5317        int size = skb_end_offset(skb);
5318        int new_hlen = headlen - off;
5319        u8 *data;
5320
5321        size = SKB_DATA_ALIGN(size);
5322
5323        if (skb_pfmemalloc(skb))
5324                gfp_mask |= __GFP_MEMALLOC;
5325        data = kmalloc_reserve(size +
5326                               SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
5327                               gfp_mask, NUMA_NO_NODE, NULL);
5328        if (!data)
5329                return -ENOMEM;
5330
5331        size = SKB_WITH_OVERHEAD(ksize(data));
5332
5333        /* Copy real data, and all frags */
5334        skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
5335        skb->len -= off;
5336
5337        memcpy((struct skb_shared_info *)(data + size),
5338               skb_shinfo(skb),
5339               offsetof(struct skb_shared_info,
5340                        frags[skb_shinfo(skb)->nr_frags]));
5341        if (skb_cloned(skb)) {
5342                /* drop the old head gracefully */
5343                if (skb_orphan_frags(skb, gfp_mask)) {
5344                        kfree(data);
5345                        return -ENOMEM;
5346                }
5347                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
5348                        skb_frag_ref(skb, i);
5349                if (skb_has_frag_list(skb))
5350                        skb_clone_fraglist(skb);
5351                skb_release_data(skb);
5352        } else {
5353                /* we can reuse the existing refcount - all we did was
5354                 * relocate values
5355                 */
5356                skb_free_head(skb);
5357        }
5358
5359        skb->head = data;
5360        skb->data = data;
5361        skb->head_frag = 0;
5362#ifdef NET_SKBUFF_DATA_USES_OFFSET
5363        skb->end = size;
5364#else
5365        skb->end = skb->head + size;
5366#endif
5367        skb_set_tail_pointer(skb, skb_headlen(skb));
5368        skb_headers_offset_update(skb, 0);
5369        skb->cloned = 0;
5370        skb->hdr_len = 0;
5371        skb->nohdr = 0;
5372        atomic_set(&skb_shinfo(skb)->dataref, 1);
5373
5374        return 0;
5375}
5376
5377static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
5378
5379/* carve out the first eat bytes from skb's frag_list. May recurse into
5380 * pskb_carve()
5381 */
5382static int pskb_carve_frag_list(struct sk_buff *skb,
5383                                struct skb_shared_info *shinfo, int eat,
5384                                gfp_t gfp_mask)
5385{
5386        struct sk_buff *list = shinfo->frag_list;
5387        struct sk_buff *clone = NULL;
5388        struct sk_buff *insp = NULL;
5389
5390        do {
5391                if (!list) {
5392                        pr_err("Not enough bytes to eat. Want %d\n", eat);
5393                        return -EFAULT;
5394                }
5395                if (list->len <= eat) {
5396                        /* Eaten as whole. */
5397                        eat -= list->len;
5398                        list = list->next;
5399                        insp = list;
5400                } else {
5401                        /* Eaten partially. */
5402                        if (skb_shared(list)) {
5403                                clone = skb_clone(list, gfp_mask);
5404                                if (!clone)
5405                                        return -ENOMEM;
5406                                insp = list->next;
5407                                list = clone;
5408                        } else {
5409                                /* This may be pulled without problems. */
5410                                insp = list;
5411                        }
5412                        if (pskb_carve(list, eat, gfp_mask) < 0) {
5413                                kfree_skb(clone);
5414                                return -ENOMEM;
5415                        }
5416                        break;
5417                }
5418        } while (eat);
5419
5420        /* Free pulled out fragments. */
5421        while ((list = shinfo->frag_list) != insp) {
5422                shinfo->frag_list = list->next;
5423                kfree_skb(list);
5424        }
5425        /* And insert new clone at head. */
5426        if (clone) {
5427                clone->next = list;
5428                shinfo->frag_list = clone;
5429        }
5430        return 0;
5431}
5432
5433/* carve off the first off bytes from skb. Split line (off) is in the
5434 * non-linear part of skb
5435 */
5436static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
5437                                       int pos, gfp_t gfp_mask)
5438{
5439        int i, k = 0;
5440        int size = skb_end_offset(skb);
5441        u8 *data;
5442        const int nfrags = skb_shinfo(skb)->nr_frags;
5443        struct skb_shared_info *shinfo;
5444
5445        size = SKB_DATA_ALIGN(size);
5446
5447        if (skb_pfmemalloc(skb))
5448                gfp_mask |= __GFP_MEMALLOC;
5449        data = kmalloc_reserve(size +
5450                               SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
5451                               gfp_mask, NUMA_NO_NODE, NULL);
5452        if (!data)
5453                return -ENOMEM;
5454
5455        size = SKB_WITH_OVERHEAD(ksize(data));
5456
5457        memcpy((struct skb_shared_info *)(data + size),
5458               skb_shinfo(skb), offsetof(struct skb_shared_info,
5459                                         frags[skb_shinfo(skb)->nr_frags]));
5460        if (skb_orphan_frags(skb, gfp_mask)) {
5461                kfree(data);
5462                return -ENOMEM;
5463        }
5464        shinfo = (struct skb_shared_info *)(data + size);
5465        for (i = 0; i < nfrags; i++) {
5466                int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);
5467
5468                if (pos + fsize > off) {
5469                        shinfo->frags[k] = skb_shinfo(skb)->frags[i];
5470
5471                        if (pos < off) {
5472                                /* Split frag.
5473                                 * We have two variants in this case:
5474                                 * 1. Move the whole frag to the second
5475                                 *    part, if it is possible. F.e.
5476                                 *    this approach is mandatory for TUX,
5477                                 *    where splitting is expensive.
5478                                 * 2. Split the frag accurately; we do that here.
5479                                 */
5480                                shinfo->frags[0].page_offset += off - pos;
5481                                skb_frag_size_sub(&shinfo->frags[0], off - pos);
5482                        }
5483                        skb_frag_ref(skb, i);
5484                        k++;
5485                }
5486                pos += fsize;
5487        }
5488        shinfo->nr_frags = k;
5489        if (skb_has_frag_list(skb))
5490                skb_clone_fraglist(skb);
5491
5492        if (k == 0) {
5493                /* split line is in frag list */
5494                pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask);
5495        }
5496        skb_release_data(skb);
5497
5498        skb->head = data;
5499        skb->head_frag = 0;
5500        skb->data = data;
5501#ifdef NET_SKBUFF_DATA_USES_OFFSET
5502        skb->end = size;
5503#else
5504        skb->end = skb->head + size;
5505#endif
5506        skb_reset_tail_pointer(skb);
5507        skb_headers_offset_update(skb, 0);
5508        skb->cloned   = 0;
5509        skb->hdr_len  = 0;
5510        skb->nohdr    = 0;
5511        skb->len -= off;
5512        skb->data_len = skb->len;
5513        atomic_set(&skb_shinfo(skb)->dataref, 1);
5514        return 0;
5515}
5516
5517/* remove len bytes from the beginning of the skb */
5518static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
5519{
5520        int headlen = skb_headlen(skb);
5521
5522        if (len < headlen)
5523                return pskb_carve_inside_header(skb, len, headlen, gfp);
5524        else
5525                return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
5526}
5527
5528/* Extract to_copy bytes starting at off from skb, and return this in
5529 * a new skb
5530 */
5531struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
5532                             int to_copy, gfp_t gfp)
5533{
5534        struct sk_buff  *clone = skb_clone(skb, gfp);
5535
5536        if (!clone)
5537                return NULL;
5538
5539        if (pskb_carve(clone, off, gfp) < 0 ||
5540            pskb_trim(clone, to_copy)) {
5541                kfree_skb(clone);
5542                return NULL;
5543        }
5544        return clone;
5545}
5546EXPORT_SYMBOL(pskb_extract);
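
/* Illustrative usage sketch: carving one protocol record out of a larger
 * receive buffer without copying its payload, in the spirit of the RDS/TCP
 * receive path.  The offset and length are hypothetical inputs.
 */
static struct sk_buff *example_extract_record(struct sk_buff *skb,
                                              int off, int len)
{
        if (off + len > skb->len)
                return NULL;

        return pskb_extract(skb, off, len, GFP_ATOMIC);
}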
5547
5548/**
5549 * skb_condense - try to get rid of fragments/frag_list if possible
5550 * @skb: buffer
5551 *
5552 * Can be used to save memory before the skb is added to a busy queue.
5553 * If the packet has bytes in frags and enough tail room in skb->head,
5554 * pull all of them, so that we can free the frags right now and adjust
5555 * truesize.
5556 * Notes:
5557 *      We do not reallocate skb->head thus cannot fail.
5558 *      Caller must re-evaluate skb->truesize if needed.
5559 */
5560void skb_condense(struct sk_buff *skb)
5561{
5562        if (skb->data_len) {
5563                if (skb->data_len > skb->end - skb->tail ||
5564                    skb_cloned(skb))
5565                        return;
5566
5567                /* Nice, we can free page frag(s) right now */
5568                __pskb_pull_tail(skb, skb->data_len);
5569        }
5570        /* At this point, skb->truesize might be overestimated,
5571         * because the skb had fragments, and fragments do not tell
5572         * their truesize.
5573         * When we pulled their content into skb->head, the fragments
5574         * were freed, but __pskb_pull_tail() could not possibly
5575         * adjust skb->truesize, not knowing the frag truesize.
5576         */
5577        skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
5578}
5579