linux/net/core/skbuff.c
/*
 *      Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *      Authors:        Alan Cox <alan@lxorguk.ukuu.org.uk>
 *                      Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *      Fixes:
 *              Alan Cox        :       Fixed the worst of the load
 *                                      balancer bugs.
 *              Dave Platt      :       Interrupt stacking fix.
 *              Richard Kooijman:       Timestamp fixes.
 *              Alan Cox        :       Changed buffer format.
 *              Alan Cox        :       destructor hook for AF_UNIX etc.
 *              Linus Torvalds  :       Better skb_clone.
 *              Alan Cox        :       Added skb_copy.
 *              Alan Cox        :       Added all the changed routines Linus
 *                                      only put in the headers
 *              Ray VanTassle   :       Fixed --skb->lock in free
 *              Alan Cox        :       skb_copy copy arp field
 *              Andi Kleen      :       slabified it.
 *              Robert Olsson   :       Removed skb_head_pool
 *
 *      NOTE:
 *              The __skb_ routines should be called with interrupts
 *      disabled, or you better be *real* sure that the operation is atomic
 *      with respect to whatever list is being frobbed (e.g. via lock_sock()
 *      or via disabling bottom half handlers, etc).
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*
 *      The functions in this file will not compile correctly with gcc 2.4.x
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <trace/events/skb.h>

#include "kmap_skb.h"

static struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
                                  struct pipe_buffer *buf)
{
        put_page(buf->page);
}

static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
                                struct pipe_buffer *buf)
{
        get_page(buf->page);
}

static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
                               struct pipe_buffer *buf)
{
        return 1;
}


/* Pipe buffer operations for a socket. */
static const struct pipe_buf_operations sock_pipe_buf_ops = {
        .can_merge = 0,
        .map = generic_pipe_buf_map,
        .unmap = generic_pipe_buf_unmap,
        .confirm = generic_pipe_buf_confirm,
        .release = sock_pipe_buf_release,
        .steal = sock_pipe_buf_steal,
        .get = sock_pipe_buf_get,
};

/*
 *      Keep out-of-line to prevent kernel bloat.
 *      __builtin_return_address is not used because it is not always
 *      reliable.
 */

/**
 *      skb_over_panic  -       private function
 *      @skb: buffer
 *      @sz: size
 *      @here: address
 *
 *      Out of line support code for skb_put(). Not user callable.
 */
static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
        printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
                          "data:%p tail:%#lx end:%#lx dev:%s\n",
               here, skb->len, sz, skb->head, skb->data,
               (unsigned long)skb->tail, (unsigned long)skb->end,
               skb->dev ? skb->dev->name : "<NULL>");
        BUG();
}

/**
 *      skb_under_panic -       private function
 *      @skb: buffer
 *      @sz: size
 *      @here: address
 *
 *      Out of line support code for skb_push(). Not user callable.
 */

static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
        printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
                          "data:%p tail:%#lx end:%#lx dev:%s\n",
               here, skb->len, sz, skb->head, skb->data,
               (unsigned long)skb->tail, (unsigned long)skb->end,
               skb->dev ? skb->dev->name : "<NULL>");
        BUG();
}

/*      Allocate a new skbuff. We do this ourselves so we can fill in a few
 *      'private' fields and also do memory statistics to find all the
 *      [BEEP] leaks.
 */

/**
 *      __alloc_skb     -       allocate a network buffer
 *      @size: size to allocate
 *      @gfp_mask: allocation mask
 *      @fclone: allocate from fclone cache instead of head cache
 *              and allocate a cloned (child) skb
 *      @node: numa node to allocate memory on
 *
 *      Allocate a new &sk_buff. The returned buffer has no headroom and a
 *      tailroom of at least @size bytes. The object has a reference count
 *      of one. Returns the buffer on success, or %NULL on failure.
 *
 *      Buffers may only be allocated from interrupts using a @gfp_mask of
 *      %GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
                            int fclone, int node)
{
        struct kmem_cache *cache;
        struct skb_shared_info *shinfo;
        struct sk_buff *skb;
        u8 *data;

        cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

        /* Get the HEAD */
        skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
        if (!skb)
                goto out;
        prefetchw(skb);

        size = SKB_DATA_ALIGN(size);
        data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
                        gfp_mask, node);
        if (!data)
                goto nodata;
        prefetchw(data + size);

        /*
         * Only clear those fields we need to clear, not those that we will
         * actually initialise below. Hence, don't put any more fields after
         * the tail pointer in struct sk_buff!
         */
        memset(skb, 0, offsetof(struct sk_buff, tail));
        skb->truesize = size + sizeof(struct sk_buff);
        atomic_set(&skb->users, 1);
        skb->head = data;
        skb->data = data;
        skb_reset_tail_pointer(skb);
        skb->end = skb->tail + size;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
        skb->mac_header = ~0U;
#endif

        /* make sure we initialize shinfo sequentially */
        shinfo = skb_shinfo(skb);
        memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
        atomic_set(&shinfo->dataref, 1);
        kmemcheck_annotate_variable(shinfo->destructor_arg);

        if (fclone) {
                struct sk_buff *child = skb + 1;
                atomic_t *fclone_ref = (atomic_t *) (child + 1);

                kmemcheck_annotate_bitfield(child, flags1);
                kmemcheck_annotate_bitfield(child, flags2);
                skb->fclone = SKB_FCLONE_ORIG;
                atomic_set(fclone_ref, 1);

                child->fclone = SKB_FCLONE_UNAVAILABLE;
        }
out:
        return skb;
nodata:
        kmem_cache_free(cache, skb);
        skb = NULL;
        goto out;
}
EXPORT_SYMBOL(__alloc_skb);
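
/*
 * Example (illustrative sketch, not part of this file): a typical caller
 * reserves protocol headroom up front and then appends payload at the
 * tail. hdr_len, payload and payload_len are hypothetical.
 *
 *      skb = alloc_skb(hdr_len + payload_len, GFP_KERNEL);
 *      if (!skb)
 *              return -ENOMEM;
 *      skb_reserve(skb, hdr_len);              headroom for later skb_push()
 *      memcpy(skb_put(skb, payload_len), payload, payload_len);
 */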

/**
 *      __netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *      @dev: network device to receive on
 *      @length: length to allocate
 *      @gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *      Allocate a new &sk_buff and assign it a usage count of one. The
 *      buffer has unspecified headroom built in. Users should allocate
 *      the headroom they think they need without accounting for the
 *      built in space. The built in space is used for optimisations.
 *
 *      %NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
                unsigned int length, gfp_t gfp_mask)
{
        struct sk_buff *skb;

        skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
        if (likely(skb)) {
                skb_reserve(skb, NET_SKB_PAD);
                skb->dev = dev;
        }
        return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
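
/*
 * Example (illustrative sketch): a driver RX path commonly pairs
 * netdev_alloc_skb() (an inline wrapper around this function) with
 * skb_put() before handing the frame to the stack. pkt_len and rx_buf
 * are hypothetical.
 *
 *      skb = netdev_alloc_skb(dev, pkt_len);
 *      if (!skb)
 *              return;
 *      memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *      skb->protocol = eth_type_trans(skb, dev);
 *      netif_rx(skb);
 */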

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
                int size)
{
        skb_fill_page_desc(skb, i, page, off, size);
        skb->len += size;
        skb->data_len += size;
        skb->truesize += size;
}
EXPORT_SYMBOL(skb_add_rx_frag);

/**
 *      dev_alloc_skb - allocate an skbuff for receiving
 *      @length: length to allocate
 *
 *      Allocate a new &sk_buff and assign it a usage count of one. The
 *      buffer has unspecified headroom built in. Users should allocate
 *      the headroom they think they need without accounting for the
 *      built in space. The built in space is used for optimisations.
 *
 *      %NULL is returned if there is no free memory. Although this function
 *      allocates memory it can be called from an interrupt.
 */
struct sk_buff *dev_alloc_skb(unsigned int length)
{
        /*
         * There is more code here than it seems:
         * __dev_alloc_skb is an inline
         */
        return __dev_alloc_skb(length, GFP_ATOMIC);
}
EXPORT_SYMBOL(dev_alloc_skb);

static void skb_drop_list(struct sk_buff **listp)
{
        struct sk_buff *list = *listp;

        *listp = NULL;

        do {
                struct sk_buff *this = list;
                list = list->next;
                kfree_skb(this);
        } while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
        skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
        struct sk_buff *list;

        skb_walk_frags(skb, list)
                skb_get(list);
}

static void skb_release_data(struct sk_buff *skb)
{
        if (!skb->cloned ||
            !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
                               &skb_shinfo(skb)->dataref)) {
                if (skb_shinfo(skb)->nr_frags) {
                        int i;
                        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                                put_page(skb_shinfo(skb)->frags[i].page);
                }

                if (skb_has_frag_list(skb))
                        skb_drop_fraglist(skb);

                kfree(skb->head);
        }
}

/*
 *      Free an skbuff's memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
        struct sk_buff *other;
        atomic_t *fclone_ref;

        switch (skb->fclone) {
        case SKB_FCLONE_UNAVAILABLE:
                kmem_cache_free(skbuff_head_cache, skb);
                break;

        case SKB_FCLONE_ORIG:
                fclone_ref = (atomic_t *) (skb + 2);
                if (atomic_dec_and_test(fclone_ref))
                        kmem_cache_free(skbuff_fclone_cache, skb);
                break;

        case SKB_FCLONE_CLONE:
                fclone_ref = (atomic_t *) (skb + 1);
                other = skb - 1;

                /* The clone portion is available for
                 * fast-cloning again.
                 */
                skb->fclone = SKB_FCLONE_UNAVAILABLE;

                if (atomic_dec_and_test(fclone_ref))
                        kmem_cache_free(skbuff_fclone_cache, other);
                break;
        }
}

static void skb_release_head_state(struct sk_buff *skb)
{
        skb_dst_drop(skb);
#ifdef CONFIG_XFRM
        secpath_put(skb->sp);
#endif
        if (skb->destructor) {
                WARN_ON(in_irq());
                skb->destructor(skb);
        }
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
        nf_conntrack_put(skb->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
        nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
        nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
        skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
        skb->tc_verd = 0;
#endif
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
        skb_release_head_state(skb);
        skb_release_data(skb);
}

/**
 *      __kfree_skb - private function
 *      @skb: buffer
 *
 *      Free an sk_buff. Release anything attached to the buffer.
 *      Clean the state. This is an internal helper function. Users should
 *      always call kfree_skb().
 */

void __kfree_skb(struct sk_buff *skb)
{
        skb_release_all(skb);
        kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *      kfree_skb - free an sk_buff
 *      @skb: buffer to free
 *
 *      Drop a reference to the buffer and free it if the usage count has
 *      hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
        if (unlikely(!skb))
                return;
        if (likely(atomic_read(&skb->users) == 1))
                smp_rmb();
        else if (likely(!atomic_dec_and_test(&skb->users)))
                return;
        trace_kfree_skb(skb, __builtin_return_address(0));
        __kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

/**
 *      consume_skb - free an skbuff
 *      @skb: buffer to free
 *
 *      Drop a reference to the buffer and free it if the usage count has
 *      hit zero. Functions identically to kfree_skb(), but kfree_skb()
 *      assumes that the frame is being dropped after a failure and notes
 *      that in its tracing.
 */
void consume_skb(struct sk_buff *skb)
{
        if (unlikely(!skb))
                return;
        if (likely(atomic_read(&skb->users) == 1))
                smp_rmb();
        else if (likely(!atomic_dec_and_test(&skb->users)))
                return;
        trace_consume_skb(skb);
        __kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
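
/*
 * Example (illustrative): choosing between the two free functions. In a
 * TX completion handler the frame left the box, so consume_skb() is the
 * right call; kfree_skb() fires the kfree_skb tracepoint and would make
 * the path look like it is dropping packets to tools built on that
 * tracepoint (e.g. dropwatch).
 *
 *      if (tx_ok)
 *              consume_skb(skb);
 *      else
 *              kfree_skb(skb);
 */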

/**
 *      skb_recycle_check - check if skb can be reused for receive
 *      @skb: buffer
 *      @skb_size: minimum receive buffer size
 *
 *      Checks that the skb passed in is not shared or cloned, and
 *      that it is linear and its head portion is at least as large as
 *      skb_size so that it can be recycled as a receive buffer.
 *      If these conditions are met, this function does any necessary
 *      reference count dropping and cleans up the skbuff as if it
 *      just came from __alloc_skb().
 */
bool skb_recycle_check(struct sk_buff *skb, int skb_size)
{
        struct skb_shared_info *shinfo;

        if (irqs_disabled())
                return false;

        if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
                return false;

        skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
        if (skb_end_pointer(skb) - skb->head < skb_size)
                return false;

        if (skb_shared(skb) || skb_cloned(skb))
                return false;

        skb_release_head_state(skb);

        shinfo = skb_shinfo(skb);
        memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
        atomic_set(&shinfo->dataref, 1);

        memset(skb, 0, offsetof(struct sk_buff, tail));
        skb->data = skb->head + NET_SKB_PAD;
        skb_reset_tail_pointer(skb);

        return true;
}
EXPORT_SYMBOL(skb_recycle_check);

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
        new->tstamp             = old->tstamp;
        new->dev                = old->dev;
        new->transport_header   = old->transport_header;
        new->network_header     = old->network_header;
        new->mac_header         = old->mac_header;
        skb_dst_copy(new, old);
        new->rxhash             = old->rxhash;
#ifdef CONFIG_XFRM
        new->sp                 = secpath_get(old->sp);
#endif
        memcpy(new->cb, old->cb, sizeof(old->cb));
        new->csum               = old->csum;
        new->local_df           = old->local_df;
        new->pkt_type           = old->pkt_type;
        new->ip_summed          = old->ip_summed;
        skb_copy_queue_mapping(new, old);
        new->priority           = old->priority;
        new->deliver_no_wcard   = old->deliver_no_wcard;
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
        new->ipvs_property      = old->ipvs_property;
#endif
        new->protocol           = old->protocol;
        new->mark               = old->mark;
        new->skb_iif            = old->skb_iif;
        __nf_copy(new, old);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
        new->nf_trace           = old->nf_trace;
#endif
#ifdef CONFIG_NET_SCHED
        new->tc_index           = old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
        new->tc_verd            = old->tc_verd;
#endif
#endif
        new->vlan_tci           = old->vlan_tci;

        skb_copy_secmark(new, old);
}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

        n->next = n->prev = NULL;
        n->sk = NULL;
        __copy_skb_header(n, skb);

        C(len);
        C(data_len);
        C(mac_len);
        n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
        n->cloned = 1;
        n->nohdr = 0;
        n->destructor = NULL;
        C(tail);
        C(end);
        C(head);
        C(data);
        C(truesize);
        atomic_set(&n->users, 1);

        atomic_inc(&(skb_shinfo(skb)->dataref));
        skb->cloned = 1;

        return n;
#undef C
}

/**
 *      skb_morph       -       morph one skb into another
 *      @dst: the skb to receive the contents
 *      @src: the skb to supply the contents
 *
 *      This is identical to skb_clone except that the target skb is
 *      supplied by the user.
 *
 *      The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
        skb_release_all(dst);
        return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

/**
 *      skb_clone       -       duplicate an sk_buff
 *      @skb: buffer to clone
 *      @gfp_mask: allocation priority
 *
 *      Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *      copies share the same packet data but not structure. The new
 *      buffer has a reference count of 1. If the allocation fails the
 *      function returns %NULL otherwise the new buffer is returned.
 *
 *      If this function is called from an interrupt, @gfp_mask must be
 *      %GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
        struct sk_buff *n;

        n = skb + 1;
        if (skb->fclone == SKB_FCLONE_ORIG &&
            n->fclone == SKB_FCLONE_UNAVAILABLE) {
                atomic_t *fclone_ref = (atomic_t *) (n + 1);
                n->fclone = SKB_FCLONE_CLONE;
                atomic_inc(fclone_ref);
        } else {
                n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
                if (!n)
                        return NULL;

                kmemcheck_annotate_bitfield(n, flags1);
                kmemcheck_annotate_bitfield(n, flags2);
                n->fclone = SKB_FCLONE_UNAVAILABLE;
        }

        return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);
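
/*
 * Example (illustrative sketch): keeping a packet on a local queue while
 * also handing it to the device, as a retransmit path might. The clone
 * shares the payload, so neither copy may write to the data without
 * first making it private (see pskb_copy()/skb_copy()).
 * retransmit_queue is hypothetical.
 *
 *      struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
 *      if (!clone)
 *              goto drop;
 *      __skb_queue_tail(&retransmit_queue, skb);
 *      dev_queue_xmit(clone);
 */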

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
#ifndef NET_SKBUFF_DATA_USES_OFFSET
        /*
         *      Shift between the two data areas in bytes
         */
        unsigned long offset = new->data - old->data;
#endif

        __copy_skb_header(new, old);

#ifndef NET_SKBUFF_DATA_USES_OFFSET
        /* {transport,network,mac}_header are relative to skb->head */
        new->transport_header += offset;
        new->network_header   += offset;
        if (skb_mac_header_was_set(new))
                new->mac_header       += offset;
#endif
        skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
        skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
        skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

/**
 *      skb_copy        -       create private copy of an sk_buff
 *      @skb: buffer to copy
 *      @gfp_mask: allocation priority
 *
 *      Make a copy of both an &sk_buff and its data. This is used when the
 *      caller wishes to modify the data and needs a private copy of the
 *      data to alter. Returns %NULL on failure or the pointer to the buffer
 *      on success. The returned buffer has a reference count of 1.
 *
 *      As a by-product this function converts a non-linear &sk_buff to a
 *      linear one, so the &sk_buff becomes completely private and the
 *      caller is allowed to modify all the data of the returned buffer.
 *      This means the function is not recommended when only the header is
 *      going to be modified; use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
        int headerlen = skb_headroom(skb);
        unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len;
        struct sk_buff *n = alloc_skb(size, gfp_mask);

        if (!n)
                return NULL;

        /* Set the data pointer */
        skb_reserve(n, headerlen);
        /* Set the tail pointer and length */
        skb_put(n, skb->len);

        if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
                BUG();

        copy_skb_header(n, skb);
        return n;
}
EXPORT_SYMBOL(skb_copy);

/**
 *      pskb_copy       -       create copy of an sk_buff with private head.
 *      @skb: buffer to copy
 *      @gfp_mask: allocation priority
 *
 *      Make a copy of both an &sk_buff and part of its data, located
 *      in its header. Fragmented data remains shared. This is used when
 *      the caller wishes to modify only the header of an &sk_buff and
 *      needs a private copy of the header to alter. Returns %NULL on
 *      failure or the pointer to the buffer on success.
 *      The returned buffer has a reference count of 1.
 */

struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
{
        unsigned int size = skb_end_pointer(skb) - skb->head;
        struct sk_buff *n = alloc_skb(size, gfp_mask);

        if (!n)
                goto out;

        /* Set the data pointer */
        skb_reserve(n, skb_headroom(skb));
        /* Set the tail pointer and length */
        skb_put(n, skb_headlen(skb));
        /* Copy the bytes */
        skb_copy_from_linear_data(skb, n->data, n->len);

        n->truesize += skb->data_len;
        n->data_len  = skb->data_len;
        n->len       = skb->len;

        if (skb_shinfo(skb)->nr_frags) {
                int i;

                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
                        get_page(skb_shinfo(n)->frags[i].page);
                }
                skb_shinfo(n)->nr_frags = i;
        }

        if (skb_has_frag_list(skb)) {
                skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
                skb_clone_fraglist(n);
        }

        copy_skb_header(n, skb);
out:
        return n;
}
EXPORT_SYMBOL(pskb_copy);
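
/*
 * Example (illustrative): picking a copy primitive. If only the headers
 * will be rewritten, pskb_copy() is the cheaper choice because the paged
 * data stays shared; a full skb_copy() is needed only when the payload
 * itself must become writable.
 *
 *      nskb = only_headers_change ? pskb_copy(skb, GFP_ATOMIC)
 *                                 : skb_copy(skb, GFP_ATOMIC);
 *      if (!nskb)
 *              return -ENOMEM;
 */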

/**
 *      pskb_expand_head - reallocate header of &sk_buff
 *      @skb: buffer to reallocate
 *      @nhead: room to add at head
 *      @ntail: room to add at tail
 *      @gfp_mask: allocation priority
 *
 *      Expands (or creates an identical copy, if @nhead and @ntail are
 *      zero) the header of @skb. The &sk_buff itself is not changed and
 *      MUST have a reference count of 1. Returns zero on success or a
 *      negative error code if expansion failed; in the latter case the
 *      &sk_buff is left unchanged.
 *
 *      All the pointers pointing into skb header may change and must be
 *      reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
                     gfp_t gfp_mask)
{
        int i;
        u8 *data;
        int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail;
        long off;
        bool fastpath;

        BUG_ON(nhead < 0);

        if (skb_shared(skb))
                BUG();

        size = SKB_DATA_ALIGN(size);

        /* Check if we can avoid taking references on fragments if we own
         * the last reference on skb->head. (see skb_release_data())
         */
        if (!skb->cloned)
                fastpath = true;
        else {
                int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;

                fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
        }

        if (fastpath &&
            size + sizeof(struct skb_shared_info) <= ksize(skb->head)) {
                memmove(skb->head + size, skb_shinfo(skb),
                        offsetof(struct skb_shared_info,
                                 frags[skb_shinfo(skb)->nr_frags]));
                memmove(skb->head + nhead, skb->head,
                        skb_tail_pointer(skb) - skb->head);
                off = nhead;
                goto adjust_others;
        }

        data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
        if (!data)
                goto nodata;

        /* Copy only real data... and, alas, header. This should be
         * optimized for the cases when header is void.
         */
        memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

        memcpy((struct skb_shared_info *)(data + size),
               skb_shinfo(skb),
               offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

        if (fastpath) {
                kfree(skb->head);
        } else {
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                        get_page(skb_shinfo(skb)->frags[i].page);

                if (skb_has_frag_list(skb))
                        skb_clone_fraglist(skb);

                skb_release_data(skb);
        }
        off = (data + nhead) - skb->head;

        skb->head     = data;
adjust_others:
        skb->data    += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
        skb->end      = size;
        off           = nhead;
#else
        skb->end      = skb->head + size;
#endif
        /* {transport,network,mac}_header and tail are relative to skb->head */
        skb->tail             += off;
        skb->transport_header += off;
        skb->network_header   += off;
        if (skb_mac_header_was_set(skb))
                skb->mac_header += off;
        /* Only adjust this if it actually is csum_start rather than csum */
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                skb->csum_start += nhead;
        skb->cloned   = 0;
        skb->hdr_len  = 0;
        skb->nohdr    = 0;
        atomic_set(&skb_shinfo(skb)->dataref, 1);
        return 0;

nodata:
        return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);
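
/*
 * Example (illustrative sketch): making room to push an extra header on
 * a buffer that may be cloned or short on headroom. NEW_HDR_LEN is
 * hypothetical. All cached pointers into the old header (e.g. from
 * ip_hdr()) must be re-derived afterwards.
 *
 *      if (skb_headroom(skb) < NEW_HDR_LEN || skb_cloned(skb)) {
 *              if (pskb_expand_head(skb, NEW_HDR_LEN, 0, GFP_ATOMIC))
 *                      goto drop;
 *      }
 *      hdr = skb_push(skb, NEW_HDR_LEN);
 */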

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
        struct sk_buff *skb2;
        int delta = headroom - skb_headroom(skb);

        if (delta <= 0)
                skb2 = pskb_copy(skb, GFP_ATOMIC);
        else {
                skb2 = skb_clone(skb, GFP_ATOMIC);
                if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
                                             GFP_ATOMIC)) {
                        kfree_skb(skb2);
                        skb2 = NULL;
                }
        }
        return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);

/**
 *      skb_copy_expand -       copy and expand sk_buff
 *      @skb: buffer to copy
 *      @newheadroom: new free bytes at head
 *      @newtailroom: new free bytes at tail
 *      @gfp_mask: allocation priority
 *
 *      Make a copy of both an &sk_buff and its data and while doing so
 *      allocate additional space.
 *
 *      This is used when the caller wishes to modify the data and needs a
 *      private copy of the data to alter as well as more space for new fields.
 *      Returns %NULL on failure or the pointer to the buffer
 *      on success. The returned buffer has a reference count of 1.
 *
 *      You must pass %GFP_ATOMIC as the allocation priority if this function
 *      is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
                                int newheadroom, int newtailroom,
                                gfp_t gfp_mask)
{
        /*
         *      Allocate the copy buffer
         */
        struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
                                      gfp_mask);
        int oldheadroom = skb_headroom(skb);
        int head_copy_len, head_copy_off;
        int off;

        if (!n)
                return NULL;

        skb_reserve(n, newheadroom);

        /* Set the tail pointer and length */
        skb_put(n, skb->len);

        head_copy_len = oldheadroom;
        head_copy_off = 0;
        if (newheadroom <= head_copy_len)
                head_copy_len = newheadroom;
        else
                head_copy_off = newheadroom - head_copy_len;

        /* Copy the linear header and data. */
        if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
                          skb->len + head_copy_len))
                BUG();

        copy_skb_header(n, skb);

        off                  = newheadroom - oldheadroom;
        if (n->ip_summed == CHECKSUM_PARTIAL)
                n->csum_start += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
        n->transport_header += off;
        n->network_header   += off;
        if (skb_mac_header_was_set(skb))
                n->mac_header += off;
#endif

        return n;
}
EXPORT_SYMBOL(skb_copy_expand);
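
/*
 * Example (illustrative): taking a private copy while growing headroom in
 * one step, e.g. before encapsulating a frame that arrived with no spare
 * room. encap_len is hypothetical.
 *
 *      nskb = skb_copy_expand(skb, encap_len, 0, GFP_ATOMIC);
 *      if (!nskb)
 *              goto drop;
 *      encap_hdr = skb_push(nskb, encap_len);
 */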

/**
 *      skb_pad                 -       zero pad the tail of an skb
 *      @skb: buffer to pad
 *      @pad: space to pad
 *
 *      Ensure that a buffer is followed by a padding area that is zero
 *      filled. Used by network drivers which may DMA or transfer data
 *      beyond the buffer end onto the wire.
 *
 *      May return an error in out-of-memory cases. The skb is freed on
 *      error.
 */

int skb_pad(struct sk_buff *skb, int pad)
{
        int err;
        int ntail;

        /* If the skbuff is non-linear, tailroom is always zero.. */
        if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
                memset(skb->data+skb->len, 0, pad);
                return 0;
        }

        ntail = skb->data_len + pad - (skb->end - skb->tail);
        if (likely(skb_cloned(skb) || ntail > 0)) {
                err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
                if (unlikely(err))
                        goto free_skb;
        }

        /* FIXME: The use of this function with non-linear skbs really needs
         * to be audited.
         */
        err = skb_linearize(skb);
        if (unlikely(err))
                goto free_skb;

        memset(skb->data + skb->len, 0, pad);
        return 0;

free_skb:
        kfree_skb(skb);
        return err;
}
EXPORT_SYMBOL(skb_pad);
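
/*
 * Example (illustrative): drivers usually reach skb_pad() through the
 * skb_padto() wrapper when the hardware cannot pad short frames itself.
 * Note that on failure the skb has already been freed.
 *
 *      if (skb_padto(skb, ETH_ZLEN))
 *              return NETDEV_TX_OK;
 */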

/**
 *      skb_put - add data to a buffer
 *      @skb: buffer to use
 *      @len: amount of data to add
 *
 *      This function extends the used data area of the buffer. If this would
 *      exceed the total buffer size the kernel will panic. A pointer to the
 *      first byte of the extra data is returned.
 */
unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
        unsigned char *tmp = skb_tail_pointer(skb);
        SKB_LINEAR_ASSERT(skb);
        skb->tail += len;
        skb->len  += len;
        if (unlikely(skb->tail > skb->end))
                skb_over_panic(skb, len, __builtin_return_address(0));
        return tmp;
}
EXPORT_SYMBOL(skb_put);

/**
 *      skb_push - add data to the start of a buffer
 *      @skb: buffer to use
 *      @len: amount of data to add
 *
 *      This function extends the used data area of the buffer at the buffer
 *      start. If this would exceed the total buffer headroom the kernel will
 *      panic. A pointer to the first byte of the extra data is returned.
 */
unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
        skb->data -= len;
        skb->len  += len;
        if (unlikely(skb->data < skb->head))
                skb_under_panic(skb, len, __builtin_return_address(0));
        return skb->data;
}
EXPORT_SYMBOL(skb_push);

/**
 *      skb_pull - remove data from the start of a buffer
 *      @skb: buffer to use
 *      @len: amount of data to remove
 *
 *      This function removes data from the start of a buffer, returning
 *      the memory to the headroom. A pointer to the next data in the buffer
 *      is returned. Once the data has been pulled future pushes will overwrite
 *      the old data.
 */
unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
        return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);

/**
 *      skb_trim - remove end from a buffer
 *      @skb: buffer to alter
 *      @len: new length
 *
 *      Cut the length of a buffer down by removing data from the tail. If
 *      the buffer is already under the length specified it is not modified.
 *      The skb must be linear.
 */
void skb_trim(struct sk_buff *skb, unsigned int len)
{
        if (skb->len > len)
                __skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);
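
/*
 * Example (illustrative sketch): how the four basic operations move the
 * data and tail pointers when building and then consuming a frame.
 * payload and len are hypothetical.
 *
 *      skb_reserve(skb, ETH_HLEN);                  create headroom
 *      memcpy(skb_put(skb, len), payload, len);     append data at the tail
 *      eth = skb_push(skb, ETH_HLEN);               prepend the link header
 *      ...
 *      skb_pull(skb, ETH_HLEN);                     consume the header again
 *      skb_trim(skb, skb->len - ETH_FCS_LEN);       chop a trailer off the end
 */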

/* Trims skb to length len. It can change skb pointers.
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
        struct sk_buff **fragp;
        struct sk_buff *frag;
        int offset = skb_headlen(skb);
        int nfrags = skb_shinfo(skb)->nr_frags;
        int i;
        int err;

        if (skb_cloned(skb) &&
            unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
                return err;

        i = 0;
        if (offset >= len)
                goto drop_pages;

        for (; i < nfrags; i++) {
                int end = offset + skb_shinfo(skb)->frags[i].size;

                if (end < len) {
                        offset = end;
                        continue;
                }

                skb_shinfo(skb)->frags[i++].size = len - offset;

drop_pages:
                skb_shinfo(skb)->nr_frags = i;

                for (; i < nfrags; i++)
                        put_page(skb_shinfo(skb)->frags[i].page);

                if (skb_has_frag_list(skb))
                        skb_drop_fraglist(skb);
                goto done;
        }

        for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
             fragp = &frag->next) {
                int end = offset + frag->len;

                if (skb_shared(frag)) {
                        struct sk_buff *nfrag;

                        nfrag = skb_clone(frag, GFP_ATOMIC);
                        if (unlikely(!nfrag))
                                return -ENOMEM;

                        nfrag->next = frag->next;
                        kfree_skb(frag);
                        frag = nfrag;
                        *fragp = frag;
                }

                if (end < len) {
                        offset = end;
                        continue;
                }

                if (end > len &&
                    unlikely((err = pskb_trim(frag, len - offset))))
                        return err;

                if (frag->next)
                        skb_drop_list(&frag->next);
                break;
        }

done:
        if (len > skb_headlen(skb)) {
                skb->data_len -= skb->len - len;
                skb->len       = len;
        } else {
                skb->len       = len;
                skb->data_len  = 0;
                skb_set_tail_pointer(skb, len);
        }

        return 0;
}
EXPORT_SYMBOL(___pskb_trim);

/**
 *      __pskb_pull_tail - advance tail of skb header
 *      @skb: buffer to reallocate
 *      @delta: number of bytes to advance tail
 *
 *      This function only makes sense on a fragmented &sk_buff: it expands
 *      the header, moving its tail forward and copying the necessary data
 *      from the fragmented part.
 *
 *      &sk_buff MUST have reference count of 1.
 *
 *      Returns %NULL (and the &sk_buff does not change) if the pull failed,
 *      or the value of the new tail of the skb on success.
 *
 *      All the pointers pointing into skb header may change and must be
 *      reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
        /* If the skb does not have enough free space at the tail, get a new
         * one plus 128 bytes for future expansions. If we have enough room
         * at the tail, reallocate without expansion only if skb is cloned.
         */
        int i, k, eat = (skb->tail + delta) - skb->end;

        if (eat > 0 || skb_cloned(skb)) {
                if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
                                     GFP_ATOMIC))
                        return NULL;
        }

        if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
                BUG();

        /* Optimization: no fragments, no reasons to preestimate
         * size of pulled pages. Superb.
         */
        if (!skb_has_frag_list(skb))
                goto pull_pages;

        /* Estimate size of pulled pages. */
        eat = delta;
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                if (skb_shinfo(skb)->frags[i].size >= eat)
                        goto pull_pages;
                eat -= skb_shinfo(skb)->frags[i].size;
        }

        /* If we need to update the frag list, we are in trouble.
         * Certainly, it is possible to add an offset to the skb data,
         * but taking into account that pulling is expected to be a very
         * rare operation, it is worth fighting against further bloating
         * of the skb head and crucifying ourselves here instead.
         * Pure masochism, indeed. 8)8)
         */
        if (eat) {
                struct sk_buff *list = skb_shinfo(skb)->frag_list;
                struct sk_buff *clone = NULL;
                struct sk_buff *insp = NULL;

                do {
                        BUG_ON(!list);

                        if (list->len <= eat) {
                                /* Eaten as whole. */
                                eat -= list->len;
                                list = list->next;
                                insp = list;
                        } else {
                                /* Eaten partially. */

                                if (skb_shared(list)) {
                                        /* Sucks! We need to fork list. :-( */
                                        clone = skb_clone(list, GFP_ATOMIC);
                                        if (!clone)
                                                return NULL;
                                        insp = list->next;
                                        list = clone;
                                } else {
                                        /* This may be pulled without
                                         * problems. */
                                        insp = list;
                                }
                                if (!pskb_pull(list, eat)) {
                                        kfree_skb(clone);
                                        return NULL;
                                }
                                break;
                        }
                } while (eat);

                /* Free pulled out fragments. */
                while ((list = skb_shinfo(skb)->frag_list) != insp) {
                        skb_shinfo(skb)->frag_list = list->next;
                        kfree_skb(list);
                }
                /* And insert new clone at head. */
                if (clone) {
                        clone->next = list;
                        skb_shinfo(skb)->frag_list = clone;
                }
        }
        /* Success! Now we may commit changes to skb data. */

pull_pages:
        eat = delta;
        k = 0;
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                if (skb_shinfo(skb)->frags[i].size <= eat) {
                        put_page(skb_shinfo(skb)->frags[i].page);
                        eat -= skb_shinfo(skb)->frags[i].size;
                } else {
                        skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
                        if (eat) {
                                skb_shinfo(skb)->frags[k].page_offset += eat;
                                skb_shinfo(skb)->frags[k].size -= eat;
                                eat = 0;
                        }
                        k++;
                }
        }
        skb_shinfo(skb)->nr_frags = k;

        skb->tail     += delta;
        skb->data_len -= delta;

        return skb_tail_pointer(skb);
}
EXPORT_SYMBOL(__pskb_pull_tail);
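
/*
 * Example (illustrative): protocol code rarely calls __pskb_pull_tail()
 * directly; it goes through pskb_may_pull() to guarantee that the bytes
 * it is about to read live in the linear header, and only ends up here
 * when they do not.
 *
 *      if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *              goto drop;
 *      iph = ip_hdr(skb);
 */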

/* Copy some data bits from skb to kernel buffer. */

int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
        int start = skb_headlen(skb);
        struct sk_buff *frag_iter;
        int i, copy;

        if (offset > (int)skb->len - len)
                goto fault;

        /* Copy header. */
        if ((copy = start - offset) > 0) {
                if (copy > len)
                        copy = len;
                skb_copy_from_linear_data_offset(skb, offset, to, copy);
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
                to     += copy;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                WARN_ON(start > offset + len);

                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        u8 *vaddr;

                        if (copy > len)
                                copy = len;

                        vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
                        memcpy(to,
                               vaddr + skb_shinfo(skb)->frags[i].page_offset +
                               offset - start, copy);
                        kunmap_skb_frag(vaddr);

                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                        to     += copy;
                }
                start = end;
        }

        skb_walk_frags(skb, frag_iter) {
                int end;

                WARN_ON(start > offset + len);

                end = start + frag_iter->len;
                if ((copy = end - offset) > 0) {
                        if (copy > len)
                                copy = len;
                        if (skb_copy_bits(frag_iter, offset - start, to, copy))
                                goto fault;
                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                        to     += copy;
                }
                start = end;
        }
        if (!len)
                return 0;

fault:
        return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_bits);
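
/*
 * Example (illustrative): copying a region into a flat kernel buffer no
 * matter how the skb is laid out. offset, buf and buf_len are
 * hypothetical.
 *
 *      if (skb_copy_bits(skb, offset, buf, buf_len) < 0)
 *              goto fault;
 */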
1364
1365/*
1366 * Callback from splice_to_pipe(), if we need to release some pages
1367 * at the end of the spd in case we error'ed out in filling the pipe.
1368 */
1369static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
1370{
1371        put_page(spd->pages[i]);
1372}
1373
1374static inline struct page *linear_to_page(struct page *page, unsigned int *len,
1375                                          unsigned int *offset,
1376                                          struct sk_buff *skb, struct sock *sk)
1377{
1378        struct page *p = sk->sk_sndmsg_page;
1379        unsigned int off;
1380
1381        if (!p) {
1382new_page:
1383                p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
1384                if (!p)
1385                        return NULL;
1386
1387                off = sk->sk_sndmsg_off = 0;
1388                /* hold one ref to this page until it's full */
1389        } else {
1390                unsigned int mlen;
1391
1392                off = sk->sk_sndmsg_off;
1393                mlen = PAGE_SIZE - off;
1394                if (mlen < 64 && mlen < *len) {
1395                        put_page(p);
1396                        goto new_page;
1397                }
1398
1399                *len = min_t(unsigned int, *len, mlen);
1400        }
1401
1402        memcpy(page_address(p) + off, page_address(page) + *offset, *len);
1403        sk->sk_sndmsg_off += *len;
1404        *offset = off;
1405        get_page(p);
1406
1407        return p;
1408}
1409
1410/*
1411 * Fill page/offset/length into spd, if it can hold more pages.
1412 */
1413static inline int spd_fill_page(struct splice_pipe_desc *spd,
1414                                struct pipe_inode_info *pipe, struct page *page,
1415                                unsigned int *len, unsigned int offset,
1416                                struct sk_buff *skb, int linear,
1417                                struct sock *sk)
1418{
1419        if (unlikely(spd->nr_pages == pipe->buffers))
1420                return 1;
1421
1422        if (linear) {
1423                page = linear_to_page(page, len, &offset, skb, sk);
1424                if (!page)
1425                        return 1;
1426        } else
1427                get_page(page);
1428
1429        spd->pages[spd->nr_pages] = page;
1430        spd->partial[spd->nr_pages].len = *len;
1431        spd->partial[spd->nr_pages].offset = offset;
1432        spd->nr_pages++;
1433
1434        return 0;
1435}
1436
1437static inline void __segment_seek(struct page **page, unsigned int *poff,
1438                                  unsigned int *plen, unsigned int off)
1439{
1440        unsigned long n;
1441
1442        *poff += off;
1443        n = *poff / PAGE_SIZE;
1444        if (n)
1445                *page = nth_page(*page, n);
1446
1447        *poff = *poff % PAGE_SIZE;
1448        *plen -= off;
1449}
1450
1451static inline int __splice_segment(struct page *page, unsigned int poff,
1452                                   unsigned int plen, unsigned int *off,
1453                                   unsigned int *len, struct sk_buff *skb,
1454                                   struct splice_pipe_desc *spd, int linear,
1455                                   struct sock *sk,
1456                                   struct pipe_inode_info *pipe)
1457{
1458        if (!*len)
1459                return 1;
1460
1461        /* skip this segment if already processed */
1462        if (*off >= plen) {
1463                *off -= plen;
1464                return 0;
1465        }
1466
1467        /* ignore any bits we already processed */
1468        if (*off) {
1469                __segment_seek(&page, &poff, &plen, *off);
1470                *off = 0;
1471        }
1472
1473        do {
1474                unsigned int flen = min(*len, plen);
1475
1476                /* the linear region may spread across several pages  */
1477                flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
1478
1479                if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
1480                        return 1;
1481
1482                __segment_seek(&page, &poff, &plen, flen);
1483                *len -= flen;
1484
1485        } while (*len && plen);
1486
1487        return 0;
1488}
1489
1490/*
1491 * Map linear and fragment data from the skb to spd. It reports failure if the
1492 * pipe is full or if we already spliced the requested length.
1493 */
1494static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
1495                             unsigned int *offset, unsigned int *len,
1496                             struct splice_pipe_desc *spd, struct sock *sk)
1497{
1498        int seg;
1499
1500        /*
1501         * map the linear part
1502         */
1503        if (__splice_segment(virt_to_page(skb->data),
1504                             (unsigned long) skb->data & (PAGE_SIZE - 1),
1505                             skb_headlen(skb),
1506                             offset, len, skb, spd, 1, sk, pipe))
1507                return 1;
1508
1509        /*
1510         * then map the fragments
1511         */
1512        for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
1513                const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
1514
1515                if (__splice_segment(f->page, f->page_offset, f->size,
1516                                     offset, len, skb, spd, 0, sk, pipe))
1517                        return 1;
1518        }
1519
1520        return 0;
1521}
1522
1523/*
1524 * Map data from the skb to a pipe. Should handle both the linear part,
1525 * the fragments, and the frag list. It does NOT handle frag lists within
1526 * the frag list, if such a thing exists. We'd probably need to recurse to
1527 * handle that cleanly.
1528 */
1529int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
1530                    struct pipe_inode_info *pipe, unsigned int tlen,
1531                    unsigned int flags)
1532{
1533        struct partial_page partial[PIPE_DEF_BUFFERS];
1534        struct page *pages[PIPE_DEF_BUFFERS];
1535        struct splice_pipe_desc spd = {
1536                .pages = pages,
1537                .partial = partial,
1538                .flags = flags,
1539                .ops = &sock_pipe_buf_ops,
1540                .spd_release = sock_spd_release,
1541        };
1542        struct sk_buff *frag_iter;
1543        struct sock *sk = skb->sk;
1544        int ret = 0;
1545
1546        if (splice_grow_spd(pipe, &spd))
1547                return -ENOMEM;
1548
1549        /*
1550         * __skb_splice_bits() only fails if the output has no room left,
1551         * so no point in going over the frag_list for the error case.
1552         */
1553        if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
1554                goto done;
1555        else if (!tlen)
1556                goto done;
1557
1558        /*
1559         * now see if we have a frag_list to map
1560         */
1561        skb_walk_frags(skb, frag_iter) {
1562                if (!tlen)
1563                        break;
1564                if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
1565                        break;
1566        }
1567
1568done:
1569        if (spd.nr_pages) {
1570                /*
1571                 * Drop the socket lock, otherwise we have reverse
1572                 * locking dependencies between sk_lock and i_mutex
1573                 * here as compared to sendfile(). We enter here
1574                 * with the socket lock held, and splice_to_pipe() will
1575                 * grab the pipe inode lock. For sendfile() emulation,
1576                 * we call into ->sendpage() with the i_mutex lock held
1577                 * and networking will grab the socket lock.
1578                 */
1579                release_sock(sk);
1580                ret = splice_to_pipe(pipe, &spd);
1581                lock_sock(sk);
1582        }
1583
1584        splice_shrink_spd(pipe, &spd);
1585        return ret;
1586}
1587
1588/**
1589 *      skb_store_bits - store bits from kernel buffer to skb
1590 *      @skb: destination buffer
1591 *      @offset: offset in destination
1592 *      @from: source buffer
1593 *      @len: number of bytes to copy
1594 *
1595 *      Copy the specified number of bytes from the source buffer to the
1596 *      destination skb.  This function handles all the messy bits of
1597 *      traversing fragment lists and such.
1598 */
1599
1600int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
1601{
1602        int start = skb_headlen(skb);
1603        struct sk_buff *frag_iter;
1604        int i, copy;
1605
1606        if (offset > (int)skb->len - len)
1607                goto fault;
1608
1609        if ((copy = start - offset) > 0) {
1610                if (copy > len)
1611                        copy = len;
1612                skb_copy_to_linear_data_offset(skb, offset, from, copy);
1613                if ((len -= copy) == 0)
1614                        return 0;
1615                offset += copy;
1616                from += copy;
1617        }
1618
1619        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1620                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1621                int end;
1622
1623                WARN_ON(start > offset + len);
1624
1625                end = start + frag->size;
1626                if ((copy = end - offset) > 0) {
1627                        u8 *vaddr;
1628
1629                        if (copy > len)
1630                                copy = len;
1631
1632                        vaddr = kmap_skb_frag(frag);
1633                        memcpy(vaddr + frag->page_offset + offset - start,
1634                               from, copy);
1635                        kunmap_skb_frag(vaddr);
1636
1637                        if ((len -= copy) == 0)
1638                                return 0;
1639                        offset += copy;
1640                        from += copy;
1641                }
1642                start = end;
1643        }
1644
1645        skb_walk_frags(skb, frag_iter) {
1646                int end;
1647
1648                WARN_ON(start > offset + len);
1649
1650                end = start + frag_iter->len;
1651                if ((copy = end - offset) > 0) {
1652                        if (copy > len)
1653                                copy = len;
1654                        if (skb_store_bits(frag_iter, offset - start,
1655                                           from, copy))
1656                                goto fault;
1657                        if ((len -= copy) == 0)
1658                                return 0;
1659                        offset += copy;
1660                        from += copy;
1661                }
1662                start = end;
1663        }
1664        if (!len)
1665                return 0;
1666
1667fault:
1668        return -EFAULT;
1669}
1670EXPORT_SYMBOL(skb_store_bits);
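
/*
 * A minimal usage sketch (illustrative, not upstream code): patching a
 * byte range inside a possibly non-linear skb with skb_store_bits().
 * The function name and caller context are assumptions.
 */
static int example_patch_bytes(struct sk_buff *skb, int offset,
                               const void *buf, int len)
{
        /* assumes the caller already made the data buffers writable,
         * e.g. via skb_cow_data(); skb_store_bits() range-checks for us
         */
        return skb_store_bits(skb, offset, buf, len);
}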
1671
1672/* Checksum skb data. */
1673
1674__wsum skb_checksum(const struct sk_buff *skb, int offset,
1675                          int len, __wsum csum)
1676{
1677        int start = skb_headlen(skb);
1678        int i, copy = start - offset;
1679        struct sk_buff *frag_iter;
1680        int pos = 0;
1681
1682        /* Checksum header. */
1683        if (copy > 0) {
1684                if (copy > len)
1685                        copy = len;
1686                csum = csum_partial(skb->data + offset, copy, csum);
1687                if ((len -= copy) == 0)
1688                        return csum;
1689                offset += copy;
1690                pos     = copy;
1691        }
1692
1693        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1694                int end;
1695
1696                WARN_ON(start > offset + len);
1697
1698                end = start + skb_shinfo(skb)->frags[i].size;
1699                if ((copy = end - offset) > 0) {
1700                        __wsum csum2;
1701                        u8 *vaddr;
1702                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1703
1704                        if (copy > len)
1705                                copy = len;
1706                        vaddr = kmap_skb_frag(frag);
1707                        csum2 = csum_partial(vaddr + frag->page_offset +
1708                                             offset - start, copy, 0);
1709                        kunmap_skb_frag(vaddr);
1710                        csum = csum_block_add(csum, csum2, pos);
1711                        if (!(len -= copy))
1712                                return csum;
1713                        offset += copy;
1714                        pos    += copy;
1715                }
1716                start = end;
1717        }
1718
1719        skb_walk_frags(skb, frag_iter) {
1720                int end;
1721
1722                WARN_ON(start > offset + len);
1723
1724                end = start + frag_iter->len;
1725                if ((copy = end - offset) > 0) {
1726                        __wsum csum2;
1727                        if (copy > len)
1728                                copy = len;
1729                        csum2 = skb_checksum(frag_iter, offset - start,
1730                                             copy, 0);
1731                        csum = csum_block_add(csum, csum2, pos);
1732                        if ((len -= copy) == 0)
1733                                return csum;
1734                        offset += copy;
1735                        pos    += copy;
1736                }
1737                start = end;
1738        }
1739        BUG_ON(len);
1740
1741        return csum;
1742}
1743EXPORT_SYMBOL(skb_checksum);
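
/*
 * A minimal usage sketch (illustrative, not upstream code): folding the
 * 32-bit partial checksum of an entire skb into the final 16-bit
 * Internet checksum.
 */
static __sum16 example_csum_fold_skb(const struct sk_buff *skb)
{
        __wsum csum = skb_checksum(skb, 0, skb->len, 0);

        return csum_fold(csum);
}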
1744
1745/* Both of the above in one bottle. */
1746
1747__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
1748                                    u8 *to, int len, __wsum csum)
1749{
1750        int start = skb_headlen(skb);
1751        int i, copy = start - offset;
1752        struct sk_buff *frag_iter;
1753        int pos = 0;
1754
1755        /* Copy header. */
1756        if (copy > 0) {
1757                if (copy > len)
1758                        copy = len;
1759                csum = csum_partial_copy_nocheck(skb->data + offset, to,
1760                                                 copy, csum);
1761                if ((len -= copy) == 0)
1762                        return csum;
1763                offset += copy;
1764                to     += copy;
1765                pos     = copy;
1766        }
1767
1768        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1769                int end;
1770
1771                WARN_ON(start > offset + len);
1772
1773                end = start + skb_shinfo(skb)->frags[i].size;
1774                if ((copy = end - offset) > 0) {
1775                        __wsum csum2;
1776                        u8 *vaddr;
1777                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1778
1779                        if (copy > len)
1780                                copy = len;
1781                        vaddr = kmap_skb_frag(frag);
1782                        csum2 = csum_partial_copy_nocheck(vaddr +
1783                                                          frag->page_offset +
1784                                                          offset - start, to,
1785                                                          copy, 0);
1786                        kunmap_skb_frag(vaddr);
1787                        csum = csum_block_add(csum, csum2, pos);
1788                        if (!(len -= copy))
1789                                return csum;
1790                        offset += copy;
1791                        to     += copy;
1792                        pos    += copy;
1793                }
1794                start = end;
1795        }
1796
1797        skb_walk_frags(skb, frag_iter) {
1798                __wsum csum2;
1799                int end;
1800
1801                WARN_ON(start > offset + len);
1802
1803                end = start + frag_iter->len;
1804                if ((copy = end - offset) > 0) {
1805                        if (copy > len)
1806                                copy = len;
1807                        csum2 = skb_copy_and_csum_bits(frag_iter,
1808                                                       offset - start,
1809                                                       to, copy, 0);
1810                        csum = csum_block_add(csum, csum2, pos);
1811                        if ((len -= copy) == 0)
1812                                return csum;
1813                        offset += copy;
1814                        to     += copy;
1815                        pos    += copy;
1816                }
1817                start = end;
1818        }
1819        BUG_ON(len);
1820        return csum;
1821}
1822EXPORT_SYMBOL(skb_copy_and_csum_bits);
1823
1824void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
1825{
1826        __wsum csum;
1827        long csstart;
1828
1829        if (skb->ip_summed == CHECKSUM_PARTIAL)
1830                csstart = skb_checksum_start_offset(skb);
1831        else
1832                csstart = skb_headlen(skb);
1833
1834        BUG_ON(csstart > skb_headlen(skb));
1835
1836        skb_copy_from_linear_data(skb, to, csstart);
1837
1838        csum = 0;
1839        if (csstart != skb->len)
1840                csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
1841                                              skb->len - csstart, 0);
1842
1843        if (skb->ip_summed == CHECKSUM_PARTIAL) {
1844                long csstuff = csstart + skb->csum_offset;
1845
1846                *((__sum16 *)(to + csstuff)) = csum_fold(csum);
1847        }
1848}
1849EXPORT_SYMBOL(skb_copy_and_csum_dev);
1850
1851/**
1852 *      skb_dequeue - remove from the head of the queue
1853 *      @list: list to dequeue from
1854 *
1855 *      Remove the head of the list. The list lock is taken so the function
1856 *      may be used safely with other locking list functions. The head item is
1857 *      returned or %NULL if the list is empty.
1858 */
1859
1860struct sk_buff *skb_dequeue(struct sk_buff_head *list)
1861{
1862        unsigned long flags;
1863        struct sk_buff *result;
1864
1865        spin_lock_irqsave(&list->lock, flags);
1866        result = __skb_dequeue(list);
1867        spin_unlock_irqrestore(&list->lock, flags);
1868        return result;
1869}
1870EXPORT_SYMBOL(skb_dequeue);
1871
1872/**
1873 *      skb_dequeue_tail - remove from the tail of the queue
1874 *      @list: list to dequeue from
1875 *
1876 *      Remove the tail of the list. The list lock is taken so the function
1877 *      may be used safely with other locking list functions. The tail item is
1878 *      returned or %NULL if the list is empty.
1879 */
1880struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
1881{
1882        unsigned long flags;
1883        struct sk_buff *result;
1884
1885        spin_lock_irqsave(&list->lock, flags);
1886        result = __skb_dequeue_tail(list);
1887        spin_unlock_irqrestore(&list->lock, flags);
1888        return result;
1889}
1890EXPORT_SYMBOL(skb_dequeue_tail);
1891
1892/**
1893 *      skb_queue_purge - empty a list
1894 *      @list: list to empty
1895 *
1896 *      Delete all buffers on an &sk_buff list. Each buffer is removed from
1897 *      the list and one reference dropped. This function takes the list
1898 *      lock and is atomic with respect to other list locking functions.
1899 */
1900void skb_queue_purge(struct sk_buff_head *list)
1901{
1902        struct sk_buff *skb;
1903        while ((skb = skb_dequeue(list)) != NULL)
1904                kfree_skb(skb);
1905}
1906EXPORT_SYMBOL(skb_queue_purge);
1907
1908/**
1909 *      skb_queue_head - queue a buffer at the list head
1910 *      @list: list to use
1911 *      @newsk: buffer to queue
1912 *
1913 *      Queue a buffer at the start of the list. This function takes the
1914 *      list lock and can be used safely with other locking &sk_buff
1915 *      functions.
1916 *
1917 *      A buffer cannot be placed on two lists at the same time.
1918 */
1919void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
1920{
1921        unsigned long flags;
1922
1923        spin_lock_irqsave(&list->lock, flags);
1924        __skb_queue_head(list, newsk);
1925        spin_unlock_irqrestore(&list->lock, flags);
1926}
1927EXPORT_SYMBOL(skb_queue_head);
1928
1929/**
1930 *      skb_queue_tail - queue a buffer at the list tail
1931 *      @list: list to use
1932 *      @newsk: buffer to queue
1933 *
1934 *      Queue a buffer at the tail of the list. This function takes the
1935 *      list lock and can be used safely with other locking &sk_buff
1936 *      functions.
1937 *
1938 *      A buffer cannot be placed on two lists at the same time.
1939 */
1940void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
1941{
1942        unsigned long flags;
1943
1944        spin_lock_irqsave(&list->lock, flags);
1945        __skb_queue_tail(list, newsk);
1946        spin_unlock_irqrestore(&list->lock, flags);
1947}
1948EXPORT_SYMBOL(skb_queue_tail);
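
/*
 * A minimal usage sketch (illustrative, not upstream code): the
 * producer/consumer pattern these locked helpers support. The queue is
 * a hypothetical, already-initialized &sk_buff_head.
 */
static void example_produce(struct sk_buff_head *q, struct sk_buff *skb)
{
        skb_queue_tail(q, skb);         /* takes q->lock, IRQs disabled */
}

static void example_consume(struct sk_buff_head *q)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(q)) != NULL)
                kfree_skb(skb);         /* a real consumer would process it */
}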
1949
1950/**
1951 *      skb_unlink      -       remove a buffer from a list
1952 *      @skb: buffer to remove
1953 *      @list: list to use
1954 *
1955 *      Remove a packet from a list. The list locks are taken and this
1956 *      function is atomic with respect to other list locked calls.
1957 *
1958 *      You must know what list the SKB is on.
1959 */
1960void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
1961{
1962        unsigned long flags;
1963
1964        spin_lock_irqsave(&list->lock, flags);
1965        __skb_unlink(skb, list);
1966        spin_unlock_irqrestore(&list->lock, flags);
1967}
1968EXPORT_SYMBOL(skb_unlink);
1969
1970/**
1971 *      skb_append      -       append a buffer
1972 *      @old: buffer to insert after
1973 *      @newsk: buffer to insert
1974 *      @list: list to use
1975 *
1976 *      Place a packet after a given packet in a list. The list locks are taken
1977 *      and this function is atomic with respect to other list locked calls.
1978 *      A buffer cannot be placed on two lists at the same time.
1979 */
1980void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
1981{
1982        unsigned long flags;
1983
1984        spin_lock_irqsave(&list->lock, flags);
1985        __skb_queue_after(list, old, newsk);
1986        spin_unlock_irqrestore(&list->lock, flags);
1987}
1988EXPORT_SYMBOL(skb_append);
1989
1990/**
1991 *      skb_insert      -       insert a buffer
1992 *      @old: buffer to insert before
1993 *      @newsk: buffer to insert
1994 *      @list: list to use
1995 *
1996 *      Place a packet before a given packet in a list. The list locks are
1997 *      taken and this function is atomic with respect to other list locked
1998 *      calls.
1999 *
2000 *      A buffer cannot be placed on two lists at the same time.
2001 */
2002void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2003{
2004        unsigned long flags;
2005
2006        spin_lock_irqsave(&list->lock, flags);
2007        __skb_insert(newsk, old->prev, old, list);
2008        spin_unlock_irqrestore(&list->lock, flags);
2009}
2010EXPORT_SYMBOL(skb_insert);
2011
2012static inline void skb_split_inside_header(struct sk_buff *skb,
2013                                           struct sk_buff* skb1,
2014                                           const u32 len, const int pos)
2015{
2016        int i;
2017
2018        skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
2019                                         pos - len);
2020        /* And move data appendix as is. */
2021        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
2022                skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
2023
2024        skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
2025        skb_shinfo(skb)->nr_frags  = 0;
2026        skb1->data_len             = skb->data_len;
2027        skb1->len                  += skb1->data_len;
2028        skb->data_len              = 0;
2029        skb->len                   = len;
2030        skb_set_tail_pointer(skb, len);
2031}
2032
2033static inline void skb_split_no_header(struct sk_buff *skb,
2034                                       struct sk_buff* skb1,
2035                                       const u32 len, int pos)
2036{
2037        int i, k = 0;
2038        const int nfrags = skb_shinfo(skb)->nr_frags;
2039
2040        skb_shinfo(skb)->nr_frags = 0;
2041        skb1->len                 = skb1->data_len = skb->len - len;
2042        skb->len                  = len;
2043        skb->data_len             = len - pos;
2044
2045        for (i = 0; i < nfrags; i++) {
2046                int size = skb_shinfo(skb)->frags[i].size;
2047
2048                if (pos + size > len) {
2049                        skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
2050
2051                        if (pos < len) {
2052                                /* Split frag.
2053                                 * There are two options here:
2054                                 * 1. Move the whole frag to the second
2055                                 *    part, if possible; e.g. this is
2056                                 *    mandatory for TUX, where splitting
2057                                 *    is expensive.
2058                                 * 2. Split accurately, as we do here.
2059                                 */
2060                                get_page(skb_shinfo(skb)->frags[i].page);
2061                                skb_shinfo(skb1)->frags[0].page_offset += len - pos;
2062                                skb_shinfo(skb1)->frags[0].size -= len - pos;
2063                                skb_shinfo(skb)->frags[i].size  = len - pos;
2064                                skb_shinfo(skb)->nr_frags++;
2065                        }
2066                        k++;
2067                } else
2068                        skb_shinfo(skb)->nr_frags++;
2069                pos += size;
2070        }
2071        skb_shinfo(skb1)->nr_frags = k;
2072}
2073
2074/**
2075 * skb_split - Split fragmented skb into two parts at length len.
2076 * @skb: the buffer to split
2077 * @skb1: the buffer to receive the second part
2078 * @len: new length for skb
2079 */
2080void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
2081{
2082        int pos = skb_headlen(skb);
2083
2084        if (len < pos)  /* Split line is inside header. */
2085                skb_split_inside_header(skb, skb1, len, pos);
2086        else            /* Second chunk has no header, nothing to copy. */
2087                skb_split_no_header(skb, skb1, len, pos);
2088}
2089EXPORT_SYMBOL(skb_split);
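
/*
 * A minimal usage sketch (illustrative, not upstream code): splitting
 * at an MSS boundary, roughly as a TCP-style retransmit path might.
 * @skb1 is assumed freshly allocated and empty; names are assumptions.
 */
static void example_split_at_mss(struct sk_buff *skb, struct sk_buff *skb1,
                                 unsigned int mss)
{
        if (skb->len > mss)
                skb_split(skb, skb1, mss);      /* skb keeps the first mss bytes */
}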
2090
2091/* Shifting from/to a cloned skb is a no-go.
2092 *
2093 * Caller cannot keep skb_shinfo related pointers past calling here!
2094 */
2095static int skb_prepare_for_shift(struct sk_buff *skb)
2096{
2097        return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2098}
2099
2100/**
2101 * skb_shift - Shifts paged data partially from skb to another
2102 * @tgt: buffer into which tail data gets added
2103 * @skb: buffer from which the paged data comes from
2104 * @shiftlen: shift up to this many bytes
2105 *
2106 * Attempts to shift up to shiftlen worth of bytes, which may be less than
2107 * the length of the skb, from @skb to @tgt. Returns the number of bytes
2108 * shifted. It's up to the caller to free @skb if everything was shifted.
2109 *
2110 * If @tgt runs out of frags, the whole operation is aborted.
2111 *
2112 * @skb must contain nothing but paged data, while @tgt is allowed
2113 * to have non-paged data as well.
2114 *
2115 * TODO: full sized shift could be optimized but that would need
2116 * specialized skb free'er to handle frags without up-to-date nr_frags.
2117 */
2118int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
2119{
2120        int from, to, merge, todo;
2121        struct skb_frag_struct *fragfrom, *fragto;
2122
2123        BUG_ON(shiftlen > skb->len);
2124        BUG_ON(skb_headlen(skb));       /* Would corrupt stream */
2125
2126        todo = shiftlen;
2127        from = 0;
2128        to = skb_shinfo(tgt)->nr_frags;
2129        fragfrom = &skb_shinfo(skb)->frags[from];
2130
2131        /* Actual merge is delayed until the point when we know we can
2132         * commit all, so that we don't have to undo partial changes
2133         */
2134        if (!to ||
2135            !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) {
2136                merge = -1;
2137        } else {
2138                merge = to - 1;
2139
2140                todo -= fragfrom->size;
2141                if (todo < 0) {
2142                        if (skb_prepare_for_shift(skb) ||
2143                            skb_prepare_for_shift(tgt))
2144                                return 0;
2145
2146                        /* All previous frag pointers might be stale! */
2147                        fragfrom = &skb_shinfo(skb)->frags[from];
2148                        fragto = &skb_shinfo(tgt)->frags[merge];
2149
2150                        fragto->size += shiftlen;
2151                        fragfrom->size -= shiftlen;
2152                        fragfrom->page_offset += shiftlen;
2153
2154                        goto onlymerged;
2155                }
2156
2157                from++;
2158        }
2159
2160        /* Skip a full, non-fitting skb to avoid expensive operations */
2161        if ((shiftlen == skb->len) &&
2162            (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
2163                return 0;
2164
2165        if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
2166                return 0;
2167
2168        while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
2169                if (to == MAX_SKB_FRAGS)
2170                        return 0;
2171
2172                fragfrom = &skb_shinfo(skb)->frags[from];
2173                fragto = &skb_shinfo(tgt)->frags[to];
2174
2175                if (todo >= fragfrom->size) {
2176                        *fragto = *fragfrom;
2177                        todo -= fragfrom->size;
2178                        from++;
2179                        to++;
2180
2181                } else {
2182                        get_page(fragfrom->page);
2183                        fragto->page = fragfrom->page;
2184                        fragto->page_offset = fragfrom->page_offset;
2185                        fragto->size = todo;
2186
2187                        fragfrom->page_offset += todo;
2188                        fragfrom->size -= todo;
2189                        todo = 0;
2190
2191                        to++;
2192                        break;
2193                }
2194        }
2195
2196        /* Ready to "commit" this state change to tgt */
2197        skb_shinfo(tgt)->nr_frags = to;
2198
2199        if (merge >= 0) {
2200                fragfrom = &skb_shinfo(skb)->frags[0];
2201                fragto = &skb_shinfo(tgt)->frags[merge];
2202
2203                fragto->size += fragfrom->size;
2204                put_page(fragfrom->page);
2205        }
2206
2207        /* Reposition in the original skb */
2208        to = 0;
2209        while (from < skb_shinfo(skb)->nr_frags)
2210                skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
2211        skb_shinfo(skb)->nr_frags = to;
2212
2213        BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
2214
2215onlymerged:
2216        /* Most likely the tgt won't ever need its checksum anymore, skb on
2217         * the other hand might need it if it needs to be resent
2218         */
2219        tgt->ip_summed = CHECKSUM_PARTIAL;
2220        skb->ip_summed = CHECKSUM_PARTIAL;
2221
2222        /* Yak, is it really working this way? Some helper please? */
2223        skb->len -= shiftlen;
2224        skb->data_len -= shiftlen;
2225        skb->truesize -= shiftlen;
2226        tgt->len += shiftlen;
2227        tgt->data_len += shiftlen;
2228        tgt->truesize += shiftlen;
2229
2230        return shiftlen;
2231}
2232
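/*
 * A minimal usage sketch (illustrative, not upstream code): collapsing
 * one skb into its neighbour, as the TCP retransmit queue does. It
 * assumes @skb carries only paged data (skb_headlen() == 0) and has
 * already been unlinked from any queue.
 */
static void example_collapse(struct sk_buff *tgt, struct sk_buff *skb)
{
        int len = skb->len;

        if (skb_shift(tgt, skb, len) == len)
                kfree_skb(skb);         /* the donor is empty now */
}
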
2233/**
2234 * skb_prepare_seq_read - Prepare a sequential read of skb data
2235 * @skb: the buffer to read
2236 * @from: lower offset of data to be read
2237 * @to: upper offset of data to be read
2238 * @st: state variable
2239 *
2240 * Initializes the specified state variable. Must be called before
2241 * invoking skb_seq_read() for the first time.
2242 */
2243void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
2244                          unsigned int to, struct skb_seq_state *st)
2245{
2246        st->lower_offset = from;
2247        st->upper_offset = to;
2248        st->root_skb = st->cur_skb = skb;
2249        st->frag_idx = st->stepped_offset = 0;
2250        st->frag_data = NULL;
2251}
2252EXPORT_SYMBOL(skb_prepare_seq_read);
2253
2254/**
2255 * skb_seq_read - Sequentially read skb data
2256 * @consumed: number of bytes consumed by the caller so far
2257 * @data: destination pointer for data to be returned
2258 * @st: state variable
2259 *
2260 * Reads a block of skb data at &consumed relative to the
2261 * lower offset specified to skb_prepare_seq_read(). Assigns
2262 * the head of the data block to &data and returns the length
2263 * of the block or 0 if the end of the skb data or the upper
2264 * offset has been reached.
2265 *
2266 * The caller is not required to consume all of the data
2267 * returned, i.e. &consumed is typically set to the number
2268 * of bytes already consumed and the next call to
2269 * skb_seq_read() will return the remaining part of the block.
2270 *
2271 * Note 1: The size of each block of data returned can be arbitrary;
2272 *       this limitation is the cost of zerocopy sequential
2273 *       reads of potentially non-linear data.
2274 *
2275 * Note 2: Fragment lists within fragments are not implemented
2276 *       at the moment, state->root_skb could be replaced with
2277 *       a stack for this purpose.
2278 */
2279unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
2280                          struct skb_seq_state *st)
2281{
2282        unsigned int block_limit, abs_offset = consumed + st->lower_offset;
2283        skb_frag_t *frag;
2284
2285        if (unlikely(abs_offset >= st->upper_offset))
2286                return 0;
2287
2288next_skb:
2289        block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
2290
2291        if (abs_offset < block_limit && !st->frag_data) {
2292                *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
2293                return block_limit - abs_offset;
2294        }
2295
2296        if (st->frag_idx == 0 && !st->frag_data)
2297                st->stepped_offset += skb_headlen(st->cur_skb);
2298
2299        while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
2300                frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
2301                block_limit = frag->size + st->stepped_offset;
2302
2303                if (abs_offset < block_limit) {
2304                        if (!st->frag_data)
2305                                st->frag_data = kmap_skb_frag(frag);
2306
2307                        *data = (u8 *) st->frag_data + frag->page_offset +
2308                                (abs_offset - st->stepped_offset);
2309
2310                        return block_limit - abs_offset;
2311                }
2312
2313                if (st->frag_data) {
2314                        kunmap_skb_frag(st->frag_data);
2315                        st->frag_data = NULL;
2316                }
2317
2318                st->frag_idx++;
2319                st->stepped_offset += frag->size;
2320        }
2321
2322        if (st->frag_data) {
2323                kunmap_skb_frag(st->frag_data);
2324                st->frag_data = NULL;
2325        }
2326
2327        if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
2328                st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
2329                st->frag_idx = 0;
2330                goto next_skb;
2331        } else if (st->cur_skb->next) {
2332                st->cur_skb = st->cur_skb->next;
2333                st->frag_idx = 0;
2334                goto next_skb;
2335        }
2336
2337        return 0;
2338}
2339EXPORT_SYMBOL(skb_seq_read);
2340
2341/**
2342 * skb_abort_seq_read - Abort a sequential read of skb data
2343 * @st: state variable
2344 *
2345 * Must be called if the sequential read was abandoned before
2346 * skb_seq_read() returned 0.
2347 */
2348void skb_abort_seq_read(struct skb_seq_state *st)
2349{
2350        if (st->frag_data)
2351                kunmap_skb_frag(st->frag_data);
2352}
2353EXPORT_SYMBOL(skb_abort_seq_read);
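
/*
 * A minimal usage sketch (illustrative, not upstream code): the
 * canonical loop for the sequential-read API above, scanning a byte
 * range of an skb without copying. The processing step is a
 * placeholder.
 */
static void example_seq_scan(struct sk_buff *skb, unsigned int from,
                             unsigned int to)
{
        struct skb_seq_state st;
        unsigned int consumed = 0, len;
        const u8 *data;

        skb_prepare_seq_read(skb, from, to, &st);
        while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
                /* process len bytes at data here */
                consumed += len;
        }
        /* skb_seq_read() returned 0, so no skb_abort_seq_read() needed */
}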
2354
2355#define TS_SKB_CB(state)        ((struct skb_seq_state *) &((state)->cb))
2356
2357static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
2358                                          struct ts_config *conf,
2359                                          struct ts_state *state)
2360{
2361        return skb_seq_read(offset, text, TS_SKB_CB(state));
2362}
2363
2364static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
2365{
2366        skb_abort_seq_read(TS_SKB_CB(state));
2367}
2368
2369/**
2370 * skb_find_text - Find a text pattern in skb data
2371 * @skb: the buffer to look in
2372 * @from: search offset
2373 * @to: search limit
2374 * @config: textsearch configuration
2375 * @state: uninitialized textsearch state variable
2376 *
2377 * Finds a pattern in the skb data according to the specified
2378 * textsearch configuration. Use textsearch_next() to retrieve
2379 * subsequent occurrences of the pattern. Returns the offset
2380 * to the first occurrence or UINT_MAX if no match was found.
2381 */
2382unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
2383                           unsigned int to, struct ts_config *config,
2384                           struct ts_state *state)
2385{
2386        unsigned int ret;
2387
2388        config->get_next_block = skb_ts_get_next_block;
2389        config->finish = skb_ts_finish;
2390
2391        skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
2392
2393        ret = textsearch_find(config, state);
2394        return (ret <= to - from ? ret : UINT_MAX);
2395}
2396EXPORT_SYMBOL(skb_find_text);
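
/*
 * A minimal usage sketch (illustrative, not upstream code): pairing
 * skb_find_text() with the textsearch API, similar in spirit to the
 * xt_string match. The "bm" algorithm choice and the minimal error
 * handling are assumptions.
 */
static bool example_skb_contains(struct sk_buff *skb, const char *pattern)
{
        struct ts_config *conf;
        struct ts_state state;
        unsigned int pos;

        conf = textsearch_prepare("bm", pattern, strlen(pattern),
                                  GFP_KERNEL, TS_AUTOLOAD);
        if (IS_ERR(conf))
                return false;

        pos = skb_find_text(skb, 0, skb->len, conf, &state);
        textsearch_destroy(conf);
        return pos != UINT_MAX;
}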
2397
2398/**
2399 * skb_append_datato_frags - append the user data to a skb
2400 * @sk: sock structure
2401 * @skb: skb structure to be appended with user data.
2402 * @getfrag: callback function to be used for getting the user data
2403 * @from: pointer to user message iov
2404 * @length: length of the iov message
2405 *
2406 * Description: This procedure appends the user data to the fragment part
2407 * of the skb. If any page allocation fails, it returns -ENOMEM.
2408 */
2409int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
2410                        int (*getfrag)(void *from, char *to, int offset,
2411                                        int len, int odd, struct sk_buff *skb),
2412                        void *from, int length)
2413{
2414        int frg_cnt = 0;
2415        skb_frag_t *frag = NULL;
2416        struct page *page = NULL;
2417        int copy, left;
2418        int offset = 0;
2419        int ret;
2420
2421        do {
2422                /* Return error if we don't have space for new frag */
2423                frg_cnt = skb_shinfo(skb)->nr_frags;
2424                if (frg_cnt >= MAX_SKB_FRAGS)
2425                        return -EFAULT;
2426
2427                /* allocate a new page for next frag */
2428                page = alloc_pages(sk->sk_allocation, 0);
2429
2430                /* If alloc_pages() fails, just return failure; the caller
2431                 * will free previously allocated pages by doing kfree_skb()
2432                 */
2433                if (page == NULL)
2434                        return -ENOMEM;
2435
2436                /* initialize the next frag */
2437                sk->sk_sndmsg_page = page;
2438                sk->sk_sndmsg_off = 0;
2439                skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
2440                skb->truesize += PAGE_SIZE;
2441                atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
2442
2443                /* get the new initialized frag */
2444                frg_cnt = skb_shinfo(skb)->nr_frags;
2445                frag = &skb_shinfo(skb)->frags[frg_cnt - 1];
2446
2447                /* copy the user data to page */
2448                left = PAGE_SIZE - frag->page_offset;
2449                copy = (length > left)? left : length;
2450
2451                ret = getfrag(from, (page_address(frag->page) +
2452                            frag->page_offset + frag->size),
2453                            offset, copy, 0, skb);
2454                if (ret < 0)
2455                        return -EFAULT;
2456
2457                /* copy was successful so update the size parameters */
2458                sk->sk_sndmsg_off += copy;
2459                frag->size += copy;
2460                skb->len += copy;
2461                skb->data_len += copy;
2462                offset += copy;
2463                length -= copy;
2464
2465        } while (length > 0);
2466
2467        return 0;
2468}
2469EXPORT_SYMBOL(skb_append_datato_frags);
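
/*
 * A minimal sketch (illustrative, not upstream code) of the getfrag()
 * contract expected above: a trivial callback copying from a plain
 * kernel buffer. Real users (e.g. UDP) copy from user iovecs and may
 * fold a checksum here.
 */
static int example_getfrag(void *from, char *to, int offset, int len,
                           int odd, struct sk_buff *skb)
{
        memcpy(to, (char *)from + offset, len);
        return 0;
}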
2470
2471/**
2472 *      skb_pull_rcsum - pull skb and update receive checksum
2473 *      @skb: buffer to update
2474 *      @len: length of data pulled
2475 *
2476 *      This function performs an skb_pull on the packet and updates
2477 *      the CHECKSUM_COMPLETE checksum.  It should be used on
2478 *      receive path processing instead of skb_pull unless you know
2479 *      that the checksum difference is zero (e.g., a valid IP header)
2480 *      or you are setting ip_summed to CHECKSUM_NONE.
2481 */
2482unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
2483{
2484        BUG_ON(len > skb->len);
2485        skb->len -= len;
2486        BUG_ON(skb->len < skb->data_len);
2487        skb_postpull_rcsum(skb, skb->data, len);
2488        return skb->data += len;
2489}
2490EXPORT_SYMBOL_GPL(skb_pull_rcsum);
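
/*
 * A minimal usage sketch (illustrative, not upstream code): stripping a
 * hypothetical 4-byte tag on receive while keeping a CHECKSUM_COMPLETE
 * value consistent.
 */
static void example_strip_tag(struct sk_buff *skb)
{
        if (pskb_may_pull(skb, 4))
                skb_pull_rcsum(skb, 4); /* adjusts skb->csum as needed */
}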
2491
2492/**
2493 *      skb_segment - Perform protocol segmentation on skb.
2494 *      @skb: buffer to segment
2495 *      @features: features for the output path (see dev->features)
2496 *
2497 *      This function performs segmentation on the given skb.  It returns
2498 *      a pointer to the first in a list of new skbs for the segments.
2499 *      In case of error it returns ERR_PTR(err).
2500 */
2501struct sk_buff *skb_segment(struct sk_buff *skb, int features)
2502{
2503        struct sk_buff *segs = NULL;
2504        struct sk_buff *tail = NULL;
2505        struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
2506        unsigned int mss = skb_shinfo(skb)->gso_size;
2507        unsigned int doffset = skb->data - skb_mac_header(skb);
2508        unsigned int offset = doffset;
2509        unsigned int headroom;
2510        unsigned int len;
2511        int sg = features & NETIF_F_SG;
2512        int nfrags = skb_shinfo(skb)->nr_frags;
2513        int err = -ENOMEM;
2514        int i = 0;
2515        int pos;
2516
2517        __skb_push(skb, doffset);
2518        headroom = skb_headroom(skb);
2519        pos = skb_headlen(skb);
2520
2521        do {
2522                struct sk_buff *nskb;
2523                skb_frag_t *frag;
2524                int hsize;
2525                int size;
2526
2527                len = skb->len - offset;
2528                if (len > mss)
2529                        len = mss;
2530
2531                hsize = skb_headlen(skb) - offset;
2532                if (hsize < 0)
2533                        hsize = 0;
2534                if (hsize > len || !sg)
2535                        hsize = len;
2536
2537                if (!hsize && i >= nfrags) {
2538                        BUG_ON(fskb->len != len);
2539
2540                        pos += len;
2541                        nskb = skb_clone(fskb, GFP_ATOMIC);
2542                        fskb = fskb->next;
2543
2544                        if (unlikely(!nskb))
2545                                goto err;
2546
2547                        hsize = skb_end_pointer(nskb) - nskb->head;
2548                        if (skb_cow_head(nskb, doffset + headroom)) {
2549                                kfree_skb(nskb);
2550                                goto err;
2551                        }
2552
2553                        nskb->truesize += skb_end_pointer(nskb) - nskb->head -
2554                                          hsize;
2555                        skb_release_head_state(nskb);
2556                        __skb_push(nskb, doffset);
2557                } else {
2558                        nskb = alloc_skb(hsize + doffset + headroom,
2559                                         GFP_ATOMIC);
2560
2561                        if (unlikely(!nskb))
2562                                goto err;
2563
2564                        skb_reserve(nskb, headroom);
2565                        __skb_put(nskb, doffset);
2566                }
2567
2568                if (segs)
2569                        tail->next = nskb;
2570                else
2571                        segs = nskb;
2572                tail = nskb;
2573
2574                __copy_skb_header(nskb, skb);
2575                nskb->mac_len = skb->mac_len;
2576
2577                /* nskb and skb might have different headroom */
2578                if (nskb->ip_summed == CHECKSUM_PARTIAL)
2579                        nskb->csum_start += skb_headroom(nskb) - headroom;
2580
2581                skb_reset_mac_header(nskb);
2582                skb_set_network_header(nskb, skb->mac_len);
2583                nskb->transport_header = (nskb->network_header +
2584                                          skb_network_header_len(skb));
2585                skb_copy_from_linear_data(skb, nskb->data, doffset);
2586
2587                if (fskb != skb_shinfo(skb)->frag_list)
2588                        continue;
2589
2590                if (!sg) {
2591                        nskb->ip_summed = CHECKSUM_NONE;
2592                        nskb->csum = skb_copy_and_csum_bits(skb, offset,
2593                                                            skb_put(nskb, len),
2594                                                            len, 0);
2595                        continue;
2596                }
2597
2598                frag = skb_shinfo(nskb)->frags;
2599
2600                skb_copy_from_linear_data_offset(skb, offset,
2601                                                 skb_put(nskb, hsize), hsize);
2602
2603                while (pos < offset + len && i < nfrags) {
2604                        *frag = skb_shinfo(skb)->frags[i];
2605                        get_page(frag->page);
2606                        size = frag->size;
2607
2608                        if (pos < offset) {
2609                                frag->page_offset += offset - pos;
2610                                frag->size -= offset - pos;
2611                        }
2612
2613                        skb_shinfo(nskb)->nr_frags++;
2614
2615                        if (pos + size <= offset + len) {
2616                                i++;
2617                                pos += size;
2618                        } else {
2619                                frag->size -= pos + size - (offset + len);
2620                                goto skip_fraglist;
2621                        }
2622
2623                        frag++;
2624                }
2625
2626                if (pos < offset + len) {
2627                        struct sk_buff *fskb2 = fskb;
2628
2629                        BUG_ON(pos + fskb->len != offset + len);
2630
2631                        pos += fskb->len;
2632                        fskb = fskb->next;
2633
2634                        if (fskb2->next) {
2635                                fskb2 = skb_clone(fskb2, GFP_ATOMIC);
2636                                if (!fskb2)
2637                                        goto err;
2638                        } else
2639                                skb_get(fskb2);
2640
2641                        SKB_FRAG_ASSERT(nskb);
2642                        skb_shinfo(nskb)->frag_list = fskb2;
2643                }
2644
2645skip_fraglist:
2646                nskb->data_len = len - hsize;
2647                nskb->len += nskb->data_len;
2648                nskb->truesize += nskb->data_len;
2649        } while ((offset += len) < skb->len);
2650
2651        return segs;
2652
2653err:
2654        while ((skb = segs)) {
2655                segs = skb->next;
2656                kfree_skb(skb);
2657        }
2658        return ERR_PTR(err);
2659}
2660EXPORT_SYMBOL_GPL(skb_segment);
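
/*
 * A minimal usage sketch (illustrative, not upstream code): consuming
 * skb_segment()'s return value the way the GSO path does, by walking
 * the singly linked list of segments. The transmit step is a
 * placeholder; freeing in its place is purely illustrative.
 */
static int example_segment_all(struct sk_buff *skb, int features)
{
        struct sk_buff *segs, *nskb;

        segs = skb_segment(skb, features);
        if (IS_ERR(segs))
                return PTR_ERR(segs);

        while (segs) {
                nskb = segs;
                segs = segs->next;
                nskb->next = NULL;
                /* hand nskb to the driver here */
                kfree_skb(nskb);
        }
        consume_skb(skb);       /* the original skb is no longer needed */
        return 0;
}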
2661
2662int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2663{
2664        struct sk_buff *p = *head;
2665        struct sk_buff *nskb;
2666        struct skb_shared_info *skbinfo = skb_shinfo(skb);
2667        struct skb_shared_info *pinfo = skb_shinfo(p);
2668        unsigned int headroom;
2669        unsigned int len = skb_gro_len(skb);
2670        unsigned int offset = skb_gro_offset(skb);
2671        unsigned int headlen = skb_headlen(skb);
2672
2673        if (p->len + len >= 65536)
2674                return -E2BIG;
2675
2676        if (pinfo->frag_list)
2677                goto merge;
2678        else if (headlen <= offset) {
2679                skb_frag_t *frag;
2680                skb_frag_t *frag2;
2681                int i = skbinfo->nr_frags;
2682                int nr_frags = pinfo->nr_frags + i;
2683
2684                offset -= headlen;
2685
2686                if (nr_frags > MAX_SKB_FRAGS)
2687                        return -E2BIG;
2688
2689                pinfo->nr_frags = nr_frags;
2690                skbinfo->nr_frags = 0;
2691
2692                frag = pinfo->frags + nr_frags;
2693                frag2 = skbinfo->frags + i;
2694                do {
2695                        *--frag = *--frag2;
2696                } while (--i);
2697
2698                frag->page_offset += offset;
2699                frag->size -= offset;
2700
2701                skb->truesize -= skb->data_len;
2702                skb->len -= skb->data_len;
2703                skb->data_len = 0;
2704
2705                NAPI_GRO_CB(skb)->free = 1;
2706                goto done;
2707        } else if (skb_gro_len(p) != pinfo->gso_size)
2708                return -E2BIG;
2709
2710        headroom = skb_headroom(p);
2711        nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
2712        if (unlikely(!nskb))
2713                return -ENOMEM;
2714
2715        __copy_skb_header(nskb, p);
2716        nskb->mac_len = p->mac_len;
2717
2718        skb_reserve(nskb, headroom);
2719        __skb_put(nskb, skb_gro_offset(p));
2720
2721        skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
2722        skb_set_network_header(nskb, skb_network_offset(p));
2723        skb_set_transport_header(nskb, skb_transport_offset(p));
2724
2725        __skb_pull(p, skb_gro_offset(p));
2726        memcpy(skb_mac_header(nskb), skb_mac_header(p),
2727               p->data - skb_mac_header(p));
2728
2729        *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
2730        skb_shinfo(nskb)->frag_list = p;
2731        skb_shinfo(nskb)->gso_size = pinfo->gso_size;
2732        pinfo->gso_size = 0;
2733        skb_header_release(p);
2734        nskb->prev = p;
2735
2736        nskb->data_len += p->len;
2737        nskb->truesize += p->len;
2738        nskb->len += p->len;
2739
2740        *head = nskb;
2741        nskb->next = p->next;
2742        p->next = NULL;
2743
2744        p = nskb;
2745
2746merge:
2747        if (offset > headlen) {
2748                unsigned int eat = offset - headlen;
2749
2750                skbinfo->frags[0].page_offset += eat;
2751                skbinfo->frags[0].size -= eat;
2752                skb->data_len -= eat;
2753                skb->len -= eat;
2754                offset = headlen;
2755        }
2756
2757        __skb_pull(skb, offset);
2758
2759        p->prev->next = skb;
2760        p->prev = skb;
2761        skb_header_release(skb);
2762
2763done:
2764        NAPI_GRO_CB(p)->count++;
2765        p->data_len += len;
2766        p->truesize += len;
2767        p->len += len;
2768
2769        NAPI_GRO_CB(skb)->same_flow = 1;
2770        return 0;
2771}
2772EXPORT_SYMBOL_GPL(skb_gro_receive);
2773
2774void __init skb_init(void)
2775{
2776        skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
2777                                              sizeof(struct sk_buff),
2778                                              0,
2779                                              SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2780                                              NULL);
2781        skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
2782                                                (2*sizeof(struct sk_buff)) +
2783                                                sizeof(atomic_t),
2784                                                0,
2785                                                SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2786                                                NULL);
2787}
2788
2789/**
2790 *      skb_to_sgvec - Fill a scatter-gather list from a socket buffer
2791 *      @skb: Socket buffer containing the buffers to be mapped
2792 *      @sg: The scatter-gather list to map into
2793 *      @offset: The offset into the buffer's contents to start mapping
2794 *      @len: Length of buffer space to be mapped
2795 *
2796 *      Fill the specified scatter-gather list with mappings/pointers into a
2797 *      region of the buffer space attached to a socket buffer.
2798 */
2799static int
2800__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2801{
2802        int start = skb_headlen(skb);
2803        int i, copy = start - offset;
2804        struct sk_buff *frag_iter;
2805        int elt = 0;
2806
2807        if (copy > 0) {
2808                if (copy > len)
2809                        copy = len;
2810                sg_set_buf(sg, skb->data + offset, copy);
2811                elt++;
2812                if ((len -= copy) == 0)
2813                        return elt;
2814                offset += copy;
2815        }
2816
2817        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2818                int end;
2819
2820                WARN_ON(start > offset + len);
2821
2822                end = start + skb_shinfo(skb)->frags[i].size;
2823                if ((copy = end - offset) > 0) {
2824                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2825
2826                        if (copy > len)
2827                                copy = len;
2828                        sg_set_page(&sg[elt], frag->page, copy,
2829                                        frag->page_offset+offset-start);
2830                        elt++;
2831                        if (!(len -= copy))
2832                                return elt;
2833                        offset += copy;
2834                }
2835                start = end;
2836        }
2837
2838        skb_walk_frags(skb, frag_iter) {
2839                int end;
2840
2841                WARN_ON(start > offset + len);
2842
2843                end = start + frag_iter->len;
2844                if ((copy = end - offset) > 0) {
2845                        if (copy > len)
2846                                copy = len;
2847                        elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
2848                                              copy);
2849                        if ((len -= copy) == 0)
2850                                return elt;
2851                        offset += copy;
2852                }
2853                start = end;
2854        }
2855        BUG_ON(len);
2856        return elt;
2857}
2858
2859int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2860{
2861        int nsg = __skb_to_sgvec(skb, sg, offset, len);
2862
2863        sg_mark_end(&sg[nsg - 1]);
2864
2865        return nsg;
2866}
2867EXPORT_SYMBOL_GPL(skb_to_sgvec);
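
/*
 * A minimal usage sketch (illustrative, not upstream code): mapping a
 * whole skb into an on-stack scatterlist. It assumes the skb has no
 * frag list; users that must handle one size the table from
 * skb_cow_data() instead (see below).
 */
static int example_map_skb(struct sk_buff *skb)
{
        struct scatterlist sg[MAX_SKB_FRAGS + 1];
        int nsg;

        sg_init_table(sg, ARRAY_SIZE(sg));
        nsg = skb_to_sgvec(skb, sg, 0, skb->len);
        /* hand sg[0..nsg-1] to DMA or the crypto layer here */
        return nsg;
}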
2868
2869/**
2870 *      skb_cow_data - Check that a socket buffer's data buffers are writable
2871 *      @skb: The socket buffer to check.
2872 *      @tailbits: Amount of trailing space to be added
2873 *      @trailer: Returned pointer to the skb where the @tailbits space begins
2874 *
2875 *      Make sure that the data buffers attached to a socket buffer are
2876 *      writable. If they are not, private copies are made of the data buffers
2877 *      and the socket buffer is set to use these instead.
2878 *
2879 *      If @tailbits is given, make sure that there is space to write @tailbits
2880 *      bytes of data beyond current end of socket buffer.  @trailer will be
2881 *      set to point to the skb in which this space begins.
2882 *
2883 *      The number of scatterlist elements required to completely map the
2884 *      COW'd and extended socket buffer will be returned.
2885 */
2886int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
2887{
2888        int copyflag;
2889        int elt;
2890        struct sk_buff *skb1, **skb_p;
2891
2892        /* If skb is cloned or its head is paged, reallocate
2893         * head pulling out all the pages (pages are considered not writable
2894         * at the moment even if they are anonymous).
2895         */
2896        if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
2897            __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
2898                return -ENOMEM;
2899
2900        /* Easy case. Most of packets will go this way. */
2901        if (!skb_has_frag_list(skb)) {
2902                /* A little trouble: not enough space for the trailer.
2903                 * This should not happen when the stack is tuned to generate
2904                 * good frames. OK, on a miss we reallocate and reserve even
2905                 * more space; 128 bytes is fair. */
2906
2907                if (skb_tailroom(skb) < tailbits &&
2908                    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
2909                        return -ENOMEM;
2910
2911                /* Voila! */
2912                *trailer = skb;
2913                return 1;
2914        }
2915
2916        /* Misery. We are in trouble, going to mince fragments... */
2917
2918        elt = 1;
2919        skb_p = &skb_shinfo(skb)->frag_list;
2920        copyflag = 0;
2921
2922        while ((skb1 = *skb_p) != NULL) {
2923                int ntail = 0;
2924
2925                /* The fragment was partially pulled by someone;
2926                 * this can happen on input. Copy it and everything
2927                 * after it. */
2928
2929                if (skb_shared(skb1))
2930                        copyflag = 1;
2931
2932                /* If the skb is the last, worry about trailer. */
2933
2934                if (skb1->next == NULL && tailbits) {
2935                        if (skb_shinfo(skb1)->nr_frags ||
2936                            skb_has_frag_list(skb1) ||
2937                            skb_tailroom(skb1) < tailbits)
2938                                ntail = tailbits + 128;
2939                }
2940
2941                if (copyflag ||
2942                    skb_cloned(skb1) ||
2943                    ntail ||
2944                    skb_shinfo(skb1)->nr_frags ||
2945                    skb_has_frag_list(skb1)) {
2946                        struct sk_buff *skb2;
2947
2948                        /* Fuck, we are miserable poor guys... */
2949                        if (ntail == 0)
2950                                skb2 = skb_copy(skb1, GFP_ATOMIC);
2951                        else
2952                                skb2 = skb_copy_expand(skb1,
2953                                                       skb_headroom(skb1),
2954                                                       ntail,
2955                                                       GFP_ATOMIC);
2956                        if (unlikely(skb2 == NULL))
2957                                return -ENOMEM;
2958
2959                        if (skb1->sk)
2960                                skb_set_owner_w(skb2, skb1->sk);
2961
2962                        /* Looking around. Are we still alive?
2963                         * OK, link new skb, drop old one */
2964
2965                        skb2->next = skb1->next;
2966                        *skb_p = skb2;
2967                        kfree_skb(skb1);
2968                        skb1 = skb2;
2969                }
2970                elt++;
2971                *trailer = skb1;
2972                skb_p = &skb1->next;
2973        }
2974
2975        return elt;
2976}
2977EXPORT_SYMBOL_GPL(skb_cow_data);
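
/*
 * A minimal usage sketch (illustrative, not upstream code): the usual
 * pairing of skb_cow_data() and skb_to_sgvec(), roughly as IPsec ESP
 * does it, with the scatterlist sized from the returned element count.
 */
static int example_cow_and_map(struct sk_buff *skb, int tailbits)
{
        struct sk_buff *trailer;
        struct scatterlist *sg;
        int nsg;

        nsg = skb_cow_data(skb, tailbits, &trailer);
        if (nsg < 0)
                return nsg;

        sg = kmalloc(nsg * sizeof(*sg), GFP_ATOMIC);
        if (!sg)
                return -ENOMEM;

        sg_init_table(sg, nsg);
        skb_to_sgvec(skb, sg, 0, skb->len);
        /* use the scatterlist here, then release it */
        kfree(sg);
        return 0;
}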
2978
2979static void sock_rmem_free(struct sk_buff *skb)
2980{
2981        struct sock *sk = skb->sk;
2982
2983        atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
2984}
2985
2986/*
2987 * Note: We don't mem charge error packets (no sk_forward_alloc changes)
2988 */
2989int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
2990{
2991        if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
2992            (unsigned)sk->sk_rcvbuf)
2993                return -ENOMEM;
2994
2995        skb_orphan(skb);
2996        skb->sk = sk;
2997        skb->destructor = sock_rmem_free;
2998        atomic_add(skb->truesize, &sk->sk_rmem_alloc);
2999
3000        skb_queue_tail(&sk->sk_error_queue, skb);
3001        if (!sock_flag(sk, SOCK_DEAD))
3002                sk->sk_data_ready(sk, skb->len);
3003        return 0;
3004}
3005EXPORT_SYMBOL(sock_queue_err_skb);
3006
3007void skb_tstamp_tx(struct sk_buff *orig_skb,
3008                struct skb_shared_hwtstamps *hwtstamps)
3009{
3010        struct sock *sk = orig_skb->sk;
3011        struct sock_exterr_skb *serr;
3012        struct sk_buff *skb;
3013        int err;
3014
3015        if (!sk)
3016                return;
3017
3018        skb = skb_clone(orig_skb, GFP_ATOMIC);
3019        if (!skb)
3020                return;
3021
3022        if (hwtstamps) {
3023                *skb_hwtstamps(skb) =
3024                        *hwtstamps;
3025        } else {
3026                /*
3027                 * no hardware time stamps available,
3028                 * so keep the shared tx_flags and only
3029                 * store software time stamp
3030                 */
3031                skb->tstamp = ktime_get_real();
3032        }
3033
3034        serr = SKB_EXT_ERR(skb);
3035        memset(serr, 0, sizeof(*serr));
3036        serr->ee.ee_errno = ENOMSG;
3037        serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
3038
3039        err = sock_queue_err_skb(sk, skb);
3040
3041        if (err)
3042                kfree_skb(skb);
3043}
3044EXPORT_SYMBOL_GPL(skb_tstamp_tx);
3045
3046
3047/**
3048 * skb_partial_csum_set - set up and verify partial csum values for packet
3049 * @skb: the skb to set
3050 * @start: the number of bytes after skb->data to start checksumming.
3051 * @off: the offset from start to place the checksum.
3052 *
3053 * For untrusted partially-checksummed packets, we need to make sure the values
3054 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
3055 *
3056 * This function checks and sets those values and skb->ip_summed: if this
3057 * returns false you should drop the packet.
3058 */
3059bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
3060{
3061        if (unlikely(start > skb_headlen(skb)) ||
3062            unlikely((int)start + off > skb_headlen(skb) - 2)) {
3063                if (net_ratelimit())
3064                        printk(KERN_WARNING
3065                               "bad partial csum: csum=%u/%u len=%u\n",
3066                               start, off, skb_headlen(skb));
3067                return false;
3068        }
3069        skb->ip_summed = CHECKSUM_PARTIAL;
3070        skb->csum_start = skb_headroom(skb) + start;
3071        skb->csum_offset = off;
3072        return true;
3073}
3074EXPORT_SYMBOL_GPL(skb_partial_csum_set);
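
/*
 * A minimal usage sketch (illustrative, not upstream code): validating
 * checksum metadata taken from an untrusted source (say, a virtio-style
 * header) before trusting it. The error handling is an assumption.
 */
static int example_set_partial_csum(struct sk_buff *skb, u16 start, u16 off)
{
        if (!skb_partial_csum_set(skb, start, off))
                return -EINVAL; /* the caller should drop the packet */
        return 0;
}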
3075
3076void __skb_warn_lro_forwarding(const struct sk_buff *skb)
3077{
3078        if (net_ratelimit())
3079                pr_warning("%s: received packets cannot be forwarded"
3080                           " while LRO is enabled\n", skb->dev->name);
3081}
3082EXPORT_SYMBOL(__skb_warn_lro_forwarding);
3083