/*
 *      Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *      Authors:        Alan Cox <alan@lxorguk.ukuu.org.uk>
 *                      Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *      Fixes:
 *              Alan Cox        :       Fixed the worst of the load
 *                                      balancer bugs.
 *              Dave Platt      :       Interrupt stacking fix.
 *      Richard Kooijman        :       Timestamp fixes.
 *              Alan Cox        :       Changed buffer format.
 *              Alan Cox        :       destructor hook for AF_UNIX etc.
 *              Linus Torvalds  :       Better skb_clone.
 *              Alan Cox        :       Added skb_copy.
 *              Alan Cox        :       Added all the changed routines Linus
 *                                      only put in the headers
 *              Ray VanTassle   :       Fixed --skb->lock in free
 *              Alan Cox        :       skb_copy copy arp field
 *              Andi Kleen      :       slabified it.
 *              Robert Olsson   :       Removed skb_head_pool
 *
 *      NOTE:
 *              The __skb_ routines should be called with interrupts
 *      disabled, or you better be *real* sure that the operation is atomic
 *      with respect to whatever list is being frobbed (e.g. via lock_sock()
 *      or via disabling bottom half handlers, etc).
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */
  34
  35/*
  36 *      The functions in this file will not compile correctly with gcc 2.4.x
  37 */
  38
  39#include <linux/module.h>
  40#include <linux/types.h>
  41#include <linux/kernel.h>
  42#include <linux/kmemcheck.h>
  43#include <linux/mm.h>
  44#include <linux/interrupt.h>
  45#include <linux/in.h>
  46#include <linux/inet.h>
  47#include <linux/slab.h>
  48#include <linux/netdevice.h>
  49#ifdef CONFIG_NET_CLS_ACT
  50#include <net/pkt_sched.h>
  51#endif
  52#include <linux/string.h>
  53#include <linux/skbuff.h>
  54#include <linux/splice.h>
  55#include <linux/cache.h>
  56#include <linux/rtnetlink.h>
  57#include <linux/init.h>
  58#include <linux/scatterlist.h>
  59#include <linux/errqueue.h>
  60
  61#include <net/protocol.h>
  62#include <net/dst.h>
  63#include <net/sock.h>
  64#include <net/checksum.h>
  65#include <net/xfrm.h>
  66
  67#include <asm/uaccess.h>
  68#include <asm/system.h>
  69#include <trace/events/skb.h>
  70
  71#include "kmap_skb.h"
  72
static struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
                                  struct pipe_buffer *buf)
{
        put_page(buf->page);
}

static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
                                struct pipe_buffer *buf)
{
        get_page(buf->page);
}

static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
                               struct pipe_buffer *buf)
{
        return 1;
}


/* Pipe buffer operations for a socket. */
static struct pipe_buf_operations sock_pipe_buf_ops = {
        .can_merge = 0,
        .map = generic_pipe_buf_map,
        .unmap = generic_pipe_buf_unmap,
        .confirm = generic_pipe_buf_confirm,
        .release = sock_pipe_buf_release,
        .steal = sock_pipe_buf_steal,
        .get = sock_pipe_buf_get,
};

/*
 *      Keep out-of-line to prevent kernel bloat.
 *      __builtin_return_address is not used because it is not always
 *      reliable.
 */

/**
 *      skb_over_panic  -       private function
 *      @skb: buffer
 *      @sz: size
 *      @here: address
 *
 *      Out of line support code for skb_put(). Not user callable.
 */
void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
        printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
                          "data:%p tail:%#lx end:%#lx dev:%s\n",
               here, skb->len, sz, skb->head, skb->data,
               (unsigned long)skb->tail, (unsigned long)skb->end,
               skb->dev ? skb->dev->name : "<NULL>");
        BUG();
}
EXPORT_SYMBOL(skb_over_panic);

/**
 *      skb_under_panic -       private function
 *      @skb: buffer
 *      @sz: size
 *      @here: address
 *
 *      Out of line support code for skb_push(). Not user callable.
 */

void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
        printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
                          "data:%p tail:%#lx end:%#lx dev:%s\n",
               here, skb->len, sz, skb->head, skb->data,
               (unsigned long)skb->tail, (unsigned long)skb->end,
               skb->dev ? skb->dev->name : "<NULL>");
        BUG();
}
EXPORT_SYMBOL(skb_under_panic);

/*      Allocate a new skbuff. We do this ourselves so we can fill in a few
 *      'private' fields and also do memory statistics to find all the
 *      [BEEP] leaks.
 *
 */

/**
 *      __alloc_skb     -       allocate a network buffer
 *      @size: size to allocate
 *      @gfp_mask: allocation mask
 *      @fclone: allocate from fclone cache instead of head cache
 *              and allocate a cloned (child) skb
 *      @node: numa node to allocate memory on
 *
 *      Allocate a new &sk_buff. The returned buffer has no headroom and a
 *      tail room of @size bytes. The object has a reference count of one.
 *      The return is the buffer. On a failure the return is %NULL.
 *
 *      Buffers may only be allocated from interrupts using a @gfp_mask of
 *      %GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
                            int fclone, int node)
{
        struct kmem_cache *cache;
        struct skb_shared_info *shinfo;
        struct sk_buff *skb;
        u8 *data;

        cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

        /* Get the HEAD */
        skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
        if (!skb)
                goto out;

        size = SKB_DATA_ALIGN(size);
        data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
                        gfp_mask, node);
        if (!data)
                goto nodata;

        /*
         * Only clear those fields we need to clear, not those that we will
         * actually initialise below. Hence, don't put any more fields after
         * the tail pointer in struct sk_buff!
         */
        memset(skb, 0, offsetof(struct sk_buff, tail));
        skb->truesize = size + sizeof(struct sk_buff);
        atomic_set(&skb->users, 1);
        skb->head = data;
        skb->data = data;
        skb_reset_tail_pointer(skb);
        skb->end = skb->tail + size;
        kmemcheck_annotate_bitfield(skb, flags1);
        kmemcheck_annotate_bitfield(skb, flags2);
#ifdef NET_SKBUFF_DATA_USES_OFFSET
        skb->mac_header = ~0U;
#endif

        /* make sure we initialize shinfo sequentially */
        shinfo = skb_shinfo(skb);
        atomic_set(&shinfo->dataref, 1);
        shinfo->nr_frags  = 0;
        shinfo->gso_size = 0;
        shinfo->gso_segs = 0;
        shinfo->gso_type = 0;
        shinfo->ip6_frag_id = 0;
        shinfo->tx_flags.flags = 0;
        skb_frag_list_init(skb);
        memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));

        if (fclone) {
                struct sk_buff *child = skb + 1;
                atomic_t *fclone_ref = (atomic_t *) (child + 1);

                kmemcheck_annotate_bitfield(child, flags1);
                kmemcheck_annotate_bitfield(child, flags2);
                skb->fclone = SKB_FCLONE_ORIG;
                atomic_set(fclone_ref, 1);

                child->fclone = SKB_FCLONE_UNAVAILABLE;
        }
out:
        return skb;
nodata:
        kmem_cache_free(cache, skb);
        skb = NULL;
        goto out;
}
EXPORT_SYMBOL(__alloc_skb);
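
/* Usage sketch (illustrative addition, not part of the original file):
 * most callers go through the alloc_skb() wrapper, then partition the
 * buffer with skb_reserve()/skb_put(). "payload" and "len" below are
 * hypothetical.
 *
 *      struct sk_buff *skb = alloc_skb(MAX_HEADER + len, GFP_ATOMIC);
 *
 *      if (!skb)
 *              return -ENOMEM;
 *      skb_reserve(skb, MAX_HEADER);   // headroom for protocol headers
 *      memcpy(skb_put(skb, len), payload, len);        // append data
 */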

/**
 *      __netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *      @dev: network device to receive on
 *      @length: length to allocate
 *      @gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *      Allocate a new &sk_buff and assign it a usage count of one. The
 *      buffer has unspecified headroom built in. Users should allocate
 *      the headroom they think they need without accounting for the
 *      built in space. The built in space is used for optimisations.
 *
 *      %NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
                unsigned int length, gfp_t gfp_mask)
{
        int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
        struct sk_buff *skb;

        skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
        if (likely(skb)) {
                skb_reserve(skb, NET_SKB_PAD);
                skb->dev = dev;
        }
        return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
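
/* Usage sketch (illustrative addition): a typical copy-based receive path
 * built on the netdev_alloc_skb() wrapper. "copy_from_hw" and "pkt_len"
 * stand in for hypothetical driver specifics.
 *
 *      skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
 *      if (!skb)
 *              return;                         // out of memory: drop
 *      skb_reserve(skb, NET_IP_ALIGN);         // align the IP header
 *      copy_from_hw(skb_put(skb, pkt_len), pkt_len);
 *      skb->protocol = eth_type_trans(skb, dev);
 *      netif_rx(skb);
 */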

struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
{
        int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
        struct page *page;

        page = alloc_pages_node(node, gfp_mask, 0);
        return page;
}
EXPORT_SYMBOL(__netdev_alloc_page);

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
                int size)
{
        skb_fill_page_desc(skb, i, page, off, size);
        skb->len += size;
        skb->data_len += size;
        skb->truesize += size;
}
EXPORT_SYMBOL(skb_add_rx_frag);
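
/* Usage sketch (illustrative addition): a driver doing paged receive can
 * pair __netdev_alloc_page() with skb_add_rx_frag() to attach the page to
 * a small skb instead of copying. "frag_len" is hypothetical.
 *
 *      page = __netdev_alloc_page(dev, GFP_ATOMIC);
 *      ...map the page for DMA and receive frag_len bytes into it...
 *      skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0, frag_len);
 */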

/**
 *      dev_alloc_skb - allocate an skbuff for receiving
 *      @length: length to allocate
 *
 *      Allocate a new &sk_buff and assign it a usage count of one. The
 *      buffer has unspecified headroom built in. Users should allocate
 *      the headroom they think they need without accounting for the
 *      built in space. The built in space is used for optimisations.
 *
 *      %NULL is returned if there is no free memory. Although this function
 *      allocates memory it can be called from an interrupt.
 */
struct sk_buff *dev_alloc_skb(unsigned int length)
{
        /*
         * There is more code here than it seems:
         * __dev_alloc_skb is an inline
         */
        return __dev_alloc_skb(length, GFP_ATOMIC);
}
EXPORT_SYMBOL(dev_alloc_skb);

static void skb_drop_list(struct sk_buff **listp)
{
        struct sk_buff *list = *listp;

        *listp = NULL;

        do {
                struct sk_buff *this = list;
                list = list->next;
                kfree_skb(this);
        } while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
        skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
        struct sk_buff *list;

        skb_walk_frags(skb, list)
                skb_get(list);
}

static void skb_release_data(struct sk_buff *skb)
{
        if (!skb->cloned ||
            !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
                               &skb_shinfo(skb)->dataref)) {
                if (skb_shinfo(skb)->nr_frags) {
                        int i;
                        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                                put_page(skb_shinfo(skb)->frags[i].page);
                }

                if (skb_has_frags(skb))
                        skb_drop_fraglist(skb);

                kfree(skb->head);
        }
}

/*
 *      Free an skbuff's memory without cleaning its state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
        struct sk_buff *other;
        atomic_t *fclone_ref;

        switch (skb->fclone) {
        case SKB_FCLONE_UNAVAILABLE:
                kmem_cache_free(skbuff_head_cache, skb);
                break;

        case SKB_FCLONE_ORIG:
                fclone_ref = (atomic_t *) (skb + 2);
                if (atomic_dec_and_test(fclone_ref))
                        kmem_cache_free(skbuff_fclone_cache, skb);
                break;

        case SKB_FCLONE_CLONE:
                fclone_ref = (atomic_t *) (skb + 1);
                other = skb - 1;

                /* The clone portion is available for
                 * fast-cloning again.
                 */
                skb->fclone = SKB_FCLONE_UNAVAILABLE;

                if (atomic_dec_and_test(fclone_ref))
                        kmem_cache_free(skbuff_fclone_cache, other);
                break;
        }
}

static void skb_release_head_state(struct sk_buff *skb)
{
        skb_dst_drop(skb);
#ifdef CONFIG_XFRM
        secpath_put(skb->sp);
#endif
        if (skb->destructor) {
                WARN_ON(in_irq());
                skb->destructor(skb);
        }
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
        nf_conntrack_put(skb->nfct);
        nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
        nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: Is this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
        skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
        skb->tc_verd = 0;
#endif
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
        skb_release_head_state(skb);
        skb_release_data(skb);
}

/**
 *      __kfree_skb - private function
 *      @skb: buffer
 *
 *      Free an sk_buff. Release anything attached to the buffer.
 *      Clean the state. This is an internal helper function. Users should
 *      always call kfree_skb.
 */

void __kfree_skb(struct sk_buff *skb)
{
        skb_release_all(skb);
        kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *      kfree_skb - free an sk_buff
 *      @skb: buffer to free
 *
 *      Drop a reference to the buffer and free it if the usage count has
 *      hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
        if (unlikely(!skb))
                return;
        if (likely(atomic_read(&skb->users) == 1))
                smp_rmb();
        else if (likely(!atomic_dec_and_test(&skb->users)))
                return;
        trace_kfree_skb(skb, __builtin_return_address(0));
        __kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

/**
 *      consume_skb - free an skbuff
 *      @skb: buffer to free
 *
 *      Drop a ref to the buffer and free it if the usage count has hit zero.
 *      Functions identically to kfree_skb, but kfree_skb assumes that the
 *      frame is being dropped after a failure and notes that.
 */
void consume_skb(struct sk_buff *skb)
{
        if (unlikely(!skb))
                return;
        if (likely(atomic_read(&skb->users) == 1))
                smp_rmb();
        else if (likely(!atomic_dec_and_test(&skb->users)))
                return;
        __kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
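
/* Usage sketch (illustrative addition): the difference only matters for
 * tracing. A hypothetical transmit-completion handler would use
 * consume_skb() for frames that made it out and kfree_skb() for drops:
 *
 *      if (tx_ok)
 *              consume_skb(skb);       // delivered, not an error
 *      else
 *              kfree_skb(skb);         // shows up as a drop in traces
 */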

/**
 *      skb_recycle_check - check if skb can be reused for receive
 *      @skb: buffer
 *      @skb_size: minimum receive buffer size
 *
 *      Checks that the skb passed in is not shared or cloned, and
 *      that it is linear and its head portion is at least as large as
 *      skb_size so that it can be recycled as a receive buffer.
 *      If these conditions are met, this function does any necessary
 *      reference count dropping and cleans up the skbuff as if it
 *      just came from __alloc_skb().
 */
int skb_recycle_check(struct sk_buff *skb, int skb_size)
{
        struct skb_shared_info *shinfo;

        if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
                return 0;

        skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
        if (skb_end_pointer(skb) - skb->head < skb_size)
                return 0;

        if (skb_shared(skb) || skb_cloned(skb))
                return 0;

        skb_release_head_state(skb);
        shinfo = skb_shinfo(skb);
        atomic_set(&shinfo->dataref, 1);
        shinfo->nr_frags = 0;
        shinfo->gso_size = 0;
        shinfo->gso_segs = 0;
        shinfo->gso_type = 0;
        shinfo->ip6_frag_id = 0;
        shinfo->tx_flags.flags = 0;
        skb_frag_list_init(skb);
        memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));

        memset(skb, 0, offsetof(struct sk_buff, tail));
        skb->data = skb->head + NET_SKB_PAD;
        skb_reset_tail_pointer(skb);

        return 1;
}
EXPORT_SYMBOL(skb_recycle_check);
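
/* Usage sketch (illustrative addition): a driver may try to reuse a
 * just-transmitted skb for its receive ring instead of freeing it.
 * "rx_buf_size" and "add_to_rx_ring" are hypothetical.
 *
 *      if (skb_recycle_check(skb, rx_buf_size))
 *              add_to_rx_ring(skb);    // reuse as a fresh rx buffer
 *      else
 *              dev_kfree_skb(skb);
 */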

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
        new->tstamp             = old->tstamp;
        new->dev                = old->dev;
        new->transport_header   = old->transport_header;
        new->network_header     = old->network_header;
        new->mac_header         = old->mac_header;
        skb_dst_set(new, dst_clone(skb_dst(old)));
#ifdef CONFIG_XFRM
        new->sp                 = secpath_get(old->sp);
#endif
        memcpy(new->cb, old->cb, sizeof(old->cb));
        new->csum               = old->csum;
        new->local_df           = old->local_df;
        new->pkt_type           = old->pkt_type;
        new->ip_summed          = old->ip_summed;
        skb_copy_queue_mapping(new, old);
        new->priority           = old->priority;
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
        new->ipvs_property      = old->ipvs_property;
#endif
        new->protocol           = old->protocol;
        new->mark               = old->mark;
        new->iif                = old->iif;
        __nf_copy(new, old);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
        new->nf_trace           = old->nf_trace;
#endif
#ifdef CONFIG_NET_SCHED
        new->tc_index           = old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
        new->tc_verd            = old->tc_verd;
#endif
#endif
        new->vlan_tci           = old->vlan_tci;

        skb_copy_secmark(new, old);
}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

        n->next = n->prev = NULL;
        n->sk = NULL;
        __copy_skb_header(n, skb);

        C(len);
        C(data_len);
        C(mac_len);
        n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
        n->cloned = 1;
        n->nohdr = 0;
        n->destructor = NULL;
        C(tail);
        C(end);
        C(head);
        C(data);
        C(truesize);
        atomic_set(&n->users, 1);

        atomic_inc(&(skb_shinfo(skb)->dataref));
        skb->cloned = 1;

        return n;
#undef C
}

/**
 *      skb_morph       -       morph one skb into another
 *      @dst: the skb to receive the contents
 *      @src: the skb to supply the contents
 *
 *      This is identical to skb_clone except that the target skb is
 *      supplied by the user.
 *
 *      The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
        skb_release_all(dst);
        return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

/**
 *      skb_clone       -       duplicate an sk_buff
 *      @skb: buffer to clone
 *      @gfp_mask: allocation priority
 *
 *      Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *      copies share the same packet data but not structure. The new
 *      buffer has a reference count of 1. If the allocation fails the
 *      function returns %NULL otherwise the new buffer is returned.
 *
 *      If this function is called from an interrupt, @gfp_mask must be
 *      %GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
        struct sk_buff *n;

        n = skb + 1;
        if (skb->fclone == SKB_FCLONE_ORIG &&
            n->fclone == SKB_FCLONE_UNAVAILABLE) {
                atomic_t *fclone_ref = (atomic_t *) (n + 1);
                n->fclone = SKB_FCLONE_CLONE;
                atomic_inc(fclone_ref);
        } else {
                n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
                if (!n)
                        return NULL;

                kmemcheck_annotate_bitfield(n, flags1);
                kmemcheck_annotate_bitfield(n, flags2);
                n->fclone = SKB_FCLONE_UNAVAILABLE;
        }

        return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);
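
/* Usage sketch (illustrative addition): cloning is the cheap way to hand
 * one packet to two consumers, provided neither writes the shared data.
 * "deliver" and "deliver_copy" are hypothetical.
 *
 *      struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 *
 *      if (nskb)
 *              deliver_copy(nskb);
 *      deliver(skb);
 */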

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
#ifndef NET_SKBUFF_DATA_USES_OFFSET
        /*
         *      Shift between the two data areas in bytes
         */
        unsigned long offset = new->data - old->data;
#endif

        __copy_skb_header(new, old);

#ifndef NET_SKBUFF_DATA_USES_OFFSET
        /* {transport,network,mac}_header are relative to skb->head */
        new->transport_header += offset;
        new->network_header   += offset;
        if (skb_mac_header_was_set(new))
                new->mac_header       += offset;
#endif
        skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
        skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
        skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

/**
 *      skb_copy        -       create private copy of an sk_buff
 *      @skb: buffer to copy
 *      @gfp_mask: allocation priority
 *
 *      Make a copy of both an &sk_buff and its data. This is used when the
 *      caller wishes to modify the data and needs a private copy of the
 *      data to alter. Returns %NULL on failure or the pointer to the buffer
 *      on success. The returned buffer has a reference count of 1.
 *
 *      As a by-product, this function converts a non-linear &sk_buff into a
 *      linear one, so the &sk_buff becomes completely private and the caller
 *      is allowed to modify all the data of the returned buffer. This means
 *      that this function is not recommended in circumstances where only the
 *      header is going to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
        int headerlen = skb->data - skb->head;
        /*
         *      Allocate the copy buffer
         */
        struct sk_buff *n;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
        n = alloc_skb(skb->end + skb->data_len, gfp_mask);
#else
        n = alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
#endif
        if (!n)
                return NULL;

        /* Set the data pointer */
        skb_reserve(n, headerlen);
        /* Set the tail pointer and length */
        skb_put(n, skb->len);

        if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
                BUG();

        copy_skb_header(n, skb);
        return n;
}
EXPORT_SYMBOL(skb_copy);
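
/* Usage sketch (illustrative addition): when the payload itself must be
 * rewritten, a full private copy is required; a clone would still share
 * the data with the original.
 *
 *      struct sk_buff *priv = skb_copy(skb, GFP_ATOMIC);
 *
 *      if (!priv)
 *              return -ENOMEM;
 *      priv->data[0] ^= 0xff;          // safe: data is no longer shared
 */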

/**
 *      pskb_copy       -       create copy of an sk_buff with private head.
 *      @skb: buffer to copy
 *      @gfp_mask: allocation priority
 *
 *      Make a copy of both an &sk_buff and part of its data, located
 *      in the header. Fragmented data remain shared. This is used when
 *      the caller wishes to modify only the header of an &sk_buff and
 *      needs a private copy of the header to alter. Returns %NULL on
 *      failure or the pointer to the buffer on success.
 *      The returned buffer has a reference count of 1.
 */

struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
{
        /*
         *      Allocate the copy buffer
         */
        struct sk_buff *n;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
        n = alloc_skb(skb->end, gfp_mask);
#else
        n = alloc_skb(skb->end - skb->head, gfp_mask);
#endif
        if (!n)
                goto out;

        /* Set the data pointer */
        skb_reserve(n, skb->data - skb->head);
        /* Set the tail pointer and length */
        skb_put(n, skb_headlen(skb));
        /* Copy the bytes */
        skb_copy_from_linear_data(skb, n->data, n->len);

        n->truesize += skb->data_len;
        n->data_len  = skb->data_len;
        n->len       = skb->len;

        if (skb_shinfo(skb)->nr_frags) {
                int i;

                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
                        get_page(skb_shinfo(n)->frags[i].page);
                }
                skb_shinfo(n)->nr_frags = i;
        }

        if (skb_has_frags(skb)) {
                skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
                skb_clone_fraglist(n);
        }

        copy_skb_header(n, skb);
out:
        return n;
}
EXPORT_SYMBOL(pskb_copy);

/**
 *      pskb_expand_head - reallocate header of &sk_buff
 *      @skb: buffer to reallocate
 *      @nhead: room to add at head
 *      @ntail: room to add at tail
 *      @gfp_mask: allocation priority
 *
 *      Expands (or creates an identical copy, if @nhead and @ntail are
 *      zero) the header of the skb. The &sk_buff itself is not changed and
 *      MUST have a reference count of 1. Returns zero on success or a
 *      negative error code if expansion failed; in the latter case, the
 *      &sk_buff is not changed.
 *
 *      All the pointers pointing into skb header may change and must be
 *      reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
                     gfp_t gfp_mask)
{
        int i;
        u8 *data;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
        int size = nhead + skb->end + ntail;
#else
        int size = nhead + (skb->end - skb->head) + ntail;
#endif
        long off;

        BUG_ON(nhead < 0);

        if (skb_shared(skb))
                BUG();

        size = SKB_DATA_ALIGN(size);

        data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
        if (!data)
                goto nodata;

        /* Copy only real data... and, alas, header. This should be
         * optimized for the cases when header is void. */
#ifdef NET_SKBUFF_DATA_USES_OFFSET
        memcpy(data + nhead, skb->head, skb->tail);
#else
        memcpy(data + nhead, skb->head, skb->tail - skb->head);
#endif
        memcpy(data + size, skb_end_pointer(skb),
               sizeof(struct skb_shared_info));

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                get_page(skb_shinfo(skb)->frags[i].page);

        if (skb_has_frags(skb))
                skb_clone_fraglist(skb);

        skb_release_data(skb);

        off = (data + nhead) - skb->head;

        skb->head     = data;
        skb->data    += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
        skb->end      = size;
        off           = nhead;
#else
        skb->end      = skb->head + size;
#endif
        /* {transport,network,mac}_header and tail are relative to skb->head */
        skb->tail             += off;
        skb->transport_header += off;
        skb->network_header   += off;
        if (skb_mac_header_was_set(skb))
                skb->mac_header += off;
        skb->csum_start       += nhead;
        skb->cloned   = 0;
        skb->hdr_len  = 0;
        skb->nohdr    = 0;
        atomic_set(&skb_shinfo(skb)->dataref, 1);
        return 0;

nodata:
        return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);
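
/* Usage sketch (illustrative addition): callers usually reach this through
 * helpers such as skb_cow_head(), making room before pushing a new header.
 * "ENCAP_HLEN" and "struct encap_hdr" are hypothetical.
 *
 *      if (skb_cow_head(skb, ENCAP_HLEN))
 *              goto drop;              // reallocation failed
 *      hdr = (struct encap_hdr *)skb_push(skb, ENCAP_HLEN);
 */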

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
        struct sk_buff *skb2;
        int delta = headroom - skb_headroom(skb);

        if (delta <= 0)
                skb2 = pskb_copy(skb, GFP_ATOMIC);
        else {
                skb2 = skb_clone(skb, GFP_ATOMIC);
                if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
                                             GFP_ATOMIC)) {
                        kfree_skb(skb2);
                        skb2 = NULL;
                }
        }
        return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);

/**
 *      skb_copy_expand -       copy and expand sk_buff
 *      @skb: buffer to copy
 *      @newheadroom: new free bytes at head
 *      @newtailroom: new free bytes at tail
 *      @gfp_mask: allocation priority
 *
 *      Make a copy of both an &sk_buff and its data and while doing so
 *      allocate additional space.
 *
 *      This is used when the caller wishes to modify the data and needs a
 *      private copy of the data to alter as well as more space for new fields.
 *      Returns %NULL on failure or the pointer to the buffer
 *      on success. The returned buffer has a reference count of 1.
 *
 *      You must pass %GFP_ATOMIC as the allocation priority if this function
 *      is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
                                int newheadroom, int newtailroom,
                                gfp_t gfp_mask)
{
        /*
         *      Allocate the copy buffer
         */
        struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
                                      gfp_mask);
        int oldheadroom = skb_headroom(skb);
        int head_copy_len, head_copy_off;
        int off;

        if (!n)
                return NULL;

        skb_reserve(n, newheadroom);

        /* Set the tail pointer and length */
        skb_put(n, skb->len);

        head_copy_len = oldheadroom;
        head_copy_off = 0;
        if (newheadroom <= head_copy_len)
                head_copy_len = newheadroom;
        else
                head_copy_off = newheadroom - head_copy_len;

        /* Copy the linear header and data. */
        if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
                          skb->len + head_copy_len))
                BUG();

        copy_skb_header(n, skb);

        off                  = newheadroom - oldheadroom;
        n->csum_start       += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
        n->transport_header += off;
        n->network_header   += off;
        if (skb_mac_header_was_set(skb))
                n->mac_header += off;
#endif

        return n;
}
EXPORT_SYMBOL(skb_copy_expand);

/**
 *      skb_pad                 -       zero pad the tail of an skb
 *      @skb: buffer to pad
 *      @pad: space to pad
 *
 *      Ensure that a buffer is followed by a padding area that is zero
 *      filled. Used by network drivers which may DMA or transfer data
 *      beyond the buffer end onto the wire.
 *
 *      May return error in out of memory cases. The skb is freed on error.
 */

int skb_pad(struct sk_buff *skb, int pad)
{
        int err;
        int ntail;

        /* If the skbuff is non-linear, tailroom is always zero. */
        if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
                memset(skb->data+skb->len, 0, pad);
                return 0;
        }

        ntail = skb->data_len + pad - (skb->end - skb->tail);
        if (likely(skb_cloned(skb) || ntail > 0)) {
                err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
                if (unlikely(err))
                        goto free_skb;
        }

        /* FIXME: The use of this function with non-linear skb's really needs
         * to be audited.
         */
        err = skb_linearize(skb);
        if (unlikely(err))
                goto free_skb;

        memset(skb->data + skb->len, 0, pad);
        return 0;

free_skb:
        kfree_skb(skb);
        return err;
}
EXPORT_SYMBOL(skb_pad);
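
/* Usage sketch (illustrative addition): drivers normally use the
 * skb_padto() wrapper to round short Ethernet frames up to the minimum
 * frame length before handing them to hardware:
 *
 *      if (skb_padto(skb, ETH_ZLEN))
 *              return NETDEV_TX_OK;    // skb was already freed on error
 */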

/**
 *      skb_put - add data to a buffer
 *      @skb: buffer to use
 *      @len: amount of data to add
 *
 *      This function extends the used data area of the buffer. If this would
 *      exceed the total buffer size the kernel will panic. A pointer to the
 *      first byte of the extra data is returned.
 */
unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
        unsigned char *tmp = skb_tail_pointer(skb);
        SKB_LINEAR_ASSERT(skb);
        skb->tail += len;
        skb->len  += len;
        if (unlikely(skb->tail > skb->end))
                skb_over_panic(skb, len, __builtin_return_address(0));
        return tmp;
}
EXPORT_SYMBOL(skb_put);

/**
 *      skb_push - add data to the start of a buffer
 *      @skb: buffer to use
 *      @len: amount of data to add
 *
 *      This function extends the used data area of the buffer at the buffer
 *      start. If this would exceed the total buffer headroom the kernel will
 *      panic. A pointer to the first byte of the extra data is returned.
 */
unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
        skb->data -= len;
        skb->len  += len;
        if (unlikely(skb->data < skb->head))
                skb_under_panic(skb, len, __builtin_return_address(0));
        return skb->data;
}
EXPORT_SYMBOL(skb_push);

/**
 *      skb_pull - remove data from the start of a buffer
 *      @skb: buffer to use
 *      @len: amount of data to remove
 *
 *      This function removes data from the start of a buffer, returning
 *      the memory to the headroom. A pointer to the next data in the buffer
 *      is returned. Once the data has been pulled future pushes will overwrite
 *      the old data.
 */
unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
        return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}
EXPORT_SYMBOL(skb_pull);

/**
 *      skb_trim - remove end from a buffer
 *      @skb: buffer to alter
 *      @len: new length
 *
 *      Cut the length of a buffer down by removing data from the tail. If
 *      the buffer is already under the length specified it is not modified.
 *      The skb must be linear.
 */
void skb_trim(struct sk_buff *skb, unsigned int len)
{
        if (skb->len > len)
                __skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);
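
/* Usage sketch (illustrative addition): together, reserve/put/push/pull/
 * trim cover ordinary linear buffer surgery. "build_payload", "len" and
 * "pad" are hypothetical.
 *
 *      skb_reserve(skb, ETH_HLEN);             // room for the link header
 *      build_payload(skb_put(skb, len), len);  // append payload
 *      skb_push(skb, ETH_HLEN);                // prepend the link header
 *      ...
 *      skb_pull(skb, ETH_HLEN);                // consume it again on rx
 *      skb_trim(skb, len - pad);               // drop trailing padding
 */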

/* Trims skb to length len. It can change skb pointers.
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
        struct sk_buff **fragp;
        struct sk_buff *frag;
        int offset = skb_headlen(skb);
        int nfrags = skb_shinfo(skb)->nr_frags;
        int i;
        int err;

        if (skb_cloned(skb) &&
            unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
                return err;

        i = 0;
        if (offset >= len)
                goto drop_pages;

        for (; i < nfrags; i++) {
                int end = offset + skb_shinfo(skb)->frags[i].size;

                if (end < len) {
                        offset = end;
                        continue;
                }

                skb_shinfo(skb)->frags[i++].size = len - offset;

drop_pages:
                skb_shinfo(skb)->nr_frags = i;

                for (; i < nfrags; i++)
                        put_page(skb_shinfo(skb)->frags[i].page);

                if (skb_has_frags(skb))
                        skb_drop_fraglist(skb);
                goto done;
        }

        for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
             fragp = &frag->next) {
                int end = offset + frag->len;

                if (skb_shared(frag)) {
                        struct sk_buff *nfrag;

                        nfrag = skb_clone(frag, GFP_ATOMIC);
                        if (unlikely(!nfrag))
                                return -ENOMEM;

                        nfrag->next = frag->next;
                        kfree_skb(frag);
                        frag = nfrag;
                        *fragp = frag;
                }

                if (end < len) {
                        offset = end;
                        continue;
                }

                if (end > len &&
                    unlikely((err = pskb_trim(frag, len - offset))))
                        return err;

                if (frag->next)
                        skb_drop_list(&frag->next);
                break;
        }

done:
        if (len > skb_headlen(skb)) {
                skb->data_len -= skb->len - len;
                skb->len       = len;
        } else {
                skb->len       = len;
                skb->data_len  = 0;
                skb_set_tail_pointer(skb, len);
        }

        return 0;
}
EXPORT_SYMBOL(___pskb_trim);

/**
 *      __pskb_pull_tail - advance tail of skb header
 *      @skb: buffer to reallocate
 *      @delta: number of bytes to advance tail
 *
 *      This function only makes sense on a fragmented &sk_buff; it expands
 *      the header, moving its tail forward and copying the necessary data
 *      from the fragmented part.
 *
 *      &sk_buff MUST have reference count of 1.
 *
 *      Returns %NULL (and &sk_buff does not change) if pull failed
 *      or value of new tail of skb in the case of success.
 *
 *      All the pointers pointing into skb header may change and must be
 *      reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
        /* If the skb does not have enough free space at the tail, get a new
         * one plus 128 bytes for future expansions. If we have enough
         * room at tail, reallocate without expansion only if skb is cloned.
         */
        int i, k, eat = (skb->tail + delta) - skb->end;

        if (eat > 0 || skb_cloned(skb)) {
                if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
                                     GFP_ATOMIC))
                        return NULL;
        }

        if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
                BUG();

        /* Optimization: no fragments, no reason to pre-estimate
         * size of pulled pages. Superb.
         */
        if (!skb_has_frags(skb))
                goto pull_pages;

        /* Estimate size of pulled pages. */
        eat = delta;
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                if (skb_shinfo(skb)->frags[i].size >= eat)
                        goto pull_pages;
                eat -= skb_shinfo(skb)->frags[i].size;
        }

        /* If we need to update the frag list, we are in trouble.
         * Certainly, it is possible to add an offset to the skb data,
         * but, taking into account that pulling is expected to
         * be a very rare operation, it is worth fighting against
         * further bloating of the skb head and crucifying ourselves
         * here instead. Pure masochism, indeed. 8)8)
         */
        if (eat) {
                struct sk_buff *list = skb_shinfo(skb)->frag_list;
                struct sk_buff *clone = NULL;
                struct sk_buff *insp = NULL;

                do {
                        BUG_ON(!list);

                        if (list->len <= eat) {
                                /* Eaten as whole. */
                                eat -= list->len;
                                list = list->next;
                                insp = list;
                        } else {
                                /* Eaten partially. */

                                if (skb_shared(list)) {
                                        /* Sucks! We need to fork list. :-( */
                                        clone = skb_clone(list, GFP_ATOMIC);
                                        if (!clone)
                                                return NULL;
                                        insp = list->next;
                                        list = clone;
                                } else {
                                        /* This may be pulled without
                                         * problems. */
                                        insp = list;
                                }
                                if (!pskb_pull(list, eat)) {
                                        kfree_skb(clone);
                                        return NULL;
                                }
                                break;
                        }
                } while (eat);

                /* Free pulled out fragments. */
                while ((list = skb_shinfo(skb)->frag_list) != insp) {
                        skb_shinfo(skb)->frag_list = list->next;
                        kfree_skb(list);
                }
                /* And insert new clone at head. */
                if (clone) {
                        clone->next = list;
                        skb_shinfo(skb)->frag_list = clone;
                }
        }
        /* Success! Now we may commit changes to skb data. */

pull_pages:
        eat = delta;
        k = 0;
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                if (skb_shinfo(skb)->frags[i].size <= eat) {
                        put_page(skb_shinfo(skb)->frags[i].page);
                        eat -= skb_shinfo(skb)->frags[i].size;
                } else {
                        skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
                        if (eat) {
                                skb_shinfo(skb)->frags[k].page_offset += eat;
                                skb_shinfo(skb)->frags[k].size -= eat;
                                eat = 0;
                        }
                        k++;
                }
        }
        skb_shinfo(skb)->nr_frags = k;

        skb->tail     += delta;
        skb->data_len -= delta;

        return skb_tail_pointer(skb);
}
EXPORT_SYMBOL(__pskb_pull_tail);
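
/* Usage sketch (illustrative addition): protocol code rarely calls this
 * directly; it uses the pskb_may_pull() wrapper to guarantee the first
 * few bytes are linear before dereferencing a header:
 *
 *      if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *              goto drop;              // truncated packet or alloc failure
 *      iph = ip_hdr(skb);              // now safe to read
 */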

/* Copy some data bits from skb to kernel buffer. */

int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
        int start = skb_headlen(skb);
        struct sk_buff *frag_iter;
        int i, copy;

        if (offset > (int)skb->len - len)
                goto fault;

        /* Copy header. */
        if ((copy = start - offset) > 0) {
                if (copy > len)
                        copy = len;
                skb_copy_from_linear_data_offset(skb, offset, to, copy);
                if ((len -= copy) == 0)
                        return 0;
                offset += copy;
                to     += copy;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                int end;

                WARN_ON(start > offset + len);

                end = start + skb_shinfo(skb)->frags[i].size;
                if ((copy = end - offset) > 0) {
                        u8 *vaddr;

                        if (copy > len)
                                copy = len;

                        vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
                        memcpy(to,
                               vaddr + skb_shinfo(skb)->frags[i].page_offset +
                               offset - start, copy);
                        kunmap_skb_frag(vaddr);

                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                        to     += copy;
                }
                start = end;
        }

        skb_walk_frags(skb, frag_iter) {
                int end;

                WARN_ON(start > offset + len);

                end = start + frag_iter->len;
                if ((copy = end - offset) > 0) {
                        if (copy > len)
                                copy = len;
                        if (skb_copy_bits(frag_iter, offset - start, to, copy))
                                goto fault;
                        if ((len -= copy) == 0)
                                return 0;
                        offset += copy;
                        to     += copy;
                }
                start = end;
        }
        if (!len)
                return 0;

fault:
        return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_bits);
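
/* Usage sketch (illustrative addition): copy the first n payload bytes,
 * wherever they live (linear area, page frags or frag list), into a flat
 * buffer:
 *
 *      u8 buf[64];
 *      unsigned int n = min_t(unsigned int, skb->len, sizeof(buf));
 *
 *      if (skb_copy_bits(skb, 0, buf, n))
 *              goto fault;             // offset/length out of range
 */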

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we errored out in filling the pipe.
 */
static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
        put_page(spd->pages[i]);
}

static inline struct page *linear_to_page(struct page *page, unsigned int *len,
                                          unsigned int *offset,
                                          struct sk_buff *skb, struct sock *sk)
{
        struct page *p = sk->sk_sndmsg_page;
        unsigned int off;

        if (!p) {
new_page:
                p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
                if (!p)
                        return NULL;

                off = sk->sk_sndmsg_off = 0;
                /* hold one ref to this page until it's full */
        } else {
                unsigned int mlen;

                off = sk->sk_sndmsg_off;
                mlen = PAGE_SIZE - off;
                if (mlen < 64 && mlen < *len) {
                        put_page(p);
                        goto new_page;
                }

                *len = min_t(unsigned int, *len, mlen);
        }

        memcpy(page_address(p) + off, page_address(page) + *offset, *len);
        sk->sk_sndmsg_off += *len;
        *offset = off;
        get_page(p);

        return p;
}

/*
 * Fill page/offset/length into spd, if it can hold more pages.
 */
static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
                                unsigned int *len, unsigned int offset,
                                struct sk_buff *skb, int linear,
                                struct sock *sk)
{
        if (unlikely(spd->nr_pages == PIPE_BUFFERS))
                return 1;

        if (linear) {
                page = linear_to_page(page, len, &offset, skb, sk);
                if (!page)
                        return 1;
        } else
                get_page(page);

        spd->pages[spd->nr_pages] = page;
        spd->partial[spd->nr_pages].len = *len;
        spd->partial[spd->nr_pages].offset = offset;
        spd->nr_pages++;

        return 0;
}

static inline void __segment_seek(struct page **page, unsigned int *poff,
                                  unsigned int *plen, unsigned int off)
{
        unsigned long n;

        *poff += off;
        n = *poff / PAGE_SIZE;
        if (n)
                *page = nth_page(*page, n);

        *poff = *poff % PAGE_SIZE;
        *plen -= off;
}

static inline int __splice_segment(struct page *page, unsigned int poff,
                                   unsigned int plen, unsigned int *off,
                                   unsigned int *len, struct sk_buff *skb,
                                   struct splice_pipe_desc *spd, int linear,
                                   struct sock *sk)
{
        if (!*len)
                return 1;

        /* skip this segment if already processed */
        if (*off >= plen) {
                *off -= plen;
                return 0;
        }

        /* ignore any bits we already processed */
        if (*off) {
                __segment_seek(&page, &poff, &plen, *off);
                *off = 0;
        }

        do {
                unsigned int flen = min(*len, plen);

                /* the linear region may spread across several pages */
                flen = min_t(unsigned int, flen, PAGE_SIZE - poff);

                if (spd_fill_page(spd, page, &flen, poff, skb, linear, sk))
                        return 1;

                __segment_seek(&page, &poff, &plen, flen);
                *len -= flen;

        } while (*len && plen);

        return 0;
}

/*
 * Map linear and fragment data from the skb to spd. It reports failure if the
 * pipe is full or if we already spliced the requested length.
 */
static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
                             unsigned int *len, struct splice_pipe_desc *spd,
                             struct sock *sk)
{
        int seg;

        /*
         * map the linear part
         */
        if (__splice_segment(virt_to_page(skb->data),
                             (unsigned long) skb->data & (PAGE_SIZE - 1),
                             skb_headlen(skb),
                             offset, len, skb, spd, 1, sk))
                return 1;

        /*
         * then map the fragments
         */
        for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
                const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];

                if (__splice_segment(f->page, f->page_offset, f->size,
                                     offset, len, skb, spd, 0, sk))
                        return 1;
        }

        return 0;
}

/*
 * Map data from the skb to a pipe. Should handle both the linear part,
 * the fragments, and the frag list. It does NOT handle frag lists within
 * the frag list, if such a thing exists. We'd probably need to recurse to
 * handle that cleanly.
 */
int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
                    struct pipe_inode_info *pipe, unsigned int tlen,
                    unsigned int flags)
{
        struct partial_page partial[PIPE_BUFFERS];
        struct page *pages[PIPE_BUFFERS];
        struct splice_pipe_desc spd = {
                .pages = pages,
                .partial = partial,
                .flags = flags,
                .ops = &sock_pipe_buf_ops,
                .spd_release = sock_spd_release,
        };
        struct sk_buff *frag_iter;
        struct sock *sk = skb->sk;

        /*
         * __skb_splice_bits() only fails if the output has no room left,
         * so no point in going over the frag_list for the error case.
         */
        if (__skb_splice_bits(skb, &offset, &tlen, &spd, sk))
                goto done;
        else if (!tlen)
                goto done;

        /*
         * now see if we have a frag_list to map
         */
        skb_walk_frags(skb, frag_iter) {
                if (!tlen)
                        break;
                if (__skb_splice_bits(frag_iter, &offset, &tlen, &spd, sk))
                        break;
        }

done:
        if (spd.nr_pages) {
                int ret;

                /*
                 * Drop the socket lock, otherwise we have reverse
                 * locking dependencies between sk_lock and i_mutex
                 * here as compared to sendfile(). We enter here
                 * with the socket lock held, and splice_to_pipe() will
                 * grab the pipe inode lock. For sendfile() emulation,
                 * we call into ->sendpage() with the i_mutex lock held
                 * and networking will grab the socket lock.
                 */
                release_sock(sk);
                ret = splice_to_pipe(pipe, &spd);
                lock_sock(sk);
                return ret;
        }

        return 0;
}
1587
1588/**
1589 *      skb_store_bits - store bits from kernel buffer to skb
1590 *      @skb: destination buffer
1591 *      @offset: offset in destination
1592 *      @from: source buffer
1593 *      @len: number of bytes to copy
1594 *
1595 *      Copy the specified number of bytes from the source buffer to the
1596 *      destination skb.  This function handles all the messy bits of
1597 *      traversing fragment lists and such.
1598 */
1599
1600int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
1601{
1602        int start = skb_headlen(skb);
1603        struct sk_buff *frag_iter;
1604        int i, copy;
1605
1606        if (offset > (int)skb->len - len)
1607                goto fault;
1608
1609        if ((copy = start - offset) > 0) {
1610                if (copy > len)
1611                        copy = len;
1612                skb_copy_to_linear_data_offset(skb, offset, from, copy);
1613                if ((len -= copy) == 0)
1614                        return 0;
1615                offset += copy;
1616                from += copy;
1617        }
1618
1619        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1620                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1621                int end;
1622
1623                WARN_ON(start > offset + len);
1624
1625                end = start + frag->size;
1626                if ((copy = end - offset) > 0) {
1627                        u8 *vaddr;
1628
1629                        if (copy > len)
1630                                copy = len;
1631
1632                        vaddr = kmap_skb_frag(frag);
1633                        memcpy(vaddr + frag->page_offset + offset - start,
1634                               from, copy);
1635                        kunmap_skb_frag(vaddr);
1636
1637                        if ((len -= copy) == 0)
1638                                return 0;
1639                        offset += copy;
1640                        from += copy;
1641                }
1642                start = end;
1643        }
1644
1645        skb_walk_frags(skb, frag_iter) {
1646                int end;
1647
1648                WARN_ON(start > offset + len);
1649
1650                end = start + frag_iter->len;
1651                if ((copy = end - offset) > 0) {
1652                        if (copy > len)
1653                                copy = len;
1654                        if (skb_store_bits(frag_iter, offset - start,
1655                                           from, copy))
1656                                goto fault;
1657                        if ((len -= copy) == 0)
1658                                return 0;
1659                        offset += copy;
1660                        from += copy;
1661                }
1662                start = end;
1663        }
1664        if (!len)
1665                return 0;
1666
1667fault:
1668        return -EFAULT;
1669}
1670EXPORT_SYMBOL(skb_store_bits);
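
/* Editor's sketch (not part of the original file): overwriting a small
 * region of a possibly non-linear skb with skb_store_bits().  The helper
 * name and the byte pattern are hypothetical; the skb is assumed to be
 * private (not cloned), since fragment pages are written in place.
 */
static int example_rewrite_bytes(struct sk_buff *skb, int offset)
{
        static const u8 pattern[4] = { 0xde, 0xad, 0xbe, 0xef };

        /* Returns -EFAULT if [offset, offset + 4) lies beyond skb->len. */
        return skb_store_bits(skb, offset, pattern, sizeof(pattern));
}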
1671
1672/* Checksum skb data. */
1673
1674__wsum skb_checksum(const struct sk_buff *skb, int offset,
1675                          int len, __wsum csum)
1676{
1677        int start = skb_headlen(skb);
1678        int i, copy = start - offset;
1679        struct sk_buff *frag_iter;
1680        int pos = 0;
1681
1682        /* Checksum header. */
1683        if (copy > 0) {
1684                if (copy > len)
1685                        copy = len;
1686                csum = csum_partial(skb->data + offset, copy, csum);
1687                if ((len -= copy) == 0)
1688                        return csum;
1689                offset += copy;
1690                pos     = copy;
1691        }
1692
1693        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1694                int end;
1695
1696                WARN_ON(start > offset + len);
1697
1698                end = start + skb_shinfo(skb)->frags[i].size;
1699                if ((copy = end - offset) > 0) {
1700                        __wsum csum2;
1701                        u8 *vaddr;
1702                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1703
1704                        if (copy > len)
1705                                copy = len;
1706                        vaddr = kmap_skb_frag(frag);
1707                        csum2 = csum_partial(vaddr + frag->page_offset +
1708                                             offset - start, copy, 0);
1709                        kunmap_skb_frag(vaddr);
1710                        csum = csum_block_add(csum, csum2, pos);
1711                        if (!(len -= copy))
1712                                return csum;
1713                        offset += copy;
1714                        pos    += copy;
1715                }
1716                start = end;
1717        }
1718
1719        skb_walk_frags(skb, frag_iter) {
1720                int end;
1721
1722                WARN_ON(start > offset + len);
1723
1724                end = start + frag_iter->len;
1725                if ((copy = end - offset) > 0) {
1726                        __wsum csum2;
1727                        if (copy > len)
1728                                copy = len;
1729                        csum2 = skb_checksum(frag_iter, offset - start,
1730                                             copy, 0);
1731                        csum = csum_block_add(csum, csum2, pos);
1732                        if ((len -= copy) == 0)
1733                                return csum;
1734                        offset += copy;
1735                        pos    += copy;
1736                }
1737                start = end;
1738        }
1739        BUG_ON(len);
1740
1741        return csum;
1742}
1743EXPORT_SYMBOL(skb_checksum);
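
/* Editor's sketch: folding a full Internet checksum over the skb payload
 * starting at @offset, e.g. when software-verifying a received datagram.
 * The helper name is hypothetical.
 */
static __sum16 example_payload_csum(const struct sk_buff *skb, int offset)
{
        __wsum csum = skb_checksum(skb, offset, skb->len - offset, 0);

        return csum_fold(csum);
}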
1744
1745/* Both of the above combined in one pass. */
1746
1747__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
1748                                    u8 *to, int len, __wsum csum)
1749{
1750        int start = skb_headlen(skb);
1751        int i, copy = start - offset;
1752        struct sk_buff *frag_iter;
1753        int pos = 0;
1754
1755        /* Copy header. */
1756        if (copy > 0) {
1757                if (copy > len)
1758                        copy = len;
1759                csum = csum_partial_copy_nocheck(skb->data + offset, to,
1760                                                 copy, csum);
1761                if ((len -= copy) == 0)
1762                        return csum;
1763                offset += copy;
1764                to     += copy;
1765                pos     = copy;
1766        }
1767
1768        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1769                int end;
1770
1771                WARN_ON(start > offset + len);
1772
1773                end = start + skb_shinfo(skb)->frags[i].size;
1774                if ((copy = end - offset) > 0) {
1775                        __wsum csum2;
1776                        u8 *vaddr;
1777                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1778
1779                        if (copy > len)
1780                                copy = len;
1781                        vaddr = kmap_skb_frag(frag);
1782                        csum2 = csum_partial_copy_nocheck(vaddr +
1783                                                          frag->page_offset +
1784                                                          offset - start, to,
1785                                                          copy, 0);
1786                        kunmap_skb_frag(vaddr);
1787                        csum = csum_block_add(csum, csum2, pos);
1788                        if (!(len -= copy))
1789                                return csum;
1790                        offset += copy;
1791                        to     += copy;
1792                        pos    += copy;
1793                }
1794                start = end;
1795        }
1796
1797        skb_walk_frags(skb, frag_iter) {
1798                __wsum csum2;
1799                int end;
1800
1801                WARN_ON(start > offset + len);
1802
1803                end = start + frag_iter->len;
1804                if ((copy = end - offset) > 0) {
1805                        if (copy > len)
1806                                copy = len;
1807                        csum2 = skb_copy_and_csum_bits(frag_iter,
1808                                                       offset - start,
1809                                                       to, copy, 0);
1810                        csum = csum_block_add(csum, csum2, pos);
1811                        if ((len -= copy) == 0)
1812                                return csum;
1813                        offset += copy;
1814                        to     += copy;
1815                        pos    += copy;
1816                }
1817                start = end;
1818        }
1819        BUG_ON(len);
1820        return csum;
1821}
1822EXPORT_SYMBOL(skb_copy_and_csum_bits);
1823
1824void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
1825{
1826        __wsum csum;
1827        long csstart;
1828
1829        if (skb->ip_summed == CHECKSUM_PARTIAL)
1830                csstart = skb->csum_start - skb_headroom(skb);
1831        else
1832                csstart = skb_headlen(skb);
1833
1834        BUG_ON(csstart > skb_headlen(skb));
1835
1836        skb_copy_from_linear_data(skb, to, csstart);
1837
1838        csum = 0;
1839        if (csstart != skb->len)
1840                csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
1841                                              skb->len - csstart, 0);
1842
1843        if (skb->ip_summed == CHECKSUM_PARTIAL) {
1844                long csstuff = csstart + skb->csum_offset;
1845
1846                *((__sum16 *)(to + csstuff)) = csum_fold(csum);
1847        }
1848}
1849EXPORT_SYMBOL(skb_copy_and_csum_dev);
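
/* Editor's sketch: how a driver without scatter/gather support might use
 * skb_copy_and_csum_dev() to flatten an outgoing skb into a bounce
 * buffer, completing a CHECKSUM_PARTIAL checksum during the copy.  The
 * buffer is assumed to hold at least skb->len bytes; names hypothetical.
 */
static void example_xmit_copy(const struct sk_buff *skb, u8 *bounce)
{
        skb_copy_and_csum_dev(skb, bounce);
        /* bounce[] now holds the frame with its checksum filled in */
}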
1850
1851/**
1852 *      skb_dequeue - remove from the head of the queue
1853 *      @list: list to dequeue from
1854 *
1855 *      Remove the head of the list. The list lock is taken so the function
1856 *      may be used safely with other locking list functions. The head item is
1857 *      returned or %NULL if the list is empty.
1858 */
1859
1860struct sk_buff *skb_dequeue(struct sk_buff_head *list)
1861{
1862        unsigned long flags;
1863        struct sk_buff *result;
1864
1865        spin_lock_irqsave(&list->lock, flags);
1866        result = __skb_dequeue(list);
1867        spin_unlock_irqrestore(&list->lock, flags);
1868        return result;
1869}
1870EXPORT_SYMBOL(skb_dequeue);
1871
1872/**
1873 *      skb_dequeue_tail - remove from the tail of the queue
1874 *      @list: list to dequeue from
1875 *
1876 *      Remove the tail of the list. The list lock is taken so the function
1877 *      may be used safely with other locking list functions. The tail item is
1878 *      returned or %NULL if the list is empty.
1879 */
1880struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
1881{
1882        unsigned long flags;
1883        struct sk_buff *result;
1884
1885        spin_lock_irqsave(&list->lock, flags);
1886        result = __skb_dequeue_tail(list);
1887        spin_unlock_irqrestore(&list->lock, flags);
1888        return result;
1889}
1890EXPORT_SYMBOL(skb_dequeue_tail);
1891
1892/**
1893 *      skb_queue_purge - empty a list
1894 *      @list: list to empty
1895 *
1896 *      Delete all buffers on an &sk_buff list. Each buffer is removed from
1897 *      the list and one reference dropped. This function takes the list
1898 *      lock and is atomic with respect to other list locking functions.
1899 */
1900void skb_queue_purge(struct sk_buff_head *list)
1901{
1902        struct sk_buff *skb;
1903        while ((skb = skb_dequeue(list)) != NULL)
1904                kfree_skb(skb);
1905}
1906EXPORT_SYMBOL(skb_queue_purge);
1907
1908/**
1909 *      skb_queue_head - queue a buffer at the list head
1910 *      @list: list to use
1911 *      @newsk: buffer to queue
1912 *
1913 *      Queue a buffer at the start of the list. This function takes the
1914 *      list lock and can be used safely with other locking &sk_buff
1915 *      functions.
1916 *
1917 *      A buffer cannot be placed on two lists at the same time.
1918 */
1919void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
1920{
1921        unsigned long flags;
1922
1923        spin_lock_irqsave(&list->lock, flags);
1924        __skb_queue_head(list, newsk);
1925        spin_unlock_irqrestore(&list->lock, flags);
1926}
1927EXPORT_SYMBOL(skb_queue_head);
1928
1929/**
1930 *      skb_queue_tail - queue a buffer at the list tail
1931 *      @list: list to use
1932 *      @newsk: buffer to queue
1933 *
1934 *      Queue a buffer at the tail of the list. This function takes the
1935 *      list lock and can be used safely with other locking &sk_buff
1936 *      functions.
1937 *
1938 *      A buffer cannot be placed on two lists at the same time.
1939 */
1940void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
1941{
1942        unsigned long flags;
1943
1944        spin_lock_irqsave(&list->lock, flags);
1945        __skb_queue_tail(list, newsk);
1946        spin_unlock_irqrestore(&list->lock, flags);
1947}
1948EXPORT_SYMBOL(skb_queue_tail);
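
/* Editor's sketch: a private FIFO built from the locked queue helpers.
 * skb_queue_tail() and skb_dequeue() take list->lock with IRQs disabled,
 * so the helpers below need no extra locking.  The queue is assumed to
 * have been initialized with skb_queue_head_init(); names hypothetical.
 */
static void example_enqueue(struct sk_buff_head *q, struct sk_buff *skb)
{
        skb_queue_tail(q, skb);
}

static void example_drain(struct sk_buff_head *q)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(q)) != NULL)
                kfree_skb(skb);
}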
1949
1950/**
1951 *      skb_unlink      -       remove a buffer from a list
1952 *      @skb: buffer to remove
1953 *      @list: list to use
1954 *
1955 *      Remove a packet from a list. The list locks are taken and this
1956 *      function is atomic with respect to other list locked calls
1957 *
1958 *      You must know what list the SKB is on.
1959 */
1960void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
1961{
1962        unsigned long flags;
1963
1964        spin_lock_irqsave(&list->lock, flags);
1965        __skb_unlink(skb, list);
1966        spin_unlock_irqrestore(&list->lock, flags);
1967}
1968EXPORT_SYMBOL(skb_unlink);
1969
1970/**
1971 *      skb_append      -       append a buffer
1972 *      @old: buffer to insert after
1973 *      @newsk: buffer to insert
1974 *      @list: list to use
1975 *
1976 *      Place a packet after a given packet in a list. The list locks are taken
1977 *      and this function is atomic with respect to other list locked calls.
1978 *      A buffer cannot be placed on two lists at the same time.
1979 */
1980void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
1981{
1982        unsigned long flags;
1983
1984        spin_lock_irqsave(&list->lock, flags);
1985        __skb_queue_after(list, old, newsk);
1986        spin_unlock_irqrestore(&list->lock, flags);
1987}
1988EXPORT_SYMBOL(skb_append);
1989
1990/**
1991 *      skb_insert      -       insert a buffer
1992 *      @old: buffer to insert before
1993 *      @newsk: buffer to insert
1994 *      @list: list to use
1995 *
1996 *      Place a packet before a given packet in a list. The list locks are
1997 *      taken and this function is atomic with respect to other list locked
1998 *      calls.
1999 *
2000 *      A buffer cannot be placed on two lists at the same time.
2001 */
2002void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2003{
2004        unsigned long flags;
2005
2006        spin_lock_irqsave(&list->lock, flags);
2007        __skb_insert(newsk, old->prev, old, list);
2008        spin_unlock_irqrestore(&list->lock, flags);
2009}
2010EXPORT_SYMBOL(skb_insert);
2011
2012static inline void skb_split_inside_header(struct sk_buff *skb,
2013                                           struct sk_buff* skb1,
2014                                           const u32 len, const int pos)
2015{
2016        int i;
2017
2018        skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
2019                                         pos - len);
2020        /* And move data appendix as is. */
2021        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
2022                skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
2023
2024        skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
2025        skb_shinfo(skb)->nr_frags  = 0;
2026        skb1->data_len             = skb->data_len;
2027        skb1->len                  += skb1->data_len;
2028        skb->data_len              = 0;
2029        skb->len                   = len;
2030        skb_set_tail_pointer(skb, len);
2031}
2032
2033static inline void skb_split_no_header(struct sk_buff *skb,
2034                                       struct sk_buff* skb1,
2035                                       const u32 len, int pos)
2036{
2037        int i, k = 0;
2038        const int nfrags = skb_shinfo(skb)->nr_frags;
2039
2040        skb_shinfo(skb)->nr_frags = 0;
2041        skb1->len                 = skb1->data_len = skb->len - len;
2042        skb->len                  = len;
2043        skb->data_len             = len - pos;
2044
2045        for (i = 0; i < nfrags; i++) {
2046                int size = skb_shinfo(skb)->frags[i].size;
2047
2048                if (pos + size > len) {
2049                        skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
2050
2051                        if (pos < len) {
2052                                /* Split frag.
2053                                 * Two variants are possible here:
2054                                 * 1. Move the whole frag to the second
2055                                 *    part when possible; e.g. this is
2056                                 *    mandatory for TUX, where splitting
2057                                 *    is expensive.
2058                                 * 2. Split accurately at @len, as done here.
2059                                 */
2060                                get_page(skb_shinfo(skb)->frags[i].page);
2061                                skb_shinfo(skb1)->frags[0].page_offset += len - pos;
2062                                skb_shinfo(skb1)->frags[0].size -= len - pos;
2063                                skb_shinfo(skb)->frags[i].size  = len - pos;
2064                                skb_shinfo(skb)->nr_frags++;
2065                        }
2066                        k++;
2067                } else
2068                        skb_shinfo(skb)->nr_frags++;
2069                pos += size;
2070        }
2071        skb_shinfo(skb1)->nr_frags = k;
2072}
2073
2074/**
2075 * skb_split - Split fragmented skb to two parts at length len.
2076 * @skb: the buffer to split
2077 * @skb1: the buffer to receive the second part
2078 * @len: new length for skb
2079 */
2080void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
2081{
2082        int pos = skb_headlen(skb);
2083
2084        if (len < pos)  /* Split line is inside header. */
2085                skb_split_inside_header(skb, skb1, len, pos);
2086        else            /* Second chunk has no header, nothing to copy. */
2087                skb_split_no_header(skb, skb1, len, pos);
2088}
2089EXPORT_SYMBOL(skb_split);
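
/* Editor's sketch: carving the first @len bytes off @skb, leaving the
 * remainder in a freshly allocated buffer, roughly how TCP fragments an
 * over-sized segment.  The new skb is sized to skb_headlen() so there is
 * linear room even if the split point falls inside the header; the
 * helper name is hypothetical and error handling is minimal.
 */
static struct sk_buff *example_carve(struct sk_buff *skb, u32 len)
{
        struct sk_buff *rest = alloc_skb(skb_headlen(skb), GFP_ATOMIC);

        if (!rest)
                return NULL;
        skb_split(skb, rest, len);      /* skb keeps [0, len), rest the tail */
        return rest;
}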
2090
2091/* Shifting from/to a cloned skb is a no-go.
2092 *
2093 * Caller cannot keep skb_shinfo related pointers past calling here!
2094 */
2095static int skb_prepare_for_shift(struct sk_buff *skb)
2096{
2097        return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2098}
2099
2100/**
2101 * skb_shift - Shifts paged data partially from skb to another
2102 * @tgt: buffer into which tail data gets added
2103 * @skb: buffer from which the paged data comes from
2104 * @shiftlen: shift up to this many bytes
2105 *
2106 * Attempts to shift up to @shiftlen worth of bytes, which may be less than
2107 * the length of the skb, from @skb to @tgt. Returns the number of bytes
2108 * shifted. It is up to the caller to free @skb if everything was shifted.
2109 *
2110 * If @tgt runs out of frags, the whole operation is aborted.
2111 *
2112 * @skb may contain nothing but paged data, while @tgt is allowed to
2113 * contain non-paged (linear) data as well.
2114 *
2115 * TODO: a full-sized shift could be optimized, but that would need a
2116 * specialized skb freer to handle frags without up-to-date nr_frags.
2117 */
2118int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
2119{
2120        int from, to, merge, todo;
2121        struct skb_frag_struct *fragfrom, *fragto;
2122
2123        BUG_ON(shiftlen > skb->len);
2124        BUG_ON(skb_headlen(skb));       /* Would corrupt stream */
2125
2126        todo = shiftlen;
2127        from = 0;
2128        to = skb_shinfo(tgt)->nr_frags;
2129        fragfrom = &skb_shinfo(skb)->frags[from];
2130
2131        /* Actual merge is delayed until the point when we know we can
2132         * commit all, so that we don't have to undo partial changes
2133         */
2134        if (!to ||
2135            !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) {
2136                merge = -1;
2137        } else {
2138                merge = to - 1;
2139
2140                todo -= fragfrom->size;
2141                if (todo < 0) {
2142                        if (skb_prepare_for_shift(skb) ||
2143                            skb_prepare_for_shift(tgt))
2144                                return 0;
2145
2146                        /* All previous frag pointers might be stale! */
2147                        fragfrom = &skb_shinfo(skb)->frags[from];
2148                        fragto = &skb_shinfo(tgt)->frags[merge];
2149
2150                        fragto->size += shiftlen;
2151                        fragfrom->size -= shiftlen;
2152                        fragfrom->page_offset += shiftlen;
2153
2154                        goto onlymerged;
2155                }
2156
2157                from++;
2158        }
2159
2160        /* Skip full, not-fitting skb to avoid expensive operations */
2161        if ((shiftlen == skb->len) &&
2162            (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
2163                return 0;
2164
2165        if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
2166                return 0;
2167
2168        while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
2169                if (to == MAX_SKB_FRAGS)
2170                        return 0;
2171
2172                fragfrom = &skb_shinfo(skb)->frags[from];
2173                fragto = &skb_shinfo(tgt)->frags[to];
2174
2175                if (todo >= fragfrom->size) {
2176                        *fragto = *fragfrom;
2177                        todo -= fragfrom->size;
2178                        from++;
2179                        to++;
2180
2181                } else {
2182                        get_page(fragfrom->page);
2183                        fragto->page = fragfrom->page;
2184                        fragto->page_offset = fragfrom->page_offset;
2185                        fragto->size = todo;
2186
2187                        fragfrom->page_offset += todo;
2188                        fragfrom->size -= todo;
2189                        todo = 0;
2190
2191                        to++;
2192                        break;
2193                }
2194        }
2195
2196        /* Ready to "commit" this state change to tgt */
2197        skb_shinfo(tgt)->nr_frags = to;
2198
2199        if (merge >= 0) {
2200                fragfrom = &skb_shinfo(skb)->frags[0];
2201                fragto = &skb_shinfo(tgt)->frags[merge];
2202
2203                fragto->size += fragfrom->size;
2204                put_page(fragfrom->page);
2205        }
2206
2207        /* Reposition in the original skb */
2208        to = 0;
2209        while (from < skb_shinfo(skb)->nr_frags)
2210                skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
2211        skb_shinfo(skb)->nr_frags = to;
2212
2213        BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
2214
2215onlymerged:
2216        /* Most likely the tgt won't ever need its checksum anymore, skb on
2217         * the other hand might need it if it needs to be resent
2218         */
2219        tgt->ip_summed = CHECKSUM_PARTIAL;
2220        skb->ip_summed = CHECKSUM_PARTIAL;
2221
2222        /* Yak, is it really working this way? Some helper please? */
2223        skb->len -= shiftlen;
2224        skb->data_len -= shiftlen;
2225        skb->truesize -= shiftlen;
2226        tgt->len += shiftlen;
2227        tgt->data_len += shiftlen;
2228        tgt->truesize += shiftlen;
2229
2230        return shiftlen;
2231}
2232
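/* Editor's sketch: coalescing paged data from @skb into @prev, in the
 * style of the TCP retransmit-queue collapsing code.  @skb must carry
 * paged data only (skb_headlen() == 0); the helper name is hypothetical.
 * Returns true when @skb was fully drained and may be freed by the
 * caller.
 */
static bool example_coalesce(struct sk_buff *prev, struct sk_buff *skb)
{
        int len = skb->len;     /* skb_shift() updates skb->len, save it */

        return skb_shift(prev, skb, len) == len;
}
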
2233/**
2234 * skb_prepare_seq_read - Prepare a sequential read of skb data
2235 * @skb: the buffer to read
2236 * @from: lower offset of data to be read
2237 * @to: upper offset of data to be read
2238 * @st: state variable
2239 *
2240 * Initializes the specified state variable. Must be called before
2241 * invoking skb_seq_read() for the first time.
2242 */
2243void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
2244                          unsigned int to, struct skb_seq_state *st)
2245{
2246        st->lower_offset = from;
2247        st->upper_offset = to;
2248        st->root_skb = st->cur_skb = skb;
2249        st->frag_idx = st->stepped_offset = 0;
2250        st->frag_data = NULL;
2251}
2252EXPORT_SYMBOL(skb_prepare_seq_read);
2253
2254/**
2255 * skb_seq_read - Sequentially read skb data
2256 * @consumed: number of bytes consumed by the caller so far
2257 * @data: destination pointer for data to be returned
2258 * @st: state variable
2259 *
2260 * Reads a block of skb data at @consumed relative to the
2261 * lower offset specified to skb_prepare_seq_read(). Assigns
2262 * the head of the data block to @data and returns the length
2263 * of the block or 0 if the end of the skb data or the upper
2264 * offset has been reached.
2265 *
2266 * The caller is not required to consume all of the data
2267 * returned, i.e. @consumed is typically set to the number
2268 * of bytes already consumed and the next call to
2269 * skb_seq_read() will return the remaining part of the block.
2270 *
2271 * Note 1: The size of each block of data returned can be arbitrary;
2272 *       this is the cost of zero-copy sequential reads of potentially
2273 *       non-linear data.
2274 *
2275 * Note 2: Fragment lists within fragments are not implemented
2276 *       at the moment, state->root_skb could be replaced with
2277 *       a stack for this purpose.
2278 */
2279unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
2280                          struct skb_seq_state *st)
2281{
2282        unsigned int block_limit, abs_offset = consumed + st->lower_offset;
2283        skb_frag_t *frag;
2284
2285        if (unlikely(abs_offset >= st->upper_offset))
2286                return 0;
2287
2288next_skb:
2289        block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
2290
2291        if (abs_offset < block_limit && !st->frag_data) {
2292                *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
2293                return block_limit - abs_offset;
2294        }
2295
2296        if (st->frag_idx == 0 && !st->frag_data)
2297                st->stepped_offset += skb_headlen(st->cur_skb);
2298
2299        while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
2300                frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
2301                block_limit = frag->size + st->stepped_offset;
2302
2303                if (abs_offset < block_limit) {
2304                        if (!st->frag_data)
2305                                st->frag_data = kmap_skb_frag(frag);
2306
2307                        *data = (u8 *) st->frag_data + frag->page_offset +
2308                                (abs_offset - st->stepped_offset);
2309
2310                        return block_limit - abs_offset;
2311                }
2312
2313                if (st->frag_data) {
2314                        kunmap_skb_frag(st->frag_data);
2315                        st->frag_data = NULL;
2316                }
2317
2318                st->frag_idx++;
2319                st->stepped_offset += frag->size;
2320        }
2321
2322        if (st->frag_data) {
2323                kunmap_skb_frag(st->frag_data);
2324                st->frag_data = NULL;
2325        }
2326
2327        if (st->root_skb == st->cur_skb && skb_has_frags(st->root_skb)) {
2328                st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
2329                st->frag_idx = 0;
2330                goto next_skb;
2331        } else if (st->cur_skb->next) {
2332                st->cur_skb = st->cur_skb->next;
2333                st->frag_idx = 0;
2334                goto next_skb;
2335        }
2336
2337        return 0;
2338}
2339EXPORT_SYMBOL(skb_seq_read);
2340
2341/**
2342 * skb_abort_seq_read - Abort a sequential read of skb data
2343 * @st: state variable
2344 *
2345 * Must be called if the read is abandoned before skb_seq_read()
2346 * has returned 0, so that any mapped fragment is unmapped.
2347 */
2348void skb_abort_seq_read(struct skb_seq_state *st)
2349{
2350        if (st->frag_data)
2351                kunmap_skb_frag(st->frag_data);
2352}
2353EXPORT_SYMBOL(skb_abort_seq_read);
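
/* Editor's sketch: walking every byte of a possibly non-linear skb with
 * the zero-copy sequential reader.  Blocks of arbitrary size are returned
 * until the upper offset is reached; because the loop runs until
 * skb_seq_read() returns 0, no skb_abort_seq_read() is required.  The
 * scan callback and helper name are hypothetical.
 */
static void example_scan(struct sk_buff *skb,
                         void (*scan)(const u8 *data, unsigned int len))
{
        struct skb_seq_state st;
        const u8 *data;
        unsigned int consumed = 0, len;

        skb_prepare_seq_read(skb, 0, skb->len, &st);
        while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
                scan(data, len);
                consumed += len;
        }
}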
2354
2355#define TS_SKB_CB(state)        ((struct skb_seq_state *) &((state)->cb))
2356
2357static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
2358                                          struct ts_config *conf,
2359                                          struct ts_state *state)
2360{
2361        return skb_seq_read(offset, text, TS_SKB_CB(state));
2362}
2363
2364static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
2365{
2366        skb_abort_seq_read(TS_SKB_CB(state));
2367}
2368
2369/**
2370 * skb_find_text - Find a text pattern in skb data
2371 * @skb: the buffer to look in
2372 * @from: search offset
2373 * @to: search limit
2374 * @config: textsearch configuration
2375 * @state: uninitialized textsearch state variable
2376 *
2377 * Finds a pattern in the skb data according to the specified
2378 * textsearch configuration. Use textsearch_next() to retrieve
2379 * subsequent occurrences of the pattern. Returns the offset
2380 * to the first occurrence or UINT_MAX if no match was found.
2381 */
2382unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
2383                           unsigned int to, struct ts_config *config,
2384                           struct ts_state *state)
2385{
2386        unsigned int ret;
2387
2388        config->get_next_block = skb_ts_get_next_block;
2389        config->finish = skb_ts_finish;
2390
2391        skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
2392
2393        ret = textsearch_find(config, state);
2394        return (ret <= to - from ? ret : UINT_MAX);
2395}
2396EXPORT_SYMBOL(skb_find_text);
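
/* Editor's sketch: searching the whole skb for a pattern.  "kmp" and
 * TS_AUTOLOAD come from the textsearch API (<linux/textsearch.h>); the
 * helper name is hypothetical.  Returns the match offset or UINT_MAX.
 */
static unsigned int example_find(struct sk_buff *skb, const char *pattern)
{
        struct ts_config *conf;
        struct ts_state state;
        unsigned int pos;

        conf = textsearch_prepare("kmp", pattern, strlen(pattern),
                                  GFP_KERNEL, TS_AUTOLOAD);
        if (IS_ERR(conf))
                return UINT_MAX;

        pos = skb_find_text(skb, 0, skb->len, conf, &state);
        textsearch_destroy(conf);
        return pos;
}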
2397
2398/**
2399 * skb_append_datato_frags - append user data to an skb
2400 * @sk: sock structure
2401 * @skb: skb structure to be appended with user data
2402 * @getfrag: callback function used to fetch the user data
2403 * @from: pointer to the user message iov
2404 * @length: length of the iov message
2405 *
2406 * Description: This procedure appends user data to the fragment part
2407 * of the skb. If any page allocation fails, -ENOMEM is returned.
2408 */
2409int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
2410                        int (*getfrag)(void *from, char *to, int offset,
2411                                        int len, int odd, struct sk_buff *skb),
2412                        void *from, int length)
2413{
2414        int frg_cnt = 0;
2415        skb_frag_t *frag = NULL;
2416        struct page *page = NULL;
2417        int copy, left;
2418        int offset = 0;
2419        int ret;
2420
2421        do {
2422                /* Return error if we don't have space for new frag */
2423                frg_cnt = skb_shinfo(skb)->nr_frags;
2424                if (frg_cnt >= MAX_SKB_FRAGS)
2425                        return -EFAULT;
2426
2427                /* allocate a new page for next frag */
2428                page = alloc_pages(sk->sk_allocation, 0);
2429
2430                /* If alloc_page fails just return failure and caller will
2431                 * free previous allocated pages by doing kfree_skb()
2432                 */
2433                if (page == NULL)
2434                        return -ENOMEM;
2435
2436                /* initialize the next frag */
2437                sk->sk_sndmsg_page = page;
2438                sk->sk_sndmsg_off = 0;
2439                skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
2440                skb->truesize += PAGE_SIZE;
2441                atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
2442
2443                /* get the new initialized frag */
2444                frg_cnt = skb_shinfo(skb)->nr_frags;
2445                frag = &skb_shinfo(skb)->frags[frg_cnt - 1];
2446
2447                /* copy the user data to page */
2448                left = PAGE_SIZE - frag->page_offset;
2449                copy = (length > left)? left : length;
2450
2451                ret = getfrag(from, (page_address(frag->page) +
2452                            frag->page_offset + frag->size),
2453                            offset, copy, 0, skb);
2454                if (ret < 0)
2455                        return -EFAULT;
2456
2457                /* copy was successful so update the size parameters */
2458                sk->sk_sndmsg_off += copy;
2459                frag->size += copy;
2460                skb->len += copy;
2461                skb->data_len += copy;
2462                offset += copy;
2463                length -= copy;
2464
2465        } while (length > 0);
2466
2467        return 0;
2468}
2469EXPORT_SYMBOL(skb_append_datato_frags);
2470
2471/**
2472 *      skb_pull_rcsum - pull skb and update receive checksum
2473 *      @skb: buffer to update
2474 *      @len: length of data pulled
2475 *
2476 *      This function performs an skb_pull on the packet and updates
2477 *      the CHECKSUM_COMPLETE checksum.  It should be used on
2478 *      receive path processing instead of skb_pull unless you know
2479 *      that the checksum difference is zero (e.g., a valid IP header)
2480 *      or you are setting ip_summed to CHECKSUM_NONE.
2481 */
2482unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
2483{
2484        BUG_ON(len > skb->len);
2485        skb->len -= len;
2486        BUG_ON(skb->len < skb->data_len);
2487        skb_postpull_rcsum(skb, skb->data, len);
2488        return skb->data += len;
2489}
2490
2491EXPORT_SYMBOL_GPL(skb_pull_rcsum);
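
/* Editor's sketch: stripping a hypothetical 4-byte tag header on the
 * receive path while keeping a CHECKSUM_COMPLETE value consistent.
 * pskb_may_pull() first makes sure the bytes are in the linear area.
 */
static int example_strip_tag(struct sk_buff *skb)
{
        if (!pskb_may_pull(skb, 4))
                return -EINVAL;         /* runt frame */
        skb_pull_rcsum(skb, 4);
        return 0;
}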
2492
2493/**
2494 *      skb_segment - Perform protocol segmentation on skb.
2495 *      @skb: buffer to segment
2496 *      @features: features for the output path (see dev->features)
2497 *
2498 *      This function performs segmentation on the given skb.  It returns
2499 *      a pointer to the first in a list of new skbs for the segments.
2500 *      In case of error it returns ERR_PTR(err).
2501 */
2502struct sk_buff *skb_segment(struct sk_buff *skb, int features)
2503{
2504        struct sk_buff *segs = NULL;
2505        struct sk_buff *tail = NULL;
2506        struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
2507        unsigned int mss = skb_shinfo(skb)->gso_size;
2508        unsigned int doffset = skb->data - skb_mac_header(skb);
2509        unsigned int offset = doffset;
2510        unsigned int headroom;
2511        unsigned int len;
2512        int sg = features & NETIF_F_SG;
2513        int nfrags = skb_shinfo(skb)->nr_frags;
2514        int err = -ENOMEM;
2515        int i = 0;
2516        int pos;
2517
2518        __skb_push(skb, doffset);
2519        headroom = skb_headroom(skb);
2520        pos = skb_headlen(skb);
2521
2522        do {
2523                struct sk_buff *nskb;
2524                skb_frag_t *frag;
2525                int hsize;
2526                int size;
2527
2528                len = skb->len - offset;
2529                if (len > mss)
2530                        len = mss;
2531
2532                hsize = skb_headlen(skb) - offset;
2533                if (hsize < 0)
2534                        hsize = 0;
2535                if (hsize > len || !sg)
2536                        hsize = len;
2537
2538                if (!hsize && i >= nfrags) {
2539                        BUG_ON(fskb->len != len);
2540
2541                        pos += len;
2542                        nskb = skb_clone(fskb, GFP_ATOMIC);
2543                        fskb = fskb->next;
2544
2545                        if (unlikely(!nskb))
2546                                goto err;
2547
2548                        hsize = skb_end_pointer(nskb) - nskb->head;
2549                        if (skb_cow_head(nskb, doffset + headroom)) {
2550                                kfree_skb(nskb);
2551                                goto err;
2552                        }
2553
2554                        nskb->truesize += skb_end_pointer(nskb) - nskb->head -
2555                                          hsize;
2556                        skb_release_head_state(nskb);
2557                        __skb_push(nskb, doffset);
2558                } else {
2559                        nskb = alloc_skb(hsize + doffset + headroom,
2560                                         GFP_ATOMIC);
2561
2562                        if (unlikely(!nskb))
2563                                goto err;
2564
2565                        skb_reserve(nskb, headroom);
2566                        __skb_put(nskb, doffset);
2567                }
2568
2569                if (segs)
2570                        tail->next = nskb;
2571                else
2572                        segs = nskb;
2573                tail = nskb;
2574
2575                __copy_skb_header(nskb, skb);
2576                nskb->mac_len = skb->mac_len;
2577
2578                skb_reset_mac_header(nskb);
2579                skb_set_network_header(nskb, skb->mac_len);
2580                nskb->transport_header = (nskb->network_header +
2581                                          skb_network_header_len(skb));
2582                skb_copy_from_linear_data(skb, nskb->data, doffset);
2583
2584                if (fskb != skb_shinfo(skb)->frag_list)
2585                        continue;
2586
2587                if (!sg) {
2588                        nskb->ip_summed = CHECKSUM_NONE;
2589                        nskb->csum = skb_copy_and_csum_bits(skb, offset,
2590                                                            skb_put(nskb, len),
2591                                                            len, 0);
2592                        continue;
2593                }
2594
2595                frag = skb_shinfo(nskb)->frags;
2596
2597                skb_copy_from_linear_data_offset(skb, offset,
2598                                                 skb_put(nskb, hsize), hsize);
2599
2600                while (pos < offset + len && i < nfrags) {
2601                        *frag = skb_shinfo(skb)->frags[i];
2602                        get_page(frag->page);
2603                        size = frag->size;
2604
2605                        if (pos < offset) {
2606                                frag->page_offset += offset - pos;
2607                                frag->size -= offset - pos;
2608                        }
2609
2610                        skb_shinfo(nskb)->nr_frags++;
2611
2612                        if (pos + size <= offset + len) {
2613                                i++;
2614                                pos += size;
2615                        } else {
2616                                frag->size -= pos + size - (offset + len);
2617                                goto skip_fraglist;
2618                        }
2619
2620                        frag++;
2621                }
2622
2623                if (pos < offset + len) {
2624                        struct sk_buff *fskb2 = fskb;
2625
2626                        BUG_ON(pos + fskb->len != offset + len);
2627
2628                        pos += fskb->len;
2629                        fskb = fskb->next;
2630
2631                        if (fskb2->next) {
2632                                fskb2 = skb_clone(fskb2, GFP_ATOMIC);
2633                                if (!fskb2)
2634                                        goto err;
2635                        } else
2636                                skb_get(fskb2);
2637
2638                        SKB_FRAG_ASSERT(nskb);
2639                        skb_shinfo(nskb)->frag_list = fskb2;
2640                }
2641
2642skip_fraglist:
2643                nskb->data_len = len - hsize;
2644                nskb->len += nskb->data_len;
2645                nskb->truesize += nskb->data_len;
2646        } while ((offset += len) < skb->len);
2647
2648        return segs;
2649
2650err:
2651        while ((skb = segs)) {
2652                segs = skb->next;
2653                kfree_skb(skb);
2654        }
2655        return ERR_PTR(err);
2656}
2657EXPORT_SYMBOL_GPL(skb_segment);
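
/* Editor's sketch: software-segmenting a GSO skb and transmitting the
 * resulting list one segment at a time, in the spirit of the dev_gso
 * path.  skb_segment() does not consume the original skb, so it is freed
 * here once segmentation succeeds; the segments hold their own page and
 * fraglist references.  The xmit callback and helper name are
 * hypothetical.
 */
static int example_segment_xmit(struct sk_buff *skb, int features,
                                int (*xmit)(struct sk_buff *))
{
        struct sk_buff *segs = skb_segment(skb, features);

        if (IS_ERR(segs))
                return PTR_ERR(segs);

        kfree_skb(skb);
        while (segs) {
                struct sk_buff *nskb = segs;

                segs = segs->next;
                nskb->next = NULL;
                xmit(nskb);
        }
        return 0;
}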
2658
2659int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2660{
2661        struct sk_buff *p = *head;
2662        struct sk_buff *nskb;
2663        struct skb_shared_info *skbinfo = skb_shinfo(skb);
2664        struct skb_shared_info *pinfo = skb_shinfo(p);
2665        unsigned int headroom;
2666        unsigned int len = skb_gro_len(skb);
2667        unsigned int offset = skb_gro_offset(skb);
2668        unsigned int headlen = skb_headlen(skb);
2669
2670        if (p->len + len >= 65536)
2671                return -E2BIG;
2672
2673        if (pinfo->frag_list)
2674                goto merge;
2675        else if (headlen <= offset) {
2676                skb_frag_t *frag;
2677                skb_frag_t *frag2;
2678                int i = skbinfo->nr_frags;
2679                int nr_frags = pinfo->nr_frags + i;
2680
2681                offset -= headlen;
2682
2683                if (nr_frags > MAX_SKB_FRAGS)
2684                        return -E2BIG;
2685
2686                pinfo->nr_frags = nr_frags;
2687                skbinfo->nr_frags = 0;
2688
2689                frag = pinfo->frags + nr_frags;
2690                frag2 = skbinfo->frags + i;
2691                do {
2692                        *--frag = *--frag2;
2693                } while (--i);
2694
2695                frag->page_offset += offset;
2696                frag->size -= offset;
2697
2698                skb->truesize -= skb->data_len;
2699                skb->len -= skb->data_len;
2700                skb->data_len = 0;
2701
2702                NAPI_GRO_CB(skb)->free = 1;
2703                goto done;
2704        } else if (skb_gro_len(p) != pinfo->gso_size)
2705                return -E2BIG;
2706
2707        headroom = skb_headroom(p);
2708        nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p));
2709        if (unlikely(!nskb))
2710                return -ENOMEM;
2711
2712        __copy_skb_header(nskb, p);
2713        nskb->mac_len = p->mac_len;
2714
2715        skb_reserve(nskb, headroom);
2716        __skb_put(nskb, skb_gro_offset(p));
2717
2718        skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
2719        skb_set_network_header(nskb, skb_network_offset(p));
2720        skb_set_transport_header(nskb, skb_transport_offset(p));
2721
2722        __skb_pull(p, skb_gro_offset(p));
2723        memcpy(skb_mac_header(nskb), skb_mac_header(p),
2724               p->data - skb_mac_header(p));
2725
2726        *NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
2727        skb_shinfo(nskb)->frag_list = p;
2728        skb_shinfo(nskb)->gso_size = pinfo->gso_size;
2729        skb_header_release(p);
2730        nskb->prev = p;
2731
2732        nskb->data_len += p->len;
2733        nskb->truesize += p->len;
2734        nskb->len += p->len;
2735
2736        *head = nskb;
2737        nskb->next = p->next;
2738        p->next = NULL;
2739
2740        p = nskb;
2741
2742merge:
2743        if (offset > headlen) {
2744                skbinfo->frags[0].page_offset += offset - headlen;
2745                skbinfo->frags[0].size -= offset - headlen;
2746                offset = headlen;
2747        }
2748
2749        __skb_pull(skb, offset);
2750
2751        p->prev->next = skb;
2752        p->prev = skb;
2753        skb_header_release(skb);
2754
2755done:
2756        NAPI_GRO_CB(p)->count++;
2757        p->data_len += len;
2758        p->truesize += len;
2759        p->len += len;
2760
2761        NAPI_GRO_CB(skb)->same_flow = 1;
2762        return 0;
2763}
2764EXPORT_SYMBOL_GPL(skb_gro_receive);
2765
2766void __init skb_init(void)
2767{
2768        skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
2769                                              sizeof(struct sk_buff),
2770                                              0,
2771                                              SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2772                                              NULL);
2773        skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
2774                                                (2*sizeof(struct sk_buff)) +
2775                                                sizeof(atomic_t),
2776                                                0,
2777                                                SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2778                                                NULL);
2779}
2780
2781/**
2782 *      skb_to_sgvec - Fill a scatter-gather list from a socket buffer
2783 *      @skb: Socket buffer containing the buffers to be mapped
2784 *      @sg: The scatter-gather list to map into
2785 *      @offset: The offset into the buffer's contents to start mapping
2786 *      @len: Length of buffer space to be mapped
2787 *
2788 *      Fill the specified scatter-gather list with mappings/pointers into a
2789 *      region of the buffer space attached to a socket buffer.
2790 */
2791static int
2792__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2793{
2794        int start = skb_headlen(skb);
2795        int i, copy = start - offset;
2796        struct sk_buff *frag_iter;
2797        int elt = 0;
2798
2799        if (copy > 0) {
2800                if (copy > len)
2801                        copy = len;
2802                sg_set_buf(sg, skb->data + offset, copy);
2803                elt++;
2804                if ((len -= copy) == 0)
2805                        return elt;
2806                offset += copy;
2807        }
2808
2809        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2810                int end;
2811
2812                WARN_ON(start > offset + len);
2813
2814                end = start + skb_shinfo(skb)->frags[i].size;
2815                if ((copy = end - offset) > 0) {
2816                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2817
2818                        if (copy > len)
2819                                copy = len;
2820                        sg_set_page(&sg[elt], frag->page, copy,
2821                                        frag->page_offset+offset-start);
2822                        elt++;
2823                        if (!(len -= copy))
2824                                return elt;
2825                        offset += copy;
2826                }
2827                start = end;
2828        }
2829
2830        skb_walk_frags(skb, frag_iter) {
2831                int end;
2832
2833                WARN_ON(start > offset + len);
2834
2835                end = start + frag_iter->len;
2836                if ((copy = end - offset) > 0) {
2837                        if (copy > len)
2838                                copy = len;
2839                        elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
2840                                              copy);
2841                        if ((len -= copy) == 0)
2842                                return elt;
2843                        offset += copy;
2844                }
2845                start = end;
2846        }
2847        BUG_ON(len);
2848        return elt;
2849}
2850
2851int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2852{
2853        int nsg = __skb_to_sgvec(skb, sg, offset, len);
2854
2855        sg_mark_end(&sg[nsg - 1]);
2856
2857        return nsg;
2858}
2859EXPORT_SYMBOL_GPL(skb_to_sgvec);
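
/* Editor's sketch: mapping an entire skb into a scatterlist, e.g. before
 * handing it to a crypto transform.  MAX_SKB_FRAGS + 1 entries cover the
 * linear part plus every fragment of a non-fraglist skb; the helper name
 * is hypothetical.
 */
static int example_map_to_sg(struct sk_buff *skb, struct scatterlist *sg,
                             int nents)
{
        sg_init_table(sg, nents);
        return skb_to_sgvec(skb, sg, 0, skb->len);
}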
2860
2861/**
2862 *      skb_cow_data - Check that a socket buffer's data buffers are writable
2863 *      @skb: The socket buffer to check.
2864 *      @tailbits: Amount of trailing space to be added
2865 *      @trailer: Returned pointer to the skb where the @tailbits space begins
2866 *
2867 *      Make sure that the data buffers attached to a socket buffer are
2868 *      writable. If they are not, private copies are made of the data buffers
2869 *      and the socket buffer is set to use these instead.
2870 *
2871 *      If @tailbits is given, make sure that there is space to write @tailbits
2872 *      bytes of data beyond current end of socket buffer.  @trailer will be
2873 *      set to point to the skb in which this space begins.
2874 *
2875 *      The number of scatterlist elements required to completely map the
2876 *      COW'd and extended socket buffer will be returned.
2877 */
2878int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
2879{
2880        int copyflag;
2881        int elt;
2882        struct sk_buff *skb1, **skb_p;
2883
2884        /* If skb is cloned or its head is paged, reallocate
2885         * head pulling out all the pages (pages are considered not writable
2886         * at the moment even if they are anonymous).
2887         */
2888        if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
2889            __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
2890                return -ENOMEM;
2891
2892        /* Easy case. Most of packets will go this way. */
2893        if (!skb_has_frags(skb)) {
2894                /* A bit of trouble: not enough space for the trailer.
2895                 * This should not happen when the stack is tuned to generate
2896                 * good frames. On a miss we reallocate and reserve even more
2897                 * space; 128 bytes is fair. */
2898
2899                if (skb_tailroom(skb) < tailbits &&
2900                    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
2901                        return -ENOMEM;
2902
2903                /* Voila! */
2904                *trailer = skb;
2905                return 1;
2906        }
2907
2908        /* Misery. We are in trouble; time to mince the fragments... */
2909
2910        elt = 1;
2911        skb_p = &skb_shinfo(skb)->frag_list;
2912        copyflag = 0;
2913
2914        while ((skb1 = *skb_p) != NULL) {
2915                int ntail = 0;
2916
2917                /* The fragment is partially pulled by someone;
2918                 * this can happen on input. Copy it and everything
2919                 * after it. */
2920
2921                if (skb_shared(skb1))
2922                        copyflag = 1;
2923
2924                /* If the skb is the last, worry about trailer. */
2925
2926                if (skb1->next == NULL && tailbits) {
2927                        if (skb_shinfo(skb1)->nr_frags ||
2928                            skb_has_frags(skb1) ||
2929                            skb_tailroom(skb1) < tailbits)
2930                                ntail = tailbits + 128;
2931                }
2932
2933                if (copyflag ||
2934                    skb_cloned(skb1) ||
2935                    ntail ||
2936                    skb_shinfo(skb1)->nr_frags ||
2937                    skb_has_frags(skb1)) {
2938                        struct sk_buff *skb2;
2939
2940                        /* Bad luck, we are stuck with copying... */
2941                        if (ntail == 0)
2942                                skb2 = skb_copy(skb1, GFP_ATOMIC);
2943                        else
2944                                skb2 = skb_copy_expand(skb1,
2945                                                       skb_headroom(skb1),
2946                                                       ntail,
2947                                                       GFP_ATOMIC);
2948                        if (unlikely(skb2 == NULL))
2949                                return -ENOMEM;
2950
2951                        if (skb1->sk)
2952                                skb_set_owner_w(skb2, skb1->sk);
2953
2954                        /* Looking around. Are we still alive?
2955                         * OK, link the new skb, drop the old one */
2956
2957                        skb2->next = skb1->next;
2958                        *skb_p = skb2;
2959                        kfree_skb(skb1);
2960                        skb1 = skb2;
2961                }
2962                elt++;
2963                *trailer = skb1;
2964                skb_p = &skb1->next;
2965        }
2966
2967        return elt;
2968}
2969EXPORT_SYMBOL_GPL(skb_cow_data);
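
/* Editor's sketch: the IPsec-style pattern of making an skb fully
 * writable and guaranteeing @tailbits bytes of trailer room before
 * in-place crypto.  On success the return value is the number of
 * scatterlist entries needed to map the buffer; names hypothetical.
 */
static int example_prepare_inplace(struct sk_buff *skb, int tailbits,
                                   struct sk_buff **trailer)
{
        int nsg = skb_cow_data(skb, tailbits, trailer);

        if (nsg < 0)
                return nsg;             /* allocation failure */
        /* *trailer now has at least @tailbits bytes of tailroom */
        return nsg;
}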
2970
2971void skb_tstamp_tx(struct sk_buff *orig_skb,
2972                struct skb_shared_hwtstamps *hwtstamps)
2973{
2974        struct sock *sk = orig_skb->sk;
2975        struct sock_exterr_skb *serr;
2976        struct sk_buff *skb;
2977        int err;
2978
2979        if (!sk)
2980                return;
2981
2982        skb = skb_clone(orig_skb, GFP_ATOMIC);
2983        if (!skb)
2984                return;
2985
2986        if (hwtstamps) {
2987                *skb_hwtstamps(skb) =
2988                        *hwtstamps;
2989        } else {
2990                /*
2991                 * no hardware time stamps available,
2992                 * so keep the skb_shared_tx and only
2993                 * store software time stamp
2994                 */
2995                skb->tstamp = ktime_get_real();
2996        }
2997
2998        serr = SKB_EXT_ERR(skb);
2999        memset(serr, 0, sizeof(*serr));
3000        serr->ee.ee_errno = ENOMSG;
3001        serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
3002        err = sock_queue_err_skb(sk, skb);
3003        if (err)
3004                kfree_skb(skb);
3005}
3006EXPORT_SYMBOL_GPL(skb_tstamp_tx);
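
/* Editor's sketch: a driver completing a hardware TX timestamp request
 * from its completion interrupt.  The raw nanosecond value is
 * hypothetical; passing NULL instead would fall back to a software
 * timestamp.  skb_tstamp_tx() clones the skb, so the driver still owns
 * and frees @skb as usual.
 */
static void example_tx_tstamp_complete(struct sk_buff *skb, u64 hw_ns)
{
        struct skb_shared_hwtstamps hwts;

        memset(&hwts, 0, sizeof(hwts));
        hwts.hwtstamp = ns_to_ktime(hw_ns);
        skb_tstamp_tx(skb, &hwts);
}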
3007
3008
3009/**
3010 * skb_partial_csum_set - set up and verify partial csum values for packet
3011 * @skb: the skb to set
3012 * @start: the number of bytes after skb->data to start checksumming.
3013 * @off: the offset from start to place the checksum.
3014 *
3015 * For untrusted partially-checksummed packets, we need to make sure the values
3016 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
3017 *
3018 * This function checks and sets those values and skb->ip_summed: if this
3019 * returns false you should drop the packet.
3020 */
3021bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
3022{
3023        if (unlikely(start > skb_headlen(skb)) ||
3024            unlikely((int)start + off > skb_headlen(skb) - 2)) {
3025                if (net_ratelimit())
3026                        printk(KERN_WARNING
3027                               "bad partial csum: csum=%u/%u len=%u\n",
3028                               start, off, skb_headlen(skb));
3029                return false;
3030        }
3031        skb->ip_summed = CHECKSUM_PARTIAL;
3032        skb->csum_start = skb_headroom(skb) + start;
3033        skb->csum_offset = off;
3034        return true;
3035}
3036EXPORT_SYMBOL_GPL(skb_partial_csum_set);
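
/* Editor's sketch: validating checksum metadata from an untrusted
 * source, as virtio_net does for guest-supplied headers.  The offsets
 * come from a (hypothetical) device header; on failure the caller should
 * drop the packet.
 */
static int example_apply_csum_hints(struct sk_buff *skb, u16 start, u16 off)
{
        if (!skb_partial_csum_set(skb, start, off))
                return -EINVAL;
        return 0;
}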
3037
3038void __skb_warn_lro_forwarding(const struct sk_buff *skb)
3039{
3040        if (net_ratelimit())
3041                pr_warning("%s: received packets cannot be forwarded"
3042                           " while LRO is enabled\n", skb->dev->name);
3043}
3044EXPORT_SYMBOL(__skb_warn_lro_forwarding);
3045