/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <trace/events/skb.h>

#include "kmap_skb.h"

static struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	put_page(buf->page);
}

static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	get_page(buf->page);
}

static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	return 1;
}

/* Pipe buffer operations for a socket. */
static const struct pipe_buf_operations sock_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = sock_pipe_buf_release,
	.steal = sock_pipe_buf_steal,
	.get = sock_pipe_buf_get,
};
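
/**
 *	skb_over_panic	- 	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_put(). Not user callable.
 */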
static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}
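
/**
 *	skb_under_panic	- 	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_push(). Not user callable.
 */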
static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}
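
/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@fclone: allocate from fclone cache instead of head cache
 *		and allocate a cloned (child) skb
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of size bytes. The object has a reference count of one.
 *	The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */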
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int fclone, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;

	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	size = SKB_DATA_ALIGN(size);
	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
					 gfp_mask, node);
	if (!data)
		goto nodata;
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = size + sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->mac_header = ~0U;
#endif

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	if (fclone) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		kmemcheck_annotate_bitfield(child, flags1);
		kmemcheck_annotate_bitfield(child, flags2);
		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
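
/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has NET_SKB_PAD bytes of headroom built in, so users should
 *	allocate the headroom they think they need without accounting for
 *	the built in space.
 *
 *	%NULL is returned if there is no free memory.
 */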
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
				   unsigned int length, gfp_t gfp_mask)
{
	struct sk_buff *skb;

	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
}
EXPORT_SYMBOL(skb_add_rx_frag);
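
/**
 *	dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */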
struct sk_buff *dev_alloc_skb(unsigned int length)
{
	/*
	 * There is more code here than it seems:
	 * __dev_alloc_skb is an inline
	 */
	return __dev_alloc_skb(length, GFP_ATOMIC);
}
EXPORT_SYMBOL(dev_alloc_skb);
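
/*
 * A minimal RX-path usage sketch for the allocators above (illustrative
 * only, not code from this file; RX_BUF_LEN, pkt_len, rx_buf and dev are
 * hypothetical driver variables):
 *
 *	struct sk_buff *skb = dev_alloc_skb(RX_BUF_LEN + 2);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, 2);			// align the IP header
 *	memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */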

static void skb_drop_list(struct sk_buff **listp)
{
	struct sk_buff *list = *listp;

	*listp = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				put_page(skb_shinfo(skb)->frags[i].page);
		}

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);

		kfree(skb->head);
	}
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff *other;
	atomic_t *fclone_ref;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclone_ref = (atomic_t *) (skb + 2);
		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, skb);
		break;

	case SKB_FCLONE_CLONE:
		fclone_ref = (atomic_t *) (skb + 1);
		other = skb - 1;

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_UNAVAILABLE;

		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, other);
		break;
	}
}

static void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	skb_release_data(skb);
}
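
/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb
 */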
void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);
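
/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */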
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);
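
/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero.
 *	Functions identically to kfree_skb, but kfree_skb conveys that this
 *	is being dropped after a failure, and notes that.
 */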
void consume_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
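
/**
 *	skb_recycle_check - check if skb can be reused for receive
 *	@skb: buffer
 *	@skb_size: minimum receive buffer size
 *
 *	Checks that the skb passed in is not shared or cloned, and
 *	that it is linear and its head portion at least as large as
 *	skb_size so that it can be recycled as a receive buffer.
 *	If these conditions are met, this function does any necessary
 *	reference count dropping and cleans up the skbuff as if it
 *	just came from __alloc_skb().
 */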
bool skb_recycle_check(struct sk_buff *skb, int skb_size)
{
	struct skb_shared_info *shinfo;

	if (irqs_disabled())
		return false;

	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
		return false;

	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
	if (skb_end_pointer(skb) - skb->head < skb_size)
		return false;

	if (skb_shared(skb) || skb_cloned(skb))
		return false;

	skb_release_head_state(skb);

	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->data = skb->head + NET_SKB_PAD;
	skb_reset_tail_pointer(skb);

	return true;
}
EXPORT_SYMBOL(skb_recycle_check);

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp		= old->tstamp;
	new->dev		= old->dev;
	new->transport_header	= old->transport_header;
	new->network_header	= old->network_header;
	new->mac_header		= old->mac_header;
	skb_dst_copy(new, old);
	new->rxhash		= old->rxhash;
#ifdef CONFIG_XFRM
	new->sp			= secpath_get(old->sp);
#endif
	memcpy(new->cb, old->cb, sizeof(old->cb));
	new->csum		= old->csum;
	new->local_df		= old->local_df;
	new->pkt_type		= old->pkt_type;
	new->ip_summed		= old->ip_summed;
	skb_copy_queue_mapping(new, old);
	new->priority		= old->priority;
	new->deliver_no_wcard	= old->deliver_no_wcard;
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	new->ipvs_property	= old->ipvs_property;
#endif
	new->protocol		= old->protocol;
	new->mark		= old->mark;
	new->skb_iif		= old->skb_iif;
	__nf_copy(new, old);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
	new->nf_trace		= old->nf_trace;
#endif
#ifdef CONFIG_NET_SCHED
	new->tc_index		= old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
	new->tc_verd		= old->tc_verd;
#endif
#endif
	new->vlan_tci		= old->vlan_tci;

	skb_copy_secmark(new, old);
}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(data);
	C(truesize);
	atomic_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}
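
/**
 *	skb_morph	-	morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */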
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);
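
/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 */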
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n;

	n = skb + 1;
	if (skb->fclone == SKB_FCLONE_ORIG &&
	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
		atomic_t *fclone_ref = (atomic_t *) (n + 1);
		n->fclone = SKB_FCLONE_CLONE;
		atomic_inc(fclone_ref);
	} else {
		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		kmemcheck_annotate_bitfield(n, flags1);
		kmemcheck_annotate_bitfield(n, flags2);
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/*
	 *	Shift between the two data areas in bytes
	 */
	unsigned long offset = new->data - old->data;
#endif

	__copy_skb_header(new, old);

#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/* {transport,network,mac}_header are relative to skb->head */
	new->transport_header += offset;
	new->network_header   += offset;
	if (skb_mac_header_was_set(new))
		new->mac_header	      += offset;
#endif
	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}
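
/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As by-product this function converts non-linear &sk_buff to linear
 *	one, so that &sk_buff becomes completely private and caller is
 *	allowed to modify all the data of returned buffer. This means that
 *	this function is not recommended for use in circumstances when only
 *	header is going to be modified. Use pskb_copy() instead.
 */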
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len;
	struct sk_buff *n = alloc_skb(size, gfp_mask);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);
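
/**
 *	pskb_copy	-	create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only header of &sk_buff and needs
 *	private copy of the header to alter. Returns %NULL on failure
 *	or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */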
struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
{
	unsigned int size = skb_end_pointer(skb) - skb->head;
	struct sk_buff *n = alloc_skb(size, gfp_mask);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, skb_headroom(skb));
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len  = skb->data_len;
	n->len	     = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			get_page(skb_shinfo(n)->frags[i].page);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(pskb_copy);
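
/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if @nhead and @ntail are zero)
 *	header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero in the case of success or error,
 *	if expansion failed. In the last case, &sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */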
int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i;
	u8 *data;
	int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail;
	long off;
	bool fastpath;

	BUG_ON(nhead < 0);

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	/* Check if we can avoid taking references on fragments if we own
	 * the last reference on skb->head. (see skb_release_data())
	 */
	if (!skb->cloned)
		fastpath = true;
	else {
		int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;

		fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
	}

	if (fastpath &&
	    size + sizeof(struct skb_shared_info) <= ksize(skb->head)) {
		memmove(skb->head + size, skb_shinfo(skb),
			offsetof(struct skb_shared_info,
				 frags[skb_shinfo(skb)->nr_frags]));
		memmove(skb->head + nhead, skb->head,
			skb_tail_pointer(skb) - skb->head);
		off = nhead;
		goto adjust_others;
	}

	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (!data)
		goto nodata;

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	if (fastpath) {
		kfree(skb->head);
	} else {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			get_page(skb_shinfo(skb)->frags[i].page);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head = data;
adjust_others:
	skb->data += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
	off = nhead;
#else
	skb->end = skb->head + size;
#endif
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->tail	      += off;
	skb->transport_header += off;
	skb->network_header   += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += nhead;
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);

/* Make private copy of skb with writable head and some headroom */
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);
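
/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */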
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
				      gfp_mask);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;
	int off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			  skb->len + head_copy_len))
		BUG();

	copy_skb_header(n, skb);

	off = newheadroom - oldheadroom;
	if (n->ip_summed == CHECKSUM_PARTIAL)
		n->csum_start += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n->transport_header += off;
	n->network_header   += off;
	if (skb_mac_header_was_set(skb))
		n->mac_header += off;
#endif

	return n;
}
EXPORT_SYMBOL(skb_copy_expand);
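
/**
 *	skb_pad			-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error.
 */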
int skb_pad(struct sk_buff *skb, int pad)
{
	int err;
	int ntail;

	/* If the skbuff is non linear tailroom is always zero.. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data + skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(skb_pad);
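
/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */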
unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, __builtin_return_address(0));
	return tmp;
}
EXPORT_SYMBOL(skb_put);
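
/*
 * Pointer-arithmetic sketch for the data placement helpers (illustrative
 * only; len and hlen are hypothetical sizes):
 *
 *	skb = alloc_skb(hlen + len, GFP_KERNEL);
 *	skb_reserve(skb, hlen);		// open headroom: data == tail
 *	skb_put(skb, len);		// widen the data area at the tail
 *	skb_push(skb, hlen);		// prepend a header into the headroom
 *	skb_pull(skb, hlen);		// consume the header again
 *
 * skb_put() panics via skb_over_panic() if the new tail would pass skb->end,
 * and skb_push() panics via skb_under_panic() if data would drop below
 * skb->head.
 */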
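
/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */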
unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, __builtin_return_address(0));
	return skb->data;
}
EXPORT_SYMBOL(skb_push);
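
/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */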
unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);
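
/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */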
void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);

/* Trims skb to length len. It can change skb pointers.
 */
int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_shinfo(skb)->frags[i].size;

		if (end < len) {
			offset = end;
			continue;
		}

		skb_shinfo(skb)->frags[i++].size = len - offset;

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			put_page(skb_shinfo(skb)->frags[i].page);

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			kfree_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len = len;
	} else {
		skb->len = len;
		skb->data_len = 0;
		skb_set_tail_pointer(skb, len);
	}

	return 0;
}
EXPORT_SYMBOL(___pskb_trim);
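
/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes a sense only on a fragmented &sk_buff,
 *	it expands header moving its tail forward and copying necessary
 *	data from fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and &sk_buff does not change) if pull failed
 *	or value of new tail of skb in the case of success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */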
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If skb has not enough free space at tail, get new one
	 * plus 128 bytes for future expansions. If we have enough
	 * room at tail, reallocate without expansion only if skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
		BUG();

	/* Optimization: no fragments, no reasons to preestimate
	 * size of pulled pages. Superb.
	 */
	if (!skb_has_frag_list(skb))
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size >= eat)
			goto pull_pages;
		eat -= skb_shinfo(skb)->frags[i].size;
	}

	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to skb data,
	 * but taking into account that pulling is expected to
	 * be a very rare operation, it is worth fighting against
	 * further bloating of the skb head and doing the work here instead.
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			BUG_ON(!list);

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* We need to fork the list. */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail     += delta;
	skb->data_len -= delta;

	return skb_tail_pointer(skb);
}
EXPORT_SYMBOL(__pskb_pull_tail);

/* Copy some data bits from skb to kernel buffer. */

int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_from_linear_data_offset(skb, offset, to, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
			memcpy(to,
			       vaddr + skb_shinfo(skb)->frags[i].page_offset +
			       offset - start, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_bits(frag_iter, offset - start, to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_bits);

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	put_page(spd->pages[i]);
}

static inline struct page *linear_to_page(struct page *page, unsigned int *len,
					  unsigned int *offset,
					  struct sk_buff *skb, struct sock *sk)
{
	struct page *p = sk->sk_sndmsg_page;
	unsigned int off;

	if (!p) {
new_page:
		p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
		if (!p)
			return NULL;

		off = sk->sk_sndmsg_off = 0;
		/* hold one ref to this page until it's full */
	} else {
		unsigned int mlen;

		off = sk->sk_sndmsg_off;
		mlen = PAGE_SIZE - off;
		if (mlen < 64 && mlen < *len) {
			put_page(p);
			goto new_page;
		}

		*len = min_t(unsigned int, *len, mlen);
	}

	memcpy(page_address(p) + off, page_address(page) + *offset, *len);
	sk->sk_sndmsg_off += *len;
	*offset = off;
	get_page(p);

	return p;
}

/*
 * Fill page/offset/length into spd, if it can hold more pages.
 */
static inline int spd_fill_page(struct splice_pipe_desc *spd,
				struct pipe_inode_info *pipe, struct page *page,
				unsigned int *len, unsigned int offset,
				struct sk_buff *skb, int linear,
				struct sock *sk)
{
	if (unlikely(spd->nr_pages == pipe->buffers))
		return 1;

	if (linear) {
		page = linear_to_page(page, len, &offset, skb, sk);
		if (!page)
			return 1;
	} else
		get_page(page);

	spd->pages[spd->nr_pages] = page;
	spd->partial[spd->nr_pages].len = *len;
	spd->partial[spd->nr_pages].offset = offset;
	spd->nr_pages++;

	return 0;
}

static inline void __segment_seek(struct page **page, unsigned int *poff,
				  unsigned int *plen, unsigned int off)
{
	unsigned long n;

	*poff += off;
	n = *poff / PAGE_SIZE;
	if (n)
		*page = nth_page(*page, n);

	*poff = *poff % PAGE_SIZE;
	*plen -= off;
}

static inline int __splice_segment(struct page *page, unsigned int poff,
				   unsigned int plen, unsigned int *off,
				   unsigned int *len, struct sk_buff *skb,
				   struct splice_pipe_desc *spd, int linear,
				   struct sock *sk,
				   struct pipe_inode_info *pipe)
{
	if (!*len)
		return 1;

	/* skip this segment if already processed */
	if (*off >= plen) {
		*off -= plen;
		return 0;
	}

	/* ignore any bits we already processed */
	if (*off) {
		__segment_seek(&page, &poff, &plen, *off);
		*off = 0;
	}

	do {
		unsigned int flen = min(*len, plen);

		/* the linear region may spread across several pages */
		flen = min_t(unsigned int, flen, PAGE_SIZE - poff);

		if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
			return 1;

		__segment_seek(&page, &poff, &plen, flen);
		*len -= flen;

	} while (*len && plen);

	return 0;
}

/*
 * Map linear and fragment data from the skb to spd. It reports failure if the
 * pipe is full or if we already spliced the requested length.
 */
static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
			     unsigned int *offset, unsigned int *len,
			     struct splice_pipe_desc *spd, struct sock *sk)
{
	int seg;

	/*
	 * map the linear part
	 */
	if (__splice_segment(virt_to_page(skb->data),
			     (unsigned long) skb->data & (PAGE_SIZE - 1),
			     skb_headlen(skb),
			     offset, len, skb, spd, 1, sk, pipe))
		return 1;

	/*
	 * then map the fragments
	 */
	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];

		if (__splice_segment(f->page, f->page_offset, f->size,
				     offset, len, skb, spd, 0, sk, pipe))
			return 1;
	}

	return 0;
}

/*
 * Map data from the skb to a pipe. Should handle both the linear part,
 * the fragments, and the frag list. It does NOT handle frag lists within
 * the frag list, if such a thing exists. We'd probably need to recurse to
 * handle that cleanly.
 */
int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int tlen,
		    unsigned int flags)
{
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *pages[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &sock_pipe_buf_ops,
		.spd_release = sock_spd_release,
	};
	struct sk_buff *frag_iter;
	struct sock *sk = skb->sk;
	int ret = 0;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	/*
	 * __skb_splice_bits() only fails if the output has no room left,
	 * so no point in going over the frag_list for the error case.
	 */
	if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
		goto done;
	else if (!tlen)
		goto done;

	/*
	 * now see if we have a frag_list to map
	 */
	skb_walk_frags(skb, frag_iter) {
		if (!tlen)
			break;
		if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
			break;
	}

done:
	if (spd.nr_pages) {
		/*
		 * Drop the socket lock, otherwise we have reverse
		 * locking dependencies between sk_lock and i_mutex
		 * here as compared to sendfile(). We enter here
		 * with the socket lock held, and splice_to_pipe() will
		 * grab the pipe inode lock. For sendfile() emulation,
		 * we call into ->sendpage() with the i_mutex lock held
		 * and networking will grab the socket lock.
		 */
		release_sock(sk);
		ret = splice_to_pipe(pipe, &spd);
		lock_sock(sk);
	}

	splice_shrink_spd(pipe, &spd);
	return ret;
}
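
/**
 *	skb_store_bits - store bits from kernel buffer to skb
 *	@skb: destination buffer
 *	@offset: offset in destination
 *	@from: source buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source buffer to the
 *	destination skb.  This function handles all the messy bits of
 *	traversing fragment lists and such.
 */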
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_to_linear_data_offset(skb, offset, from, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int end;

		WARN_ON(start > offset + len);

		end = start + frag->size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(frag);
			memcpy(vaddr + frag->page_offset + offset - start,
			       from, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_store_bits(frag_iter, offset - start,
					   from, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_store_bits);

/* Checksum skb data. */

__wsum skb_checksum(const struct sk_buff *skb, int offset,
		    int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial(skb->data + offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial(vaddr + frag->page_offset +
					     offset - start, copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			if (copy > len)
				copy = len;
			csum2 = skb_checksum(frag_iter, offset - start,
					     copy, 0);
			csum = csum_block_add(csum, csum2, pos);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}
	BUG_ON(len);

	return csum;
}
EXPORT_SYMBOL(skb_checksum);

/* Both of above in one bottle. */

__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
			      u8 *to, int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial_copy_nocheck(skb->data + offset, to,
						 copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial_copy_nocheck(vaddr +
							  frag->page_offset +
							  offset - start, to,
							  copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		__wsum csum2;
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			csum2 = skb_copy_and_csum_bits(frag_iter,
						       offset - start,
						       to, copy, 0);
			csum = csum_block_add(csum, csum2, pos);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}
	BUG_ON(len);
	return csum;
}
EXPORT_SYMBOL(skb_copy_and_csum_bits);

void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
	__wsum csum;
	long csstart;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		csstart = skb_checksum_start_offset(skb);
	else
		csstart = skb_headlen(skb);

	BUG_ON(csstart > skb_headlen(skb));

	skb_copy_from_linear_data(skb, to, csstart);

	csum = 0;
	if (csstart != skb->len)
		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
					      skb->len - csstart, 0);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		long csstuff = csstart + skb->csum_offset;

		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
	}
}
EXPORT_SYMBOL(skb_copy_and_csum_dev);
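
/**
 *	skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The head item is
 *	returned or %NULL if the list is empty.
 */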
struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue);
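
/**
 *	skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The tail item is
 *	returned or %NULL if the list is empty.
 */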
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue_tail);
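
/**
 *	skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function takes the list
 *	lock and is atomic with respect to other list locking functions.
 */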
void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
EXPORT_SYMBOL(skb_queue_purge);
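
/**
 *	skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions
 *	safely.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */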
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_head);
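
/**
 *	skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the tail of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions
 *	safely.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */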
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_tail);
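
/**
 *	skb_unlink	-	remove a buffer from a list
 *	@skb: buffer to remove
 *	@list: list to use
 *
 *	Remove a packet from a list. The list locks are taken and this
 *	function is atomic with respect to other list locked calls.
 *
 *	You must know what list the SKB is on.
 */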
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_unlink);
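
/**
 *	skb_append	-	append a buffer
 *	@old: buffer to insert after
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet after a given packet in a list. The list locks are taken
 *	and this function is atomic with respect to other list locked calls.
 *	A buffer cannot be placed on two lists at the same time.
 */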
void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_after(list, old, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_append);
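
/**
 *	skb_insert	-	insert a buffer
 *	@old: buffer to insert before
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet before a given packet in a list. The list locks are
 *	taken and this function is atomic with respect to other list locked
 *	calls.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */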
void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_insert(newsk, old->prev, old, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_insert);
static inline void skb_split_inside_header(struct sk_buff *skb,
					   struct sk_buff *skb1,
					   const u32 len, const int pos)
{
	int i;

	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
					 pos - len);
	/* And move data appendix as is. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];

	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
	skb_shinfo(skb)->nr_frags = 0;
	skb1->data_len = skb->data_len;
	skb1->len += skb1->data_len;
	skb->data_len = 0;
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

static inline void skb_split_no_header(struct sk_buff *skb,
				       struct sk_buff *skb1,
				       const u32 len, int pos)
{
	int i, k = 0;
	const int nfrags = skb_shinfo(skb)->nr_frags;

	skb_shinfo(skb)->nr_frags = 0;
	skb1->len = skb1->data_len = skb->len - len;
	skb->len = len;
	skb->data_len = len - pos;

	for (i = 0; i < nfrags; i++) {
		int size = skb_shinfo(skb)->frags[i].size;

		if (pos + size > len) {
			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < len) {
				/* Split frag.
				 * We have two variants in this case:
				 * 1. Move all the frag to the second
				 *    part, if it is possible. F.e.
				 *    this approach is mandatory for TUX,
				 *    where splitting is expensive.
				 * 2. Split is accurately. We make this.
				 */
				get_page(skb_shinfo(skb)->frags[i].page);
				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
				skb_shinfo(skb1)->frags[0].size -= len - pos;
				skb_shinfo(skb)->frags[i].size = len - pos;
				skb_shinfo(skb)->nr_frags++;
			}
			k++;
		} else
			skb_shinfo(skb)->nr_frags++;
		pos += size;
	}
	skb_shinfo(skb1)->nr_frags = k;
}
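
/**
 * skb_split - Split fragmented skb to two parts at length len.
 * @skb: the buffer to split
 * @skb1: the buffer to receive the second part
 * @len: new length for skb
 */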
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
	int pos = skb_headlen(skb);

	if (len < pos)	/* Split line is inside header. */
		skb_split_inside_header(skb, skb1, len, pos);
	else		/* Second chunk has no header, nothing to copy. */
		skb_split_no_header(skb, skb1, len, pos);
}
EXPORT_SYMBOL(skb_split);

/* Shifting from/to a cloned skb is a no-go.
 *
 * Caller cannot keep skb_shinfo related pointers past calling here!
 */
static int skb_prepare_for_shift(struct sk_buff *skb)
{
	return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
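
/**
 * skb_shift - Shifts paged data partially from skb to another
 * @tgt: buffer into which tail data gets added
 * @skb: buffer from which the paged data comes from
 * @shiftlen: shift up to this many bytes
 *
 * Attempts to shift up to shiftlen worth of bytes, which may be less than
 * the length of the skb, from skb to tgt. Returns number bytes shifted.
 * It's up to caller to free skb if everything was shifted.
 *
 * If @tgt runs out of frags, the whole operation is aborted.
 *
 * Skb cannot include anything else but paged data while tgt is allowed
 * to have non-paged data as well.
 */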
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
{
	int from, to, merge, todo;
	struct skb_frag_struct *fragfrom, *fragto;

	BUG_ON(shiftlen > skb->len);
	BUG_ON(skb_headlen(skb));	/* Would corrupt stream */

	todo = shiftlen;
	from = 0;
	to = skb_shinfo(tgt)->nr_frags;
	fragfrom = &skb_shinfo(skb)->frags[from];

	/* Actual merge is delayed until the point when we know we can
	 * commit all, so that we don't have to undo partial changes
	 */
	if (!to ||
	    !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) {
		merge = -1;
	} else {
		merge = to - 1;

		todo -= fragfrom->size;
		if (todo < 0) {
			if (skb_prepare_for_shift(skb) ||
			    skb_prepare_for_shift(tgt))
				return 0;

			/* All previous frag pointers might be stale! */
			fragfrom = &skb_shinfo(skb)->frags[from];
			fragto = &skb_shinfo(tgt)->frags[merge];

			fragto->size += shiftlen;
			fragfrom->size -= shiftlen;
			fragfrom->page_offset += shiftlen;

			goto onlymerged;
		}

		from++;
	}

	/* Skip full, not-fitting skb to avoid expensive operations */
	if ((shiftlen == skb->len) &&
	    (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
		return 0;

	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
		return 0;

	while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
		if (to == MAX_SKB_FRAGS)
			return 0;

		fragfrom = &skb_shinfo(skb)->frags[from];
		fragto = &skb_shinfo(tgt)->frags[to];

		if (todo >= fragfrom->size) {
			*fragto = *fragfrom;
			todo -= fragfrom->size;
			from++;
			to++;

		} else {
			get_page(fragfrom->page);
			fragto->page = fragfrom->page;
			fragto->page_offset = fragfrom->page_offset;
			fragto->size = todo;

			fragfrom->page_offset += todo;
			fragfrom->size -= todo;
			todo = 0;

			to++;
			break;
		}
	}

	/* Ready to "commit" this state change to tgt */
	skb_shinfo(tgt)->nr_frags = to;

	if (merge >= 0) {
		fragfrom = &skb_shinfo(skb)->frags[0];
		fragto = &skb_shinfo(tgt)->frags[merge];

		fragto->size += fragfrom->size;
		put_page(fragfrom->page);
	}

	/* Reposition in the original skb */
	to = 0;
	while (from < skb_shinfo(skb)->nr_frags)
		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
	skb_shinfo(skb)->nr_frags = to;

	BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);

onlymerged:
	/* Most likely the tgt won't ever need its checksum anymore, skb in
	 * question is pretty likely will need recalculation.
	 */
	tgt->ip_summed = CHECKSUM_PARTIAL;
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb->len -= shiftlen;
	skb->data_len -= shiftlen;
	skb->truesize -= shiftlen;
	tgt->len += shiftlen;
	tgt->data_len += shiftlen;
	tgt->truesize += shiftlen;

	return shiftlen;
}
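
/**
 * skb_prepare_seq_read - Prepare a sequential read of skb data
 * @skb: the buffer to read
 * @from: lower offset within skb
 * @to: upper offset within skb
 * @st: sequential read state
 *
 * Initializes the specified state variable. Must be called before
 * invoking skb_seq_read() for the first time.
 */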
void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st)
{
	st->lower_offset = from;
	st->upper_offset = to;
	st->root_skb = st->cur_skb = skb;
	st->frag_idx = st->stepped_offset = 0;
	st->frag_data = NULL;
}
EXPORT_SYMBOL(skb_prepare_seq_read);
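
/**
 * skb_seq_read - Sequentially read skb data
 * @consumed: number of bytes consumed by the caller so far
 * @data: destination pointer for data to be returned
 * @st: state variable
 *
 * Reads a block of skb data at @consumed relative to the
 * lower offset specified to skb_prepare_seq_read(). Assigns
 * the head of the data block to @data and returns the length
 * of the block or 0 if the end of the skb data or the upper
 * offset has been reached.
 *
 * The caller is not required to consume all of the data
 * returned, i.e. @consumed is typically set to the number
 * of bytes already consumed and the next call to
 * skb_seq_read() will return the remaining part of the block.
 */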
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st)
{
	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
	skb_frag_t *frag;

	if (unlikely(abs_offset >= st->upper_offset))
		return 0;

next_skb:
	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;

	if (abs_offset < block_limit && !st->frag_data) {
		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
		return block_limit - abs_offset;
	}

	if (st->frag_idx == 0 && !st->frag_data)
		st->stepped_offset += skb_headlen(st->cur_skb);

	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
		block_limit = frag->size + st->stepped_offset;

		if (abs_offset < block_limit) {
			if (!st->frag_data)
				st->frag_data = kmap_skb_frag(frag);

			*data = (u8 *) st->frag_data + frag->page_offset +
				(abs_offset - st->stepped_offset);

			return block_limit - abs_offset;
		}

		if (st->frag_data) {
			kunmap_skb_frag(st->frag_data);
			st->frag_data = NULL;
		}

		st->frag_idx++;
		st->stepped_offset += frag->size;
	}

	if (st->frag_data) {
		kunmap_skb_frag(st->frag_data);
		st->frag_data = NULL;
	}

	if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
		st->frag_idx = 0;
		goto next_skb;
	} else if (st->cur_skb->next) {
		st->cur_skb = st->cur_skb->next;
		st->frag_idx = 0;
		goto next_skb;
	}

	return 0;
}
EXPORT_SYMBOL(skb_seq_read);
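
/**
 * skb_abort_seq_read - Abort a sequential read of skb data
 * @st: state variable
 *
 * Must be called if skb_seq_read() was not called until it
 * returned 0.
 */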
void skb_abort_seq_read(struct skb_seq_state *st)
{
	if (st->frag_data)
		kunmap_skb_frag(st->frag_data);
}
EXPORT_SYMBOL(skb_abort_seq_read);

#define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))

static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
					  struct ts_config *conf,
					  struct ts_state *state)
{
	return skb_seq_read(offset, text, TS_SKB_CB(state));
}

static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
{
	skb_abort_seq_read(TS_SKB_CB(state));
}
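
/**
 * skb_find_text - Find a text pattern in skb data
 * @skb: the buffer to look in
 * @from: search offset
 * @to: search limit
 * @config: textsearch configuration
 * @state: uninitialized textsearch state variable
 *
 * Finds a pattern in the skb data according to the specified
 * textsearch configuration. Use textsearch_next() to retrieve
 * subsequent occurrences of the pattern. Returns the offset
 * to the first occurrence or UINT_MAX if no match was found.
 */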
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config,
			   struct ts_state *state)
{
	unsigned int ret;

	config->get_next_block = skb_ts_get_next_block;
	config->finish = skb_ts_finish;

	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));

	ret = textsearch_find(config, state);
	return (ret <= to - from ? ret : UINT_MAX);
}
EXPORT_SYMBOL(skb_find_text);
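
/**
 *	skb_append_datato_frags - append the user data to a skb
 *	@sk: sock structure
 *	@skb: skb structure to be appended with user data.
 *	@getfrag: call back function to be used for getting the user data
 *	@from: pointer to user message iov
 *	@length: length of the iov message
 *
 *	Description: This procedure appends the user data in the fragment part
 *	of the skb. If any page alloc fails this procedure returns -ENOMEM.
 */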
int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int (*getfrag)(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			void *from, int length)
{
	int frg_cnt = 0;
	skb_frag_t *frag = NULL;
	struct page *page = NULL;
	int copy, left;
	int offset = 0;
	int ret;

	do {
		/* Return error if we don't have space for new frag */
		frg_cnt = skb_shinfo(skb)->nr_frags;
		if (frg_cnt >= MAX_SKB_FRAGS)
			return -EFAULT;

		/* allocate a new page for next frag */
		page = alloc_pages(sk->sk_allocation, 0);

		/* If alloc_page fails just return failure and caller will
		 * free previous allocated pages by doing kfree_skb()
		 */
		if (page == NULL)
			return -ENOMEM;

		/* initialize the next frag */
		sk->sk_sndmsg_page = page;
		sk->sk_sndmsg_off = 0;
		skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
		skb->truesize += PAGE_SIZE;
		atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);

		/* get the new initialized frag */
		frg_cnt = skb_shinfo(skb)->nr_frags;
		frag = &skb_shinfo(skb)->frags[frg_cnt - 1];

		/* copy the user data to page */
		left = PAGE_SIZE - frag->page_offset;
		copy = (length > left) ? left : length;

		ret = getfrag(from, (page_address(frag->page) +
			    frag->page_offset + frag->size),
			    offset, copy, 0, skb);
		if (ret < 0)
			return -EFAULT;

		/* copy was successful so update the size parameters */
		sk->sk_sndmsg_off += copy;
		frag->size += copy;
		skb->len += copy;
		skb->data_len += copy;
		offset += copy;
		length -= copy;

	} while (length > 0);

	return 0;
}
EXPORT_SYMBOL(skb_append_datato_frags);
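
/**
 *	skb_pull_rcsum - pull skb and update receive checksum
 *	@skb: buffer to update
 *	@len: length of data pulled
 *
 *	This function performs an skb_pull on the packet and updates
 *	the CHECKSUM_COMPLETE checksum.  It should be used on
 *	receive path processing instead of skb_pull unless you know
 *	that the checksum difference is zero (e.g., a valid IP header)
 *	or you are setting ip_summed to CHECKSUM_NONE.
 */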
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
{
	BUG_ON(len > skb->len);
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	skb_postpull_rcsum(skb, skb->data, len);
	return skb->data += len;
}
EXPORT_SYMBOL_GPL(skb_pull_rcsum);
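
/**
 *	skb_segment - Perform protocol segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function performs segmentation on the given skb.  It returns
 *	a pointer to the first in a list of new skbs for the segments.
 *	In case of error it returns ERR_PTR(err).
 */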
struct sk_buff *skb_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = NULL;
	struct sk_buff *tail = NULL;
	struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int doffset = skb->data - skb_mac_header(skb);
	unsigned int offset = doffset;
	unsigned int headroom;
	unsigned int len;
	int sg = features & NETIF_F_SG;
	int nfrags = skb_shinfo(skb)->nr_frags;
	int err = -ENOMEM;
	int i = 0;
	int pos;

	__skb_push(skb, doffset);
	headroom = skb_headroom(skb);
	pos = skb_headlen(skb);

	do {
		struct sk_buff *nskb;
		skb_frag_t *frag;
		int hsize;
		int size;

		len = skb->len - offset;
		if (len > mss)
			len = mss;

		hsize = skb_headlen(skb) - offset;
		if (hsize < 0)
			hsize = 0;
		if (hsize > len || !sg)
			hsize = len;

		if (!hsize && i >= nfrags) {
			BUG_ON(fskb->len != len);

			pos += len;
			nskb = skb_clone(fskb, GFP_ATOMIC);
			fskb = fskb->next;

			if (unlikely(!nskb))
				goto err;

			hsize = skb_end_pointer(nskb) - nskb->head;
			if (skb_cow_head(nskb, doffset + headroom)) {
				kfree_skb(nskb);
				goto err;
			}

			nskb->truesize += skb_end_pointer(nskb) - nskb->head -
					  hsize;
			skb_release_head_state(nskb);
			__skb_push(nskb, doffset);
		} else {
			nskb = alloc_skb(hsize + doffset + headroom,
					 GFP_ATOMIC);

			if (unlikely(!nskb))
				goto err;

			skb_reserve(nskb, headroom);
			__skb_put(nskb, doffset);
		}

		if (segs)
			tail->next = nskb;
		else
			segs = nskb;
		tail = nskb;

		__copy_skb_header(nskb, skb);
		nskb->mac_len = skb->mac_len;

		/* nskb and skb might have different headroom */
		if (nskb->ip_summed == CHECKSUM_PARTIAL)
			nskb->csum_start += skb_headroom(nskb) - headroom;

		skb_reset_mac_header(nskb);
		skb_set_network_header(nskb, skb->mac_len);
		nskb->transport_header = (nskb->network_header +
					  skb_network_header_len(skb));
		skb_copy_from_linear_data(skb, nskb->data, doffset);

		if (fskb != skb_shinfo(skb)->frag_list)
			continue;

		if (!sg) {
			nskb->ip_summed = CHECKSUM_NONE;
			nskb->csum = skb_copy_and_csum_bits(skb, offset,
							    skb_put(nskb, len),
							    len, 0);
			continue;
		}

		frag = skb_shinfo(nskb)->frags;

		skb_copy_from_linear_data_offset(skb, offset,
						 skb_put(nskb, hsize), hsize);

		while (pos < offset + len && i < nfrags) {
			*frag = skb_shinfo(skb)->frags[i];
			get_page(frag->page);
			size = frag->size;

			if (pos < offset) {
				frag->page_offset += offset - pos;
				frag->size -= offset - pos;
			}

			skb_shinfo(nskb)->nr_frags++;

			if (pos + size <= offset + len) {
				i++;
				pos += size;
			} else {
				frag->size -= pos + size - (offset + len);
				goto skip_fraglist;
			}

			frag++;
		}

		if (pos < offset + len) {
			struct sk_buff *fskb2 = fskb;

			BUG_ON(pos + fskb->len != offset + len);

			pos += fskb->len;
			fskb = fskb->next;

			if (fskb2->next) {
				fskb2 = skb_clone(fskb2, GFP_ATOMIC);
				if (!fskb2)
					goto err;
			} else
				skb_get(fskb2);

			SKB_FRAG_ASSERT(nskb);
			skb_shinfo(nskb)->frag_list = fskb2;
		}

skip_fraglist:
		nskb->data_len = len - hsize;
		nskb->len += nskb->data_len;
		nskb->truesize += nskb->data_len;
	} while ((offset += len) < skb->len);

	return segs;

err:
	while ((skb = segs)) {
		segs = skb->next;
		kfree_skb(skb);
	}
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skb_segment);

int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff *p = *head;
	struct sk_buff *nskb;
	struct skb_shared_info *skbinfo = skb_shinfo(skb);
	struct skb_shared_info *pinfo = skb_shinfo(p);
	unsigned int headroom;
	unsigned int len = skb_gro_len(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);

	if (p->len + len >= 65536)
		return -E2BIG;

	if (pinfo->frag_list)
		goto merge;
	else if (headlen <= offset) {
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		offset -= headlen;

		if (nr_frags > MAX_SKB_FRAGS)
			return -E2BIG;

		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		frag->page_offset += offset;
		frag->size -= offset;

		skb->truesize -= skb->data_len;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = 1;
		goto done;
	} else if (skb_gro_len(p) != pinfo->gso_size)
		return -E2BIG;

	headroom = skb_headroom(p);
	nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
	if (unlikely(!nskb))
		return -ENOMEM;

	__copy_skb_header(nskb, p);
	nskb->mac_len = p->mac_len;

	skb_reserve(nskb, headroom);
	__skb_put(nskb, skb_gro_offset(p));

	skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
	skb_set_network_header(nskb, skb_network_offset(p));
	skb_set_transport_header(nskb, skb_transport_offset(p));

	__skb_pull(p, skb_gro_offset(p));
	memcpy(skb_mac_header(nskb), skb_mac_header(p),
	       p->data - skb_mac_header(p));

	*NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
	skb_shinfo(nskb)->frag_list = p;
	skb_shinfo(nskb)->gso_size = pinfo->gso_size;
	pinfo->gso_size = 0;
	skb_header_release(p);
	nskb->prev = p;

	nskb->data_len += p->len;
	nskb->truesize += p->len;
	nskb->len += p->len;

	*head = nskb;
	nskb->next = p->next;
	p->next = NULL;

	p = nskb;

merge:
	if (offset > headlen) {
		unsigned int eat = offset - headlen;

		skbinfo->frags[0].page_offset += eat;
		skbinfo->frags[0].size -= eat;
		skb->data_len -= eat;
		skb->len -= eat;
		offset = headlen;
	}

	__skb_pull(skb, offset);

	p->prev->next = skb;
	p->prev = skb;
	skb_header_release(skb);

done:
	NAPI_GRO_CB(p)->count++;
	p->data_len += len;
	p->truesize += len;
	p->len += len;

	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}
EXPORT_SYMBOL_GPL(skb_gro_receive);

void __init skb_init(void)
{
	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
					      sizeof(struct sk_buff),
					      0,
					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					      NULL);
	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
						(2*sizeof(struct sk_buff)) +
						sizeof(atomic_t),
						0,
						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
						NULL);
}
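
/**
 *	skb_to_sgvec - Fill a scatter-gather list from a socket buffer
 *	@skb: Socket buffer containing the buffers to be mapped
 *	@sg: The scatter-gather list to map into
 *	@offset: The offset into the buffer's contents to start mapping
 *	@len: Length of buffer space to be mapped
 *
 *	Fill the specified scatter-gather list with mappings/pointers into a
 *	region of the buffer space attached to a socket buffer.
 */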
static int
__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (copy > 0) {
		if (copy > len)
			copy = len;
		sg_set_buf(sg, skb->data + offset, copy);
		elt++;
		if ((len -= copy) == 0)
			return elt;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			sg_set_page(&sg[elt], frag->page, copy,
				    frag->page_offset + offset - start);
			elt++;
			if (!(len -= copy))
				return elt;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
					      copy);
			if ((len -= copy) == 0)
				return elt;
			offset += copy;
		}
		start = end;
	}
	BUG_ON(len);
	return elt;
}

int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int nsg = __skb_to_sgvec(skb, sg, offset, len);

	sg_mark_end(&sg[nsg - 1]);

	return nsg;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);
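
/**
 *	skb_cow_data - Check that a socket buffer's data buffers are writable
 *	@skb: The socket buffer to check.
 *	@tailbits: Amount of trailing space to be added
 *	@trailer: Returned pointer to the skb where the @tailbits space begins
 *
 *	Make sure that the data buffers attached to a socket buffer are
 *	writable. If they are not, private copies are made of the data buffers
 *	and the socket buffer is set to use these instead.
 *
 *	If @tailbits is given, make sure that there is space to write @tailbits
 *	bytes of data beyond current end of socket buffer.  @trailer will be
 *	set to point to the skb in which this space begins.
 *
 *	The number of scatterlist elements required to completely map the
 *	COW'd and extended socket buffer will be returned.
 */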
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
	int copyflag;
	int elt;
	struct sk_buff *skb1, **skb_p;

	/* If skb is cloned or its head is paged, reallocate
	 * head pulling out all the pages (pages are considered not writable
	 * at the moment even if they are anonymous).
	 */
	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
		return -ENOMEM;

	/* Easy case. Most of packets will go this way. */
	if (!skb_has_frag_list(skb)) {
		/* A little of trouble, not enough of space for trailer.
		 * This should not happen, when stack is tuned to generate
		 * good frames. OK, on miss we reallocate and reserve even more
		 * space, 128 bytes is fair. */

		if (skb_tailroom(skb) < tailbits &&
		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
			return -ENOMEM;

		/* Voila! */
		*trailer = skb;
		return 1;
	}

	/* Misery. We are in troubles, going to mincer fragments... */

	elt = 1;
	skb_p = &skb_shinfo(skb)->frag_list;
	copyflag = 0;

	while ((skb1 = *skb_p) != NULL) {
		int ntail = 0;

		/* The fragment is partially pulled by someone,
		 * this can happen on input. Copy it and everything
		 * after it. */

		if (skb_shared(skb1))
			copyflag = 1;

		/* If the skb is the last, worry about trailer. */

		if (skb1->next == NULL && tailbits) {
			if (skb_shinfo(skb1)->nr_frags ||
			    skb_has_frag_list(skb1) ||
			    skb_tailroom(skb1) < tailbits)
				ntail = tailbits + 128;
		}

		if (copyflag ||
		    skb_cloned(skb1) ||
		    ntail ||
		    skb_shinfo(skb1)->nr_frags ||
		    skb_has_frag_list(skb1)) {
			struct sk_buff *skb2;

			/* A copy is needed; make one. */
			if (ntail == 0)
				skb2 = skb_copy(skb1, GFP_ATOMIC);
			else
				skb2 = skb_copy_expand(skb1,
						       skb_headroom(skb1),
						       ntail,
						       GFP_ATOMIC);
			if (unlikely(skb2 == NULL))
				return -ENOMEM;

			if (skb1->sk)
				skb_set_owner_w(skb2, skb1->sk);

			/* Looking around. Are we still alive?
			 * OK, link new skb, drop old one */

			skb2->next = skb1->next;
			*skb_p = skb2;
			kfree_skb(skb1);
			skb1 = skb2;
		}
		elt++;
		*trailer = skb1;
		skb_p = &skb1->next;
	}

	return elt;
}
EXPORT_SYMBOL_GPL(skb_cow_data);

static void sock_rmem_free(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}

/*
 * Note: We dont mem charge error packets (no sk_forward_alloc changes)
 */
int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		return -ENOMEM;

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_rmem_free;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);

	skb_queue_tail(&sk->sk_error_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_err_skb);

void skb_tstamp_tx(struct sk_buff *orig_skb,
		   struct skb_shared_hwtstamps *hwtstamps)
{
	struct sock *sk = orig_skb->sk;
	struct sock_exterr_skb *serr;
	struct sk_buff *skb;
	int err;

	if (!sk)
		return;

	skb = skb_clone(orig_skb, GFP_ATOMIC);
	if (!skb)
		return;

	if (hwtstamps) {
		*skb_hwtstamps(skb) =
			*hwtstamps;
	} else {
		/*
		 * no hardware time stamps available,
		 * so keep the shared tx_flags and only
		 * store software time stamp
		 */
		skb->tstamp = ktime_get_real();
	}

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = ENOMSG;
	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;

	err = sock_queue_err_skb(sk, skb);

	if (err)
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_tstamp_tx);
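
/**
 * skb_partial_csum_set - set up and verify partial csum values for packet
 * @skb: the skb to set
 * @start: the number of bytes after skb->data to start checksumming.
 * @off: the offset from start to place the checksum.
 *
 * For untrusted partially-checksummed packets, we need to make sure the values
 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
 *
 * This function checks and sets those values and skb->ip_summed: if this
 * returns false you should drop the packet.
 */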
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
{
	if (unlikely(start > skb_headlen(skb)) ||
	    unlikely((int)start + off > skb_headlen(skb) - 2)) {
		if (net_ratelimit())
			printk(KERN_WARNING
			       "bad partial csum: csum=%u/%u len=%u\n",
			       start, off, skb_headlen(skb));
		return false;
	}
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = skb_headroom(skb) + start;
	skb->csum_offset = off;
	return true;
}
EXPORT_SYMBOL_GPL(skb_partial_csum_set);

void __skb_warn_lro_forwarding(const struct sk_buff *skb)
{
	if (net_ratelimit())
		pr_warning("%s: received packets cannot be forwarded"
			   " while LRO is enabled\n", skb->dev->name);
}
EXPORT_SYMBOL(__skb_warn_lro_forwarding);