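/*
 *	Routines having to do with the 'struct sk_buff' memory handlers:
 *	allocation, freeing, cloning, copying, trimming, pulling, splicing
 *	and checksumming of socket buffers.
 */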
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>

struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;
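
/**
 *	skb_panic - private function for out-of-line support
 *	@skb:	buffer
 *	@sz:	size
 *	@addr:	address
 *	@msg:	skb_over_panic or skb_under_panic
 *
 *	Out-of-line support for skb_put() and skb_push().
 *	Called via the wrapper skb_over_panic() or skb_under_panic().
 *	Keep out of line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always reliable.
 */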
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}
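
/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used to handle the packet.
 */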
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
	__kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)

static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
			       unsigned long ip, bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/*
	 * Try a regular allocation, if that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}
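
/* Allocate a new skbuff head with no data area attached. We do this
 * ourselves so we can fill in a few 'private' fields and also do memory
 * statistics to find all the [BEEP] leaks.
 */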
struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
{
	struct sk_buff *skb;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(skbuff_head_cache,
				    gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->head = NULL;
	skb->truesize = sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);

	skb->mac_header = (typeof(skb->mac_header))~0U;
out:
	return skb;
}
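
/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */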
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;
	bool pfmemalloc;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	skb->pfmemalloc = pfmemalloc;
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		kmemcheck_annotate_bitfield(child, flags1);
		kmemcheck_annotate_bitfield(child, flags2);
		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
		child->pfmemalloc = pfmemalloc;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
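
/**
 * build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of fragment, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator.
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 */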
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	unsigned int size = frag_size ? : ksize(data);

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (!skb)
		return NULL;

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = SKB_TRUESIZE(size);
	skb->head_frag = frag_size != 0;
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	return skb;
}
EXPORT_SYMBOL(build_skb);

struct netdev_alloc_cache {
	struct page_frag frag;
	/* we maintain a pagecount bias, so that we dont dirty cache line
	 * containing page->_count every time we allocate a fragment.
	 */
	unsigned int pagecnt_bias;
};
static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);

static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
	struct netdev_alloc_cache *nc;
	void *data = NULL;
	int order;
	unsigned long flags;

	local_irq_save(flags);
	nc = &__get_cpu_var(netdev_alloc_cache);
	if (unlikely(!nc->frag.page)) {
refill:
		for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
			gfp_t gfp = gfp_mask;

			if (order)
				gfp |= __GFP_COMP | __GFP_NOWARN;
			nc->frag.page = alloc_pages(gfp, order);
			if (likely(nc->frag.page))
				break;
			if (--order < 0)
				goto end;
		}
		nc->frag.size = PAGE_SIZE << order;
recycle:
		atomic_set(&nc->frag.page->_count, NETDEV_PAGECNT_MAX_BIAS);
		nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
		nc->frag.offset = 0;
	}

	if (nc->frag.offset + fragsz > nc->frag.size) {
		/* avoid unnecessary locked operations if possible */
		if ((atomic_read(&nc->frag.page->_count) == nc->pagecnt_bias) ||
		    atomic_sub_and_test(nc->pagecnt_bias, &nc->frag.page->_count))
			goto recycle;
		goto refill;
	}

	data = page_address(nc->frag.page) + nc->frag.offset;
	nc->frag.offset += fragsz;
	nc->pagecnt_bias--;
end:
	local_irq_restore(flags);
	return data;
}
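
/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a frag from a page for receive buffer.
 * Uses GFP_ATOMIC allocations.
 */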
void *netdev_alloc_frag(unsigned int fragsz)
{
	return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
}
EXPORT_SYMBOL(netdev_alloc_frag);
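
/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has NET_SKB_PAD headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */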
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
				   unsigned int length, gfp_t gfp_mask)
{
	struct sk_buff *skb = NULL;
	unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
		void *data;

		if (sk_memalloc_socks())
			gfp_mask |= __GFP_MEMALLOC;

		data = __netdev_alloc_frag(fragsz, gfp_mask);

		if (likely(data)) {
			skb = build_skb(data, fragsz);
			if (unlikely(!skb))
				put_page(virt_to_head_page(data));
		}
	} else {
		skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask,
				  SKB_ALLOC_RX, NUMA_NO_NODE);
	}
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	if (skb->head_frag)
		put_page(virt_to_head_page(skb->head));
	else
		kfree(skb->head);
}

static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				skb_frag_unref(skb, i);
		}

		/*
		 * If skb buf is from userspace, we need to notify the caller
		 * the lower device DMA has done;
		 */
		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
			struct ubuf_info *uarg;

			uarg = skb_shinfo(skb)->destructor_arg;
			if (uarg->callback)
				uarg->callback(uarg, true);
		}

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);

		skb_free_head(skb);
	}
}
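
/*
 *	Free an skbuff by memory without cleaning the state.
 */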
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff *other;
	atomic_t *fclone_ref;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclone_ref = (atomic_t *) (skb + 2);
		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, skb);
		break;

	case SKB_FCLONE_CLONE:
		fclone_ref = (atomic_t *) (skb + 1);
		other = skb - 1;

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_UNAVAILABLE;

		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, other);
		break;
	}
}

static void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb->nfct);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb);
}
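
/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb
 */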
void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);
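
/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */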
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

void kfree_skb_list(struct sk_buff *segs)
{
	while (segs) {
		struct sk_buff *next = segs->next;

		kfree_skb(segs);
		segs = next;
	}
}
EXPORT_SYMBOL(kfree_skb_list);
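
/**
 *	skb_tx_error - report an sk_buff xmit error
 *	@skb: buffer that triggered an error
 *
 *	Report xmit error if a device callback is tracking this skb.
 *	skb must be freed afterwards.
 */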
void skb_tx_error(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		struct ubuf_info *uarg;

		uarg = skb_shinfo(skb)->destructor_arg;
		if (uarg->callback)
			uarg->callback(uarg, false);
		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	}
}
EXPORT_SYMBOL(skb_tx_error);
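
/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit
 *	zero. Functions identically to kfree_skb, but kfree_skb assumes
 *	the skb is being dropped after a failure and notes that
 */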
void consume_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	new->dev = old->dev;
	new->transport_header = old->transport_header;
	new->network_header = old->network_header;
	new->mac_header = old->mac_header;
	new->inner_protocol = old->inner_protocol;
	new->inner_transport_header = old->inner_transport_header;
	new->inner_network_header = old->inner_network_header;
	new->inner_mac_header = old->inner_mac_header;
	skb_dst_copy(new, old);
	skb_copy_hash(new, old);
	new->ooo_okay = old->ooo_okay;
	new->no_fcs = old->no_fcs;
	new->encapsulation = old->encapsulation;
#ifdef CONFIG_XFRM
	new->sp = secpath_get(old->sp);
#endif
	memcpy(new->cb, old->cb, sizeof(old->cb));
	new->csum = old->csum;
	new->local_df = old->local_df;
	new->pkt_type = old->pkt_type;
	new->ip_summed = old->ip_summed;
	skb_copy_queue_mapping(new, old);
	new->priority = old->priority;
#if IS_ENABLED(CONFIG_IP_VS)
	new->ipvs_property = old->ipvs_property;
#endif
	new->pfmemalloc = old->pfmemalloc;
	new->protocol = old->protocol;
	new->mark = old->mark;
	new->skb_iif = old->skb_iif;
	__nf_copy(new, old);
#ifdef CONFIG_NET_SCHED
	new->tc_index = old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
	new->tc_verd = old->tc_verd;
#endif
#endif
	new->vlan_proto = old->vlan_proto;
	new->vlan_tci = old->vlan_tci;

	skb_copy_secmark(new, old);

#ifdef CONFIG_NET_RX_BUSY_POLL
	new->napi_id = old->napi_id;
#endif
}
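
/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */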
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	atomic_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}
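
/**
 *	skb_morph	-	morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */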
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);
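
/**
 *	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
 *	@skb: the skb to modify
 *	@gfp_mask: allocation priority
 *
 *	This must be called on SKBTX_DEV_ZEROCOPY skb.
 *	It will copy all frags into kernel and drop the reference
 *	to userspace pages.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 *
 *	Returns 0 on success or a negative error code on failure
 *	to allocate kernel memory to copy to.
 */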
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int i;
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;

	for (i = 0; i < num_frags; i++) {
		u8 *vaddr;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		page = alloc_page(gfp_mask);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)page_private(head);
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		vaddr = kmap_atomic(skb_frag_page(f));
		memcpy(page_address(page),
		       vaddr + f->page_offset, skb_frag_size(f));
		kunmap_atomic(vaddr);
		set_page_private(page, (unsigned long)head);
		head = page;
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < num_frags; i++)
		skb_frag_unref(skb, i);

	uarg->callback(uarg, false);

	/* skb frags point to kernel buffers */
	for (i = num_frags - 1; i >= 0; i--) {
		__skb_fill_page_desc(skb, i, head, 0,
				     skb_shinfo(skb)->frags[i].size);
		head = (struct page *)page_private(head);
	}

	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	return 0;
}
EXPORT_SYMBOL_GPL(skb_copy_ubufs);
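
/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 */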
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n;

	if (skb_orphan_frags(skb, gfp_mask))
		return NULL;

	n = skb + 1;
	if (skb->fclone == SKB_FCLONE_ORIG &&
	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
		atomic_t *fclone_ref = (atomic_t *) (n + 1);
		n->fclone = SKB_FCLONE_CLONE;
		atomic_inc(fclone_ref);
	} else {
		if (skb_pfmemalloc(skb))
			gfp_mask |= __GFP_MEMALLOC;

		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		kmemcheck_annotate_bitfield(n, flags1);
		kmemcheck_annotate_bitfield(n, flags2);
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);

static void skb_headers_offset_update(struct sk_buff *skb, int off)
{
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += off;
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->transport_header += off;
	skb->network_header += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	skb->inner_transport_header += off;
	skb->inner_network_header += off;
	skb->inner_mac_header += off;
}

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	__copy_skb_header(new, old);

	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
{
	if (skb_pfmemalloc(skb))
		return SKB_ALLOC_RX;
	return 0;
}
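
/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As by-product this function converts non-linear &sk_buff to linear
 *	one, so that &sk_buff becomes completely private and caller is allowed
 *	to modify all the data of returned buffer. This means that this
 *	function is not recommended for use in circumstances when only
 *	header is going to be modified. Use pskb_copy() instead.
 */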
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = skb_end_offset(skb) + skb->data_len;
	struct sk_buff *n = __alloc_skb(size, gfp_mask,
					skb_alloc_rx_flag(skb), NUMA_NO_NODE);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);
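
/**
 *	__pskb_copy	-	create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@headroom: headroom of new skb
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only header of &sk_buff and needs
 *	private copy of the header to alter. Returns %NULL on failure
 *	or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */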
struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
{
	unsigned int size = skb_headlen(skb) + headroom;
	struct sk_buff *n = __alloc_skb(size, gfp_mask,
					skb_alloc_rx_flag(skb), NUMA_NO_NODE);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, headroom);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len = skb->data_len;
	n->len = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		if (skb_orphan_frags(skb, gfp_mask)) {
			kfree_skb(n);
			n = NULL;
			goto out;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			skb_frag_ref(skb, i);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(__pskb_copy);
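
/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if @nhead and @ntail are zero)
 *	header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero in the case of success or error,
 *	if expansion failed. In the last case, &sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */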
int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i;
	u8 *data;
	int size = nhead + skb_end_offset(skb) + ntail;
	long off;

	BUG_ON(nhead < 0);

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		goto nodata;
	size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	/*
	 * if shinfo is shared we must drop the old head gracefully, but if it
	 * is not we can just drop the old head and let the existing refcount
	 * be since all we did is relocate the values
	 */
	if (skb_cloned(skb)) {
		/* copy this zero copy skb frags */
		if (skb_orphan_frags(skb, gfp_mask))
			goto nofrags;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	} else {
		skb_free_head(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head = data;
	skb->head_frag = 0;
	skb->data += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
	off = nhead;
#else
	skb->end = skb->head + size;
#endif
	skb->tail += off;
	skb_headers_offset_update(skb, nhead);
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nofrags:
	kfree(data);
nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);
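
/* Make private copy of skb with writable head and some headroom */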
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);
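
/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */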
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
					gfp_mask, skb_alloc_rx_flag(skb),
					NUMA_NO_NODE);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			  skb->len + head_copy_len))
		BUG();

	copy_skb_header(n, skb);

	skb_headers_offset_update(n, newheadroom - oldheadroom);

	return n;
}
EXPORT_SYMBOL(skb_copy_expand);
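
/**
 *	skb_pad			-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error.
 */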
int skb_pad(struct sk_buff *skb, int pad)
{
	int err;
	int ntail;

	/* If the skbuff is non linear tailroom is always zero.. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data+skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(skb_pad);
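
/**
 *	pskb_put - add data to the tail of a potentially fragmented buffer
 *	@skb: start of the buffer to use
 *	@tail: tail fragment of the buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the potentially
 *	fragmented buffer. @tail must be the last fragment of @skb -- or
 *	@skb itself. If this would exceed the total buffer size the kernel
 *	will panic. A pointer to the first byte of the extra data is
 *	returned.
 */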
unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
	if (tail != skb) {
		skb->data_len += len;
		skb->len += len;
	}
	return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);
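
/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */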
unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, __builtin_return_address(0));
	return tmp;
}
EXPORT_SYMBOL(skb_put);
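
/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */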
unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len += len;
	if (unlikely(skb->data<skb->head))
		skb_under_panic(skb, len, __builtin_return_address(0));
	return skb->data;
}
EXPORT_SYMBOL(skb_push);
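
/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */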
unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);
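
/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */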
void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);
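
/* Trims skb to length len. It can change skb pointers.
 */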
int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (end < len) {
			offset = end;
			continue;
		}

		skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			skb_frag_unref(skb, i);

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			consume_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len = len;
	} else {
		skb->len = len;
		skb->data_len = 0;
		skb_set_tail_pointer(skb, len);
	}

	return 0;
}
EXPORT_SYMBOL(___pskb_trim);
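
/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes a sense only on a fragmented &sk_buff,
 *	it expands header moving its tail forward and copying necessary
 *	data from fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and &sk_buff does not change) if pull failed
 *	or value of new tail of skb in the case of success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */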
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If skb has not enough free space at tail, get new one
	 * plus 128 bytes for future expansions. If we have enough
	 * room at tail, reallocate without expansion only if skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
		BUG();

	/* Optimization: no fragments, no reasons to preestimate
	 * size of pulled pages. Superb.
	 */
	if (!skb_has_frag_list(skb))
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size >= eat)
			goto pull_pages;
		eat -= size;
	}

	/* If we need update frag list, we are in troubles.
	 * Certainly, it possible to add an offset to skb data,
	 * but taking into account that pulling is expected to
	 * be very rare operation, it is worth to fight against
	 * further bloating skb head and crucify ourselves here instead.
	 * Pure masohism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			BUG_ON(!list);

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size <= eat) {
			skb_frag_unref(skb, i);
			eat -= size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail += delta;
	skb->data_len -= delta;

	return skb_tail_pointer(skb);
}
EXPORT_SYMBOL(__pskb_pull_tail);
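
/**
 *	skb_copy_bits - copy bits from skb to kernel buffer
 *	@skb: source skb
 *	@offset: offset in source
 *	@to: destination buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source skb to the
 *	destination buffer.
 *
 *	CAUTION ! :
 *		If its prototype is ever changed,
 *		check arch/{*}/net/{*}.S files,
 *		since it is called from BPF assembly code.
 */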
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_from_linear_data_offset(skb, offset, to, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(f);
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_atomic(skb_frag_page(f));
			memcpy(to,
			       vaddr + f->page_offset + offset - start,
			       copy);
			kunmap_atomic(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_bits(frag_iter, offset - start, to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_bits);
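
/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */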
static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	put_page(spd->pages[i]);
}

static struct page *linear_to_page(struct page *page, unsigned int *len,
				   unsigned int *offset,
				   struct sock *sk)
{
	struct page_frag *pfrag = sk_page_frag(sk);

	if (!sk_page_frag_refill(sk, pfrag))
		return NULL;

	*len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);

	memcpy(page_address(pfrag->page) + pfrag->offset,
	       page_address(page) + *offset, *len);
	*offset = pfrag->offset;
	pfrag->offset += *len;

	return pfrag->page;
}

static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
			     struct page *page,
			     unsigned int offset)
{
	return	spd->nr_pages &&
		spd->pages[spd->nr_pages - 1] == page &&
		(spd->partial[spd->nr_pages - 1].offset +
		 spd->partial[spd->nr_pages - 1].len == offset);
}

/*
 * Fill page/offset/length into spd, if it can hold more pages.
 */
static bool spd_fill_page(struct splice_pipe_desc *spd,
			  struct pipe_inode_info *pipe, struct page *page,
			  unsigned int *len, unsigned int offset,
			  bool linear,
			  struct sock *sk)
{
	if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
		return true;

	if (linear) {
		page = linear_to_page(page, len, &offset, sk);
		if (!page)
			return true;
	}
	if (spd_can_coalesce(spd, page, offset)) {
		spd->partial[spd->nr_pages - 1].len += *len;
		return false;
	}
	get_page(page);
	spd->pages[spd->nr_pages] = page;
	spd->partial[spd->nr_pages].len = *len;
	spd->partial[spd->nr_pages].offset = offset;
	spd->nr_pages++;

	return false;
}

static bool __splice_segment(struct page *page, unsigned int poff,
			     unsigned int plen, unsigned int *off,
			     unsigned int *len,
			     struct splice_pipe_desc *spd, bool linear,
			     struct sock *sk,
			     struct pipe_inode_info *pipe)
{
	if (!*len)
		return true;

	/* skip this segment if already processed */
	if (*off >= plen) {
		*off -= plen;
		return false;
	}

	/* ignore any bits we already processed */
	poff += *off;
	plen -= *off;
	*off = 0;

	do {
		unsigned int flen = min(*len, plen);

		if (spd_fill_page(spd, pipe, page, &flen, poff,
				  linear, sk))
			return true;
		poff += flen;
		plen -= flen;
		*len -= flen;
	} while (*len && plen);

	return false;
}

/*
 * Map linear and fragment data from the skb to spd. It reports true if the
 * pipe is full or if we already spliced the requested length.
 */
static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
			      unsigned int *offset, unsigned int *len,
			      struct splice_pipe_desc *spd, struct sock *sk)
{
	int seg;

	/* map the linear part :
	 * If skb->head_frag is set, this 'linear' part is backed by a
	 * fragment, and if the head is not shared with any clones then
	 * we can avoid a copy since we own the head portion of this page.
	 */
	if (__splice_segment(virt_to_page(skb->data),
			     (unsigned long) skb->data & (PAGE_SIZE - 1),
			     skb_headlen(skb),
			     offset, len, spd,
			     skb_head_is_locked(skb),
			     sk, pipe))
		return true;

	/*
	 * then map the fragments
	 */
	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];

		if (__splice_segment(skb_frag_page(f),
				     f->page_offset, skb_frag_size(f),
				     offset, len, spd, false, sk, pipe))
			return true;
	}

	return false;
}
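
/*
 * Map data from the skb to a pipe. Should handle both the linear part,
 * the fragments, and the frag list. It does NOT handle frag lists within
 * the frag list, if such a thing exists. We'd probably need to recurse to
 * handle that cleanly.
 */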
int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int tlen,
		    unsigned int flags)
{
	struct partial_page partial[MAX_SKB_FRAGS];
	struct page *pages[MAX_SKB_FRAGS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = MAX_SKB_FRAGS,
		.flags = flags,
		.ops = &nosteal_pipe_buf_ops,
		.spd_release = sock_spd_release,
	};
	struct sk_buff *frag_iter;
	struct sock *sk = skb->sk;
	int ret = 0;

	/*
	 * __skb_splice_bits() only fails if the output has no room left,
	 * so no point in going over the frag_list for the error case.
	 */
	if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
		goto done;
	else if (!tlen)
		goto done;

	/*
	 * now see if we have a frag_list to map
	 */
	skb_walk_frags(skb, frag_iter) {
		if (!tlen)
			break;
		if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
			break;
	}

done:
	if (spd.nr_pages) {
		/*
		 * Drop the socket lock, otherwise we have reverse
		 * locking dependencies between sk_lock and i_mutex
		 * here as compared to sendfile(). We enter here
		 * with the socket lock held, and splice_to_pipe() will
		 * grab the pipe inode lock. For sendfile() emulation,
		 * we call into ->sendpage() with the i_mutex lock held
		 * and networking will grab the socket lock.
		 */
		release_sock(sk);
		ret = splice_to_pipe(pipe, &spd);
		lock_sock(sk);
	}

	return ret;
}
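
/**
 *	skb_store_bits - store bits from kernel buffer to skb
 *	@skb: destination buffer
 *	@offset: offset in destination
 *	@from: source buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source buffer to the
 *	destination skb.  This function handles all the messy bits of
 *	traversing fragment lists and such.
 */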
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_to_linear_data_offset(skb, offset, from, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_atomic(skb_frag_page(frag));
			memcpy(vaddr + frag->page_offset + offset - start,
			       from, copy);
			kunmap_atomic(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_store_bits(frag_iter, offset - start,
					   from, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_store_bits);
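
/* Checksum skb data. */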
__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
		      __wsum csum, const struct skb_checksum_ops *ops)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = ops->update(skb->data + offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;

			if (copy > len)
				copy = len;
			vaddr = kmap_atomic(skb_frag_page(frag));
			csum2 = ops->update(vaddr + frag->page_offset +
					    offset - start, copy, 0);
			kunmap_atomic(vaddr);
			csum = ops->combine(csum, csum2, pos, copy);
			if (!(len -= copy))
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			if (copy > len)
				copy = len;
			csum2 = __skb_checksum(frag_iter, offset - start,
					       copy, 0, ops);
			csum = ops->combine(csum, csum2, pos, copy);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}
	BUG_ON(len);

	return csum;
}
EXPORT_SYMBOL(__skb_checksum);

__wsum skb_checksum(const struct sk_buff *skb, int offset,
		    int len, __wsum csum)
{
	const struct skb_checksum_ops ops = {
		.update  = csum_partial_ext,
		.combine = csum_block_add_ext,
	};

	return __skb_checksum(skb, offset, len, csum, &ops);
}
EXPORT_SYMBOL(skb_checksum);
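
/* Both of above in one bottle. */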
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
				    u8 *to, int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial_copy_nocheck(skb->data + offset, to,
						 copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_atomic(skb_frag_page(frag));
			csum2 = csum_partial_copy_nocheck(vaddr +
							  frag->page_offset +
							  offset - start, to,
							  copy, 0);
			kunmap_atomic(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		__wsum csum2;
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			csum2 = skb_copy_and_csum_bits(frag_iter,
						       offset - start,
						       to, copy, 0);
			csum = csum_block_add(csum, csum2, pos);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}
	BUG_ON(len);
	return csum;
}
EXPORT_SYMBOL(skb_copy_and_csum_bits);
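
/**
 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
 * @from: source buffer
 *
 * Calculates the amount of linear headroom needed in the 'to' skb passed
 * into skb_zerocopy().
 */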
unsigned int
skb_zerocopy_headlen(const struct sk_buff *from)
{
	unsigned int hlen = 0;

	if (!from->head_frag ||
	    skb_headlen(from) < L1_CACHE_BYTES ||
	    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
		hlen = skb_headlen(from);

	if (skb_has_frag_list(from))
		hlen = from->len;

	return hlen;
}
EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
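
/**
 *	skb_zerocopy - Zero copy skb to skb
 *	@to: destination buffer
 *	@from: source buffer
 *	@len: number of bytes to copy from source buffer
 *	@hlen: size of linear headroom in destination buffer
 *
 *	Copies up to @len bytes from @from to @to by creating references
 *	to the frags in the source buffer.
 *
 *	The @hlen as calculated by skb_zerocopy_headlen() specifies the
 *	headroom in the @to buffer.
 *
 *	Return value:
 *	0: everything is OK
 *	-ENOMEM: couldn't orphan frags of @from due to lack of memory
 */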
int
skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
{
	int i, j = 0;
	int plen = 0; /* length of skb->head fragment */
	int ret;
	struct page *page;
	unsigned int offset;

	BUG_ON(!from->head_frag && !hlen);

	/* dont bother with small payloads */
	if (len <= skb_tailroom(to))
		return skb_copy_bits(from, 0, skb_put(to, len), len);

	if (hlen) {
		ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
		if (unlikely(ret))
			return ret;
		len -= hlen;
	} else {
		plen = min_t(int, skb_headlen(from), len);
		if (plen) {
			page = virt_to_head_page(from->head);
			offset = from->data - (unsigned char *)page_address(page);
			__skb_fill_page_desc(to, 0, page, offset, plen);
			get_page(page);
			j = 1;
			len -= plen;
		}
	}

	to->truesize += len + plen;
	to->len += len + plen;
	to->data_len += len + plen;

	if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
		skb_tx_error(from);
		return -ENOMEM;
	}

	for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
		if (!len)
			break;
		skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
		skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
		len -= skb_shinfo(to)->frags[j].size;
		skb_frag_ref(to, j);
		j++;
	}
	skb_shinfo(to)->nr_frags = j;

	return 0;
}
EXPORT_SYMBOL_GPL(skb_zerocopy);

void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
	__wsum csum;
	long csstart;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		csstart = skb_checksum_start_offset(skb);
	else
		csstart = skb_headlen(skb);

	BUG_ON(csstart > skb_headlen(skb));

	skb_copy_from_linear_data(skb, to, csstart);

	csum = 0;
	if (csstart != skb->len)
		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
					      skb->len - csstart, 0);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		long csstuff = csstart + skb->csum_offset;

		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
	}
}
EXPORT_SYMBOL(skb_copy_and_csum_dev);
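
/**
 *	skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The head item is
 *	returned or %NULL if the list is empty.
 */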
struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue);
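
/**
 *	skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The tail item is
 *	returned or %NULL if the list is empty.
 */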
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue_tail);
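
/**
 *	skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function takes the list
 *	lock and is atomic with respect to other list locking functions.
 */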
void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
EXPORT_SYMBOL(skb_queue_purge);
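
/**
 *	skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions
 *	safely.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */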
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_head);
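
/**
 *	skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the tail of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions
 *	safely.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */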
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_tail);
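
/**
 *	skb_unlink	-	remove a buffer from a list
 *	@skb: buffer to remove
 *	@list: list to use
 *
 *	Remove a packet from a list. The list locks are taken and this
 *	function is atomic with respect to other list locked calls
 *
 *	You must know what list the SKB is on.
 */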
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_unlink);
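
/**
 *	skb_append	-	append a buffer
 *	@old: buffer to insert after
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet after a given packet in a list. The list locks are taken
 *	and this function is atomic with respect to other list locked calls.
 *	A buffer cannot be placed on two lists at the same time.
 */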
void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_after(list, old, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_append);
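
/**
 *	skb_insert	-	insert a buffer
 *	@old: buffer to insert before
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet before a given packet in a list. The list locks are
 *	taken and this function is atomic with respect to other list locked
 *	calls.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */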
void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_insert(newsk, old->prev, old, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_insert);

static inline void skb_split_inside_header(struct sk_buff *skb,
					   struct sk_buff* skb1,
					   const u32 len, const int pos)
{
	int i;

	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
					 pos - len);
	/* And move data appendix as is. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];

	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
	skb_shinfo(skb)->nr_frags = 0;
	skb1->data_len = skb->data_len;
	skb1->len += skb1->data_len;
	skb->data_len = 0;
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

static inline void skb_split_no_header(struct sk_buff *skb,
				       struct sk_buff* skb1,
				       const u32 len, int pos)
{
	int i, k = 0;
	const int nfrags = skb_shinfo(skb)->nr_frags;

	skb_shinfo(skb)->nr_frags = 0;
	skb1->len = skb1->data_len = skb->len - len;
	skb->len = len;
	skb->data_len = len - pos;

	for (i = 0; i < nfrags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (pos + size > len) {
			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < len) {
				/* Split frag: the tail of this frag goes
				 * to skb1 while the head stays in skb.
				 * Both halves reference the same page,
				 * so take an extra page reference.
				 */
				skb_frag_ref(skb, i);
				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
				skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
				skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
				skb_shinfo(skb)->nr_frags++;
			}
			k++;
		} else
			skb_shinfo(skb)->nr_frags++;
		pos += size;
	}
	skb_shinfo(skb1)->nr_frags = k;
}
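
/**
 * skb_split - Split fragmented skb to two parts at length len.
 * @skb: the buffer to split
 * @skb1: the buffer to receive the second part
 * @len: new length for skb
 */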
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
	int pos = skb_headlen(skb);

	skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
	if (len < pos)	/* Split line is inside header. */
		skb_split_inside_header(skb, skb1, len, pos);
	else		/* Second chunk has no header, nothing to copy. */
		skb_split_no_header(skb, skb1, len, pos);
}
EXPORT_SYMBOL(skb_split);
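
/* Shifting from/to a cloned skb is a no-go.
 *
 * Caller cannot keep skb_shinfo related pointers past calling here!
 */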
static int skb_prepare_for_shift(struct sk_buff *skb)
{
	return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
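
/**
 * skb_shift - Shifts paged data partially from skb to another
 * @tgt: buffer into which tail data gets added
 * @skb: buffer from which the paged data comes from
 * @shiftlen: shift up to this many bytes
 *
 * Attempts to shift up to shiftlen worth of bytes, which may be less than
 * the length of the skb, from skb to tgt. Returns number bytes shifted.
 * It's up to caller to free skb if everything was shifted.
 *
 * If @tgt runs out of frags, the whole operation is aborted.
 *
 * Skb cannot include anything else but paged data while tgt is allowed
 * to have non-paged data as well.
 */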
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
{
	int from, to, merge, todo;
	struct skb_frag_struct *fragfrom, *fragto;

	BUG_ON(shiftlen > skb->len);
	BUG_ON(skb_headlen(skb));	/* Would corrupt stream */

	todo = shiftlen;
	from = 0;
	to = skb_shinfo(tgt)->nr_frags;
	fragfrom = &skb_shinfo(skb)->frags[from];

	/* Actual merge is delayed until the point when we know we can
	 * commit all, so that we don't have to undo partial changes
	 */
	if (!to ||
	    !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
			      fragfrom->page_offset)) {
		merge = -1;
	} else {
		merge = to - 1;

		todo -= skb_frag_size(fragfrom);
		if (todo < 0) {
			if (skb_prepare_for_shift(skb) ||
			    skb_prepare_for_shift(tgt))
				return 0;

			/* All previous frag pointers might be stale! */
			fragfrom = &skb_shinfo(skb)->frags[from];
			fragto = &skb_shinfo(tgt)->frags[merge];

			skb_frag_size_add(fragto, shiftlen);
			skb_frag_size_sub(fragfrom, shiftlen);
			fragfrom->page_offset += shiftlen;

			goto onlymerged;
		}

		from++;
	}

	/* Skip full, not-fitting skb to avoid expensive operations */
	if ((shiftlen == skb->len) &&
	    (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
		return 0;

	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
		return 0;

	while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
		if (to == MAX_SKB_FRAGS)
			return 0;

		fragfrom = &skb_shinfo(skb)->frags[from];
		fragto = &skb_shinfo(tgt)->frags[to];

		if (todo >= skb_frag_size(fragfrom)) {
			*fragto = *fragfrom;
			todo -= skb_frag_size(fragfrom);
			from++;
			to++;

		} else {
			__skb_frag_ref(fragfrom);
			fragto->page = fragfrom->page;
			fragto->page_offset = fragfrom->page_offset;
			skb_frag_size_set(fragto, todo);

			fragfrom->page_offset += todo;
			skb_frag_size_sub(fragfrom, todo);
			todo = 0;

			to++;
			break;
		}
	}

	/* Ready to "commit" this state change to tgt */
	skb_shinfo(tgt)->nr_frags = to;

	if (merge >= 0) {
		fragfrom = &skb_shinfo(skb)->frags[0];
		fragto = &skb_shinfo(tgt)->frags[merge];

		skb_frag_size_add(fragto, skb_frag_size(fragfrom));
		__skb_frag_unref(fragfrom);
	}

	/* Reposition in the original skb */
	to = 0;
	while (from < skb_shinfo(skb)->nr_frags)
		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
	skb_shinfo(skb)->nr_frags = to;

	BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);

onlymerged:
	/* Most likely the tgt won't ever need its checksum anymore, skb on
	 * the other hand might need it if it needs to be resent
	 */
	tgt->ip_summed = CHECKSUM_PARTIAL;
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb->len -= shiftlen;
	skb->data_len -= shiftlen;
	skb->truesize -= shiftlen;
	tgt->len += shiftlen;
	tgt->data_len += shiftlen;
	tgt->truesize += shiftlen;

	return shiftlen;
}
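
/**
 * skb_prepare_seq_read - Prepare a sequential read of skb data
 * @skb: the buffer to read
 * @from: lower offset of data to be read
 * @to: upper offset of data to be read
 * @st: state variable
 *
 * Initializes the specified state variable. Must be called before
 * invoking skb_seq_read() for the first time.
 */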
void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st)
{
	st->lower_offset = from;
	st->upper_offset = to;
	st->root_skb = st->cur_skb = skb;
	st->frag_idx = st->stepped_offset = 0;
	st->frag_data = NULL;
}
EXPORT_SYMBOL(skb_prepare_seq_read);
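
/**
 * skb_seq_read - Sequentially read skb data
 * @consumed: number of bytes consumed by the caller so far
 * @data: destination pointer for data to be returned
 * @st: state variable
 *
 * Reads a block of skb data at @consumed relative to the
 * lower offset specified to skb_prepare_seq_read(). Assigns
 * the head of the data block to @data and returns the length
 * of the block or 0 if the end of the skb data or the upper
 * offset has been reached.
 *
 * The caller is not required to consume all of the data
 * returned, i.e. @consumed is typically set to the number
 * of bytes already consumed and the next call to
 * skb_seq_read() will return the remaining part of the block.
 *
 * Note 1: The size of each block of data returned can be arbitrary,
 *       this limitation is the cost for zerocopy sequential
 *       reads of potentially non linear data.
 *
 * Note 2: Fragment lists within fragments are not implemented
 *       at the moment, state->root_skb could be replaced with
 *       a stack for this purpose.
 */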
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st)
{
	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
	skb_frag_t *frag;

	if (unlikely(abs_offset >= st->upper_offset)) {
		if (st->frag_data) {
			kunmap_atomic(st->frag_data);
			st->frag_data = NULL;
		}
		return 0;
	}

next_skb:
	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;

	if (abs_offset < block_limit && !st->frag_data) {
		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
		return block_limit - abs_offset;
	}

	if (st->frag_idx == 0 && !st->frag_data)
		st->stepped_offset += skb_headlen(st->cur_skb);

	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
		block_limit = skb_frag_size(frag) + st->stepped_offset;

		if (abs_offset < block_limit) {
			if (!st->frag_data)
				st->frag_data = kmap_atomic(skb_frag_page(frag));

			*data = (u8 *) st->frag_data + frag->page_offset +
				(abs_offset - st->stepped_offset);

			return block_limit - abs_offset;
		}

		if (st->frag_data) {
			kunmap_atomic(st->frag_data);
			st->frag_data = NULL;
		}

		st->frag_idx++;
		st->stepped_offset += skb_frag_size(frag);
	}

	if (st->frag_data) {
		kunmap_atomic(st->frag_data);
		st->frag_data = NULL;
	}

	if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
		st->frag_idx = 0;
		goto next_skb;
	} else if (st->cur_skb->next) {
		st->cur_skb = st->cur_skb->next;
		st->frag_idx = 0;
		goto next_skb;
	}

	return 0;
}
EXPORT_SYMBOL(skb_seq_read);
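
/**
 * skb_abort_seq_read - Abort a sequential read of skb data
 * @st: state variable
 *
 * Must be called if skb_seq_read() was not called until it
 * returned 0.
 */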
void skb_abort_seq_read(struct skb_seq_state *st)
{
	if (st->frag_data)
		kunmap_atomic(st->frag_data);
}
EXPORT_SYMBOL(skb_abort_seq_read);

#define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))

static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
					  struct ts_config *conf,
					  struct ts_state *state)
{
	return skb_seq_read(offset, text, TS_SKB_CB(state));
}

static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
{
	skb_abort_seq_read(TS_SKB_CB(state));
}
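
/**
 * skb_find_text - Find a text pattern in skb data
 * @skb: the buffer to look in
 * @from: search offset
 * @to: search limit
 * @config: textsearch configuration
 * @state: uninitialized textsearch state variable
 *
 * Finds a pattern in the skb data according to the specified
 * textsearch configuration. Use textsearch_next() to retrieve
 * subsequent occurrences of the pattern. Returns the offset
 * to the first occurrence or UINT_MAX if no match was found.
 */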
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config,
			   struct ts_state *state)
{
	unsigned int ret;

	config->get_next_block = skb_ts_get_next_block;
	config->finish = skb_ts_finish;

	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));

	ret = textsearch_find(config, state);
	return (ret <= to - from ? ret : UINT_MAX);
}
EXPORT_SYMBOL(skb_find_text);
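
/**
 * skb_append_datato_frags - append the user data to a skb
 * @sk: sock structure
 * @skb: skb structure to be appended with user data.
 * @getfrag: call back function to be used for getting the user data
 * @from: pointer to user message iov
 * @length: length of the iov message
 *
 * Description: This procedure appends the user data in the fragment part
 * of the skb. If any page alloc fails, this procedure returns -ENOMEM.
 */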
int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int (*getfrag)(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			void *from, int length)
{
	int frg_cnt = skb_shinfo(skb)->nr_frags;
	int copy;
	int offset = 0;
	int ret;
	struct page_frag *pfrag = &current->task_frag;

	do {
		/* Return error if we don't have space for new frag */
		if (frg_cnt >= MAX_SKB_FRAGS)
			return -EMSGSIZE;

		if (!sk_page_frag_refill(sk, pfrag))
			return -ENOMEM;

		/* copy the user data to page */
		copy = min_t(int, length, pfrag->size - pfrag->offset);

		ret = getfrag(from, page_address(pfrag->page) + pfrag->offset,
			      offset, copy, 0, skb);
		if (ret < 0)
			return -EFAULT;

		/* copy was successful so update the size parameters */
		skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset,
				   copy);
		frg_cnt++;
		pfrag->offset += copy;
		get_page(pfrag->page);

		skb->truesize += copy;
		atomic_add(copy, &sk->sk_wmem_alloc);
		skb->len += copy;
		skb->data_len += copy;
		offset += copy;
		length -= copy;

	} while (length > 0);

	return 0;
}
EXPORT_SYMBOL(skb_append_datato_frags);
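
/**
 *	skb_pull_rcsum - pull skb and update receive checksum
 *	@skb: buffer to update
 *	@len: length of data pulled
 *
 *	This function performs an skb_pull on the packet and updates
 *	the CHECKSUM_COMPLETE checksum.  It should be used on
 *	receive path processing instead of skb_pull unless you know
 *	that the checksum difference is zero (e.g., a valid IP header)
 *	or you are setting ip_summed to CHECKSUM_NONE.
 */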
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
{
	BUG_ON(len > skb->len);
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	skb_postpull_rcsum(skb, skb->data, len);
	return skb->data += len;
}
EXPORT_SYMBOL_GPL(skb_pull_rcsum);
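
/**
 *	skb_segment - Perform protocol segmentation on skb.
 *	@head_skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function performs segmentation on the given skb.  It returns
 *	a pointer to the first in a list of new skbs for the segments.
 *	In case of error it returns ERR_PTR(err).
 */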
struct sk_buff *skb_segment(struct sk_buff *head_skb,
			    netdev_features_t features)
{
	struct sk_buff *segs = NULL;
	struct sk_buff *tail = NULL;
	struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
	skb_frag_t *frag = skb_shinfo(head_skb)->frags;
	unsigned int mss = skb_shinfo(head_skb)->gso_size;
	unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
	struct sk_buff *frag_skb = head_skb;
	unsigned int offset = doffset;
	unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
	unsigned int headroom;
	unsigned int len;
	__be16 proto;
	bool csum;
	int sg = !!(features & NETIF_F_SG);
	int nfrags = skb_shinfo(head_skb)->nr_frags;
	int err = -ENOMEM;
	int i = 0;
	int pos;
	int dummy;

	proto = skb_network_protocol(head_skb, &dummy);
	if (unlikely(!proto))
		return ERR_PTR(-EINVAL);

	csum = !!can_checksum_protocol(features, proto);
	__skb_push(head_skb, doffset);
	headroom = skb_headroom(head_skb);
	pos = skb_headlen(head_skb);

	do {
		struct sk_buff *nskb;
		skb_frag_t *nskb_frag;
		int hsize;
		int size;

		len = head_skb->len - offset;
		if (len > mss)
			len = mss;

		hsize = skb_headlen(head_skb) - offset;
		if (hsize < 0)
			hsize = 0;
		if (hsize > len || !sg)
			hsize = len;

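		/* If the linear head is exhausted and the next frag_list skb
		 * lines up with a whole segment, clone it instead of copying.
		 */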
		if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
		    (skb_headlen(list_skb) == len || sg)) {
			BUG_ON(skb_headlen(list_skb) > len);

			i = 0;
			nfrags = skb_shinfo(list_skb)->nr_frags;
			frag = skb_shinfo(list_skb)->frags;
			frag_skb = list_skb;
			pos += skb_headlen(list_skb);

			while (pos < offset + len) {
				BUG_ON(i >= nfrags);

				size = skb_frag_size(frag);
				if (pos + size > offset + len)
					break;

				i++;
				pos += size;
				frag++;
			}

			nskb = skb_clone(list_skb, GFP_ATOMIC);
			list_skb = list_skb->next;

			if (unlikely(!nskb))
				goto err;

			if (unlikely(pskb_trim(nskb, len))) {
				kfree_skb(nskb);
				goto err;
			}

			hsize = skb_end_offset(nskb);
			if (skb_cow_head(nskb, doffset + headroom)) {
				kfree_skb(nskb);
				goto err;
			}

			nskb->truesize += skb_end_offset(nskb) - hsize;
			skb_release_head_state(nskb);
			__skb_push(nskb, doffset);
		} else {
			nskb = __alloc_skb(hsize + doffset + headroom,
					   GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
					   NUMA_NO_NODE);

			if (unlikely(!nskb))
				goto err;

			skb_reserve(nskb, headroom);
			__skb_put(nskb, doffset);
		}

		if (segs)
			tail->next = nskb;
		else
			segs = nskb;
		tail = nskb;

		__copy_skb_header(nskb, head_skb);
		nskb->mac_len = head_skb->mac_len;

		skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);

		skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
						 nskb->data - tnl_hlen,
						 doffset + tnl_hlen);

		if (nskb->len == len + doffset)
			goto perform_csum_check;

		if (!sg) {
			nskb->ip_summed = CHECKSUM_NONE;
			nskb->csum = skb_copy_and_csum_bits(head_skb, offset,
							    skb_put(nskb, len),
							    len, 0);
			continue;
		}

		nskb_frag = skb_shinfo(nskb)->frags;

		skb_copy_from_linear_data_offset(head_skb, offset,
						 skb_put(nskb, hsize), hsize);

		skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags &
					     SKBTX_SHARED_FRAG;

		while (pos < offset + len) {
			if (i >= nfrags) {
				BUG_ON(skb_headlen(list_skb));

				i = 0;
				nfrags = skb_shinfo(list_skb)->nr_frags;
				frag = skb_shinfo(list_skb)->frags;
				frag_skb = list_skb;

				BUG_ON(!nfrags);

				list_skb = list_skb->next;
			}

			if (unlikely(skb_shinfo(nskb)->nr_frags >=
				     MAX_SKB_FRAGS)) {
				net_warn_ratelimited(
					"skb_segment: too many frags: %u %u\n",
					pos, mss);
				goto err;
			}

			if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC)))
				goto err;

			*nskb_frag = *frag;
			__skb_frag_ref(nskb_frag);
			size = skb_frag_size(nskb_frag);

			if (pos < offset) {
				nskb_frag->page_offset += offset - pos;
				skb_frag_size_sub(nskb_frag, offset - pos);
			}

			skb_shinfo(nskb)->nr_frags++;

			if (pos + size <= offset + len) {
				i++;
				frag++;
				pos += size;
			} else {
				skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
				goto skip_fraglist;
			}

			nskb_frag++;
		}

skip_fraglist:
		nskb->data_len = len - hsize;
		nskb->len += nskb->data_len;
		nskb->truesize += nskb->data_len;

perform_csum_check:
		if (!csum) {
			nskb->csum = skb_checksum(nskb, doffset,
						  nskb->len - doffset, 0);
			nskb->ip_summed = CHECKSUM_NONE;
		}
	} while ((offset += len) < head_skb->len);

	return segs;

err:
	kfree_skb_list(segs);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skb_segment);

int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);
	struct sk_buff *nskb, *lp, *p = *head;
	unsigned int len = skb_gro_len(skb);
	unsigned int delta_truesize;
	unsigned int headroom;

	if (unlikely(p->len + len >= 65536))
		return -E2BIG;

	lp = NAPI_GRO_CB(p)->last ?: p;
	pinfo = skb_shinfo(lp);

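	/* All useful data sits in page frags: merge skb into p by
	 * moving the fragment descriptors over.
	 */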
	if (headlen <= offset) {
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		if (nr_frags > MAX_SKB_FRAGS)
			goto merge;

		offset -= headlen;
		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		frag->page_offset += offset;
		skb_frag_size_sub(frag, offset);

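		/* all fragments truesize : remove (head size + sk_buff) */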
		delta_truesize = skb->truesize -
				 SKB_TRUESIZE(skb_end_offset(skb));

		skb->truesize -= skb->data_len;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
		goto done;
	} else if (skb->head_frag) {
		int nr_frags = pinfo->nr_frags;
		skb_frag_t *frag = pinfo->frags + nr_frags;
		struct page *page = virt_to_head_page(skb->head);
		unsigned int first_size = headlen - offset;
		unsigned int first_offset;

		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
			goto merge;

		first_offset = skb->data -
			       (unsigned char *)page_address(page) +
			       offset;

		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

		frag->page.p = page;
		frag->page_offset = first_offset;
		skb_frag_size_set(frag, first_size);

		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);

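		/* We don't need to clear skbinfo->nr_frags here */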
		delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
		goto done;
	}
	if (pinfo->frag_list)
		goto merge;
	if (skb_gro_len(p) != pinfo->gso_size)
		return -E2BIG;

	headroom = skb_headroom(p);
	nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
	if (unlikely(!nskb))
		return -ENOMEM;

	__copy_skb_header(nskb, p);
	nskb->mac_len = p->mac_len;

	skb_reserve(nskb, headroom);
	__skb_put(nskb, skb_gro_offset(p));

	skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
	skb_set_network_header(nskb, skb_network_offset(p));
	skb_set_transport_header(nskb, skb_transport_offset(p));

	__skb_pull(p, skb_gro_offset(p));
	memcpy(skb_mac_header(nskb), skb_mac_header(p),
	       p->data - skb_mac_header(p));

	skb_shinfo(nskb)->frag_list = p;
	skb_shinfo(nskb)->gso_size = pinfo->gso_size;
	pinfo->gso_size = 0;
	skb_header_release(p);
	NAPI_GRO_CB(nskb)->last = p;

	nskb->data_len += p->len;
	nskb->truesize += p->truesize;
	nskb->len += p->len;

	*head = nskb;
	nskb->next = p->next;
	p->next = NULL;

	p = nskb;

merge:
	delta_truesize = skb->truesize;
	if (offset > headlen) {
		unsigned int eat = offset - headlen;

		skbinfo->frags[0].page_offset += eat;
		skb_frag_size_sub(&skbinfo->frags[0], eat);
		skb->data_len -= eat;
		skb->len -= eat;
		offset = headlen;
	}

	__skb_pull(skb, offset);

	if (!NAPI_GRO_CB(p)->last)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;
	NAPI_GRO_CB(p)->last = skb;
	skb_header_release(skb);
	lp = p;

done:
	NAPI_GRO_CB(p)->count++;
	p->data_len += len;
	p->truesize += delta_truesize;
	p->len += len;
	if (lp != p) {
		lp->data_len += len;
		lp->truesize += delta_truesize;
		lp->len += len;
	}
	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}
EXPORT_SYMBOL_GPL(skb_gro_receive);

void __init skb_init(void)
{
	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
					      sizeof(struct sk_buff),
					      0,
					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					      NULL);
	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
						(2*sizeof(struct sk_buff)) +
						sizeof(atomic_t),
						0,
						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
						NULL);
}

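/**
 *	skb_to_sgvec - Fill a scatter-gather list from a socket buffer
 *	@skb: Socket buffer containing the buffers to be mapped
 *	@sg: The scatter-gather list to map into
 *	@offset: The offset into the buffer's contents to start mapping
 *	@len: Length of buffer space to be mapped
 *
 *	Fill the specified scatter-gather list with mappings/pointers into a
 *	region of the buffer space attached to a socket buffer.
 *
 *	A minimal usage sketch, assuming the caller has sized @sg for the
 *	worst case (here: all frags plus the linear head):
 *
 *	struct scatterlist sg[MAX_SKB_FRAGS + 1];
 *	int nsg;
 *
 *	sg_init_table(sg, ARRAY_SIZE(sg));
 *	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
 */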
static int
__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int elt = 0;

	if (copy > 0) {
		if (copy > len)
			copy = len;
		sg_set_buf(sg, skb->data + offset, copy);
		elt++;
		if ((len -= copy) == 0)
			return elt;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			sg_set_page(&sg[elt], skb_frag_page(frag), copy,
				    frag->page_offset+offset-start);
			elt++;
			if (!(len -= copy))
				return elt;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
					      copy);
			if ((len -= copy) == 0)
				return elt;
			offset += copy;
		}
		start = end;
	}
	BUG_ON(len);
	return elt;
}

int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int nsg = __skb_to_sgvec(skb, sg, offset, len);

	sg_mark_end(&sg[nsg - 1]);

	return nsg;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);

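/**
 *	skb_cow_data - Check that a socket buffer's data buffers are writable
 *	@skb: The socket buffer to check.
 *	@tailbits: Amount of trailing space to be added
 *	@trailer: Returned pointer to the skb where the @tailbits space begins
 *
 *	Make sure that the data buffers attached to a socket buffer are
 *	writable. If they are not, private copies are made of the data buffers
 *	and the socket buffer is set to use these instead.
 *
 *	If @tailbits is given, make sure that there is space to write @tailbits
 *	bytes of data beyond current end of socket buffer.  @trailer will be
 *	set to point to the skb in which this space begins.
 *
 *	The number of scatterlist elements required to completely map the
 *	COW'd and extended socket buffer will be returned.
 *
 *	A typical caller (sketch, as in IPsec transforms) sizes a scatterlist
 *	from the return value:
 *
 *	struct sk_buff *trailer;
 *	int nsg = skb_cow_data(skb, 2, &trailer);
 *
 *	if (nsg < 0)
 *		return nsg;
 */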
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
	int copyflag;
	int elt;
	struct sk_buff *skb1, **skb_p;

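	/* If skb is cloned or its head is paged, reallocate
	 * head pulling out all the pages (pages are considered not
	 * writable at the moment even if they are anonymous).
	 */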
	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
		return -ENOMEM;

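	/* Easy case. Most of packets will go this way. */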
	if (!skb_has_frag_list(skb)) {
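		/* A little trouble: not enough space for the trailer.
		 * This should not happen when the stack is tuned to
		 * generate good frames. On a miss, reallocate and
		 * reserve even more space; 128 bytes is fair.
		 */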
		if (skb_tailroom(skb) < tailbits &&
		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
			return -ENOMEM;

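		/* Voila! */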
		*trailer = skb;
		return 1;
	}

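	/* Harder case: walk the frag_list, copying any skb that is
	 * not safely writable.
	 */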
	elt = 1;
	skb_p = &skb_shinfo(skb)->frag_list;
	copyflag = 0;

	while ((skb1 = *skb_p) != NULL) {
		int ntail = 0;

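		/* The fragment is partially pulled by someone,
		 * this can happen on input. Copy now and keep
		 * the fragment!
		 */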
		if (skb_shared(skb1))
			copyflag = 1;

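		/* If the skb is the last, worry about trailer. */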
		if (skb1->next == NULL && tailbits) {
			if (skb_shinfo(skb1)->nr_frags ||
			    skb_has_frag_list(skb1) ||
			    skb_tailroom(skb1) < tailbits)
				ntail = tailbits + 128;
		}

		if (copyflag ||
		    skb_cloned(skb1) ||
		    ntail ||
		    skb_shinfo(skb1)->nr_frags ||
		    skb_has_frag_list(skb1)) {
			struct sk_buff *skb2;

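			/* A private copy is unavoidable: make one,
			 * expanded if a trailer is needed.
			 */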
			if (ntail == 0)
				skb2 = skb_copy(skb1, GFP_ATOMIC);
			else
				skb2 = skb_copy_expand(skb1,
						       skb_headroom(skb1),
						       ntail,
						       GFP_ATOMIC);
			if (unlikely(skb2 == NULL))
				return -ENOMEM;

			if (skb1->sk)
				skb_set_owner_w(skb2, skb1->sk);

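			/* Looking around. Are we still alive?
			 * OK, link new skb, drop old one */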
			skb2->next = skb1->next;
			*skb_p = skb2;
			kfree_skb(skb1);
			skb1 = skb2;
		}
		elt++;
		*trailer = skb1;
		skb_p = &skb1->next;
	}

	return elt;
}
EXPORT_SYMBOL_GPL(skb_cow_data);

static void sock_rmem_free(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}

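/*
 * Note: we don't mem charge error packets (no sk_forward_alloc changes)
 */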
int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	int len = skb->len;

	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned int)sk->sk_rcvbuf)
		return -ENOMEM;

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_rmem_free;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);

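	/* before exiting rcu section, make sure dst is refcounted */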
	skb_dst_force(skb);

	skb_queue_tail(&sk->sk_error_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_err_skb);

void skb_tstamp_tx(struct sk_buff *orig_skb,
		   struct skb_shared_hwtstamps *hwtstamps)
{
	struct sock *sk = orig_skb->sk;
	struct sock_exterr_skb *serr;
	struct sk_buff *skb;
	int err;

	if (!sk)
		return;

	if (hwtstamps) {
		*skb_hwtstamps(orig_skb) = *hwtstamps;
	} else {
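		/*
		 * no hardware time stamps available,
		 * so keep the shared tx_flags and only
		 * store software time stamp
		 */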
		orig_skb->tstamp = ktime_get_real();
	}

	skb = skb_clone(orig_skb, GFP_ATOMIC);
	if (!skb)
		return;

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = ENOMSG;
	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;

	err = sock_queue_err_skb(sk, skb);

	if (err)
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_tstamp_tx);

void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
{
	struct sock *sk = skb->sk;
	struct sock_exterr_skb *serr;
	int err;

	skb->wifi_acked_valid = 1;
	skb->wifi_acked = acked;

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = ENOMSG;
	serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;

	err = sock_queue_err_skb(sk, skb);
	if (err)
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);

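/**
 * skb_partial_csum_set - set up and verify partial csum values for packet
 * @skb: the skb to set
 * @start: the number of bytes after skb->data to start checksumming.
 * @off: the offset from start to place the checksum.
 *
 * For untrusted partially-checksummed packets, we need to make sure the values
 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
 *
 * This function checks and sets those values and skb->ip_summed: if this
 * returns false you should drop the packet.
 */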
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
{
	if (unlikely(start > skb_headlen(skb)) ||
	    unlikely((int)start + off > skb_headlen(skb) - 2)) {
		net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n",
				     start, off, skb_headlen(skb));
		return false;
	}
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = skb_headroom(skb) + start;
	skb->csum_offset = off;
	skb_set_transport_header(skb, start);
	return true;
}
EXPORT_SYMBOL_GPL(skb_partial_csum_set);

static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
			       unsigned int max)
{
	if (skb_headlen(skb) >= len)
		return 0;

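	/* If we need to pullup then pullup to the max, so we
	 * won't need to do it again.
	 */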
	if (max > skb->len)
		max = skb->len;

	if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
		return -ENOMEM;

	if (skb_headlen(skb) < len)
		return -EPROTO;

	return 0;
}

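/* This value should be large enough to cover a tagged ethernet header plus
 * maximally sized IP and TCP or UDP headers.
 */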
#define MAX_IP_HDR_LEN 128

static int skb_checksum_setup_ip(struct sk_buff *skb, bool recalculate)
{
	unsigned int off;
	bool fragment;
	int err;

	fragment = false;

	err = skb_maybe_pull_tail(skb,
				  sizeof(struct iphdr),
				  MAX_IP_HDR_LEN);
	if (err < 0)
		goto out;

	if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
		fragment = true;

	off = ip_hdrlen(skb);

	err = -EPROTO;

	if (fragment)
		goto out;

	switch (ip_hdr(skb)->protocol) {
	case IPPROTO_TCP:
		err = skb_maybe_pull_tail(skb,
					  off + sizeof(struct tcphdr),
					  MAX_IP_HDR_LEN);
		if (err < 0)
			goto out;

		if (!skb_partial_csum_set(skb, off,
					  offsetof(struct tcphdr, check))) {
			err = -EPROTO;
			goto out;
		}

		if (recalculate)
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr,
						   skb->len - off,
						   IPPROTO_TCP, 0);
		break;
	case IPPROTO_UDP:
		err = skb_maybe_pull_tail(skb,
					  off + sizeof(struct udphdr),
					  MAX_IP_HDR_LEN);
		if (err < 0)
			goto out;

		if (!skb_partial_csum_set(skb, off,
					  offsetof(struct udphdr, check))) {
			err = -EPROTO;
			goto out;
		}

		if (recalculate)
			udp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr,
						   skb->len - off,
						   IPPROTO_UDP, 0);
		break;
	default:
		goto out;
	}

	err = 0;

out:
	return err;
}

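/* This value should be large enough to cover a tagged ethernet header plus
 * an IPv6 header, all options, and a maximal TCP or UDP header.
 */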
#define MAX_IPV6_HDR_LEN 256

#define OPT_HDR(type, skb, off) \
	(type *)(skb_network_header(skb) + (off))

static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
{
	int err;
	u8 nexthdr;
	unsigned int off;
	unsigned int len;
	bool fragment;
	bool done;

	fragment = false;
	done = false;

	off = sizeof(struct ipv6hdr);

	err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
	if (err < 0)
		goto out;

	nexthdr = ipv6_hdr(skb)->nexthdr;

	len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
	while (off <= len && !done) {
		switch (nexthdr) {
		case IPPROTO_DSTOPTS:
		case IPPROTO_HOPOPTS:
		case IPPROTO_ROUTING: {
			struct ipv6_opt_hdr *hp;

			err = skb_maybe_pull_tail(skb,
						  off +
						  sizeof(struct ipv6_opt_hdr),
						  MAX_IPV6_HDR_LEN);
			if (err < 0)
				goto out;

			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
			nexthdr = hp->nexthdr;
			off += ipv6_optlen(hp);
			break;
		}
		case IPPROTO_AH: {
			struct ip_auth_hdr *hp;

			err = skb_maybe_pull_tail(skb,
						  off +
						  sizeof(struct ip_auth_hdr),
						  MAX_IPV6_HDR_LEN);
			if (err < 0)
				goto out;

			hp = OPT_HDR(struct ip_auth_hdr, skb, off);
			nexthdr = hp->nexthdr;
			off += ipv6_authlen(hp);
			break;
		}
		case IPPROTO_FRAGMENT: {
			struct frag_hdr *hp;

			err = skb_maybe_pull_tail(skb,
						  off +
						  sizeof(struct frag_hdr),
						  MAX_IPV6_HDR_LEN);
			if (err < 0)
				goto out;

			hp = OPT_HDR(struct frag_hdr, skb, off);

			if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
				fragment = true;

			nexthdr = hp->nexthdr;
			off += sizeof(struct frag_hdr);
			break;
		}
		default:
			done = true;
			break;
		}
	}

	err = -EPROTO;

	if (!done || fragment)
		goto out;

	switch (nexthdr) {
	case IPPROTO_TCP:
		err = skb_maybe_pull_tail(skb,
					  off + sizeof(struct tcphdr),
					  MAX_IPV6_HDR_LEN);
		if (err < 0)
			goto out;

		if (!skb_partial_csum_set(skb, off,
					  offsetof(struct tcphdr, check))) {
			err = -EPROTO;
			goto out;
		}

		if (recalculate)
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 skb->len - off,
						 IPPROTO_TCP, 0);
		break;
	case IPPROTO_UDP:
		err = skb_maybe_pull_tail(skb,
					  off + sizeof(struct udphdr),
					  MAX_IPV6_HDR_LEN);
		if (err < 0)
			goto out;

		if (!skb_partial_csum_set(skb, off,
					  offsetof(struct udphdr, check))) {
			err = -EPROTO;
			goto out;
		}

		if (recalculate)
			udp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 skb->len - off,
						 IPPROTO_UDP, 0);
		break;
	default:
		goto out;
	}

	err = 0;

out:
	return err;
}

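/**
 * skb_checksum_setup - set up partial checksum offset
 * @skb: the skb to set up
 * @recalculate: if true the pseudo-header checksum will be recalculated
 */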
int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
{
	int err;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		err = skb_checksum_setup_ip(skb, recalculate);
		break;

	case htons(ETH_P_IPV6):
		err = skb_checksum_setup_ipv6(skb, recalculate);
		break;

	default:
		err = -EPROTO;
		break;
	}

	return err;
}
EXPORT_SYMBOL(skb_checksum_setup);

void __skb_warn_lro_forwarding(const struct sk_buff *skb)
{
	net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
			     skb->dev->name);
}
EXPORT_SYMBOL(__skb_warn_lro_forwarding);

void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
{
	if (head_stolen) {
		skb_release_head_state(skb);
		kmem_cache_free(skbuff_head_cache, skb);
	} else {
		__kfree_skb(skb);
	}
}
EXPORT_SYMBOL(kfree_skb_partial);

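/**
 * skb_try_coalesce - try to merge skb to prior one
 * @to: prior buffer
 * @from: buffer to add
 * @fragstolen: pointer to boolean
 * @delta_truesize: how much more was allocated than was requested
 */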
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
		      bool *fragstolen, int *delta_truesize)
{
	int i, delta, len = from->len;

	*fragstolen = false;

	if (skb_cloned(to))
		return false;

	if (len <= skb_tailroom(to)) {
		BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
		*delta_truesize = 0;
		return true;
	}

	if (skb_has_frag_list(to) || skb_has_frag_list(from))
		return false;

	if (skb_headlen(from) != 0) {
		struct page *page;
		unsigned int offset;

		if (skb_shinfo(to)->nr_frags +
		    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
			return false;

		if (skb_head_is_locked(from))
			return false;

		delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));

		page = virt_to_head_page(from->head);
		offset = from->data - (unsigned char *)page_address(page);

		skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
				   page, offset, skb_headlen(from));
		*fragstolen = true;
	} else {
		if (skb_shinfo(to)->nr_frags +
		    skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
			return false;

		delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
	}

	WARN_ON_ONCE(delta < len);

	memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
	       skb_shinfo(from)->frags,
	       skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
	skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;

	if (!skb_cloned(from))
		skb_shinfo(from)->nr_frags = 0;

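	/* if the skb is not cloned this does nothing
	 * since we set nr_frags to 0.
	 */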
	for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
		skb_frag_ref(from, i);

	to->truesize += delta;
	to->len += len;
	to->data_len += len;

	*delta_truesize = delta;
	return true;
}
EXPORT_SYMBOL(skb_try_coalesce);

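/**
 * skb_scrub_packet - scrub an skb
 *
 * @skb: buffer to clean
 * @xnet: packet is crossing netns
 *
 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
 * into/from a tunnel. Some information on the packet is cleared during the
 * encapsulation/decapsulation, and has to be filled in again. It can also be
 * used to clean a skb before injecting it into another namespace, in which
 * case @xnet should be true so the skb is orphaned as well.
 */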
void skb_scrub_packet(struct sk_buff *skb, bool xnet)
{
	if (xnet)
		skb_orphan(skb);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->skb_iif = 0;
	skb->local_df = 0;
	skb_dst_drop(skb);
	skb->mark = 0;
	secpath_reset(skb);
	nf_reset(skb);
	nf_reset_trace(skb);
}
EXPORT_SYMBOL_GPL(skb_scrub_packet);

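/**
 * skb_gso_transport_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_transport_seglen is used to determine the real size of the
 * individual segments, including Layer4 headers (TCP/UDP).
 *
 * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
 */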
unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	unsigned int hdr_len;

	if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
		hdr_len = tcp_hdrlen(skb);
	else
		hdr_len = sizeof(struct udphdr);
	return hdr_len + shinfo->gso_size;
}
EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
