/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>

struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);

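/*
 * skb_panic - private function for out-of-line support
 *
 * Dump the offending skb's geometry and BUG().  Reached via
 * skb_over_panic()/skb_under_panic() when skb_put()/skb_push() would
 * run past skb->end or before skb->head.
 */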
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

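/*
 * kmalloc_reserve() wraps kmalloc_node_track_caller(): the first attempt
 * is made without touching the memory reserves (__GFP_NOMEMALLOC); only
 * if that fails, and the caller is entitled to the reserves, is a second
 * attempt made with the original flags.  *pfmemalloc reports whether the
 * reserves were used, so the skb can be flagged accordingly.
 */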
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
	__kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)

static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
			       unsigned long ip, bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/* Try the allocation without the memory reserves first. */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Retry with the reserves; remember that we dipped into them. */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}

/*
 * __alloc_skb_head - allocate only an sk_buff head, with no data buffer.
 * The caller attaches its own data later; skb->head stays NULL.
 */
struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
{
	struct sk_buff *skb;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(skbuff_head_cache,
				    gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below.
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->head = NULL;
	skb->truesize = sizeof(struct sk_buff);
	refcount_set(&skb->users, 1);

	skb->mac_header = (typeof(skb->mac_header))~0U;
out:
	return skb;
}

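/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: SKB_ALLOC_RX indicates an RX buffer that may use pfmemalloc
 *		reserves; SKB_ALLOC_FCLONE allocates from the fclone cache
 *		so the skb can later be cloned cheaply
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff plus a data buffer of at least @size bytes
 *	(with room for struct skb_shared_info appended).  The returned
 *	buffer has a reference count of one.  Returns %NULL on failure.
 */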
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;
	bool pfmemalloc;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (!data)
		goto nodata;
	/* kmalloc() might give us more room than requested.
	 * Put skb_shared_info exactly at the end of the allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	skb->pfmemalloc = pfmemalloc;
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		kmemcheck_annotate_bitfield(&fclones->skb2, flags1);
		skb->fclone = SKB_FCLONE_ORIG;
		refcount_set(&fclones->fclone_ref, 1);

		fclones->skb2.fclone = SKB_FCLONE_CLONE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);

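/**
 * __build_skb - build an sk_buff around an existing data buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * The caller provides the memory for skb->head; only the sk_buff head
 * itself is allocated here.  The last
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) bytes of @data are
 * reserved for the shared info, so the usable payload is smaller than
 * @frag_size.  Returns %NULL if the head allocation fails.
 */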
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	unsigned int size = frag_size ? : ksize(data);

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (!skb)
		return NULL;

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = SKB_TRUESIZE(size);
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	return skb;
}

/* build_skb() is a wrapper over __build_skb() that additionally takes
 * care of skb->head_frag and skb->pfmemalloc.
 * This means that if @frag_size is not zero, then @data must come from
 * the page allocator, not kmalloc().
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (skb && frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);

#define NAPI_SKB_CACHE_SIZE	64

struct napi_alloc_cache {
	struct page_frag_cache page;
	unsigned int skb_count;
	void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);

static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	unsigned long flags;
	void *data;

	local_irq_save(flags);
	nc = this_cpu_ptr(&netdev_alloc_cache);
	data = page_frag_alloc(nc, fragsz, gfp_mask);
	local_irq_restore(flags);
	return data;
}

/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a frag from a page for receive buffer.
 * Uses GFP_ATOMIC allocations.
 */
void *netdev_alloc_frag(unsigned int fragsz)
{
	return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
}
EXPORT_SYMBOL(netdev_alloc_frag);

static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	return page_frag_alloc(&nc->page, fragsz, gfp_mask);
}

void *napi_alloc_frag(unsigned int fragsz)
{
	return __napi_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
}
EXPORT_SYMBOL(napi_alloc_frag);

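/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has NET_SKB_PAD headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space.
 *
 *	%NULL is returned if there is no free memory.
 */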
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
				   gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	unsigned long flags;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	len += NET_SKB_PAD;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	local_irq_save(flags);

	nc = this_cpu_ptr(&netdev_alloc_cache);
	data = page_frag_alloc(nc, len, gfp_mask);
	pfmemalloc = nc->pfmemalloc;

	local_irq_restore(flags);

	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	/* use OR instead of assignment to avoid clearing of bits in mask */
	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD);
	skb->dev = dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);

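/**
 *	__napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 *	@napi: napi instance this buffer was allocated for
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 *
 *	Allocate a new sk_buff for use in NAPI receive.  The head is carved
 *	from a per-CPU page fragment cache reserved for NAPI Rx allocation,
 *	which avoids having to disable and re-enable IRQs.
 *
 *	%NULL is returned if there is no free memory.
 */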
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
				 gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	struct sk_buff *skb;
	void *data;

	len += NET_SKB_PAD + NET_IP_ALIGN;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	data = page_frag_alloc(&nc->page, len, gfp_mask);
	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	/* use OR instead of assignment to avoid clearing of bits in mask */
	if (nc->page.pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	skb->dev = napi->dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__napi_alloc_skb);

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	unsigned char *head = skb->head;

	if (skb->head_frag)
		skb_free_frag(head);
	else
		kfree(head);
}

static void skb_release_data(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (skb->cloned &&
	    atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			      &shinfo->dataref))
		return;

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i]);

	/*
	 * If skb buf is from userspace, we need to notify the caller
	 * the lower device DMA has done;
	 */
	if (shinfo->tx_flags & SKBTX_DEV_ZEROCOPY) {
		struct ubuf_info *uarg;

		uarg = shinfo->destructor_arg;
		if (uarg->callback)
			uarg->callback(uarg, true);
	}

	if (shinfo->frag_list)
		kfree_skb_list(shinfo->frag_list);

	skb_free_head(skb);
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before original
		 * skb. This test would have no chance to be true for the
		 * clone, while here, branch prediction will be good.
		 */
		if (refcount_read(&fclones->fclone_ref) == 1)
			goto fastpath;
		break;

	default: /* SKB_FCLONE_CLONE */
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
		break;
	}
	if (!refcount_dec_and_test(&fclones->fclone_ref))
		return;
fastpath:
	kmem_cache_free(skbuff_fclone_cache, fclones);
}

void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	secpath_reset(skb);
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb_nfct(skb));
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	nf_bridge_put(skb->nf_bridge);
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb);
}

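/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb.
 */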
void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

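/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */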
void kfree_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

void kfree_skb_list(struct sk_buff *segs)
{
	while (segs) {
		struct sk_buff *next = segs->next;

		kfree_skb(segs);
		segs = next;
	}
}
EXPORT_SYMBOL(kfree_skb_list);

/**
 *	skb_tx_error - report an sk_buff xmit error
 *	@skb: buffer that triggered an error
 *
 *	Report xmit error if a device callback is tracking this skb.
 *	skb must be freed afterwards.
 */
void skb_tx_error(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		struct ubuf_info *uarg;

		uarg = skb_shinfo(skb)->destructor_arg;
		if (uarg->callback)
			uarg->callback(uarg, false);
		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	}
}
EXPORT_SYMBOL(skb_tx_error);

/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero.
 *	Functions identically to kfree_skb, but kfree_skb assumes that the
 *	frame is being dropped after a failure and notes that.
 */
void consume_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);

/**
 *	consume_stateless_skb - free an skbuff, assuming it is stateless
 *	@skb: buffer to free
 *
 *	Works like consume_skb(), but this variant assumes that all the head
 *	states have been already dropped.
 */
void consume_stateless_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_consume_skb(skb);
	if (likely(skb->head))
		skb_release_data(skb);
	kfree_skbmem(skb);
}

void __kfree_skb_flush(void)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	/* flush skb_cache if containing objects */
	if (nc->skb_count) {
		kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
				     nc->skb_cache);
		nc->skb_count = 0;
	}
}

static inline void _kfree_skb_defer(struct sk_buff *skb)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	/* drop skb->head and call any destructors for packet */
	skb_release_all(skb);

	/* record skb to CPU local list */
	nc->skb_cache[nc->skb_count++] = skb;

#ifdef CONFIG_SLUB
	/* SLUB writes into objects when freeing */
	prefetchw(skb);
#endif

	/* flush skb_cache if it is filled */
	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
				     nc->skb_cache);
		nc->skb_count = 0;
	}
}
void __kfree_skb_defer(struct sk_buff *skb)
{
	_kfree_skb_defer(skb);
}

void napi_consume_skb(struct sk_buff *skb, int budget)
{
	if (unlikely(!skb))
		return;

	/* Zero budget indicate non-NAPI context called us, like netpoll */
	if (unlikely(!budget)) {
		dev_consume_skb_any(skb);
		return;
	}

	if (!skb_unref(skb))
		return;

	/* if reaching here SKB is ready to free */
	trace_consume_skb(skb);

	/* if SKB is a clone, don't handle this case */
	if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
		__kfree_skb(skb);
		return;
	}

	_kfree_skb_defer(skb);
}
EXPORT_SYMBOL(napi_consume_skb);

/* Make sure a field is enclosed inside headers_start/headers_end section */
#define CHECK_SKB_FIELD(field) \
	BUILD_BUG_ON(offsetof(struct sk_buff, field) < \
		     offsetof(struct sk_buff, headers_start)); \
	BUILD_BUG_ON(offsetof(struct sk_buff, field) > \
		     offsetof(struct sk_buff, headers_end)); \

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	/* We do not copy old->sk */
	new->dev = old->dev;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	skb_dst_copy(new, old);
#ifdef CONFIG_XFRM
	new->sp = secpath_get(old->sp);
#endif
	__nf_copy(new, old, false);

	/* Note : this field could be in headers_start/headers_end section
	 * It is not yet because we do not want to have a 16 bit hole
	 */
	new->queue_mapping = old->queue_mapping;

	memcpy(&new->headers_start, &old->headers_start,
	       offsetof(struct sk_buff, headers_end) -
	       offsetof(struct sk_buff, headers_start));
	CHECK_SKB_FIELD(protocol);
	CHECK_SKB_FIELD(csum);
	CHECK_SKB_FIELD(hash);
	CHECK_SKB_FIELD(priority);
	CHECK_SKB_FIELD(skb_iif);
	CHECK_SKB_FIELD(vlan_proto);
	CHECK_SKB_FIELD(vlan_tci);
	CHECK_SKB_FIELD(transport_header);
	CHECK_SKB_FIELD(network_header);
	CHECK_SKB_FIELD(mac_header);
	CHECK_SKB_FIELD(inner_protocol);
	CHECK_SKB_FIELD(inner_transport_header);
	CHECK_SKB_FIELD(inner_network_header);
	CHECK_SKB_FIELD(inner_mac_header);
	CHECK_SKB_FIELD(mark);
#ifdef CONFIG_NETWORK_SECMARK
	CHECK_SKB_FIELD(secmark);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	CHECK_SKB_FIELD(napi_id);
#endif
#ifdef CONFIG_XPS
	CHECK_SKB_FIELD(sender_cpu);
#endif
#ifdef CONFIG_NET_SCHED
	CHECK_SKB_FIELD(tc_index);
#endif

}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	refcount_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 *	skb_morph	-	morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

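/**
 *	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
 *	@skb: the skb to modify
 *	@gfp_mask: allocation priority
 *
 *	This must be called on SKBTX_DEV_ZEROCOPY skb.
 *	It will copy all frags into kernel and drop the reference
 *	to userspace pages.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 *
 *	Returns 0 on success or a negative error code on failure
 *	to allocate kernel memory to copy to.
 */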
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int i;
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;

	for (i = 0; i < num_frags; i++) {
		u8 *vaddr;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		page = alloc_page(gfp_mask);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)page_private(head);
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		vaddr = kmap_atomic(skb_frag_page(f));
		memcpy(page_address(page),
		       vaddr + f->page_offset, skb_frag_size(f));
		kunmap_atomic(vaddr);
		set_page_private(page, (unsigned long)head);
		head = page;
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < num_frags; i++)
		skb_frag_unref(skb, i);

	uarg->callback(uarg, false);

	/* skb frags point to kernel buffers */
	for (i = num_frags - 1; i >= 0; i--) {
		__skb_fill_page_desc(skb, i, head, 0,
				     skb_shinfo(skb)->frags[i].size);
		head = (struct page *)page_private(head);
	}

	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	return 0;
}
EXPORT_SYMBOL_GPL(skb_copy_ubufs);

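/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 */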
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff_fclones *fclones = container_of(skb,
						       struct sk_buff_fclones,
						       skb1);
	struct sk_buff *n;

	if (skb_orphan_frags(skb, gfp_mask))
		return NULL;

	if (skb->fclone == SKB_FCLONE_ORIG &&
	    refcount_read(&fclones->fclone_ref) == 1) {
		n = &fclones->skb2;
		refcount_set(&fclones->fclone_ref, 2);
	} else {
		if (skb_pfmemalloc(skb))
			gfp_mask |= __GFP_MEMALLOC;

		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		kmemcheck_annotate_bitfield(n, flags1);
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);

static void skb_headers_offset_update(struct sk_buff *skb, int off)
{
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += off;
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->transport_header += off;
	skb->network_header += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	skb->inner_transport_header += off;
	skb->inner_network_header += off;
	skb->inner_mac_header += off;
}

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	__copy_skb_header(new, old);

	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
{
	if (skb_pfmemalloc(skb))
		return SKB_ALLOC_RX;
	return 0;
}

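/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As a by-product this function converts a non-linear &sk_buff to a
 *	linear one, so that the &sk_buff becomes completely private and the
 *	caller is allowed to modify all the data of the returned buffer. This
 *	means that this function is not recommended when only the header is
 *	going to be modified. Use pskb_copy() instead.
 */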
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = skb_end_offset(skb) + skb->data_len;
	struct sk_buff *n = __alloc_skb(size, gfp_mask,
					skb_alloc_rx_flag(skb), NUMA_NO_NODE);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);

/**
 *	__pskb_copy_fclone	-  create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@headroom: headroom of new skb
 *	@gfp_mask: allocation priority
 *	@fclone: if true allocate the copy of the skb from the fclone
 *	cache instead of the head cache; it is recommended to set this
 *	to true for the cases where the copy will likely be cloned
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only header of &sk_buff and needs
 *	private copy of the header to alter. Returns %NULL on failure
 *	or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */
struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone)
{
	unsigned int size = skb_headlen(skb) + headroom;
	int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
	struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, headroom);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len = skb->data_len;
	n->len = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		if (skb_orphan_frags(skb, gfp_mask)) {
			kfree_skb(n);
			n = NULL;
			goto out;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			skb_frag_ref(skb, i);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(__pskb_copy_fclone);

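/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if @nhead and @ntail are zero)
 *	header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero in the case of success or error,
 *	if expansion failed. In the last case, &sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */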
int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i, osize = skb_end_offset(skb);
	int size = osize + nhead + ntail;
	long off;
	u8 *data;

	BUG_ON(nhead < 0);

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		goto nodata;
	size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	/*
	 * if shinfo is shared we must drop the old head gracefully, but if it
	 * is not we can just drop the old head and let the existing refcount
	 * be since all we did is relocate the values
	 */
	if (skb_cloned(skb)) {
		if (skb_orphan_frags(skb, gfp_mask))
			goto nofrags;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	} else {
		skb_free_head(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head = data;
	skb->head_frag = 0;
	skb->data += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
	off = nhead;
#else
	skb->end = skb->head + size;
#endif
	skb->tail += off;
	skb_headers_offset_update(skb, nhead);
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);

	/* It is not generally safe to change skb->truesize.
	 * For the moment, we really care of rx path, or
	 * when skb is orphaned (not attached to a socket).
	 */
	if (!skb->sk || skb->destructor == sock_edemux)
		skb->truesize += size - osize;

	return 0;

nofrags:
	kfree(data);
nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);

/* Make private copy of skb with writable head and some headroom */
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);

/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
					gfp_mask, skb_alloc_rx_flag(skb),
					NUMA_NO_NODE);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			  skb->len + head_copy_len))
		BUG();

	copy_skb_header(n, skb);

	skb_headers_offset_update(n, newheadroom - oldheadroom);

	return n;
}
EXPORT_SYMBOL(skb_copy_expand);

/**
 *	__skb_pad		-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *	@free_on_error: free buffer on error
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error
 *	if @free_on_error is true.
 */
int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
{
	int err;
	int ntail;

	/* If the skbuff is non linear tailroom is always zero.. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data + skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	if (free_on_error)
		kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(__skb_pad);

/**
 *	pskb_put - add data to the tail of a potentially fragmented buffer
 *	@skb: start of the buffer to use
 *	@tail: tail fragment of the buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the potentially
 *	fragmented buffer. @tail must be the last fragment of @skb -- or
 *	@skb itself. If this would exceed the total buffer size the kernel
 *	will panic. A pointer to the first byte of the extra data is
 *	returned.
 */
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
	if (tail != skb) {
		skb->data_len += len;
		skb->len += len;
	}
	return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
void *skb_put(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, __builtin_return_address(0));
	return tmp;
}
EXPORT_SYMBOL(skb_put);

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
void *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, __builtin_return_address(0));
	return skb->data;
}
EXPORT_SYMBOL(skb_push);

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
void *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);

/* Trims skb to length len. It can change skb pointers.
 */
int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (end < len) {
			offset = end;
			continue;
		}

		skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			skb_frag_unref(skb, i);

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			consume_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len = len;
	} else {
		skb->len = len;
		skb->data_len = 0;
		skb_set_tail_pointer(skb, len);
	}

	if (!skb->sk || skb->destructor == sock_edemux)
		skb_condense(skb);
	return 0;
}
EXPORT_SYMBOL(___pskb_trim);

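/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes a sense only on a fragmented &sk_buff,
 *	it expands header moving its tail forward and copying necessary
 *	data from fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and &sk_buff does not change) if pull failed
 *	or value of new tail of skb in the case of success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */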
void *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If skb has not enough free space at tail, get new one
	 * plus 128 bytes for future expansions. If we have enough
	 * room at tail, reallocate without expansion only if skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
		BUG();

	/* Optimization: no fragments, no reasons to preestimate
	 * size of pulled pages. Superb.
	 */
	if (!skb_has_frag_list(skb))
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size >= eat)
			goto pull_pages;
		eat -= size;
	}

	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to skb data,
	 * but taking into account that pulling is expected to
	 * be a rare operation, it is worth fighting against
	 * further bloating of the skb head and doing the work here.
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			BUG_ON(!list);

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* The list entry is shared; fork it. */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems.
					 */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size <= eat) {
			skb_frag_unref(skb, i);
			eat -= size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail += delta;
	skb->data_len -= delta;

	return skb_tail_pointer(skb);
}
EXPORT_SYMBOL(__pskb_pull_tail);

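/**
 *	skb_copy_bits - copy bits from skb to kernel buffer
 *	@skb: source skb
 *	@offset: offset in source
 *	@to: destination buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source skb to the
 *	destination buffer.
 *
 *	CAUTION ! :
 *		If its prototype is ever changed,
 *		check arch/{*}/net/{*}.S files,
 *		since it is called from BPF assembly code.
 */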
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_from_linear_data_offset(skb, offset, to, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(f);
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_atomic(skb_frag_page(f));
			memcpy(to,
			       vaddr + f->page_offset + offset - start,
			       copy);
			kunmap_atomic(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_bits(frag_iter, offset - start, to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_bits);

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	put_page(spd->pages[i]);
}

static struct page *linear_to_page(struct page *page, unsigned int *len,
				   unsigned int *offset,
				   struct sock *sk)
{
	struct page_frag *pfrag = sk_page_frag(sk);

	if (!sk_page_frag_refill(sk, pfrag))
		return NULL;

	*len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);

	memcpy(page_address(pfrag->page) + pfrag->offset,
	       page_address(page) + *offset, *len);
	*offset = pfrag->offset;
	pfrag->offset += *len;

	return pfrag->page;
}

static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
			     struct page *page,
			     unsigned int offset)
{
	return spd->nr_pages &&
	       spd->pages[spd->nr_pages - 1] == page &&
	       (spd->partial[spd->nr_pages - 1].offset +
		spd->partial[spd->nr_pages - 1].len == offset);
}

/*
 * Fill page/offset/length into spd, if it can hold more pages.
 */
static bool spd_fill_page(struct splice_pipe_desc *spd,
			  struct pipe_inode_info *pipe, struct page *page,
			  unsigned int *len, unsigned int offset,
			  bool linear,
			  struct sock *sk)
{
	if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
		return true;

	if (linear) {
		page = linear_to_page(page, len, &offset, sk);
		if (!page)
			return true;
	}
	if (spd_can_coalesce(spd, page, offset)) {
		spd->partial[spd->nr_pages - 1].len += *len;
		return false;
	}
	get_page(page);
	spd->pages[spd->nr_pages] = page;
	spd->partial[spd->nr_pages].len = *len;
	spd->partial[spd->nr_pages].offset = offset;
	spd->nr_pages++;

	return false;
}

static bool __splice_segment(struct page *page, unsigned int poff,
			     unsigned int plen, unsigned int *off,
			     unsigned int *len,
			     struct splice_pipe_desc *spd, bool linear,
			     struct sock *sk,
			     struct pipe_inode_info *pipe)
{
	if (!*len)
		return true;

	/* skip this segment if already processed */
	if (*off >= plen) {
		*off -= plen;
		return false;
	}

	/* ignore any bits we already processed */
	poff += *off;
	plen -= *off;
	*off = 0;

	do {
		unsigned int flen = min(*len, plen);

		if (spd_fill_page(spd, pipe, page, &flen, poff,
				  linear, sk))
			return true;
		poff += flen;
		plen -= flen;
		*len -= flen;
	} while (*len && plen);

	return false;
}

/*
 * Map linear and fragment data from the skb to spd. It reports true if the
 * pipe is full or if we already spliced the requested length.
 */
static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
			      unsigned int *offset, unsigned int *len,
			      struct splice_pipe_desc *spd, struct sock *sk)
{
	int seg;
	struct sk_buff *iter;

	/* map the linear part :
	 * If skb->head_frag is set, this 'linear' part is backed by a
	 * fragment, and if the head is not shared with any clones then
	 * we can avoid a copy since we own the head portion of this page.
	 */
	if (__splice_segment(virt_to_page(skb->data),
			     (unsigned long) skb->data & (PAGE_SIZE - 1),
			     skb_headlen(skb),
			     offset, len, spd,
			     skb_head_is_locked(skb),
			     sk, pipe))
		return true;

	/*
	 * then map the fragments
	 */
	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];

		if (__splice_segment(skb_frag_page(f),
				     f->page_offset, skb_frag_size(f),
				     offset, len, spd, false, sk, pipe))
			return true;
	}

	skb_walk_frags(skb, iter) {
		if (*offset >= iter->len) {
			*offset -= iter->len;
			continue;
		}
		/* __skb_splice_bits() only fails if the output has no room
		 * left, so no point in going over the frag_list for the error
		 * case.
		 */
		if (__skb_splice_bits(iter, pipe, offset, len, spd, sk))
			return true;
	}

	return false;
}

/*
 * Map data from the skb to a pipe. Should handle both the linear part,
 * the fragments, and the frag list.
 */
int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int tlen,
		    unsigned int flags)
{
	struct partial_page partial[MAX_SKB_FRAGS];
	struct page *pages[MAX_SKB_FRAGS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = MAX_SKB_FRAGS,
		.ops = &nosteal_pipe_buf_ops,
		.spd_release = sock_spd_release,
	};
	int ret = 0;

	__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk);

	if (spd.nr_pages)
		ret = splice_to_pipe(pipe, &spd);

	return ret;
}
EXPORT_SYMBOL_GPL(skb_splice_bits);

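/**
 *	skb_store_bits - store bits from kernel buffer to skb
 *	@skb: destination buffer
 *	@offset: offset in destination
 *	@from: source buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source buffer to the
 *	destination skb.  This function handles all the messy bits of
 *	traversing fragment lists and such.
 */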
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_to_linear_data_offset(skb, offset, from, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_atomic(skb_frag_page(frag));
			memcpy(vaddr + frag->page_offset + offset - start,
			       from, copy);
			kunmap_atomic(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_store_bits(frag_iter, offset - start,
					   from, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_store_bits);

/* Checksum skb data. */
__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
		      __wsum csum, const struct skb_checksum_ops *ops)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = ops->update(skb->data + offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;

			if (copy > len)
				copy = len;
			vaddr = kmap_atomic(skb_frag_page(frag));
			csum2 = ops->update(vaddr + frag->page_offset +
					    offset - start, copy, 0);
			kunmap_atomic(vaddr);
			csum = ops->combine(csum, csum2, pos, copy);
			if (!(len -= copy))
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			if (copy > len)
				copy = len;
			csum2 = __skb_checksum(frag_iter, offset - start,
					       copy, 0, ops);
			csum = ops->combine(csum, csum2, pos, copy);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}
	BUG_ON(len);

	return csum;
}
EXPORT_SYMBOL(__skb_checksum);

__wsum skb_checksum(const struct sk_buff *skb, int offset,
		    int len, __wsum csum)
{
	const struct skb_checksum_ops ops = {
		.update = csum_partial_ext,
		.combine = csum_block_add_ext,
	};

	return __skb_checksum(skb, offset, len, csum, &ops);
}
EXPORT_SYMBOL(skb_checksum);

/* Both of above in one bottle. */
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
			      u8 *to, int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial_copy_nocheck(skb->data + offset, to,
						 copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_atomic(skb_frag_page(frag));
			csum2 = csum_partial_copy_nocheck(vaddr +
							  frag->page_offset +
							  offset - start, to,
							  copy, 0);
			kunmap_atomic(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		__wsum csum2;
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			csum2 = skb_copy_and_csum_bits(frag_iter,
						       offset - start,
						       to, copy, 0);
			csum = csum_block_add(csum, csum2, pos);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}
	BUG_ON(len);
	return csum;
}
EXPORT_SYMBOL(skb_copy_and_csum_bits);

static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum)
{
	net_warn_ratelimited(
		"%s: attempt to compute crc32c without libcrc32c.ko\n",
		__func__);
	return 0;
}

static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2,
				       int offset, int len)
{
	net_warn_ratelimited(
		"%s: attempt to compute crc32c without libcrc32c.ko\n",
		__func__);
	return 0;
}

static const struct skb_checksum_ops default_crc32c_ops = {
	.update = warn_crc32c_csum_update,
	.combine = warn_crc32c_csum_combine,
};

const struct skb_checksum_ops *crc32c_csum_stub __read_mostly =
	&default_crc32c_ops;
EXPORT_SYMBOL(crc32c_csum_stub);

/**
 *	skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
 *	@from: source buffer
 *
 *	Calculates the amount of linear headroom needed in the 'to' skb passed
 *	into skb_zerocopy().
 */
unsigned int
skb_zerocopy_headlen(const struct sk_buff *from)
{
	unsigned int hlen = 0;

	if (!from->head_frag ||
	    skb_headlen(from) < L1_CACHE_BYTES ||
	    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
		hlen = skb_headlen(from);

	if (skb_has_frag_list(from))
		hlen = from->len;

	return hlen;
}
EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);

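/**
 *	skb_zerocopy - Zero copy skb to skb, up to @len bytes
 *	@to: destination buffer
 *	@from: source buffer
 *	@len: number of bytes to copy from source buffer
 *	@hlen: size of linear headroom in destination buffer
 *
 *	Copies up to @len bytes from @from to @to by creating references
 *	to the frags in the source buffer.
 *
 *	The @hlen as calculated by skb_zerocopy_headlen() specifies the
 *	headroom in the @to buffer.
 *
 *	Return value:
 *	0: everything is OK
 *	-ENOMEM: couldn't orphan frags of @from due to lack of memory
 *	-EFAULT: skb_copy_bits() found some problem with skb geometry
 */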
int
skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
{
	int i, j = 0;
	int plen = 0; /* length of skb->head fragment */
	int ret;
	struct page *page;
	unsigned int offset;

	BUG_ON(!from->head_frag && !hlen);

	/* dont bother with small payloads */
	if (len <= skb_tailroom(to))
		return skb_copy_bits(from, 0, skb_put(to, len), len);

	if (hlen) {
		ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
		if (unlikely(ret))
			return ret;
		len -= hlen;
	} else {
		plen = min_t(int, skb_headlen(from), len);
		if (plen) {
			page = virt_to_head_page(from->head);
			offset = from->data - (unsigned char *)page_address(page);
			__skb_fill_page_desc(to, 0, page, offset, plen);
			get_page(page);
			j = 1;
			len -= plen;
		}
	}

	to->truesize += len + plen;
	to->len += len + plen;
	to->data_len += len + plen;

	if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
		skb_tx_error(from);
		return -ENOMEM;
	}

	for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
		if (!len)
			break;
		skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
		skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
		len -= skb_shinfo(to)->frags[j].size;
		skb_frag_ref(to, j);
		j++;
	}
	skb_shinfo(to)->nr_frags = j;

	return 0;
}
EXPORT_SYMBOL_GPL(skb_zerocopy);

void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
	__wsum csum;
	long csstart;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		csstart = skb_checksum_start_offset(skb);
	else
		csstart = skb_headlen(skb);

	BUG_ON(csstart > skb_headlen(skb));

	skb_copy_from_linear_data(skb, to, csstart);

	csum = 0;
	if (csstart != skb->len)
		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
					      skb->len - csstart, 0);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		long csstuff = csstart + skb->csum_offset;

		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
	}
}
EXPORT_SYMBOL(skb_copy_and_csum_dev);

/**
 *	skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The head item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue);

/**
 *	skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The tail item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue_tail);

/**
 *	skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function takes the list
 *	lock and is atomic with respect to other list locking functions.
 */
void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
EXPORT_SYMBOL(skb_queue_purge);

/**
 *	skb_rbtree_purge - empty a skb rbtree
 *	@root: root of the rbtree to empty
 *
 *	Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
 *	the tree and one reference dropped. This function does not take
 *	any lock. Synchronization should be handled by the caller (e.g., TCP
 *	out-of-order queue is protected by the socket lock).
 */
void skb_rbtree_purge(struct rb_root *root)
{
	struct sk_buff *skb, *next;

	rbtree_postorder_for_each_entry_safe(skb, next, root, rbnode)
		kfree_skb(skb);

	*root = RB_ROOT;
}

/**
 *	skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_head);

/**
 *	skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the tail of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_tail);

/**
 *	skb_unlink	-	remove a buffer from a list
 *	@skb: buffer to remove
 *	@list: list to use
 *
 *	Remove a packet from a list. The list locks are taken and this
 *	function is atomic with respect to other list locked calls.
 *
 *	You must know what list the SKB is on.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_unlink);

/**
 *	skb_append	-	append a buffer
 *	@old: buffer to insert after
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet after a given packet in a list. The list locks are taken
 *	and this function is atomic with respect to other list locked calls.
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_after(list, old, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_append);

/**
 *	skb_insert	-	insert a buffer
 *	@old: buffer to insert before
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet before a given packet in a list. The list locks are
 *	taken and this function is atomic with respect to other list locked
 *	calls.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_insert(newsk, old->prev, old, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_insert);

static inline void skb_split_inside_header(struct sk_buff *skb,
					   struct sk_buff *skb1,
					   const u32 len, const int pos)
{
	int i;

	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
					 pos - len);
	/* And move data appendix as is. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];

	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
	skb_shinfo(skb)->nr_frags = 0;
	skb1->data_len = skb->data_len;
	skb1->len += skb1->data_len;
	skb->data_len = 0;
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

static inline void skb_split_no_header(struct sk_buff *skb,
				       struct sk_buff *skb1,
				       const u32 len, int pos)
{
	int i, k = 0;
	const int nfrags = skb_shinfo(skb)->nr_frags;

	skb_shinfo(skb)->nr_frags = 0;
	skb1->len = skb1->data_len = skb->len - len;
	skb->len = len;
	skb->data_len = len - pos;

	for (i = 0; i < nfrags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (pos + size > len) {
			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < len) {
				/* Split frag: the first (len - pos) bytes
				 * stay in skb, the remainder moves to skb1.
				 * Both keep a reference on the page.
				 */
				skb_frag_ref(skb, i);
				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
				skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
				skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
				skb_shinfo(skb)->nr_frags++;
			}
			k++;
		} else
			skb_shinfo(skb)->nr_frags++;
		pos += size;
	}
	skb_shinfo(skb1)->nr_frags = k;
}

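/**
 * skb_split - Split fragmented skb to two parts at length len.
 * @skb: the buffer to split
 * @skb1: the buffer to receive the second part
 * @len: new length for skb
 */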
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
	int pos = skb_headlen(skb);

	skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags &
				      SKBTX_SHARED_FRAG;
	if (len < pos)	/* Split line is inside header. */
		skb_split_inside_header(skb, skb1, len, pos);
	else		/* Second chunk has no header, nothing to copy. */
		skb_split_no_header(skb, skb1, len, pos);
}
EXPORT_SYMBOL(skb_split);

/* Shifting from/to a cloned skb is a no-go.
 *
 * Caller cannot keep skb_shinfo related pointers past calling here!
 */
static int skb_prepare_for_shift(struct sk_buff *skb)
{
	return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}

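/**
 * skb_shift - Shifts paged data partially from skb to another
 * @tgt: buffer into which tail data gets added
 * @skb: buffer from which the paged data comes from
 * @shiftlen: shift up to this many bytes
 *
 * Attempts to shift up to shiftlen worth of bytes, which may be less than
 * the length of the skb, from skb to tgt. Returns number bytes shifted.
 * It's up to caller to free skb if everything was shifted.
 *
 * If @tgt runs out of frags, the whole operation is aborted.
 *
 * Skb cannot include anything else but paged data while tgt is allowed
 * to have non-paged data as well.
 */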
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
{
	int from, to, merge, todo;
	struct skb_frag_struct *fragfrom, *fragto;

	BUG_ON(shiftlen > skb->len);

	if (skb_headlen(skb))
		return 0;

	todo = shiftlen;
	from = 0;
	to = skb_shinfo(tgt)->nr_frags;
	fragfrom = &skb_shinfo(skb)->frags[from];

	/* Actual merge is delayed until the point when we know we can
	 * commit all, so that we don't have to undo partial changes
	 */
	if (!to ||
	    !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
			      fragfrom->page_offset)) {
		merge = -1;
	} else {
		merge = to - 1;

		todo -= skb_frag_size(fragfrom);
		if (todo < 0) {
			if (skb_prepare_for_shift(skb) ||
			    skb_prepare_for_shift(tgt))
				return 0;

			/* All previous frag pointers might be stale! */
			fragfrom = &skb_shinfo(skb)->frags[from];
			fragto = &skb_shinfo(tgt)->frags[merge];

			skb_frag_size_add(fragto, shiftlen);
			skb_frag_size_sub(fragfrom, shiftlen);
			fragfrom->page_offset += shiftlen;

			goto onlymerged;
		}

		from++;
	}

	/* Skip full, not-fitting skb to avoid expensive operations */
	if ((shiftlen == skb->len) &&
	    (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
		return 0;

	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
		return 0;

	while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
		if (to == MAX_SKB_FRAGS)
			return 0;

		fragfrom = &skb_shinfo(skb)->frags[from];
		fragto = &skb_shinfo(tgt)->frags[to];

		if (todo >= skb_frag_size(fragfrom)) {
			*fragto = *fragfrom;
			todo -= skb_frag_size(fragfrom);
			from++;
			to++;

		} else {
			__skb_frag_ref(fragfrom);
			fragto->page = fragfrom->page;
			fragto->page_offset = fragfrom->page_offset;
			skb_frag_size_set(fragto, todo);

			fragfrom->page_offset += todo;
			skb_frag_size_sub(fragfrom, todo);
			todo = 0;

			to++;
			break;
		}
	}

	/* Ready to "commit" this state change to tgt */
	skb_shinfo(tgt)->nr_frags = to;

	if (merge >= 0) {
		fragfrom = &skb_shinfo(skb)->frags[0];
		fragto = &skb_shinfo(tgt)->frags[merge];

		skb_frag_size_add(fragto, skb_frag_size(fragfrom));
		__skb_frag_unref(fragfrom);
	}

	/* Reposition in the original skb */
	to = 0;
	while (from < skb_shinfo(skb)->nr_frags)
		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
	skb_shinfo(skb)->nr_frags = to;

	BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);

onlymerged:
	/* Most likely the tgt won't ever need its checksum anymore, skb on
	 * the other hand might need it if it needs to be resent
	 */
	tgt->ip_summed = CHECKSUM_PARTIAL;
	skb->ip_summed = CHECKSUM_PARTIAL;

	/* Update lengths and truesize on both skbs */
	skb->len -= shiftlen;
	skb->data_len -= shiftlen;
	skb->truesize -= shiftlen;
	tgt->len += shiftlen;
	tgt->data_len += shiftlen;
	tgt->truesize += shiftlen;

	return shiftlen;
}

/**
 * skb_prepare_seq_read - Prepare a sequential read of skb data
 * @skb: the buffer to read
 * @from: lower offset of data to be read
 * @to: upper offset of data to be read
 * @st: state variable
 *
 * Initializes the specified state variable. Must be called before
 * invoking skb_seq_read() for the first time.
 */
void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st)
{
	st->lower_offset = from;
	st->upper_offset = to;
	st->root_skb = st->cur_skb = skb;
	st->frag_idx = st->stepped_offset = 0;
	st->frag_data = NULL;
}
EXPORT_SYMBOL(skb_prepare_seq_read);

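/**
 * skb_seq_read - Sequentially read skb data
 * @consumed: number of bytes consumed by the caller so far
 * @data: destination pointer for data to be returned
 * @st: state variable
 *
 * Reads a block of skb data at @consumed relative to the
 * lower offset specified to skb_prepare_seq_read(). Assigns
 * the head of the data block to @data and returns the length
 * of the block or 0 if the end of the skb data or the upper
 * offset has been reached.
 *
 * The caller is not required to consume all of the data
 * returned, i.e. @consumed is typically set to the number
 * of bytes already consumed and the next call to
 * skb_seq_read() will return the remaining part of the block.
 *
 * Note 1: The size of each block of data returned can be arbitrary,
 *       this limitation is the cost for zerocopy sequential
 *       reads of potentially non linear data.
 *
 * Note 2: Fragment lists within fragments are not implemented
 *       at the moment, state->root_skb could be replaced with
 *       a stack for this purpose.
 */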
2861unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
2862 struct skb_seq_state *st)
2863{
2864 unsigned int block_limit, abs_offset = consumed + st->lower_offset;
2865 skb_frag_t *frag;
2866
2867 if (unlikely(abs_offset >= st->upper_offset)) {
2868 if (st->frag_data) {
2869 kunmap_atomic(st->frag_data);
2870 st->frag_data = NULL;
2871 }
2872 return 0;
2873 }
2874
2875next_skb:
2876 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
2877
2878 if (abs_offset < block_limit && !st->frag_data) {
2879 *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
2880 return block_limit - abs_offset;
2881 }
2882
2883 if (st->frag_idx == 0 && !st->frag_data)
2884 st->stepped_offset += skb_headlen(st->cur_skb);
2885
2886 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
2887 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
2888 block_limit = skb_frag_size(frag) + st->stepped_offset;
2889
2890 if (abs_offset < block_limit) {
2891 if (!st->frag_data)
2892 st->frag_data = kmap_atomic(skb_frag_page(frag));
2893
2894 *data = (u8 *) st->frag_data + frag->page_offset +
2895 (abs_offset - st->stepped_offset);
2896
2897 return block_limit - abs_offset;
2898 }
2899
2900 if (st->frag_data) {
2901 kunmap_atomic(st->frag_data);
2902 st->frag_data = NULL;
2903 }
2904
2905 st->frag_idx++;
2906 st->stepped_offset += skb_frag_size(frag);
2907 }
2908
2909 if (st->frag_data) {
2910 kunmap_atomic(st->frag_data);
2911 st->frag_data = NULL;
2912 }
2913
2914 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
2915 st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
2916 st->frag_idx = 0;
2917 goto next_skb;
2918 } else if (st->cur_skb->next) {
2919 st->cur_skb = st->cur_skb->next;
2920 st->frag_idx = 0;
2921 goto next_skb;
2922 }
2923
2924 return 0;
2925}
2926EXPORT_SYMBOL(skb_seq_read);
2927
/**
 * skb_abort_seq_read - Abort a sequential read of skb data
 * @st: state variable
 *
 * Must be called if skb_seq_read() was not called until it
 * returned 0.
 */
2935void skb_abort_seq_read(struct skb_seq_state *st)
2936{
2937 if (st->frag_data)
2938 kunmap_atomic(st->frag_data);
2939}
2940EXPORT_SYMBOL(skb_abort_seq_read);
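
/* Illustrative sketch, not part of the original file: the canonical loop a
 * caller uses with the sequential-read API above. Processing of each block
 * is left as a comment; the function name is hypothetical.
 */
static inline void skb_seq_read_example(struct sk_buff *skb)
{
	struct skb_seq_state st;
	const u8 *data;
	unsigned int consumed = 0;
	unsigned int len;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		/* process 'len' bytes starting at 'data' */
		consumed += len;
	}
	/* skb_seq_read() returned 0, which already unmapped any frag;
	 * skb_abort_seq_read() is only needed when bailing out early.
	 */
}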
2941
2942#define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
2943
2944static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
2945 struct ts_config *conf,
2946 struct ts_state *state)
2947{
2948 return skb_seq_read(offset, text, TS_SKB_CB(state));
2949}
2950
2951static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
2952{
2953 skb_abort_seq_read(TS_SKB_CB(state));
2954}
2955
/**
 * skb_find_text - Find a text pattern in skb data
 * @skb: the buffer to look in
 * @from: search offset
 * @to: search limit
 * @config: textsearch configuration
 *
 * Finds a pattern in the skb data according to the specified
 * textsearch configuration. Use textsearch_next() to retrieve
 * subsequent occurrences of the pattern. Returns the offset
 * to the first occurrence or UINT_MAX if no match was found.
 */
2968unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
2969 unsigned int to, struct ts_config *config)
2970{
2971 struct ts_state state;
2972 unsigned int ret;
2973
2974 config->get_next_block = skb_ts_get_next_block;
2975 config->finish = skb_ts_finish;
2976
2977 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
2978
2979 ret = textsearch_find(config, &state);
2980 return (ret <= to - from ? ret : UINT_MAX);
2981}
2982EXPORT_SYMBOL(skb_find_text);
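
/* Illustrative sketch, not part of the original file: preparing a textsearch
 * configuration (here the "kmp" algorithm, assuming <linux/textsearch.h>)
 * and scanning a whole skb with skb_find_text(). The function name is
 * hypothetical.
 */
static inline unsigned int skb_find_text_example(struct sk_buff *skb,
						 const char *pattern)
{
	struct ts_config *conf;
	unsigned int pos;

	conf = textsearch_prepare("kmp", pattern, strlen(pattern),
				  GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return UINT_MAX;

	pos = skb_find_text(skb, 0, skb->len, conf);
	textsearch_destroy(conf);
	return pos;	/* offset of the first match, or UINT_MAX */
}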
2983
/**
 * skb_append_datato_frags - append user data to the paged part of an skb
 * @sk: sock structure
 * @skb: skb to which the user data is appended
 * @getfrag: callback invoked to copy the user data
 * @from: opaque pointer handed to @getfrag
 * @length: number of bytes to append
 *
 * Appends @length bytes to the fragment (paged) part of the skb.
 * Returns 0 on success, -EMSGSIZE if the fragment limit is reached,
 * or -ENOMEM/-EFAULT if a page allocation or the copy fails.
 */
2995int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
2996 int (*getfrag)(void *from, char *to, int offset,
2997 int len, int odd, struct sk_buff *skb),
2998 void *from, int length)
2999{
3000 int frg_cnt = skb_shinfo(skb)->nr_frags;
3001 int copy;
3002 int offset = 0;
3003 int ret;
	struct page_frag *pfrag = &current->task_frag;
3005
3006 do {
		/* Return error if we don't have space for new frag */
3008 if (frg_cnt >= MAX_SKB_FRAGS)
3009 return -EMSGSIZE;
3010
3011 if (!sk_page_frag_refill(sk, pfrag))
3012 return -ENOMEM;
3013
		/* Copy the user data to the page */
3015 copy = min_t(int, length, pfrag->size - pfrag->offset);
3016
3017 ret = getfrag(from, page_address(pfrag->page) + pfrag->offset,
3018 offset, copy, 0, skb);
3019 if (ret < 0)
3020 return -EFAULT;
3021
		/* Copy was successful so update the size parameters */
3023 skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset,
3024 copy);
3025 frg_cnt++;
3026 pfrag->offset += copy;
3027 get_page(pfrag->page);
3028
3029 skb->truesize += copy;
3030 refcount_add(copy, &sk->sk_wmem_alloc);
3031 skb->len += copy;
3032 skb->data_len += copy;
3033 offset += copy;
3034 length -= copy;
3035
3036 } while (length > 0);
3037
3038 return 0;
3039}
3040EXPORT_SYMBOL(skb_append_datato_frags);
3041
3042int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
3043 int offset, size_t size)
3044{
3045 int i = skb_shinfo(skb)->nr_frags;
3046
3047 if (skb_can_coalesce(skb, i, page, offset)) {
3048 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
3049 } else if (i < MAX_SKB_FRAGS) {
3050 get_page(page);
3051 skb_fill_page_desc(skb, i, page, offset, size);
3052 } else {
3053 return -EMSGSIZE;
3054 }
3055
3056 return 0;
3057}
3058EXPORT_SYMBOL_GPL(skb_append_pagefrags);
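
/* Illustrative sketch, not part of the original file: skb_append_pagefrags()
 * deliberately leaves length accounting to its caller, so a typical user
 * fixes up len/data_len/truesize afterwards. The function name is
 * hypothetical.
 */
static inline int skb_append_pagefrags_example(struct sk_buff *skb,
					       struct page *page,
					       int offset, size_t size)
{
	int err = skb_append_pagefrags(skb, page, offset, size);

	if (err)
		return err;

	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
	return 0;
}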
3059
/**
 * skb_pull_rcsum - pull skb and update receive checksum
 * @skb: buffer to update
 * @len: length of data pulled
 *
 * This function performs an skb_pull on the packet and updates
 * the CHECKSUM_COMPLETE checksum.  It should be used on
 * receive path processing instead of skb_pull unless you know
 * that the checksum difference is zero (e.g., a valid IP header)
 * or you are setting ip_summed to CHECKSUM_NONE.
 */
3071void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
3072{
3073 unsigned char *data = skb->data;
3074
3075 BUG_ON(len > skb->len);
3076 __skb_pull(skb, len);
3077 skb_postpull_rcsum(skb, data, len);
3078 return skb->data;
3079}
3080EXPORT_SYMBOL_GPL(skb_pull_rcsum);
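
/* Illustrative sketch, not part of the original file: stripping an
 * encapsulation header on the receive path while keeping a
 * CHECKSUM_COMPLETE value correct. 'hdr_len' and the function name are
 * hypothetical.
 */
static inline void *pull_encap_header_example(struct sk_buff *skb,
					      unsigned int hdr_len)
{
	if (!pskb_may_pull(skb, hdr_len))
		return NULL;
	return skb_pull_rcsum(skb, hdr_len);
}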
3081
/**
 * skb_segment - Perform protocol segmentation on skb.
 * @head_skb: buffer to segment
 * @features: features for the output path (see dev->features)
 *
 * This function performs segmentation on the given skb.  It returns
 * a pointer to the first in a list of new skbs for the segments.
 * In case of error it returns ERR_PTR(err).
 */
3091struct sk_buff *skb_segment(struct sk_buff *head_skb,
3092 netdev_features_t features)
3093{
3094 struct sk_buff *segs = NULL;
3095 struct sk_buff *tail = NULL;
3096 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
3097 skb_frag_t *frag = skb_shinfo(head_skb)->frags;
3098 unsigned int mss = skb_shinfo(head_skb)->gso_size;
3099 unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
3100 struct sk_buff *frag_skb = head_skb;
3101 unsigned int offset = doffset;
3102 unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
3103 unsigned int partial_segs = 0;
3104 unsigned int headroom;
3105 unsigned int len = head_skb->len;
3106 __be16 proto;
3107 bool csum, sg;
3108 int nfrags = skb_shinfo(head_skb)->nr_frags;
3109 int err = -ENOMEM;
3110 int i = 0;
3111 int pos;
3112 int dummy;
3113
3114 __skb_push(head_skb, doffset);
3115 proto = skb_network_protocol(head_skb, &dummy);
3116 if (unlikely(!proto))
3117 return ERR_PTR(-EINVAL);
3118
3119 sg = !!(features & NETIF_F_SG);
3120 csum = !!can_checksum_protocol(features, proto);
3121
3122 if (sg && csum && (mss != GSO_BY_FRAGS)) {
3123 if (!(features & NETIF_F_GSO_PARTIAL)) {
3124 struct sk_buff *iter;
3125 unsigned int frag_len;
3126
3127 if (!list_skb ||
3128 !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
3129 goto normal;
3130
			/* If we get here then all the required
			 * GSO features except frag_list are supported.
			 * Try to split the SKB to multiple GSO SKBs
			 * with no frag_list.
			 * Currently we can do that only when the buffers don't
			 * have a linear part and all the buffers except
			 * the last are of the same length.
			 */
3139 frag_len = list_skb->len;
3140 skb_walk_frags(head_skb, iter) {
3141 if (frag_len != iter->len && iter->next)
3142 goto normal;
3143 if (skb_headlen(iter) && !iter->head_frag)
3144 goto normal;
3145
3146 len -= iter->len;
3147 }
3148
3149 if (len != frag_len)
3150 goto normal;
3151 }
3152
		/* GSO partial only requires that we trim off any excess that
		 * doesn't fit into an MSS sized block, so take care of that
		 * now.
		 */
3157 partial_segs = len / mss;
3158 if (partial_segs > 1)
3159 mss *= partial_segs;
3160 else
3161 partial_segs = 0;
3162 }
3163
3164normal:
3165 headroom = skb_headroom(head_skb);
3166 pos = skb_headlen(head_skb);
3167
3168 do {
3169 struct sk_buff *nskb;
3170 skb_frag_t *nskb_frag;
3171 int hsize;
3172 int size;
3173
3174 if (unlikely(mss == GSO_BY_FRAGS)) {
3175 len = list_skb->len;
3176 } else {
3177 len = head_skb->len - offset;
3178 if (len > mss)
3179 len = mss;
3180 }
3181
3182 hsize = skb_headlen(head_skb) - offset;
3183 if (hsize < 0)
3184 hsize = 0;
3185 if (hsize > len || !sg)
3186 hsize = len;
3187
3188 if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
3189 (skb_headlen(list_skb) == len || sg)) {
3190 BUG_ON(skb_headlen(list_skb) > len);
3191
3192 i = 0;
3193 nfrags = skb_shinfo(list_skb)->nr_frags;
3194 frag = skb_shinfo(list_skb)->frags;
3195 frag_skb = list_skb;
3196 pos += skb_headlen(list_skb);
3197
3198 while (pos < offset + len) {
3199 BUG_ON(i >= nfrags);
3200
3201 size = skb_frag_size(frag);
3202 if (pos + size > offset + len)
3203 break;
3204
3205 i++;
3206 pos += size;
3207 frag++;
3208 }
3209
3210 nskb = skb_clone(list_skb, GFP_ATOMIC);
3211 list_skb = list_skb->next;
3212
3213 if (unlikely(!nskb))
3214 goto err;
3215
3216 if (unlikely(pskb_trim(nskb, len))) {
3217 kfree_skb(nskb);
3218 goto err;
3219 }
3220
3221 hsize = skb_end_offset(nskb);
3222 if (skb_cow_head(nskb, doffset + headroom)) {
3223 kfree_skb(nskb);
3224 goto err;
3225 }
3226
3227 nskb->truesize += skb_end_offset(nskb) - hsize;
3228 skb_release_head_state(nskb);
3229 __skb_push(nskb, doffset);
3230 } else {
3231 nskb = __alloc_skb(hsize + doffset + headroom,
3232 GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
3233 NUMA_NO_NODE);
3234
3235 if (unlikely(!nskb))
3236 goto err;
3237
3238 skb_reserve(nskb, headroom);
3239 __skb_put(nskb, doffset);
3240 }
3241
3242 if (segs)
3243 tail->next = nskb;
3244 else
3245 segs = nskb;
3246 tail = nskb;
3247
3248 __copy_skb_header(nskb, head_skb);
3249
3250 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
3251 skb_reset_mac_len(nskb);
3252
3253 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
3254 nskb->data - tnl_hlen,
3255 doffset + tnl_hlen);
3256
3257 if (nskb->len == len + doffset)
3258 goto perform_csum_check;
3259
3260 if (!sg) {
3261 if (!nskb->remcsum_offload)
3262 nskb->ip_summed = CHECKSUM_NONE;
3263 SKB_GSO_CB(nskb)->csum =
3264 skb_copy_and_csum_bits(head_skb, offset,
3265 skb_put(nskb, len),
3266 len, 0);
3267 SKB_GSO_CB(nskb)->csum_start =
3268 skb_headroom(nskb) + doffset;
3269 continue;
3270 }
3271
3272 nskb_frag = skb_shinfo(nskb)->frags;
3273
3274 skb_copy_from_linear_data_offset(head_skb, offset,
3275 skb_put(nskb, hsize), hsize);
3276
3277 skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
3278 SKBTX_SHARED_FRAG;
3279
3280 while (pos < offset + len) {
3281 if (i >= nfrags) {
3282 BUG_ON(skb_headlen(list_skb));
3283
3284 i = 0;
3285 nfrags = skb_shinfo(list_skb)->nr_frags;
3286 frag = skb_shinfo(list_skb)->frags;
3287 frag_skb = list_skb;
3288
3289 BUG_ON(!nfrags);
3290
3291 list_skb = list_skb->next;
3292 }
3293
3294 if (unlikely(skb_shinfo(nskb)->nr_frags >=
3295 MAX_SKB_FRAGS)) {
3296 net_warn_ratelimited(
3297 "skb_segment: too many frags: %u %u\n",
3298 pos, mss);
3299 goto err;
3300 }
3301
3302 if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC)))
3303 goto err;
3304
3305 *nskb_frag = *frag;
3306 __skb_frag_ref(nskb_frag);
3307 size = skb_frag_size(nskb_frag);
3308
3309 if (pos < offset) {
3310 nskb_frag->page_offset += offset - pos;
3311 skb_frag_size_sub(nskb_frag, offset - pos);
3312 }
3313
3314 skb_shinfo(nskb)->nr_frags++;
3315
3316 if (pos + size <= offset + len) {
3317 i++;
3318 frag++;
3319 pos += size;
3320 } else {
3321 skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
3322 goto skip_fraglist;
3323 }
3324
3325 nskb_frag++;
3326 }
3327
3328skip_fraglist:
3329 nskb->data_len = len - hsize;
3330 nskb->len += nskb->data_len;
3331 nskb->truesize += nskb->data_len;
3332
3333perform_csum_check:
3334 if (!csum) {
3335 if (skb_has_shared_frag(nskb)) {
3336 err = __skb_linearize(nskb);
3337 if (err)
3338 goto err;
3339 }
3340 if (!nskb->remcsum_offload)
3341 nskb->ip_summed = CHECKSUM_NONE;
3342 SKB_GSO_CB(nskb)->csum =
3343 skb_checksum(nskb, doffset,
3344 nskb->len - doffset, 0);
3345 SKB_GSO_CB(nskb)->csum_start =
3346 skb_headroom(nskb) + doffset;
3347 }
3348 } while ((offset += len) < head_skb->len);
3349
	/* Some callers want to get the end of the list.
	 * Put it in segs->prev to avoid walking the list.
	 * (see validate_xmit_skb_list() for example)
	 */
3354 segs->prev = tail;
3355
3356 if (partial_segs) {
3357 struct sk_buff *iter;
3358 int type = skb_shinfo(head_skb)->gso_type;
3359 unsigned short gso_size = skb_shinfo(head_skb)->gso_size;
3360
		/* Update type to add partial and then remove dodgy if set */
3362 type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL;
3363 type &= ~SKB_GSO_DODGY;
3364
		/* Update GSO info and prepare to start updating headers on
		 * our way back down the stack of protocols.
		 */
3368 for (iter = segs; iter; iter = iter->next) {
3369 skb_shinfo(iter)->gso_size = gso_size;
3370 skb_shinfo(iter)->gso_segs = partial_segs;
3371 skb_shinfo(iter)->gso_type = type;
3372 SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset;
3373 }
3374
3375 if (tail->len - doffset <= gso_size)
3376 skb_shinfo(tail)->gso_size = 0;
3377 else if (tail != segs)
3378 skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size);
3379 }
3380
	/* Following permits correct backpressure, for protocols
	 * using skb_set_owner_w().
	 * Idea is to transfer ownership from head_skb to last segment.
	 */
3385 if (head_skb->destructor == sock_wfree) {
3386 swap(tail->truesize, head_skb->truesize);
3387 swap(tail->destructor, head_skb->destructor);
3388 swap(tail->sk, head_skb->sk);
3389 }
3390 return segs;
3391
3392err:
3393 kfree_skb_list(segs);
3394 return ERR_PTR(err);
3395}
3396EXPORT_SYMBOL_GPL(skb_segment);
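
/* Illustrative sketch, not part of the original file: how a caller typically
 * consumes the segment list returned by skb_segment(). The transmit step is
 * reduced to a placeholder free; the function name is hypothetical.
 */
static inline int skb_segment_example(struct sk_buff *skb,
				      netdev_features_t features)
{
	struct sk_buff *segs, *nskb;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		return PTR_ERR(segs);

	consume_skb(skb);	/* the original skb is no longer needed */

	while (segs) {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		/* hand 'nskb' to the transmit path here */
		kfree_skb(nskb);	/* placeholder for this sketch */
	}
	return 0;
}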
3397
3398int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
3399{
3400 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
3401 unsigned int offset = skb_gro_offset(skb);
3402 unsigned int headlen = skb_headlen(skb);
3403 unsigned int len = skb_gro_len(skb);
3404 struct sk_buff *lp, *p = *head;
3405 unsigned int delta_truesize;
3406
3407 if (unlikely(p->len + len >= 65536))
3408 return -E2BIG;
3409
3410 lp = NAPI_GRO_CB(p)->last;
3411 pinfo = skb_shinfo(lp);
3412
3413 if (headlen <= offset) {
3414 skb_frag_t *frag;
3415 skb_frag_t *frag2;
3416 int i = skbinfo->nr_frags;
3417 int nr_frags = pinfo->nr_frags + i;
3418
3419 if (nr_frags > MAX_SKB_FRAGS)
3420 goto merge;
3421
3422 offset -= headlen;
3423 pinfo->nr_frags = nr_frags;
3424 skbinfo->nr_frags = 0;
3425
3426 frag = pinfo->frags + nr_frags;
3427 frag2 = skbinfo->frags + i;
3428 do {
3429 *--frag = *--frag2;
3430 } while (--i);
3431
3432 frag->page_offset += offset;
3433 skb_frag_size_sub(frag, offset);
3434
		/* all fragments truesize : remove (head size + sk_buff) */
3436 delta_truesize = skb->truesize -
3437 SKB_TRUESIZE(skb_end_offset(skb));
3438
3439 skb->truesize -= skb->data_len;
3440 skb->len -= skb->data_len;
3441 skb->data_len = 0;
3442
3443 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
3444 goto done;
3445 } else if (skb->head_frag) {
3446 int nr_frags = pinfo->nr_frags;
3447 skb_frag_t *frag = pinfo->frags + nr_frags;
3448 struct page *page = virt_to_head_page(skb->head);
3449 unsigned int first_size = headlen - offset;
3450 unsigned int first_offset;
3451
3452 if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
3453 goto merge;
3454
3455 first_offset = skb->data -
3456 (unsigned char *)page_address(page) +
3457 offset;
3458
3459 pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
3460
3461 frag->page.p = page;
3462 frag->page_offset = first_offset;
3463 skb_frag_size_set(frag, first_size);
3464
3465 memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
		/* We don't need to clear skbinfo->nr_frags here */
3467
3468 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
3469 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
3470 goto done;
3471 }
3472
3473merge:
3474 delta_truesize = skb->truesize;
3475 if (offset > headlen) {
3476 unsigned int eat = offset - headlen;
3477
3478 skbinfo->frags[0].page_offset += eat;
3479 skb_frag_size_sub(&skbinfo->frags[0], eat);
3480 skb->data_len -= eat;
3481 skb->len -= eat;
3482 offset = headlen;
3483 }
3484
3485 __skb_pull(skb, offset);
3486
3487 if (NAPI_GRO_CB(p)->last == p)
3488 skb_shinfo(p)->frag_list = skb;
3489 else
3490 NAPI_GRO_CB(p)->last->next = skb;
3491 NAPI_GRO_CB(p)->last = skb;
3492 __skb_header_release(skb);
3493 lp = p;
3494
3495done:
3496 NAPI_GRO_CB(p)->count++;
3497 p->data_len += len;
3498 p->truesize += delta_truesize;
3499 p->len += len;
3500 if (lp != p) {
3501 lp->data_len += len;
3502 lp->truesize += delta_truesize;
3503 lp->len += len;
3504 }
3505 NAPI_GRO_CB(skb)->same_flow = 1;
3506 return 0;
3507}
3508EXPORT_SYMBOL_GPL(skb_gro_receive);
3509
3510void __init skb_init(void)
3511{
3512 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
3513 sizeof(struct sk_buff),
3514 0,
3515 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
3516 NULL);
3517 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
3518 sizeof(struct sk_buff_fclones),
3519 0,
3520 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
3521 NULL);
3522}
3523
3524static int
3525__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
3526 unsigned int recursion_level)
3527{
3528 int start = skb_headlen(skb);
3529 int i, copy = start - offset;
3530 struct sk_buff *frag_iter;
3531 int elt = 0;
3532
3533 if (unlikely(recursion_level >= 24))
3534 return -EMSGSIZE;
3535
3536 if (copy > 0) {
3537 if (copy > len)
3538 copy = len;
3539 sg_set_buf(sg, skb->data + offset, copy);
3540 elt++;
3541 if ((len -= copy) == 0)
3542 return elt;
3543 offset += copy;
3544 }
3545
3546 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3547 int end;
3548
3549 WARN_ON(start > offset + len);
3550
3551 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
3552 if ((copy = end - offset) > 0) {
3553 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3554 if (unlikely(elt && sg_is_last(&sg[elt - 1])))
3555 return -EMSGSIZE;
3556
3557 if (copy > len)
3558 copy = len;
3559 sg_set_page(&sg[elt], skb_frag_page(frag), copy,
3560 frag->page_offset+offset-start);
3561 elt++;
3562 if (!(len -= copy))
3563 return elt;
3564 offset += copy;
3565 }
3566 start = end;
3567 }
3568
3569 skb_walk_frags(skb, frag_iter) {
3570 int end, ret;
3571
3572 WARN_ON(start > offset + len);
3573
3574 end = start + frag_iter->len;
3575 if ((copy = end - offset) > 0) {
3576 if (unlikely(elt && sg_is_last(&sg[elt - 1])))
3577 return -EMSGSIZE;
3578
3579 if (copy > len)
3580 copy = len;
3581 ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
3582 copy, recursion_level + 1);
3583 if (unlikely(ret < 0))
3584 return ret;
3585 elt += ret;
3586 if ((len -= copy) == 0)
3587 return elt;
3588 offset += copy;
3589 }
3590 start = end;
3591 }
3592 BUG_ON(len);
3593 return elt;
3594}
3595
/**
 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
 * @skb: Socket buffer containing the buffers to be mapped
 * @sg: The scatter-gather list to map into
 * @offset: The offset into the buffer's contents to start mapping
 * @len: Length of buffer space to be mapped
 *
 * Fill the specified scatter-gather list with mappings/pointers into a
 * region of the buffer space attached to a socket buffer. Returns either
 * the number of scatterlist items used, or -EMSGSIZE if the contents
 * could not fit.
 */
3608int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
3609{
3610 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
3611
3612 if (nsg <= 0)
3613 return nsg;
3614
3615 sg_mark_end(&sg[nsg - 1]);
3616
3617 return nsg;
3618}
3619EXPORT_SYMBOL_GPL(skb_to_sgvec);
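
/* Illustrative sketch, not part of the original file: mapping an entire skb
 * into a stack-allocated scatterlist, e.g. before handing it to the crypto
 * API. Assumes the skb has no frag_list; the function name is hypothetical.
 */
static inline int skb_to_sgvec_example(struct sk_buff *skb)
{
	struct scatterlist sg[MAX_SKB_FRAGS + 1];
	int nsg;

	sg_init_table(sg, MAX_SKB_FRAGS + 1);
	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
	if (nsg < 0)
		return nsg;

	/* 'sg' now holds 'nsg' entries covering the head and all frags */
	return nsg;
}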
3620
/* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to
 * the given sglist without marking the sg entry containing the last skb data
 * as the end. So the caller can manipulate the sg list as needed when
 * padding new data after the first call, without calling sg_unmark_end to
 * expand the sg list.
 *
 * Scenario to use skb_to_sgvec_nomark:
 * 1. sg_init_table
 * 2. skb_to_sgvec_nomark(payload1)
 * 3. skb_to_sgvec_nomark(payload2)
 *
 * This is equivalent to:
 * 1. sg_init_table
 * 2. skb_to_sgvec(payload1)
 * 3. sg_unmark_end
 * 4. skb_to_sgvec(payload2)
 *
 * When mapping multiple payloads conditionally, skb_to_sgvec_nomark
 * is preferable.
 */
3640int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
3641 int offset, int len)
3642{
3643 return __skb_to_sgvec(skb, sg, offset, len, 0);
3644}
3645EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
3646
/**
 * skb_cow_data - Check that a socket buffer's data buffers are writable
 * @skb: The socket buffer to check.
 * @tailbits: Amount of trailing space to be added
 * @trailer: Returned pointer to the skb where the @tailbits space begins
 *
 * Make sure that the data buffers attached to a socket buffer are
 * writable. If they are not, private copies are made of the data buffers
 * and the socket buffer is set to use these instead.
 *
 * If @tailbits is given, make sure that there is space to write @tailbits
 * bytes of data beyond current end of socket buffer.  @trailer will be
 * set to point to the skb in which this space begins.
 *
 * The number of scatterlist elements required to completely map the
 * COW'd and extended socket buffer will be returned.
 */
3666int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
3667{
3668 int copyflag;
3669 int elt;
3670 struct sk_buff *skb1, **skb_p;
3671
	/* If skb is cloned or its head is paged, reallocate
	 * head pulling out all the pages (pages are considered not writable
	 * at the moment even if they are anonymous).
	 */
3676 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
3677 __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
3678 return -ENOMEM;
3679
	/* Easy case. Most of packets will go this way. */
3681 if (!skb_has_frag_list(skb)) {
		/* A bit of trouble: not enough space for the trailer.
		 * This should not happen when the stack is tuned to
		 * generate good frames. Alas, packets might come
		 * from bad sources.
		 */
3687 if (skb_tailroom(skb) < tailbits &&
3688 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
3689 return -ENOMEM;
3690
		/* Voila! The skb itself has room for the trailer. */
3692 *trailer = skb;
3693 return 1;
3694 }
3695
3696
	/* Hard case: the skb has a frag_list, we have to mince the fragments. */
3698 elt = 1;
3699 skb_p = &skb_shinfo(skb)->frag_list;
3700 copyflag = 0;
3701
3702 while ((skb1 = *skb_p) != NULL) {
3703 int ntail = 0;
3704
		/* The fragment is partially pulled by someone,
		 * this can happen on input. Copy it and everything
		 * after it. */

3709 if (skb_shared(skb1))
3710 copyflag = 1;
3711
		/* If the skb is the last, worry about trailer. */

3714 if (skb1->next == NULL && tailbits) {
3715 if (skb_shinfo(skb1)->nr_frags ||
3716 skb_has_frag_list(skb1) ||
3717 skb_tailroom(skb1) < tailbits)
3718 ntail = tailbits + 128;
3719 }
3720
3721 if (copyflag ||
3722 skb_cloned(skb1) ||
3723 ntail ||
3724 skb_shinfo(skb1)->nr_frags ||
3725 skb_has_frag_list(skb1)) {
3726 struct sk_buff *skb2;
3727
			/* Unshare this fragment, expanding its tail if needed. */
3729 if (ntail == 0)
3730 skb2 = skb_copy(skb1, GFP_ATOMIC);
3731 else
3732 skb2 = skb_copy_expand(skb1,
3733 skb_headroom(skb1),
3734 ntail,
3735 GFP_ATOMIC);
3736 if (unlikely(skb2 == NULL))
3737 return -ENOMEM;
3738
3739 if (skb1->sk)
3740 skb_set_owner_w(skb2, skb1->sk);
3741
3742
3743
			/* OK: link the new skb in and drop the old one. */
3745 skb2->next = skb1->next;
3746 *skb_p = skb2;
3747 kfree_skb(skb1);
3748 skb1 = skb2;
3749 }
3750 elt++;
3751 *trailer = skb1;
3752 skb_p = &skb1->next;
3753 }
3754
3755 return elt;
3756}
3757EXPORT_SYMBOL_GPL(skb_cow_data);
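
/* Illustrative sketch, not part of the original file: the IPsec-style
 * pattern skb_cow_data() exists for - make the buffer writable with room
 * for 'padlen' trailer bytes, then append them with pskb_put(). The
 * function name is hypothetical.
 */
static inline int skb_cow_data_example(struct sk_buff *skb, int padlen)
{
	struct sk_buff *trailer;
	u8 *tail;
	int nsg;

	nsg = skb_cow_data(skb, padlen, &trailer);
	if (nsg < 0)
		return nsg;

	/* 'nsg' scatterlist entries now suffice to map the whole skb */
	tail = pskb_put(skb, trailer, padlen);
	memset(tail, 0, padlen);
	return 0;
}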
3758
3759static void sock_rmem_free(struct sk_buff *skb)
3760{
3761 struct sock *sk = skb->sk;
3762
3763 atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
3764}
3765
3766static void skb_set_err_queue(struct sk_buff *skb)
3767{
	/* pkt_type of skbs received on local sockets is never
	 * PACKET_OUTGOING, so it is safe to (mis)use it to mark skbs
	 * on the error queue.
	 */
3771 skb->pkt_type = PACKET_OUTGOING;
3772 BUILD_BUG_ON(PACKET_OUTGOING == 0);
3773}
3774
/*
 * Note: we don't mem-charge error packets (no sk_forward_alloc changes).
 */
3778int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
3779{
3780 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
3781 (unsigned int)sk->sk_rcvbuf)
3782 return -ENOMEM;
3783
3784 skb_orphan(skb);
3785 skb->sk = sk;
3786 skb->destructor = sock_rmem_free;
3787 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
3788 skb_set_err_queue(skb);
3789
3790
	/* before exiting rcu section, make sure dst is refcounted */
3792
3793 skb_queue_tail(&sk->sk_error_queue, skb);
3794 if (!sock_flag(sk, SOCK_DEAD))
3795 sk->sk_data_ready(sk);
3796 return 0;
3797}
3798EXPORT_SYMBOL(sock_queue_err_skb);
3799
3800static bool is_icmp_err_skb(const struct sk_buff *skb)
3801{
3802 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
3803 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6);
3804}
3805
3806struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
3807{
3808 struct sk_buff_head *q = &sk->sk_error_queue;
3809 struct sk_buff *skb, *skb_next = NULL;
3810 bool icmp_next = false;
3811 unsigned long flags;
3812
3813 spin_lock_irqsave(&q->lock, flags);
3814 skb = __skb_dequeue(q);
3815 if (skb && (skb_next = skb_peek(q))) {
3816 icmp_next = is_icmp_err_skb(skb_next);
3817 if (icmp_next)
			sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
3819 }
3820 spin_unlock_irqrestore(&q->lock, flags);
3821
3822 if (is_icmp_err_skb(skb) && !icmp_next)
3823 sk->sk_err = 0;
3824
3825 if (skb_next)
3826 sk->sk_error_report(sk);
3827
3828 return skb;
3829}
3830EXPORT_SYMBOL(sock_dequeue_err_skb);
3831
/**
 * skb_clone_sk - create clone of skb, and take reference to socket
 * @skb: the skb to clone
 *
 * This function creates a clone of a buffer that holds a reference on
 * sk_refcnt.  Buffers created via this function are meant to be
 * returned using sock_queue_err_skb, or freed via kfree_skb.
 *
 * When passing buffers allocated with this function to sock_queue_err_skb
 * it is necessary to wrap the call with sock_hold/sock_put in order to
 * prevent the socket from being released prior to being enqueued on
 * the sk_error_queue.
 */
3845struct sk_buff *skb_clone_sk(struct sk_buff *skb)
3846{
3847 struct sock *sk = skb->sk;
3848 struct sk_buff *clone;
3849
3850 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
3851 return NULL;
3852
3853 clone = skb_clone(skb, GFP_ATOMIC);
3854 if (!clone) {
3855 sock_put(sk);
3856 return NULL;
3857 }
3858
3859 clone->sk = sk;
3860 clone->destructor = sock_efree;
3861
3862 return clone;
3863}
3864EXPORT_SYMBOL(skb_clone_sk);
3865
3866static void __skb_complete_tx_timestamp(struct sk_buff *skb,
3867 struct sock *sk,
3868 int tstype,
3869 bool opt_stats)
3870{
3871 struct sock_exterr_skb *serr;
3872 int err;
3873
3874 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
3875
3876 serr = SKB_EXT_ERR(skb);
3877 memset(serr, 0, sizeof(*serr));
3878 serr->ee.ee_errno = ENOMSG;
3879 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
3880 serr->ee.ee_info = tstype;
3881 serr->opt_stats = opt_stats;
3882 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
3883 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
3884 serr->ee.ee_data = skb_shinfo(skb)->tskey;
3885 if (sk->sk_protocol == IPPROTO_TCP &&
3886 sk->sk_type == SOCK_STREAM)
3887 serr->ee.ee_data -= sk->sk_tskey;
3888 }
3889
3890 err = sock_queue_err_skb(sk, skb);
3891
3892 if (err)
3893 kfree_skb(skb);
3894}
3895
3896static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
3897{
3898 bool ret;
3899
3900 if (likely(sysctl_tstamp_allow_data || tsonly))
3901 return true;
3902
3903 read_lock_bh(&sk->sk_callback_lock);
3904 ret = sk->sk_socket && sk->sk_socket->file &&
3905 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW);
3906 read_unlock_bh(&sk->sk_callback_lock);
3907 return ret;
3908}
3909
3910void skb_complete_tx_timestamp(struct sk_buff *skb,
3911 struct skb_shared_hwtstamps *hwtstamps)
3912{
3913 struct sock *sk = skb->sk;
3914
3915 if (!skb_may_tx_timestamp(sk, false))
3916 return;
3917
	/* Take a reference to prevent skb_orphan() from freeing the socket,
	 * but only if the socket refcount is not zero.
	 */
3921 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
3922 *skb_hwtstamps(skb) = *hwtstamps;
3923 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
3924 sock_put(sk);
3925 }
3926}
3927EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
3928
3929void __skb_tstamp_tx(struct sk_buff *orig_skb,
3930 struct skb_shared_hwtstamps *hwtstamps,
3931 struct sock *sk, int tstype)
3932{
3933 struct sk_buff *skb;
3934 bool tsonly, opt_stats = false;
3935
3936 if (!sk)
3937 return;
3938
3939 if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
3940 skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS)
3941 return;
3942
3943 tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
3944 if (!skb_may_tx_timestamp(sk, tsonly))
3945 return;
3946
3947 if (tsonly) {
3948#ifdef CONFIG_INET
3949 if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
3950 sk->sk_protocol == IPPROTO_TCP &&
3951 sk->sk_type == SOCK_STREAM) {
3952 skb = tcp_get_timestamping_opt_stats(sk);
3953 opt_stats = true;
3954 } else
3955#endif
3956 skb = alloc_skb(0, GFP_ATOMIC);
3957 } else {
3958 skb = skb_clone(orig_skb, GFP_ATOMIC);
3959 }
3960 if (!skb)
3961 return;
3962
3963 if (tsonly) {
3964 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags &
3965 SKBTX_ANY_TSTAMP;
3966 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
3967 }
3968
3969 if (hwtstamps)
3970 *skb_hwtstamps(skb) = *hwtstamps;
3971 else
3972 skb->tstamp = ktime_get_real();
3973
3974 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
3975}
3976EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
3977
3978void skb_tstamp_tx(struct sk_buff *orig_skb,
3979 struct skb_shared_hwtstamps *hwtstamps)
3980{
3981 return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk,
3982 SCM_TSTAMP_SND);
3983}
3984EXPORT_SYMBOL_GPL(skb_tstamp_tx);
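
/* Illustrative sketch, not part of the original file: how a driver's TX
 * completion path reports a hardware timestamp. The conversion from the
 * device's raw counter to 'hw_ns' and the function name are hypothetical.
 */
static inline void skb_tstamp_tx_example(struct sk_buff *skb, u64 hw_ns)
{
	struct skb_shared_hwtstamps hwtstamps;

	memset(&hwtstamps, 0, sizeof(hwtstamps));
	hwtstamps.hwtstamp = ns_to_ktime(hw_ns);
	skb_tstamp_tx(skb, &hwtstamps);
}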
3985
3986void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
3987{
3988 struct sock *sk = skb->sk;
3989 struct sock_exterr_skb *serr;
3990 int err = 1;
3991
3992 skb->wifi_acked_valid = 1;
3993 skb->wifi_acked = acked;
3994
3995 serr = SKB_EXT_ERR(skb);
3996 memset(serr, 0, sizeof(*serr));
3997 serr->ee.ee_errno = ENOMSG;
3998 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
3999
	/* Take a reference to prevent skb_orphan() from freeing the socket,
	 * but only if the socket refcount is not zero.
	 */
4003 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
4004 err = sock_queue_err_skb(sk, skb);
4005 sock_put(sk);
4006 }
4007 if (err)
4008 kfree_skb(skb);
4009}
4010EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
4011
/**
 * skb_partial_csum_set - set up and verify partial csum values for packet
 * @skb: the skb to set
 * @start: the number of bytes after skb->data to start checksumming.
 * @off: the offset from start to place the checksum.
 *
 * For untrusted partially-checksummed packets, we need to make sure the values
 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
 *
 * This function checks and sets those values and skb->ip_summed: if this
 * returns false you should drop the packet.
 */
4024bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
4025{
4026 if (unlikely(start > skb_headlen(skb)) ||
4027 unlikely((int)start + off > skb_headlen(skb) - 2)) {
4028 net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n",
4029 start, off, skb_headlen(skb));
4030 return false;
4031 }
4032 skb->ip_summed = CHECKSUM_PARTIAL;
4033 skb->csum_start = skb_headroom(skb) + start;
4034 skb->csum_offset = off;
4035 skb_set_transport_header(skb, start);
4036 return true;
4037}
4038EXPORT_SYMBOL_GPL(skb_partial_csum_set);
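
/* Illustrative sketch, not part of the original file: validating checksum
 * metadata received from an untrusted source, the virtio-net style use
 * case. 'start' and 'off' would come from a guest-supplied header; the
 * function name is hypothetical.
 */
static inline int skb_partial_csum_example(struct sk_buff *skb,
					   u16 start, u16 off)
{
	if (!skb_partial_csum_set(skb, start, off))
		return -EINVAL;	/* bogus offsets: drop the packet */

	/* skb->ip_summed is now CHECKSUM_PARTIAL, and csum_start/csum_offset
	 * are known to lie within the linear header area.
	 */
	return 0;
}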
4039
4040static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
4041 unsigned int max)
4042{
4043 if (skb_headlen(skb) >= len)
4044 return 0;
4045
	/* If we need to pullup then pullup to the max, so we
	 * won't need to do it again.
	 */
4049 if (max > skb->len)
4050 max = skb->len;
4051
4052 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
4053 return -ENOMEM;
4054
4055 if (skb_headlen(skb) < len)
4056 return -EPROTO;
4057
4058 return 0;
4059}
4060
4061#define MAX_TCP_HDR_LEN (15 * 4)
4062
4063static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
4064 typeof(IPPROTO_IP) proto,
4065 unsigned int off)
4066{
4067 switch (proto) {
4068 int err;
4069
4070 case IPPROTO_TCP:
4071 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
4072 off + MAX_TCP_HDR_LEN);
4073 if (!err && !skb_partial_csum_set(skb, off,
4074 offsetof(struct tcphdr,
4075 check)))
4076 err = -EPROTO;
4077 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;
4078
4079 case IPPROTO_UDP:
4080 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
4081 off + sizeof(struct udphdr));
4082 if (!err && !skb_partial_csum_set(skb, off,
4083 offsetof(struct udphdr,
4084 check)))
4085 err = -EPROTO;
4086 return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
4087 }
4088
4089 return ERR_PTR(-EPROTO);
4090}
4091
/* This value should be large enough to cover a tagged ethernet header plus
 * maximally sized IP and TCP or UDP headers.
 */
4095#define MAX_IP_HDR_LEN 128
4096
4097static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
4098{
4099 unsigned int off;
4100 bool fragment;
4101 __sum16 *csum;
4102 int err;
4103
4104 fragment = false;
4105
4106 err = skb_maybe_pull_tail(skb,
4107 sizeof(struct iphdr),
4108 MAX_IP_HDR_LEN);
4109 if (err < 0)
4110 goto out;
4111
4112 if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
4113 fragment = true;
4114
4115 off = ip_hdrlen(skb);
4116
4117 err = -EPROTO;
4118
4119 if (fragment)
4120 goto out;
4121
4122 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
4123 if (IS_ERR(csum))
4124 return PTR_ERR(csum);
4125
4126 if (recalculate)
4127 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
4128 ip_hdr(skb)->daddr,
4129 skb->len - off,
4130 ip_hdr(skb)->protocol, 0);
4131 err = 0;
4132
4133out:
4134 return err;
4135}
4136
/* This value should be large enough to cover a tagged ethernet header plus
 * an IPv6 header, all options, and a maximal TCP or UDP header.
 */
4140#define MAX_IPV6_HDR_LEN 256
4141
4142#define OPT_HDR(type, skb, off) \
4143 (type *)(skb_network_header(skb) + (off))
4144
4145static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
4146{
4147 int err;
4148 u8 nexthdr;
4149 unsigned int off;
4150 unsigned int len;
4151 bool fragment;
4152 bool done;
4153 __sum16 *csum;
4154
4155 fragment = false;
4156 done = false;
4157
4158 off = sizeof(struct ipv6hdr);
4159
4160 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
4161 if (err < 0)
4162 goto out;
4163
4164 nexthdr = ipv6_hdr(skb)->nexthdr;
4165
4166 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
4167 while (off <= len && !done) {
4168 switch (nexthdr) {
4169 case IPPROTO_DSTOPTS:
4170 case IPPROTO_HOPOPTS:
4171 case IPPROTO_ROUTING: {
4172 struct ipv6_opt_hdr *hp;
4173
4174 err = skb_maybe_pull_tail(skb,
4175 off +
4176 sizeof(struct ipv6_opt_hdr),
4177 MAX_IPV6_HDR_LEN);
4178 if (err < 0)
4179 goto out;
4180
4181 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
4182 nexthdr = hp->nexthdr;
4183 off += ipv6_optlen(hp);
4184 break;
4185 }
4186 case IPPROTO_AH: {
4187 struct ip_auth_hdr *hp;
4188
4189 err = skb_maybe_pull_tail(skb,
4190 off +
4191 sizeof(struct ip_auth_hdr),
4192 MAX_IPV6_HDR_LEN);
4193 if (err < 0)
4194 goto out;
4195
4196 hp = OPT_HDR(struct ip_auth_hdr, skb, off);
4197 nexthdr = hp->nexthdr;
4198 off += ipv6_authlen(hp);
4199 break;
4200 }
4201 case IPPROTO_FRAGMENT: {
4202 struct frag_hdr *hp;
4203
4204 err = skb_maybe_pull_tail(skb,
4205 off +
4206 sizeof(struct frag_hdr),
4207 MAX_IPV6_HDR_LEN);
4208 if (err < 0)
4209 goto out;
4210
4211 hp = OPT_HDR(struct frag_hdr, skb, off);
4212
4213 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
4214 fragment = true;
4215
4216 nexthdr = hp->nexthdr;
4217 off += sizeof(struct frag_hdr);
4218 break;
4219 }
4220 default:
4221 done = true;
4222 break;
4223 }
4224 }
4225
4226 err = -EPROTO;
4227
4228 if (!done || fragment)
4229 goto out;
4230
4231 csum = skb_checksum_setup_ip(skb, nexthdr, off);
4232 if (IS_ERR(csum))
4233 return PTR_ERR(csum);
4234
4235 if (recalculate)
4236 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4237 &ipv6_hdr(skb)->daddr,
4238 skb->len - off, nexthdr, 0);
4239 err = 0;
4240
4241out:
4242 return err;
4243}
4244
/**
 * skb_checksum_setup - set up partial checksum offset
 * @skb: the skb to set up
 * @recalculate: if true the pseudo-header checksum will be recalculated
 */
4250int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
4251{
4252 int err;
4253
4254 switch (skb->protocol) {
4255 case htons(ETH_P_IP):
4256 err = skb_checksum_setup_ipv4(skb, recalculate);
4257 break;
4258
4259 case htons(ETH_P_IPV6):
4260 err = skb_checksum_setup_ipv6(skb, recalculate);
4261 break;
4262
4263 default:
4264 err = -EPROTO;
4265 break;
4266 }
4267
4268 return err;
4269}
4270EXPORT_SYMBOL(skb_checksum_setup);
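
/* Illustrative sketch, not part of the original file: a backend driver
 * repairing checksum state on a forwarded packet whose pseudo-header
 * checksum may be stale. The function name is hypothetical.
 */
static inline int skb_checksum_setup_example(struct sk_buff *skb)
{
	int err = skb_checksum_setup(skb, true);

	if (err)
		kfree_skb(skb);	/* unparseable or unsupported protocol */
	return err;
}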
4271
/**
 * skb_checksum_maybe_trim - maybe trims the given skb
 * @skb: the skb to check
 * @transport_len: the data length beyond the network header
 *
 * Checks whether the given skb has data beyond the given transport length.
 * If so, returns a cloned skb, trimmed to this transport length.
 * Otherwise returns the provided skb. Returns NULL in error cases
 * (e.g. transport_len exceeds skb length or out-of-memory).
 *
 * Caller needs to set the skb transport header and free any returned skb if it
 * differs from the provided skb.
 */
4285static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
4286 unsigned int transport_len)
4287{
4288 struct sk_buff *skb_chk;
4289 unsigned int len = skb_transport_offset(skb) + transport_len;
4290 int ret;
4291
4292 if (skb->len < len)
4293 return NULL;
4294 else if (skb->len == len)
4295 return skb;
4296
4297 skb_chk = skb_clone(skb, GFP_ATOMIC);
4298 if (!skb_chk)
4299 return NULL;
4300
4301 ret = pskb_trim_rcsum(skb_chk, len);
4302 if (ret) {
4303 kfree_skb(skb_chk);
4304 return NULL;
4305 }
4306
4307 return skb_chk;
4308}
4309
/**
 * skb_checksum_trimmed - validate checksum of an skb
 * @skb: the skb to check
 * @transport_len: the data length beyond the network header
 * @skb_chkf: checksum function to use
 *
 * Applies the given checksum function skb_chkf to the provided skb.
 * Returns a checked and maybe trimmed skb. Returns NULL on error.
 *
 * If the skb has data beyond the given transport length, then a
 * trimmed & cloned skb is checked and returned.
 *
 * Caller needs to set the skb transport header and free any returned skb if it
 * differs from the provided skb.
 */
4325struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
4326 unsigned int transport_len,
4327 __sum16(*skb_chkf)(struct sk_buff *skb))
4328{
4329 struct sk_buff *skb_chk;
4330 unsigned int offset = skb_transport_offset(skb);
4331 __sum16 ret;
4332
4333 skb_chk = skb_checksum_maybe_trim(skb, transport_len);
4334 if (!skb_chk)
4335 goto err;
4336
4337 if (!pskb_may_pull(skb_chk, offset))
4338 goto err;
4339
4340 skb_pull_rcsum(skb_chk, offset);
4341 ret = skb_chkf(skb_chk);
4342 skb_push_rcsum(skb_chk, offset);
4343
4344 if (ret)
4345 goto err;
4346
4347 return skb_chk;
4348
4349err:
4350 if (skb_chk && skb_chk != skb)
4351 kfree_skb(skb_chk);
4352
4353 return NULL;
4355}
4356EXPORT_SYMBOL(skb_checksum_trimmed);
4357
4358void __skb_warn_lro_forwarding(const struct sk_buff *skb)
4359{
4360 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
4361 skb->dev->name);
4362}
4363EXPORT_SYMBOL(__skb_warn_lro_forwarding);
4364
4365void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
4366{
4367 if (head_stolen) {
4368 skb_release_head_state(skb);
4369 kmem_cache_free(skbuff_head_cache, skb);
4370 } else {
4371 __kfree_skb(skb);
4372 }
4373}
4374EXPORT_SYMBOL(kfree_skb_partial);
4375
/**
 * skb_try_coalesce - try to merge skb to prior one
 * @to: prior buffer
 * @from: buffer to add
 * @fragstolen: pointer to boolean
 * @delta_truesize: how much more was allocated than was requested
 */
4383bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
4384 bool *fragstolen, int *delta_truesize)
4385{
4386 int i, delta, len = from->len;
4387
4388 *fragstolen = false;
4389
4390 if (skb_cloned(to))
4391 return false;
4392
4393 if (len <= skb_tailroom(to)) {
4394 if (len)
4395 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
4396 *delta_truesize = 0;
4397 return true;
4398 }
4399
4400 if (skb_has_frag_list(to) || skb_has_frag_list(from))
4401 return false;
4402
4403 if (skb_headlen(from) != 0) {
4404 struct page *page;
4405 unsigned int offset;
4406
4407 if (skb_shinfo(to)->nr_frags +
4408 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
4409 return false;
4410
4411 if (skb_head_is_locked(from))
4412 return false;
4413
4414 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
4415
4416 page = virt_to_head_page(from->head);
4417 offset = from->data - (unsigned char *)page_address(page);
4418
4419 skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
4420 page, offset, skb_headlen(from));
4421 *fragstolen = true;
4422 } else {
4423 if (skb_shinfo(to)->nr_frags +
4424 skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
4425 return false;
4426
4427 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
4428 }
4429
4430 WARN_ON_ONCE(delta < len);
4431
4432 memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
4433 skb_shinfo(from)->frags,
4434 skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
4435 skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;
4436
4437 if (!skb_cloned(from))
4438 skb_shinfo(from)->nr_frags = 0;
4439
	/* if the skb is not cloned this does nothing
	 * since we set nr_frags to 0.
	 */
4443 for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
4444 skb_frag_ref(from, i);
4445
4446 to->truesize += delta;
4447 to->len += len;
4448 to->data_len += len;
4449
4450 *delta_truesize = delta;
4451 return true;
4452}
4453EXPORT_SYMBOL(skb_try_coalesce);
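
/* Illustrative sketch, not part of the original file: the receive-queue
 * collapse pattern (as TCP uses it) - try to merge a new skb into the queue
 * tail, then release the merged skb with kfree_skb_partial(). The function
 * name is hypothetical.
 */
static inline bool skb_try_coalesce_example(struct sk_buff *tail,
					    struct sk_buff *skb)
{
	bool fragstolen;
	int delta;

	if (!skb_try_coalesce(tail, skb, &fragstolen, &delta))
		return false;	/* caller should queue 'skb' normally */

	kfree_skb_partial(skb, fragstolen);
	/* a real caller would also charge 'delta' to the socket's rmem */
	return true;
}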
4454
/**
 * skb_scrub_packet - scrub an skb
 *
 * @skb: buffer to clean
 * @xnet: packet is crossing netns
 *
 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
 * into/from a tunnel. Some information has to be cleared during these
 * operations.
 * skb_scrub_packet can also be used to clean a skb before injecting it in
 * another namespace (@xnet == true). We have to clear all information in the
 * skb that could impact namespace isolation.
 */
4468void skb_scrub_packet(struct sk_buff *skb, bool xnet)
4469{
4470 skb->tstamp = 0;
4471 skb->pkt_type = PACKET_HOST;
4472 skb->skb_iif = 0;
4473 skb->ignore_df = 0;
4474 skb_dst_drop(skb);
4475 secpath_reset(skb);
4476 nf_reset(skb);
4477 nf_reset_trace(skb);
4478
4479 if (!xnet)
4480 return;
4481
4482 skb_orphan(skb);
4483 skb->mark = 0;
4484}
4485EXPORT_SYMBOL_GPL(skb_scrub_packet);
4486
/**
 * skb_gso_transport_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_transport_seglen is used to determine the real size of the
 * individual segments, including Layer4 headers (TCP/UDP).
 *
 * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
 */
4497unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
4498{
4499 const struct skb_shared_info *shinfo = skb_shinfo(skb);
4500 unsigned int thlen = 0;
4501
4502 if (skb->encapsulation) {
4503 thlen = skb_inner_transport_header(skb) -
4504 skb_transport_header(skb);
4505
4506 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
4507 thlen += inner_tcp_hdrlen(skb);
4508 } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
4509 thlen = tcp_hdrlen(skb);
4510 } else if (unlikely(shinfo->gso_type & SKB_GSO_SCTP)) {
4511 thlen = sizeof(struct sctphdr);
4512 }
4513
	/* UFO sets gso_size to the size of the fragmentation payload,
	 * i.e. the size of the L4 (UDP) header is already accounted for.
	 */
4517 return thlen + shinfo->gso_size;
4518}
4519EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
4520
/**
 * skb_gso_validate_mtu - Return in case such skb fits a given MTU
 *
 * @skb: GSO skb
 * @mtu: MTU to validate against
 *
 * skb_gso_validate_mtu validates if a given skb will fit a wanted MTU
 * once split.
 */
4530bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu)
4531{
4532 const struct skb_shared_info *shinfo = skb_shinfo(skb);
4533 const struct sk_buff *iter;
4534 unsigned int hlen;
4535
4536 hlen = skb_gso_network_seglen(skb);
4537
4538 if (shinfo->gso_size != GSO_BY_FRAGS)
4539 return hlen <= mtu;
4540
	/* Undo this so we can re-use header sizes */
4542 hlen -= GSO_BY_FRAGS;
4543
4544 skb_walk_frags(skb, iter) {
4545 if (hlen + skb_headlen(iter) > mtu)
4546 return false;
4547 }
4548
4549 return true;
4550}
4551EXPORT_SYMBOL_GPL(skb_gso_validate_mtu);
4552
4553static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
4554{
4555 if (skb_cow(skb, skb_headroom(skb)) < 0) {
4556 kfree_skb(skb);
4557 return NULL;
4558 }
4559
4560 memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN,
4561 2 * ETH_ALEN);
4562 skb->mac_header += VLAN_HLEN;
4563 return skb;
4564}
4565
4566struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
4567{
4568 struct vlan_hdr *vhdr;
4569 u16 vlan_tci;
4570
4571 if (unlikely(skb_vlan_tag_present(skb))) {
		/* vlan_tci is already set-up so leave this for another time */
4573 return skb;
4574 }
4575
4576 skb = skb_share_check(skb, GFP_ATOMIC);
4577 if (unlikely(!skb))
4578 goto err_free;
4579
4580 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
4581 goto err_free;
4582
4583 vhdr = (struct vlan_hdr *)skb->data;
4584 vlan_tci = ntohs(vhdr->h_vlan_TCI);
4585 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
4586
4587 skb_pull_rcsum(skb, VLAN_HLEN);
4588 vlan_set_encap_proto(skb, vhdr);
4589
4590 skb = skb_reorder_vlan_header(skb);
4591 if (unlikely(!skb))
4592 goto err_free;
4593
4594 skb_reset_network_header(skb);
4595 skb_reset_transport_header(skb);
4596 skb_reset_mac_len(skb);
4597
4598 return skb;
4599
4600err_free:
4601 kfree_skb(skb);
4602 return NULL;
4603}
4604EXPORT_SYMBOL(skb_vlan_untag);
4605
4606int skb_ensure_writable(struct sk_buff *skb, int write_len)
4607{
4608 if (!pskb_may_pull(skb, write_len))
4609 return -ENOMEM;
4610
4611 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
4612 return 0;
4613
4614 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
4615}
4616EXPORT_SYMBOL(skb_ensure_writable);
4617
/* remove VLAN header from packet and update csum accordingly.
 * expects a non skb_vlan_tag_present skb with a vlan tag payload
 */
4621int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
4622{
4623 struct vlan_hdr *vhdr;
4624 int offset = skb->data - skb_mac_header(skb);
4625 int err;
4626
4627 if (WARN_ONCE(offset,
4628 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n",
4629 offset)) {
4630 return -EINVAL;
4631 }
4632
4633 err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
4634 if (unlikely(err))
4635 return err;
4636
4637 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
4638
4639 vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
4640 *vlan_tci = ntohs(vhdr->h_vlan_TCI);
4641
4642 memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
4643 __skb_pull(skb, VLAN_HLEN);
4644
4645 vlan_set_encap_proto(skb, vhdr);
4646 skb->mac_header += VLAN_HLEN;
4647
4648 if (skb_network_offset(skb) < ETH_HLEN)
4649 skb_set_network_header(skb, ETH_HLEN);
4650
4651 skb_reset_mac_len(skb);
4652
4653 return err;
4654}
4655EXPORT_SYMBOL(__skb_vlan_pop);
4656
/* Pop a vlan tag either from hwaccel or from payload.
 * Expects skb->data at mac header.
 */
4660int skb_vlan_pop(struct sk_buff *skb)
4661{
4662 u16 vlan_tci;
4663 __be16 vlan_proto;
4664 int err;
4665
4666 if (likely(skb_vlan_tag_present(skb))) {
4667 skb->vlan_tci = 0;
4668 } else {
4669 if (unlikely(!eth_type_vlan(skb->protocol)))
4670 return 0;
4671
4672 err = __skb_vlan_pop(skb, &vlan_tci);
4673 if (err)
4674 return err;
4675 }
4676
4677 if (likely(!eth_type_vlan(skb->protocol)))
4678 return 0;
4679
4680 vlan_proto = skb->protocol;
4681 err = __skb_vlan_pop(skb, &vlan_tci);
4682 if (unlikely(err))
4683 return err;
4684
4685 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
4686 return 0;
4687}
4688EXPORT_SYMBOL(skb_vlan_pop);
4689
/* Push a vlan tag either into hwaccel or into payload (if hwaccel tag
 * present). Expects skb->data at mac header.
 */
4693int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
4694{
4695 if (skb_vlan_tag_present(skb)) {
4696 int offset = skb->data - skb_mac_header(skb);
4697 int err;
4698
4699 if (WARN_ONCE(offset,
4700 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n",
4701 offset)) {
4702 return -EINVAL;
4703 }
4704
4705 err = __vlan_insert_tag(skb, skb->vlan_proto,
4706 skb_vlan_tag_get(skb));
4707 if (err)
4708 return err;
4709
4710 skb->protocol = skb->vlan_proto;
4711 skb->mac_len += VLAN_HLEN;
4712
4713 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
4714 }
4715 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
4716 return 0;
4717}
4718EXPORT_SYMBOL(skb_vlan_push);
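
/* Illustrative sketch, not part of the original file: retagging a frame by
 * popping its current VLAN tag and pushing a new 802.1Q tag, mirroring how
 * flow-action implementations combine the two helpers above. The function
 * name and 'new_tci' are hypothetical.
 */
static inline int skb_vlan_retag_example(struct sk_buff *skb, u16 new_tci)
{
	int err = skb_vlan_pop(skb);

	if (err)
		return err;
	return skb_vlan_push(skb, htons(ETH_P_8021Q), new_tci);
}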
4719
/**
 * alloc_skb_with_frags - allocate skb with page frags
 *
 * @header_len: size of linear part
 * @data_len: needed length in frags
 * @max_page_order: max page order desired.
 * @errcode: pointer to error code if any
 * @gfp_mask: allocation mask
 *
 * This can be used to allocate a paged skb, given a maximal order for frags.
 */
4731struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
4732 unsigned long data_len,
4733 int max_page_order,
4734 int *errcode,
4735 gfp_t gfp_mask)
4736{
4737 int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
4738 unsigned long chunk;
4739 struct sk_buff *skb;
4740 struct page *page;
4741 gfp_t gfp_head;
4742 int i;
4743
4744 *errcode = -EMSGSIZE;
4745
	/* Note this test could be relaxed, if we succeed to allocate
	 * high order pages...
	 */
4748 if (npages > MAX_SKB_FRAGS)
4749 return NULL;
4750
4751 gfp_head = gfp_mask;
4752 if (gfp_head & __GFP_DIRECT_RECLAIM)
4753 gfp_head |= __GFP_RETRY_MAYFAIL;
4754
4755 *errcode = -ENOBUFS;
4756 skb = alloc_skb(header_len, gfp_head);
4757 if (!skb)
4758 return NULL;
4759
4760 skb->truesize += npages << PAGE_SHIFT;
4761
4762 for (i = 0; npages > 0; i++) {
4763 int order = max_page_order;
4764
4765 while (order) {
4766 if (npages >= 1 << order) {
4767 page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
4768 __GFP_COMP |
4769 __GFP_NOWARN |
4770 __GFP_NORETRY,
4771 order);
4772 if (page)
4773 goto fill_page;
4774
4775 order = 1;
4776 max_page_order = 0;
4777 }
4778 order--;
4779 }
4780 page = alloc_page(gfp_mask);
4781 if (!page)
4782 goto failure;
4783fill_page:
4784 chunk = min_t(unsigned long, data_len,
4785 PAGE_SIZE << order);
4786 skb_fill_page_desc(skb, i, page, 0, chunk);
4787 data_len -= chunk;
4788 npages -= 1 << order;
4789 }
4790 return skb;
4791
4792failure:
4793 kfree_skb(skb);
4794 return NULL;
4795}
4796EXPORT_SYMBOL(alloc_skb_with_frags);
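
/* Illustrative sketch, not part of the original file: allocating a mostly
 * paged skb for a large datagram, much like sock_alloc_send_pskb() does
 * internally. The 128-byte linear part and the function name are
 * hypothetical.
 */
static inline struct sk_buff *alloc_paged_skb_example(size_t payload)
{
	struct sk_buff *skb;
	int errcode;

	skb = alloc_skb_with_frags(128, payload, PAGE_ALLOC_COSTLY_ORDER,
				   &errcode, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(errcode);

	/* headers go in the linear part; 'payload' bytes live in page frags */
	return skb;
}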
4797
/* carve out the first off bytes from skb, when off < headlen */
4799static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
4800 const int headlen, gfp_t gfp_mask)
4801{
4802 int i;
4803 int size = skb_end_offset(skb);
4804 int new_hlen = headlen - off;
4805 u8 *data;
4806
4807 size = SKB_DATA_ALIGN(size);
4808
4809 if (skb_pfmemalloc(skb))
4810 gfp_mask |= __GFP_MEMALLOC;
4811 data = kmalloc_reserve(size +
4812 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
4813 gfp_mask, NUMA_NO_NODE, NULL);
4814 if (!data)
4815 return -ENOMEM;
4816
4817 size = SKB_WITH_OVERHEAD(ksize(data));
4818
	/* Copy real data, and all frags */
4820 skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
4821 skb->len -= off;
4822
4823 memcpy((struct skb_shared_info *)(data + size),
4824 skb_shinfo(skb),
4825 offsetof(struct skb_shared_info,
4826 frags[skb_shinfo(skb)->nr_frags]));
4827 if (skb_cloned(skb)) {
		/* drop the old head gracefully */
4829 if (skb_orphan_frags(skb, gfp_mask)) {
4830 kfree(data);
4831 return -ENOMEM;
4832 }
4833 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
4834 skb_frag_ref(skb, i);
4835 if (skb_has_frag_list(skb))
4836 skb_clone_fraglist(skb);
4837 skb_release_data(skb);
4838 } else {
		/* we can reuse the existing refcount; all we did was
		 * relocate values
		 */
4842 skb_free_head(skb);
4843 }
4844
4845 skb->head = data;
4846 skb->data = data;
4847 skb->head_frag = 0;
4848#ifdef NET_SKBUFF_DATA_USES_OFFSET
4849 skb->end = size;
4850#else
4851 skb->end = skb->head + size;
4852#endif
4853 skb_set_tail_pointer(skb, skb_headlen(skb));
4854 skb_headers_offset_update(skb, 0);
4855 skb->cloned = 0;
4856 skb->hdr_len = 0;
4857 skb->nohdr = 0;
4858 atomic_set(&skb_shinfo(skb)->dataref, 1);
4859
4860 return 0;
4861}
4862
4863static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
4864
/* carve out the first eat bytes from skb's frag_list. May recurse into
 * pskb_carve()
 */
4868static int pskb_carve_frag_list(struct sk_buff *skb,
4869 struct skb_shared_info *shinfo, int eat,
4870 gfp_t gfp_mask)
4871{
4872 struct sk_buff *list = shinfo->frag_list;
4873 struct sk_buff *clone = NULL;
4874 struct sk_buff *insp = NULL;
4875
4876 do {
4877 if (!list) {
4878 pr_err("Not enough bytes to eat. Want %d\n", eat);
4879 return -EFAULT;
4880 }
4881 if (list->len <= eat) {
			/* Eaten as whole. */
4883 eat -= list->len;
4884 list = list->next;
4885 insp = list;
4886 } else {
			/* Eaten partially. */
4888 if (skb_shared(list)) {
4889 clone = skb_clone(list, gfp_mask);
4890 if (!clone)
4891 return -ENOMEM;
4892 insp = list->next;
4893 list = clone;
4894 } else {
				/* This may be pulled without problems. */
4896 insp = list;
4897 }
4898 if (pskb_carve(list, eat, gfp_mask) < 0) {
4899 kfree_skb(clone);
4900 return -ENOMEM;
4901 }
4902 break;
4903 }
4904 } while (eat);
4905
	/* Free pulled out fragments. */
4907 while ((list = shinfo->frag_list) != insp) {
4908 shinfo->frag_list = list->next;
4909 kfree_skb(list);
4910 }
4911
4912 if (clone) {
4913 clone->next = list;
4914 shinfo->frag_list = clone;
4915 }
4916 return 0;
4917}
4918
/* carve off first len bytes from skb. Split line (off) is in the
 * non-linear part of skb
 */
4922static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
4923 int pos, gfp_t gfp_mask)
4924{
4925 int i, k = 0;
4926 int size = skb_end_offset(skb);
4927 u8 *data;
4928 const int nfrags = skb_shinfo(skb)->nr_frags;
4929 struct skb_shared_info *shinfo;
4930
4931 size = SKB_DATA_ALIGN(size);
4932
4933 if (skb_pfmemalloc(skb))
4934 gfp_mask |= __GFP_MEMALLOC;
4935 data = kmalloc_reserve(size +
4936 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
4937 gfp_mask, NUMA_NO_NODE, NULL);
4938 if (!data)
4939 return -ENOMEM;
4940
4941 size = SKB_WITH_OVERHEAD(ksize(data));
4942
4943 memcpy((struct skb_shared_info *)(data + size),
4944 skb_shinfo(skb), offsetof(struct skb_shared_info,
4945 frags[skb_shinfo(skb)->nr_frags]));
4946 if (skb_orphan_frags(skb, gfp_mask)) {
4947 kfree(data);
4948 return -ENOMEM;
4949 }
4950 shinfo = (struct skb_shared_info *)(data + size);
4951 for (i = 0; i < nfrags; i++) {
4952 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);
4953
4954 if (pos + fsize > off) {
4955 shinfo->frags[k] = skb_shinfo(skb)->frags[i];
4956
4957 if (pos < off) {
				/* Split frag: @off lands inside this first
				 * kept frag, so keep only its tail by
				 * advancing the page offset and shrinking
				 * the fragment size accordingly.
				 */
4966 shinfo->frags[0].page_offset += off - pos;
4967 skb_frag_size_sub(&shinfo->frags[0], off - pos);
4968 }
4969 skb_frag_ref(skb, i);
4970 k++;
4971 }
4972 pos += fsize;
4973 }
4974 shinfo->nr_frags = k;
4975 if (skb_has_frag_list(skb))
4976 skb_clone_fraglist(skb);
4977
4978 if (k == 0) {
		/* split line is in frag list */
4980 pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask);
4981 }
4982 skb_release_data(skb);
4983
4984 skb->head = data;
4985 skb->head_frag = 0;
4986 skb->data = data;
4987#ifdef NET_SKBUFF_DATA_USES_OFFSET
4988 skb->end = size;
4989#else
4990 skb->end = skb->head + size;
4991#endif
4992 skb_reset_tail_pointer(skb);
4993 skb_headers_offset_update(skb, 0);
4994 skb->cloned = 0;
4995 skb->hdr_len = 0;
4996 skb->nohdr = 0;
4997 skb->len -= off;
4998 skb->data_len = skb->len;
4999 atomic_set(&skb_shinfo(skb)->dataref, 1);
5000 return 0;
5001}
5002
/* remove len bytes from the beginning of the skb */
5004static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
5005{
5006 int headlen = skb_headlen(skb);
5007
5008 if (len < headlen)
5009 return pskb_carve_inside_header(skb, len, headlen, gfp);
5010 else
5011 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
5012}
5013
/* Extract to_copy bytes starting at off from skb, and return this in a new
 * skb
 */
5017struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
5018 int to_copy, gfp_t gfp)
5019{
5020 struct sk_buff *clone = skb_clone(skb, gfp);
5021
5022 if (!clone)
5023 return NULL;
5024
5025 if (pskb_carve(clone, off, gfp) < 0 ||
5026 pskb_trim(clone, to_copy)) {
5027 kfree_skb(clone);
5028 return NULL;
5029 }
5030 return clone;
5031}
5032EXPORT_SYMBOL(pskb_extract);
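
/* Illustrative sketch, not part of the original file: carving the payload
 * out of a received packet without copying it, e.g. after parsing 'hdr_len'
 * bytes of headers. The function name is hypothetical.
 */
static inline struct sk_buff *pskb_extract_example(struct sk_buff *skb,
						   int hdr_len)
{
	/* the clone shares the page frags; only the headers are skipped */
	return pskb_extract(skb, hdr_len, skb->len - hdr_len, GFP_ATOMIC);
}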
5033
/**
 * skb_condense - try to get rid of fragments/frag_list if possible
 * @skb: buffer
 *
 * Can be used to save memory before skb is added to a busy queue.
 * If packet has bytes in frags and enough tail room in skb->head,
 * pull all of them, so that we can free the frags right now and adjust
 * truesize.
 * Notes:
 *	We do not reallocate skb->head thus can not fail.
 *	Caller must re-evaluate skb->truesize if needed.
 */
5046void skb_condense(struct sk_buff *skb)
5047{
5048 if (skb->data_len) {
5049 if (skb->data_len > skb->end - skb->tail ||
5050 skb_cloned(skb))
5051 return;
5052
		/* Nice, we can free page frag(s) right now */
5054 __pskb_pull_tail(skb, skb->data_len);
5055 }
5056
	/* At this point, skb->truesize might be over estimated,
	 * because skb had a fragment, and fragments do not tell
	 * their truesize.
	 * When we pulled its content into skb->head, fragment
	 * was freed, but __pskb_pull_tail() could not possibly
	 * adjust skb truesize.
	 */
5063 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
5064}
5065