// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>
#include <linux/mpls.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/mpls.h>
#include <net/mptcp.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>
#include <linux/indirect_call_wrapper.h>

#include "datagram.h"

struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);

/**
 *	skb_panic - private function for out-of-line support
 *	@skb:	buffer
 *	@sz:	size
 *	@addr:	address
 *	@msg:	skb_over_panic or skb_under_panic
 *
 *	Out-of-line support for skb_put() and skb_push().
 *	Called via the wrappers skb_over_panic() and skb_under_panic().
 *	Kept out of line to prevent kernel bloat.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * are in use.
 */
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
	 __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)

static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
			       unsigned long ip, bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/*
	 * Try a regular allocation; when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}

/*
 *	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	leaks.
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least @size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;
	bool pfmemalloc;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of the allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	skb->pfmemalloc = pfmemalloc;
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		skb->fclone = SKB_FCLONE_ORIG;
		refcount_set(&fclones->fclone_ref, 1);

		fclones->skb2.fclone = SKB_FCLONE_CLONE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
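
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * a typical caller pairs the allocator above with skb_reserve()/skb_put(),
 * e.g. when hand-building a packet:
 *
 *	skb = alloc_skb(hlen + dlen, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);			/-* leave headroom for headers *-/
 *	skb_put_data(skb, payload, dlen);	/-* append the payload *-/
 *
 * where hlen, dlen and payload are caller-chosen values.
 */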

/* Caller must provide SKB that is memset cleared */
262static struct sk_buff *__build_skb_around(struct sk_buff *skb,
263 void *data, unsigned int frag_size)
264{
265 struct skb_shared_info *shinfo;
266 unsigned int size = frag_size ? : ksize(data);
267
268 size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
269
270
271 skb->truesize = SKB_TRUESIZE(size);
272 refcount_set(&skb->users, 1);
273 skb->head = data;
274 skb->data = data;
275 skb_reset_tail_pointer(skb);
276 skb->end = skb->tail + size;
277 skb->mac_header = (typeof(skb->mac_header))~0U;
278 skb->transport_header = (typeof(skb->transport_header))~0U;
279
280
281 shinfo = skb_shinfo(skb);
282 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
283 atomic_set(&shinfo->dataref, 1);
284
285 return skb;
286}

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator
 * or vmalloc().
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, driver allocates only the data buffer where the NIC puts
 *  the incoming frame. The driver should add room at head (NET_SKB_PAD)
 *  and MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)).
 *  After IO, the driver calls build_skb() to allocate the sk_buff and
 *  populate it before giving the packet to the stack, so RX rings only
 *  contain data buffers, not full skbs.
 */
307struct sk_buff *__build_skb(void *data, unsigned int frag_size)
308{
309 struct sk_buff *skb;
310
311 skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
312 if (unlikely(!skb))
313 return NULL;
314
315 memset(skb, 0, offsetof(struct sk_buff, tail));
316
317 return __build_skb_around(skb, data, frag_size);
318}

/* build_skb() is wrapper over __build_skb(), that specifically
 * takes care of skb->head_frag and skb->pfmemalloc.
 * This means that if @frag_size is not zero, then @data must be backed
 * by a page fragment, not kmalloc() or vmalloc().
 */
325struct sk_buff *build_skb(void *data, unsigned int frag_size)
326{
327 struct sk_buff *skb = __build_skb(data, frag_size);
328
329 if (skb && frag_size) {
330 skb->head_frag = 1;
331 if (page_is_pfmemalloc(virt_to_head_page(data)))
332 skb->pfmemalloc = 1;
333 }
334 return skb;
335}
336EXPORT_SYMBOL(build_skb);
337
338
339
340
341
342
343
344struct sk_buff *build_skb_around(struct sk_buff *skb,
345 void *data, unsigned int frag_size)
346{
347 if (unlikely(!skb))
348 return NULL;
349
350 skb = __build_skb_around(skb, data, frag_size);
351
352 if (skb && frag_size) {
353 skb->head_frag = 1;
354 if (page_is_pfmemalloc(virt_to_head_page(data)))
355 skb->pfmemalloc = 1;
356 }
357 return skb;
358}
359EXPORT_SYMBOL(build_skb_around);
360
361#define NAPI_SKB_CACHE_SIZE 64
362
363struct napi_alloc_cache {
364 struct page_frag_cache page;
365 unsigned int skb_count;
366 void *skb_cache[NAPI_SKB_CACHE_SIZE];
367};
368
369static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
370static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
371
372static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
373{
374 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
375
376 return page_frag_alloc(&nc->page, fragsz, gfp_mask);
377}
378
379void *napi_alloc_frag(unsigned int fragsz)
380{
381 fragsz = SKB_DATA_ALIGN(fragsz);
382
383 return __napi_alloc_frag(fragsz, GFP_ATOMIC);
384}
385EXPORT_SYMBOL(napi_alloc_frag);
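
/*
 * Added commentary: napi_alloc_frag() must only be called from softirq
 * (NAPI) context, because it uses the per-CPU napi_alloc_cache without
 * disabling bottom halves. netdev_alloc_frag() below is the any-context
 * variant: it uses the netdev_alloc_cache when called from hardirq context
 * or with IRQs disabled, and otherwise disables BHs and reuses the NAPI
 * cache.
 */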
386
387
388
389
390
391
392
393
394void *netdev_alloc_frag(unsigned int fragsz)
395{
396 struct page_frag_cache *nc;
397 void *data;
398
399 fragsz = SKB_DATA_ALIGN(fragsz);
400 if (in_irq() || irqs_disabled()) {
401 nc = this_cpu_ptr(&netdev_alloc_cache);
402 data = page_frag_alloc(nc, fragsz, GFP_ATOMIC);
403 } else {
404 local_bh_disable();
405 data = __napi_alloc_frag(fragsz, GFP_ATOMIC);
406 local_bh_enable();
407 }
408 return data;
409}
410EXPORT_SYMBOL(netdev_alloc_frag);

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has NET_SKB_PAD headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
425struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
426 gfp_t gfp_mask)
427{
428 struct page_frag_cache *nc;
429 struct sk_buff *skb;
430 bool pfmemalloc;
431 void *data;
432
433 len += NET_SKB_PAD;
434
435 if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
436 (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
437 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
438 if (!skb)
439 goto skb_fail;
440 goto skb_success;
441 }
442
443 len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
444 len = SKB_DATA_ALIGN(len);
445
446 if (sk_memalloc_socks())
447 gfp_mask |= __GFP_MEMALLOC;
448
449 if (in_irq() || irqs_disabled()) {
450 nc = this_cpu_ptr(&netdev_alloc_cache);
451 data = page_frag_alloc(nc, len, gfp_mask);
452 pfmemalloc = nc->pfmemalloc;
453 } else {
454 local_bh_disable();
455 nc = this_cpu_ptr(&napi_alloc_cache.page);
456 data = page_frag_alloc(nc, len, gfp_mask);
457 pfmemalloc = nc->pfmemalloc;
458 local_bh_enable();
459 }
460
461 if (unlikely(!data))
462 return NULL;
463
464 skb = __build_skb(data, len);
465 if (unlikely(!skb)) {
466 skb_free_frag(data);
467 return NULL;
468 }
469
470 if (pfmemalloc)
471 skb->pfmemalloc = 1;
472 skb->head_frag = 1;
473
474skb_success:
475 skb_reserve(skb, NET_SKB_PAD);
476 skb->dev = dev;
477
478skb_fail:
479 return skb;
480}
481EXPORT_SYMBOL(__netdev_alloc_skb);
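
/*
 * Added commentary on the allocator above: small requests are served from
 * a per-CPU page fragment cache and get a head_frag head, while requests
 * larger than one page (or with __GFP_DIRECT_RECLAIM/GFP_DMA set) fall
 * back to __alloc_skb() with a kmalloc()ed head.
 */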

/**
 *	__napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 *	@napi: napi instance this buffer was allocated for
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 *
 *	Allocate a new sk_buff for use in NAPI receive. This buffer will
 *	attempt to allocate the head from a special reserved region used
 *	only for NAPI Rx allocation. By doing this we can save several
 *	CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 *	%NULL is returned if there is no free memory.
 */
496struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
497 gfp_t gfp_mask)
498{
499 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
500 struct sk_buff *skb;
501 void *data;
502
503 len += NET_SKB_PAD + NET_IP_ALIGN;
504
505 if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
506 (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
507 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
508 if (!skb)
509 goto skb_fail;
510 goto skb_success;
511 }
512
513 len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
514 len = SKB_DATA_ALIGN(len);
515
516 if (sk_memalloc_socks())
517 gfp_mask |= __GFP_MEMALLOC;
518
519 data = page_frag_alloc(&nc->page, len, gfp_mask);
520 if (unlikely(!data))
521 return NULL;
522
523 skb = __build_skb(data, len);
524 if (unlikely(!skb)) {
525 skb_free_frag(data);
526 return NULL;
527 }
528
529 if (nc->page.pfmemalloc)
530 skb->pfmemalloc = 1;
531 skb->head_frag = 1;
532
533skb_success:
534 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
535 skb->dev = napi->dev;
536
537skb_fail:
538 return skb;
539}
540EXPORT_SYMBOL(__napi_alloc_skb);
541
542void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
543 int size, unsigned int truesize)
544{
545 skb_fill_page_desc(skb, i, page, off, size);
546 skb->len += size;
547 skb->data_len += size;
548 skb->truesize += truesize;
549}
550EXPORT_SYMBOL(skb_add_rx_frag);
551
552void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
553 unsigned int truesize)
554{
555 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
556
557 skb_frag_size_add(frag, size);
558 skb->len += size;
559 skb->data_len += size;
560 skb->truesize += truesize;
561}
562EXPORT_SYMBOL(skb_coalesce_rx_frag);
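
/*
 * Added commentary: both helpers above only update the len/data_len/truesize
 * bookkeeping; the caller owns the page reference handed over through
 * skb_fill_page_desc() (or the existing frag) and must size @truesize to
 * match the memory actually consumed.
 */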
563
564static void skb_drop_list(struct sk_buff **listp)
565{
566 kfree_skb_list(*listp);
567 *listp = NULL;
568}
569
570static inline void skb_drop_fraglist(struct sk_buff *skb)
571{
572 skb_drop_list(&skb_shinfo(skb)->frag_list);
573}
574
575static void skb_clone_fraglist(struct sk_buff *skb)
576{
577 struct sk_buff *list;
578
579 skb_walk_frags(skb, list)
580 skb_get(list);
581}
582
583static void skb_free_head(struct sk_buff *skb)
584{
585 unsigned char *head = skb->head;
586
587 if (skb->head_frag)
588 skb_free_frag(head);
589 else
590 kfree(head);
591}
592
593static void skb_release_data(struct sk_buff *skb)
594{
595 struct skb_shared_info *shinfo = skb_shinfo(skb);
596 int i;
597
598 if (skb->cloned &&
599 atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
600 &shinfo->dataref))
601 return;
602
603 for (i = 0; i < shinfo->nr_frags; i++)
604 __skb_frag_unref(&shinfo->frags[i]);
605
606 if (shinfo->frag_list)
607 kfree_skb_list(shinfo->frag_list);
608
609 skb_zcopy_clear(skb, true);
610 skb_free_head(skb);
611}
612
613
614
615
616static void kfree_skbmem(struct sk_buff *skb)
617{
618 struct sk_buff_fclones *fclones;
619
620 switch (skb->fclone) {
621 case SKB_FCLONE_UNAVAILABLE:
622 kmem_cache_free(skbuff_head_cache, skb);
623 return;
624
625 case SKB_FCLONE_ORIG:
626 fclones = container_of(skb, struct sk_buff_fclones, skb1);
627
628
629
630
631
632 if (refcount_read(&fclones->fclone_ref) == 1)
633 goto fastpath;
634 break;
635
636 default:
637 fclones = container_of(skb, struct sk_buff_fclones, skb2);
638 break;
639 }
640 if (!refcount_dec_and_test(&fclones->fclone_ref))
641 return;
642fastpath:
643 kmem_cache_free(skbuff_fclone_cache, fclones);
644}
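
/* Release state that hangs off the skb head: the route cache entry (dst),
 * the destructor, the conntrack reference and any skb extensions. Called
 * before the data itself is released.
 */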
645
646void skb_release_head_state(struct sk_buff *skb)
647{
648 skb_dst_drop(skb);
649 if (skb->destructor) {
650 WARN_ON(in_irq());
651 skb->destructor(skb);
652 }
653#if IS_ENABLED(CONFIG_NF_CONNTRACK)
654 nf_conntrack_put(skb_nfct(skb));
655#endif
656 skb_ext_put(skb);
657}
658
659
660static void skb_release_all(struct sk_buff *skb)
661{
662 skb_release_head_state(skb);
663 if (likely(skb->head))
664 skb_release_data(skb);
665}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb().
 */
676void __kfree_skb(struct sk_buff *skb)
677{
678 skb_release_all(skb);
679 kfree_skbmem(skb);
680}
681EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
690void kfree_skb(struct sk_buff *skb)
691{
692 if (!skb_unref(skb))
693 return;
694
695 trace_kfree_skb(skb, __builtin_return_address(0));
696 __kfree_skb(skb);
697}
698EXPORT_SYMBOL(kfree_skb);
699
700void kfree_skb_list(struct sk_buff *segs)
701{
702 while (segs) {
703 struct sk_buff *next = segs->next;
704
705 kfree_skb(segs);
706 segs = next;
707 }
708}
709EXPORT_SYMBOL(kfree_skb_list);
710
711
712
713
714
715
716
717void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
718{
719 static atomic_t can_dump_full = ATOMIC_INIT(5);
720 struct skb_shared_info *sh = skb_shinfo(skb);
721 struct net_device *dev = skb->dev;
722 struct sock *sk = skb->sk;
723 struct sk_buff *list_skb;
724 bool has_mac, has_trans;
725 int headroom, tailroom;
726 int i, len, seg_len;
727
728 if (full_pkt)
729 full_pkt = atomic_dec_if_positive(&can_dump_full) >= 0;
730
731 if (full_pkt)
732 len = skb->len;
733 else
734 len = min_t(int, skb->len, MAX_HEADER + 128);
735
736 headroom = skb_headroom(skb);
737 tailroom = skb_tailroom(skb);
738
739 has_mac = skb_mac_header_was_set(skb);
740 has_trans = skb_transport_header_was_set(skb);
741
742 printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
743 "mac=(%d,%d) net=(%d,%d) trans=%d\n"
744 "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
745 "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
746 "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n",
747 level, skb->len, headroom, skb_headlen(skb), tailroom,
748 has_mac ? skb->mac_header : -1,
749 has_mac ? skb_mac_header_len(skb) : -1,
750 skb->network_header,
751 has_trans ? skb_network_header_len(skb) : -1,
752 has_trans ? skb->transport_header : -1,
753 sh->tx_flags, sh->nr_frags,
754 sh->gso_size, sh->gso_type, sh->gso_segs,
755 skb->csum, skb->ip_summed, skb->csum_complete_sw,
756 skb->csum_valid, skb->csum_level,
757 skb->hash, skb->sw_hash, skb->l4_hash,
758 ntohs(skb->protocol), skb->pkt_type, skb->skb_iif);
759
760 if (dev)
761 printk("%sdev name=%s feat=0x%pNF\n",
762 level, dev->name, &dev->features);
763 if (sk)
764 printk("%ssk family=%hu type=%u proto=%u\n",
765 level, sk->sk_family, sk->sk_type, sk->sk_protocol);
766
767 if (full_pkt && headroom)
768 print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET,
769 16, 1, skb->head, headroom, false);
770
771 seg_len = min_t(int, skb_headlen(skb), len);
772 if (seg_len)
773 print_hex_dump(level, "skb linear: ", DUMP_PREFIX_OFFSET,
774 16, 1, skb->data, seg_len, false);
775 len -= seg_len;
776
777 if (full_pkt && tailroom)
778 print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET,
779 16, 1, skb_tail_pointer(skb), tailroom, false);
780
781 for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
782 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
783 u32 p_off, p_len, copied;
784 struct page *p;
785 u8 *vaddr;
786
787 skb_frag_foreach_page(frag, skb_frag_off(frag),
788 skb_frag_size(frag), p, p_off, p_len,
789 copied) {
790 seg_len = min_t(int, p_len, len);
791 vaddr = kmap_atomic(p);
792 print_hex_dump(level, "skb frag: ",
793 DUMP_PREFIX_OFFSET,
794 16, 1, vaddr + p_off, seg_len, false);
795 kunmap_atomic(vaddr);
796 len -= seg_len;
797 if (!len)
798 break;
799 }
800 }
801
802 if (full_pkt && skb_has_frag_list(skb)) {
803 printk("skb fraglist:\n");
804 skb_walk_frags(skb, list_skb)
805 skb_dump(level, list_skb, true);
806 }
807}
808EXPORT_SYMBOL(skb_dump);
809
810
811
812
813
814
815
816
817void skb_tx_error(struct sk_buff *skb)
818{
819 skb_zcopy_clear(skb, true);
820}
821EXPORT_SYMBOL(skb_tx_error);
822
823#ifdef CONFIG_TRACEPOINTS
/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit
 *	zero. Functions identically to kfree_skb(), but is meant for
 *	buffers that were consumed successfully, so it fires the
 *	consume_skb tracepoint rather than kfree_skb (drop monitor).
 */
832void consume_skb(struct sk_buff *skb)
833{
834 if (!skb_unref(skb))
835 return;
836
837 trace_consume_skb(skb);
838 __kfree_skb(skb);
839}
840EXPORT_SYMBOL(consume_skb);
841#endif
842
843
844
845
846
847
848
849
850void __consume_stateless_skb(struct sk_buff *skb)
851{
852 trace_consume_skb(skb);
853 skb_release_data(skb);
854 kfree_skbmem(skb);
855}
856
857void __kfree_skb_flush(void)
858{
859 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
860
861
862 if (nc->skb_count) {
863 kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
864 nc->skb_cache);
865 nc->skb_count = 0;
866 }
867}
868
869static inline void _kfree_skb_defer(struct sk_buff *skb)
870{
871 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
872
873
874 skb_release_all(skb);
875
876
877 nc->skb_cache[nc->skb_count++] = skb;
878
879#ifdef CONFIG_SLUB
880
881 prefetchw(skb);
882#endif
883
884
885 if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
886 kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
887 nc->skb_cache);
888 nc->skb_count = 0;
889 }
890}
891void __kfree_skb_defer(struct sk_buff *skb)
892{
893 _kfree_skb_defer(skb);
894}
895
896void napi_consume_skb(struct sk_buff *skb, int budget)
897{
898 if (unlikely(!skb))
899 return;
900
901
902 if (unlikely(!budget)) {
903 dev_consume_skb_any(skb);
904 return;
905 }
906
907 if (!skb_unref(skb))
908 return;
909
910
911 trace_consume_skb(skb);
912
913
914 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
915 __kfree_skb(skb);
916 return;
917 }
918
919 _kfree_skb_defer(skb);
920}
921EXPORT_SYMBOL(napi_consume_skb);
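
/*
 * Added commentary: napi_consume_skb() defers freeing of plain (non-fclone)
 * skb heads into a per-CPU cache of NAPI_SKB_CACHE_SIZE entries so they can
 * be returned to the slab in a single kmem_cache_free_bulk() call, either
 * when the cache fills up or from __kfree_skb_flush() at the end of a
 * softirq run.
 */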
922
923
924#define CHECK_SKB_FIELD(field) \
925 BUILD_BUG_ON(offsetof(struct sk_buff, field) < \
926 offsetof(struct sk_buff, headers_start)); \
927 BUILD_BUG_ON(offsetof(struct sk_buff, field) > \
928 offsetof(struct sk_buff, headers_end)); \
929
930static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
931{
932 new->tstamp = old->tstamp;
933
934 new->dev = old->dev;
935 memcpy(new->cb, old->cb, sizeof(old->cb));
936 skb_dst_copy(new, old);
937 __skb_ext_copy(new, old);
938 __nf_copy(new, old, false);
939
940
941
942
943 new->queue_mapping = old->queue_mapping;
944
945 memcpy(&new->headers_start, &old->headers_start,
946 offsetof(struct sk_buff, headers_end) -
947 offsetof(struct sk_buff, headers_start));
948 CHECK_SKB_FIELD(protocol);
949 CHECK_SKB_FIELD(csum);
950 CHECK_SKB_FIELD(hash);
951 CHECK_SKB_FIELD(priority);
952 CHECK_SKB_FIELD(skb_iif);
953 CHECK_SKB_FIELD(vlan_proto);
954 CHECK_SKB_FIELD(vlan_tci);
955 CHECK_SKB_FIELD(transport_header);
956 CHECK_SKB_FIELD(network_header);
957 CHECK_SKB_FIELD(mac_header);
958 CHECK_SKB_FIELD(inner_protocol);
959 CHECK_SKB_FIELD(inner_transport_header);
960 CHECK_SKB_FIELD(inner_network_header);
961 CHECK_SKB_FIELD(inner_mac_header);
962 CHECK_SKB_FIELD(mark);
963#ifdef CONFIG_NETWORK_SECMARK
964 CHECK_SKB_FIELD(secmark);
965#endif
966#ifdef CONFIG_NET_RX_BUSY_POLL
967 CHECK_SKB_FIELD(napi_id);
968#endif
969#ifdef CONFIG_XPS
970 CHECK_SKB_FIELD(sender_cpu);
971#endif
972#ifdef CONFIG_NET_SCHED
973 CHECK_SKB_FIELD(tc_index);
974#endif
975
976}
977
978
979
980
981
982static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
983{
984#define C(x) n->x = skb->x
985
986 n->next = n->prev = NULL;
987 n->sk = NULL;
988 __copy_skb_header(n, skb);
989
990 C(len);
991 C(data_len);
992 C(mac_len);
993 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
994 n->cloned = 1;
995 n->nohdr = 0;
996 n->peeked = 0;
997 C(pfmemalloc);
998 n->destructor = NULL;
999 C(tail);
1000 C(end);
1001 C(head);
1002 C(head_frag);
1003 C(data);
1004 C(truesize);
1005 refcount_set(&n->users, 1);
1006
1007 atomic_inc(&(skb_shinfo(skb)->dataref));
1008 skb->cloned = 1;
1009
1010 return n;
1011#undef C
1012}
1013
1014
1015
1016
1017
1018struct sk_buff *alloc_skb_for_msg(struct sk_buff *first)
1019{
1020 struct sk_buff *n;
1021
1022 n = alloc_skb(0, GFP_ATOMIC);
1023 if (!n)
1024 return NULL;
1025
1026 n->len = first->len;
1027 n->data_len = first->len;
1028 n->truesize = first->truesize;
1029
1030 skb_shinfo(n)->frag_list = first;
1031
1032 __copy_skb_header(n, first);
1033 n->destructor = NULL;
1034
1035 return n;
1036}
1037EXPORT_SYMBOL_GPL(alloc_skb_for_msg);

/**
 *	skb_morph	-	morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
1049struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
1050{
1051 skb_release_all(dst);
1052 return __skb_clone(dst, src);
1053}
1054EXPORT_SYMBOL_GPL(skb_morph);
1055
1056int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
1057{
1058 unsigned long max_pg, num_pg, new_pg, old_pg;
1059 struct user_struct *user;
1060
1061 if (capable(CAP_IPC_LOCK) || !size)
1062 return 0;
1063
1064 num_pg = (size >> PAGE_SHIFT) + 2;
1065 max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1066 user = mmp->user ? : current_user();
1067
1068 do {
1069 old_pg = atomic_long_read(&user->locked_vm);
1070 new_pg = old_pg + num_pg;
1071 if (new_pg > max_pg)
1072 return -ENOBUFS;
1073 } while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) !=
1074 old_pg);
1075
1076 if (!mmp->user) {
1077 mmp->user = get_uid(user);
1078 mmp->num_pg = num_pg;
1079 } else {
1080 mmp->num_pg += num_pg;
1081 }
1082
1083 return 0;
1084}
1085EXPORT_SYMBOL_GPL(mm_account_pinned_pages);
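
/*
 * Added commentary: the accounting above charges the pinned bytes against
 * RLIMIT_MEMLOCK of the owning user. The page count is over-estimated by
 * two pages to cover buffers that straddle page boundaries, and the
 * lockless cmpxchg loop keeps user->locked_vm consistent without a lock.
 */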
1086
1087void mm_unaccount_pinned_pages(struct mmpin *mmp)
1088{
1089 if (mmp->user) {
1090 atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
1091 free_uid(mmp->user);
1092 }
1093}
1094EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);
1095
1096struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size)
1097{
1098 struct ubuf_info *uarg;
1099 struct sk_buff *skb;
1100
1101 WARN_ON_ONCE(!in_task());
1102
1103 skb = sock_omalloc(sk, 0, GFP_KERNEL);
1104 if (!skb)
1105 return NULL;
1106
1107 BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
1108 uarg = (void *)skb->cb;
1109 uarg->mmp.user = NULL;
1110
1111 if (mm_account_pinned_pages(&uarg->mmp, size)) {
1112 kfree_skb(skb);
1113 return NULL;
1114 }
1115
1116 uarg->callback = sock_zerocopy_callback;
1117 uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
1118 uarg->len = 1;
1119 uarg->bytelen = size;
1120 uarg->zerocopy = 1;
1121 refcount_set(&uarg->refcnt, 1);
1122 sock_hold(sk);
1123
1124 return uarg;
1125}
1126EXPORT_SYMBOL_GPL(sock_zerocopy_alloc);
1127
1128static inline struct sk_buff *skb_from_uarg(struct ubuf_info *uarg)
1129{
1130 return container_of((void *)uarg, struct sk_buff, cb);
1131}
1132
1133struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
1134 struct ubuf_info *uarg)
1135{
1136 if (uarg) {
1137 const u32 byte_limit = 1 << 19;
1138 u32 bytelen, next;
1139
1140
1141
1142
1143 if (!sock_owned_by_user(sk)) {
1144 WARN_ON_ONCE(1);
1145 return NULL;
1146 }
1147
1148 bytelen = uarg->bytelen + size;
1149 if (uarg->len == USHRT_MAX - 1 || bytelen > byte_limit) {
1150
1151 if (sk->sk_type == SOCK_STREAM)
1152 goto new_alloc;
1153 return NULL;
1154 }
1155
1156 next = (u32)atomic_read(&sk->sk_zckey);
1157 if ((u32)(uarg->id + uarg->len) == next) {
1158 if (mm_account_pinned_pages(&uarg->mmp, size))
1159 return NULL;
1160 uarg->len++;
1161 uarg->bytelen = bytelen;
1162 atomic_set(&sk->sk_zckey, ++next);
1163
1164
1165 if (sk->sk_type == SOCK_STREAM)
1166 sock_zerocopy_get(uarg);
1167
1168 return uarg;
1169 }
1170 }
1171
1172new_alloc:
1173 return sock_zerocopy_alloc(sk, size);
1174}
1175EXPORT_SYMBOL_GPL(sock_zerocopy_realloc);
1176
1177static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
1178{
1179 struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
1180 u32 old_lo, old_hi;
1181 u64 sum_len;
1182
1183 old_lo = serr->ee.ee_info;
1184 old_hi = serr->ee.ee_data;
1185 sum_len = old_hi - old_lo + 1ULL + len;
1186
1187 if (sum_len >= (1ULL << 32))
1188 return false;
1189
1190 if (lo != old_hi + 1)
1191 return false;
1192
1193 serr->ee.ee_data += len;
1194 return true;
1195}
1196
1197void sock_zerocopy_callback(struct ubuf_info *uarg, bool success)
1198{
1199 struct sk_buff *tail, *skb = skb_from_uarg(uarg);
1200 struct sock_exterr_skb *serr;
1201 struct sock *sk = skb->sk;
1202 struct sk_buff_head *q;
1203 unsigned long flags;
1204 u32 lo, hi;
1205 u16 len;
1206
1207 mm_unaccount_pinned_pages(&uarg->mmp);
1208
1209
1210
1211
1212 if (!uarg->len || sock_flag(sk, SOCK_DEAD))
1213 goto release;
1214
1215 len = uarg->len;
1216 lo = uarg->id;
1217 hi = uarg->id + len - 1;
1218
1219 serr = SKB_EXT_ERR(skb);
1220 memset(serr, 0, sizeof(*serr));
1221 serr->ee.ee_errno = 0;
1222 serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
1223 serr->ee.ee_data = hi;
1224 serr->ee.ee_info = lo;
1225 if (!success)
1226 serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;
1227
1228 q = &sk->sk_error_queue;
1229 spin_lock_irqsave(&q->lock, flags);
1230 tail = skb_peek_tail(q);
1231 if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY ||
1232 !skb_zerocopy_notify_extend(tail, lo, len)) {
1233 __skb_queue_tail(q, skb);
1234 skb = NULL;
1235 }
1236 spin_unlock_irqrestore(&q->lock, flags);
1237
1238 sk->sk_error_report(sk);
1239
1240release:
1241 consume_skb(skb);
1242 sock_put(sk);
1243}
1244EXPORT_SYMBOL_GPL(sock_zerocopy_callback);
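
/*
 * Added commentary: zerocopy completions are reported through the socket
 * error queue as SO_EE_ORIGIN_ZEROCOPY messages carrying an id range
 * [ee_info, ee_data]. When possible the callback above extends the range of
 * the notification already queued at the tail instead of queueing a new
 * skb, so many sends collapse into a single notification.
 */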
1245
1246void sock_zerocopy_put(struct ubuf_info *uarg)
1247{
1248 if (uarg && refcount_dec_and_test(&uarg->refcnt)) {
1249 if (uarg->callback)
1250 uarg->callback(uarg, uarg->zerocopy);
1251 else
1252 consume_skb(skb_from_uarg(uarg));
1253 }
1254}
1255EXPORT_SYMBOL_GPL(sock_zerocopy_put);
1256
1257void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
1258{
1259 if (uarg) {
1260 struct sock *sk = skb_from_uarg(uarg)->sk;
1261
1262 atomic_dec(&sk->sk_zckey);
1263 uarg->len--;
1264
1265 if (have_uref)
1266 sock_zerocopy_put(uarg);
1267 }
1268}
1269EXPORT_SYMBOL_GPL(sock_zerocopy_put_abort);
1270
1271int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len)
1272{
1273 return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len);
1274}
1275EXPORT_SYMBOL_GPL(skb_zerocopy_iter_dgram);
1276
1277int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
1278 struct msghdr *msg, int len,
1279 struct ubuf_info *uarg)
1280{
1281 struct ubuf_info *orig_uarg = skb_zcopy(skb);
1282 struct iov_iter orig_iter = msg->msg_iter;
1283 int err, orig_len = skb->len;
1284
1285
1286
1287
1288 if (orig_uarg && uarg != orig_uarg)
1289 return -EEXIST;
1290
1291 err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
1292 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
1293 struct sock *save_sk = skb->sk;
1294
1295
1296 msg->msg_iter = orig_iter;
1297 skb->sk = sk;
1298 ___pskb_trim(skb, orig_len);
1299 skb->sk = save_sk;
1300 return err;
1301 }
1302
1303 skb_zcopy_set(skb, uarg, NULL);
1304 return skb->len - orig_len;
1305}
1306EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);
1307
1308static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
1309 gfp_t gfp_mask)
1310{
1311 if (skb_zcopy(orig)) {
1312 if (skb_zcopy(nskb)) {
1313
1314 if (!gfp_mask) {
1315 WARN_ON_ONCE(1);
1316 return -ENOMEM;
1317 }
1318 if (skb_uarg(nskb) == skb_uarg(orig))
1319 return 0;
1320 if (skb_copy_ubufs(nskb, GFP_ATOMIC))
1321 return -EIO;
1322 }
1323 skb_zcopy_set(nskb, skb_uarg(orig), NULL);
1324 }
1325 return 0;
1326}

/**
 *	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
 *	@skb: the skb to modify
 *	@gfp_mask: allocation priority
 *
 *	This must be called on an skb with a zerocopy (user-backed) frag
 *	state. It copies all frags into kernel memory and drops the
 *	reference to the userspace pages.
 *
 *	If this function is called from an interrupt, @gfp_mask must be
 *	%GFP_ATOMIC.
 *
 *	Returns 0 on success or a negative error code on failure
 *	to allocate kernel memory to copy to.
 */
1343int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
1344{
1345 int num_frags = skb_shinfo(skb)->nr_frags;
1346 struct page *page, *head = NULL;
1347 int i, new_frags;
1348 u32 d_off;
1349
1350 if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
1351 return -EINVAL;
1352
1353 if (!num_frags)
1354 goto release;
1355
1356 new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
1357 for (i = 0; i < new_frags; i++) {
1358 page = alloc_page(gfp_mask);
1359 if (!page) {
1360 while (head) {
1361 struct page *next = (struct page *)page_private(head);
1362 put_page(head);
1363 head = next;
1364 }
1365 return -ENOMEM;
1366 }
1367 set_page_private(page, (unsigned long)head);
1368 head = page;
1369 }
1370
1371 page = head;
1372 d_off = 0;
1373 for (i = 0; i < num_frags; i++) {
1374 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
1375 u32 p_off, p_len, copied;
1376 struct page *p;
1377 u8 *vaddr;
1378
1379 skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f),
1380 p, p_off, p_len, copied) {
1381 u32 copy, done = 0;
1382 vaddr = kmap_atomic(p);
1383
1384 while (done < p_len) {
1385 if (d_off == PAGE_SIZE) {
1386 d_off = 0;
1387 page = (struct page *)page_private(page);
1388 }
1389 copy = min_t(u32, PAGE_SIZE - d_off, p_len - done);
1390 memcpy(page_address(page) + d_off,
1391 vaddr + p_off + done, copy);
1392 done += copy;
1393 d_off += copy;
1394 }
1395 kunmap_atomic(vaddr);
1396 }
1397 }
1398
1399
1400 for (i = 0; i < num_frags; i++)
1401 skb_frag_unref(skb, i);
1402
1403
1404 for (i = 0; i < new_frags - 1; i++) {
1405 __skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE);
1406 head = (struct page *)page_private(head);
1407 }
1408 __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
1409 skb_shinfo(skb)->nr_frags = new_frags;
1410
1411release:
1412 skb_zcopy_clear(skb, false);
1413 return 0;
1414}
1415EXPORT_SYMBOL_GPL(skb_copy_ubufs);
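
/*
 * Added commentary: skb_copy_ubufs() strings the freshly allocated pages
 * together through page_private() so they can be released on allocation
 * failure, then linearly re-packs the old (user-backed) frag contents into
 * full pages and swaps them into the frag array before dropping the
 * zerocopy reference.
 */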

/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 */
1431struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
1432{
1433 struct sk_buff_fclones *fclones = container_of(skb,
1434 struct sk_buff_fclones,
1435 skb1);
1436 struct sk_buff *n;
1437
1438 if (skb_orphan_frags(skb, gfp_mask))
1439 return NULL;
1440
1441 if (skb->fclone == SKB_FCLONE_ORIG &&
1442 refcount_read(&fclones->fclone_ref) == 1) {
1443 n = &fclones->skb2;
1444 refcount_set(&fclones->fclone_ref, 2);
1445 } else {
1446 if (skb_pfmemalloc(skb))
1447 gfp_mask |= __GFP_MEMALLOC;
1448
1449 n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
1450 if (!n)
1451 return NULL;
1452
1453 n->fclone = SKB_FCLONE_UNAVAILABLE;
1454 }
1455
1456 return __skb_clone(n, skb);
1457}
1458EXPORT_SYMBOL(skb_clone);
1459
1460void skb_headers_offset_update(struct sk_buff *skb, int off)
1461{
1462
1463 if (skb->ip_summed == CHECKSUM_PARTIAL)
1464 skb->csum_start += off;
1465
1466 skb->transport_header += off;
1467 skb->network_header += off;
1468 if (skb_mac_header_was_set(skb))
1469 skb->mac_header += off;
1470 skb->inner_transport_header += off;
1471 skb->inner_network_header += off;
1472 skb->inner_mac_header += off;
1473}
1474EXPORT_SYMBOL(skb_headers_offset_update);
1475
1476void skb_copy_header(struct sk_buff *new, const struct sk_buff *old)
1477{
1478 __copy_skb_header(new, old);
1479
1480 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
1481 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
1482 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
1483}
1484EXPORT_SYMBOL(skb_copy_header);
1485
1486static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
1487{
1488 if (skb_pfmemalloc(skb))
1489 return SKB_ALLOC_RX;
1490 return 0;
1491}

/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As a by-product this function converts a non-linear &sk_buff to a
 *	linear one, so that the &sk_buff becomes completely private and the
 *	caller is allowed to modify all the data of the returned buffer. This
 *	means that this function is not recommended when only the header is
 *	going to be modified. Use pskb_copy() instead.
 */
1510struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
1511{
1512 int headerlen = skb_headroom(skb);
1513 unsigned int size = skb_end_offset(skb) + skb->data_len;
1514 struct sk_buff *n = __alloc_skb(size, gfp_mask,
1515 skb_alloc_rx_flag(skb), NUMA_NO_NODE);
1516
1517 if (!n)
1518 return NULL;
1519
1520
1521 skb_reserve(n, headerlen);
1522
1523 skb_put(n, skb->len);
1524
1525 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
1526
1527 skb_copy_header(n, skb);
1528 return n;
1529}
1530EXPORT_SYMBOL(skb_copy);
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
1550 gfp_t gfp_mask, bool fclone)
1551{
1552 unsigned int size = skb_headlen(skb) + headroom;
1553 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
1554 struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);
1555
1556 if (!n)
1557 goto out;
1558
1559
1560 skb_reserve(n, headroom);
1561
1562 skb_put(n, skb_headlen(skb));
1563
1564 skb_copy_from_linear_data(skb, n->data, n->len);
1565
1566 n->truesize += skb->data_len;
1567 n->data_len = skb->data_len;
1568 n->len = skb->len;
1569
1570 if (skb_shinfo(skb)->nr_frags) {
1571 int i;
1572
1573 if (skb_orphan_frags(skb, gfp_mask) ||
1574 skb_zerocopy_clone(n, skb, gfp_mask)) {
1575 kfree_skb(n);
1576 n = NULL;
1577 goto out;
1578 }
1579 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1580 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
1581 skb_frag_ref(skb, i);
1582 }
1583 skb_shinfo(n)->nr_frags = i;
1584 }
1585
1586 if (skb_has_frag_list(skb)) {
1587 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
1588 skb_clone_fraglist(n);
1589 }
1590
1591 skb_copy_header(n, skb);
1592out:
1593 return n;
1594}
1595EXPORT_SYMBOL(__pskb_copy_fclone);

/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates an identical copy, if @nhead and @ntail are zero)
 *	the header of @skb. The &sk_buff itself is not changed. The &sk_buff
 *	MUST have a reference count of 1. Returns zero on success or a
 *	negative error if expansion failed; in the latter case the &sk_buff
 *	is not changed.
 *
 *	All the pointers pointing into the skb header may change and must be
 *	reloaded after a call to this function.
 */
1613int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
1614 gfp_t gfp_mask)
1615{
1616 int i, osize = skb_end_offset(skb);
1617 int size = osize + nhead + ntail;
1618 long off;
1619 u8 *data;
1620
1621 BUG_ON(nhead < 0);
1622
1623 BUG_ON(skb_shared(skb));
1624
1625 size = SKB_DATA_ALIGN(size);
1626
1627 if (skb_pfmemalloc(skb))
1628 gfp_mask |= __GFP_MEMALLOC;
1629 data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
1630 gfp_mask, NUMA_NO_NODE, NULL);
1631 if (!data)
1632 goto nodata;
1633 size = SKB_WITH_OVERHEAD(ksize(data));
1634
1635
1636
1637
1638 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
1639
1640 memcpy((struct skb_shared_info *)(data + size),
1641 skb_shinfo(skb),
1642 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
1643
1644
1645
1646
1647
1648
1649 if (skb_cloned(skb)) {
1650 if (skb_orphan_frags(skb, gfp_mask))
1651 goto nofrags;
1652 if (skb_zcopy(skb))
1653 refcount_inc(&skb_uarg(skb)->refcnt);
1654 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1655 skb_frag_ref(skb, i);
1656
1657 if (skb_has_frag_list(skb))
1658 skb_clone_fraglist(skb);
1659
1660 skb_release_data(skb);
1661 } else {
1662 skb_free_head(skb);
1663 }
1664 off = (data + nhead) - skb->head;
1665
1666 skb->head = data;
1667 skb->head_frag = 0;
1668 skb->data += off;
1669#ifdef NET_SKBUFF_DATA_USES_OFFSET
1670 skb->end = size;
1671 off = nhead;
1672#else
1673 skb->end = skb->head + size;
1674#endif
1675 skb->tail += off;
1676 skb_headers_offset_update(skb, nhead);
1677 skb->cloned = 0;
1678 skb->hdr_len = 0;
1679 skb->nohdr = 0;
1680 atomic_set(&skb_shinfo(skb)->dataref, 1);
1681
1682 skb_metadata_clear(skb);
1683
1684
1685
1686
1687
1688 if (!skb->sk || skb->destructor == sock_edemux)
1689 skb->truesize += size - osize;
1690
1691 return 0;
1692
1693nofrags:
1694 kfree(data);
1695nodata:
1696 return -ENOMEM;
1697}
1698EXPORT_SYMBOL(pskb_expand_head);
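
/*
 * Illustrative sketch (added commentary): callers rarely use
 * pskb_expand_head() directly; helpers such as skb_cow_head() wrap it to
 * guarantee writable headroom before a header is pushed, e.g.
 *
 *	if (skb_cow_head(skb, dev->needed_headroom))
 *		goto drop;
 *	hdr = skb_push(skb, sizeof(*hdr));
 *
 * where dev and hdr are the caller's device and header pointer.
 */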
1699
1700
1701
1702struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
1703{
1704 struct sk_buff *skb2;
1705 int delta = headroom - skb_headroom(skb);
1706
1707 if (delta <= 0)
1708 skb2 = pskb_copy(skb, GFP_ATOMIC);
1709 else {
1710 skb2 = skb_clone(skb, GFP_ATOMIC);
1711 if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
1712 GFP_ATOMIC)) {
1713 kfree_skb(skb2);
1714 skb2 = NULL;
1715 }
1716 }
1717 return skb2;
1718}
1719EXPORT_SYMBOL(skb_realloc_headroom);
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
1740 int newheadroom, int newtailroom,
1741 gfp_t gfp_mask)
1742{
1743
1744
1745
1746 struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
1747 gfp_mask, skb_alloc_rx_flag(skb),
1748 NUMA_NO_NODE);
1749 int oldheadroom = skb_headroom(skb);
1750 int head_copy_len, head_copy_off;
1751
1752 if (!n)
1753 return NULL;
1754
1755 skb_reserve(n, newheadroom);
1756
1757
1758 skb_put(n, skb->len);
1759
1760 head_copy_len = oldheadroom;
1761 head_copy_off = 0;
1762 if (newheadroom <= head_copy_len)
1763 head_copy_len = newheadroom;
1764 else
1765 head_copy_off = newheadroom - head_copy_len;
1766
1767
1768 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
1769 skb->len + head_copy_len));
1770
1771 skb_copy_header(n, skb);
1772
1773 skb_headers_offset_update(n, newheadroom - oldheadroom);
1774
1775 return n;
1776}
1777EXPORT_SYMBOL(skb_copy_expand);
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
1794{
1795 int err;
1796 int ntail;
1797
1798
1799 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
1800 memset(skb->data+skb->len, 0, pad);
1801 return 0;
1802 }
1803
1804 ntail = skb->data_len + pad - (skb->end - skb->tail);
1805 if (likely(skb_cloned(skb) || ntail > 0)) {
1806 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
1807 if (unlikely(err))
1808 goto free_skb;
1809 }
1810
1811
1812
1813
1814 err = skb_linearize(skb);
1815 if (unlikely(err))
1816 goto free_skb;
1817
1818 memset(skb->data + skb->len, 0, pad);
1819 return 0;
1820
1821free_skb:
1822 if (free_on_error)
1823 kfree_skb(skb);
1824 return err;
1825}
1826EXPORT_SYMBOL(__skb_pad);
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
1842{
1843 if (tail != skb) {
1844 skb->data_len += len;
1845 skb->len += len;
1846 }
1847 return skb_put(tail, len);
1848}
1849EXPORT_SYMBOL_GPL(pskb_put);

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
1860void *skb_put(struct sk_buff *skb, unsigned int len)
1861{
1862 void *tmp = skb_tail_pointer(skb);
1863 SKB_LINEAR_ASSERT(skb);
1864 skb->tail += len;
1865 skb->len += len;
1866 if (unlikely(skb->tail > skb->end))
1867 skb_over_panic(skb, len, __builtin_return_address(0));
1868 return tmp;
1869}
1870EXPORT_SYMBOL(skb_put);

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
1881void *skb_push(struct sk_buff *skb, unsigned int len)
1882{
1883 skb->data -= len;
1884 skb->len += len;
1885 if (unlikely(skb->data < skb->head))
1886 skb_under_panic(skb, len, __builtin_return_address(0));
1887 return skb->data;
1888}
1889EXPORT_SYMBOL(skb_push);
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901void *skb_pull(struct sk_buff *skb, unsigned int len)
1902{
1903 return skb_pull_inline(skb, len);
1904}
1905EXPORT_SYMBOL(skb_pull);
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916void skb_trim(struct sk_buff *skb, unsigned int len)
1917{
1918 if (skb->len > len)
1919 __skb_trim(skb, len);
1920}
1921EXPORT_SYMBOL(skb_trim);
1922
1923
1924
1925
1926int ___pskb_trim(struct sk_buff *skb, unsigned int len)
1927{
1928 struct sk_buff **fragp;
1929 struct sk_buff *frag;
1930 int offset = skb_headlen(skb);
1931 int nfrags = skb_shinfo(skb)->nr_frags;
1932 int i;
1933 int err;
1934
1935 if (skb_cloned(skb) &&
1936 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
1937 return err;
1938
1939 i = 0;
1940 if (offset >= len)
1941 goto drop_pages;
1942
1943 for (; i < nfrags; i++) {
1944 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
1945
1946 if (end < len) {
1947 offset = end;
1948 continue;
1949 }
1950
1951 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
1952
1953drop_pages:
1954 skb_shinfo(skb)->nr_frags = i;
1955
1956 for (; i < nfrags; i++)
1957 skb_frag_unref(skb, i);
1958
1959 if (skb_has_frag_list(skb))
1960 skb_drop_fraglist(skb);
1961 goto done;
1962 }
1963
1964 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
1965 fragp = &frag->next) {
1966 int end = offset + frag->len;
1967
1968 if (skb_shared(frag)) {
1969 struct sk_buff *nfrag;
1970
1971 nfrag = skb_clone(frag, GFP_ATOMIC);
1972 if (unlikely(!nfrag))
1973 return -ENOMEM;
1974
1975 nfrag->next = frag->next;
1976 consume_skb(frag);
1977 frag = nfrag;
1978 *fragp = frag;
1979 }
1980
1981 if (end < len) {
1982 offset = end;
1983 continue;
1984 }
1985
1986 if (end > len &&
1987 unlikely((err = pskb_trim(frag, len - offset))))
1988 return err;
1989
1990 if (frag->next)
1991 skb_drop_list(&frag->next);
1992 break;
1993 }
1994
1995done:
1996 if (len > skb_headlen(skb)) {
1997 skb->data_len -= skb->len - len;
1998 skb->len = len;
1999 } else {
2000 skb->len = len;
2001 skb->data_len = 0;
2002 skb_set_tail_pointer(skb, len);
2003 }
2004
2005 if (!skb->sk || skb->destructor == sock_edemux)
2006 skb_condense(skb);
2007 return 0;
2008}
2009EXPORT_SYMBOL(___pskb_trim);
2010
2011
2012
2013int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
2014{
2015 if (skb->ip_summed == CHECKSUM_COMPLETE) {
2016 int delta = skb->len - len;
2017
2018 skb->csum = csum_block_sub(skb->csum,
2019 skb_checksum(skb, len, delta, 0),
2020 len);
2021 }
2022 return __pskb_trim(skb, len);
2023}
2024EXPORT_SYMBOL(pskb_trim_rcsum_slow);

/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function only makes sense on a fragmented &sk_buff: it expands
 *	the header, moving its tail forward and copying the necessary data
 *	from the fragmented part.
 *
 *	&sk_buff MUST have a reference count of 1.
 *
 *	Returns %NULL (and the &sk_buff does not change) if the pull failed,
 *	or the value of the new tail of the skb in the case of success.
 *
 *	All the pointers pointing into the skb header may change and must be
 *	reloaded after a call to this function.
 */
2051void *__pskb_pull_tail(struct sk_buff *skb, int delta)
2052{
2053
2054
2055
2056
2057 int i, k, eat = (skb->tail + delta) - skb->end;
2058
2059 if (eat > 0 || skb_cloned(skb)) {
2060 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
2061 GFP_ATOMIC))
2062 return NULL;
2063 }
2064
2065 BUG_ON(skb_copy_bits(skb, skb_headlen(skb),
2066 skb_tail_pointer(skb), delta));
2067
2068
2069
2070
2071 if (!skb_has_frag_list(skb))
2072 goto pull_pages;
2073
2074
2075 eat = delta;
2076 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2077 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2078
2079 if (size >= eat)
2080 goto pull_pages;
2081 eat -= size;
2082 }
2083
2084
2085
2086
2087
2088
2089
2090
2091 if (eat) {
2092 struct sk_buff *list = skb_shinfo(skb)->frag_list;
2093 struct sk_buff *clone = NULL;
2094 struct sk_buff *insp = NULL;
2095
2096 do {
2097 if (list->len <= eat) {
2098
2099 eat -= list->len;
2100 list = list->next;
2101 insp = list;
2102 } else {
2103
2104
2105 if (skb_shared(list)) {
2106
2107 clone = skb_clone(list, GFP_ATOMIC);
2108 if (!clone)
2109 return NULL;
2110 insp = list->next;
2111 list = clone;
2112 } else {
2113
2114
2115 insp = list;
2116 }
2117 if (!pskb_pull(list, eat)) {
2118 kfree_skb(clone);
2119 return NULL;
2120 }
2121 break;
2122 }
2123 } while (eat);
2124
2125
2126 while ((list = skb_shinfo(skb)->frag_list) != insp) {
2127 skb_shinfo(skb)->frag_list = list->next;
2128 kfree_skb(list);
2129 }
2130
2131 if (clone) {
2132 clone->next = list;
2133 skb_shinfo(skb)->frag_list = clone;
2134 }
2135 }
2136
2137
2138pull_pages:
2139 eat = delta;
2140 k = 0;
2141 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2142 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2143
2144 if (size <= eat) {
2145 skb_frag_unref(skb, i);
2146 eat -= size;
2147 } else {
2148 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2149
2150 *frag = skb_shinfo(skb)->frags[i];
2151 if (eat) {
2152 skb_frag_off_add(frag, eat);
2153 skb_frag_size_sub(frag, eat);
2154 if (!i)
2155 goto end;
2156 eat = 0;
2157 }
2158 k++;
2159 }
2160 }
2161 skb_shinfo(skb)->nr_frags = k;
2162
2163end:
2164 skb->tail += delta;
2165 skb->data_len -= delta;
2166
2167 if (!skb->data_len)
2168 skb_zcopy_clear(skb, false);
2169
2170 return skb_tail_pointer(skb);
2171}
2172EXPORT_SYMBOL(__pskb_pull_tail);

/**
 *	skb_copy_bits - copy bits from skb to kernel buffer
 *	@skb: source skb
 *	@offset: offset in source
 *	@to: destination buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source skb to the
 *	destination buffer, walking the linear data, the page frags and the
 *	frag_list as needed.
 *
 *	Returns 0 on success or -EFAULT if the requested range does not fit
 *	within the skb.
 */
2189int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
2190{
2191 int start = skb_headlen(skb);
2192 struct sk_buff *frag_iter;
2193 int i, copy;
2194
2195 if (offset > (int)skb->len - len)
2196 goto fault;
2197
2198
2199 if ((copy = start - offset) > 0) {
2200 if (copy > len)
2201 copy = len;
2202 skb_copy_from_linear_data_offset(skb, offset, to, copy);
2203 if ((len -= copy) == 0)
2204 return 0;
2205 offset += copy;
2206 to += copy;
2207 }
2208
2209 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2210 int end;
2211 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
2212
2213 WARN_ON(start > offset + len);
2214
2215 end = start + skb_frag_size(f);
2216 if ((copy = end - offset) > 0) {
2217 u32 p_off, p_len, copied;
2218 struct page *p;
2219 u8 *vaddr;
2220
2221 if (copy > len)
2222 copy = len;
2223
2224 skb_frag_foreach_page(f,
2225 skb_frag_off(f) + offset - start,
2226 copy, p, p_off, p_len, copied) {
2227 vaddr = kmap_atomic(p);
2228 memcpy(to + copied, vaddr + p_off, p_len);
2229 kunmap_atomic(vaddr);
2230 }
2231
2232 if ((len -= copy) == 0)
2233 return 0;
2234 offset += copy;
2235 to += copy;
2236 }
2237 start = end;
2238 }
2239
2240 skb_walk_frags(skb, frag_iter) {
2241 int end;
2242
2243 WARN_ON(start > offset + len);
2244
2245 end = start + frag_iter->len;
2246 if ((copy = end - offset) > 0) {
2247 if (copy > len)
2248 copy = len;
2249 if (skb_copy_bits(frag_iter, offset - start, to, copy))
2250 goto fault;
2251 if ((len -= copy) == 0)
2252 return 0;
2253 offset += copy;
2254 to += copy;
2255 }
2256 start = end;
2257 }
2258
2259 if (!len)
2260 return 0;
2261
2262fault:
2263 return -EFAULT;
2264}
2265EXPORT_SYMBOL(skb_copy_bits);
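
/*
 * Illustrative sketch (added commentary): a common pattern is pulling a
 * protocol header out of a possibly non-linear skb into a local buffer:
 *
 *	struct udphdr _uh, *uh;
 *
 *	uh = skb_header_pointer(skb, offset, sizeof(_uh), &_uh);
 *	if (!uh)
 *		goto drop;
 *
 * where offset is the caller's header offset; skb_header_pointer() returns
 * a pointer into the linear area when the header is already there and
 * falls back to skb_copy_bits() otherwise.
 */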
2266
2267
2268
2269
2270
2271static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
2272{
2273 put_page(spd->pages[i]);
2274}
2275
2276static struct page *linear_to_page(struct page *page, unsigned int *len,
2277 unsigned int *offset,
2278 struct sock *sk)
2279{
2280 struct page_frag *pfrag = sk_page_frag(sk);
2281
2282 if (!sk_page_frag_refill(sk, pfrag))
2283 return NULL;
2284
2285 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
2286
2287 memcpy(page_address(pfrag->page) + pfrag->offset,
2288 page_address(page) + *offset, *len);
2289 *offset = pfrag->offset;
2290 pfrag->offset += *len;
2291
2292 return pfrag->page;
2293}
2294
2295static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
2296 struct page *page,
2297 unsigned int offset)
2298{
2299 return spd->nr_pages &&
2300 spd->pages[spd->nr_pages - 1] == page &&
2301 (spd->partial[spd->nr_pages - 1].offset +
2302 spd->partial[spd->nr_pages - 1].len == offset);
2303}
2304
2305
2306
2307
2308static bool spd_fill_page(struct splice_pipe_desc *spd,
2309 struct pipe_inode_info *pipe, struct page *page,
2310 unsigned int *len, unsigned int offset,
2311 bool linear,
2312 struct sock *sk)
2313{
2314 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
2315 return true;
2316
2317 if (linear) {
2318 page = linear_to_page(page, len, &offset, sk);
2319 if (!page)
2320 return true;
2321 }
2322 if (spd_can_coalesce(spd, page, offset)) {
2323 spd->partial[spd->nr_pages - 1].len += *len;
2324 return false;
2325 }
2326 get_page(page);
2327 spd->pages[spd->nr_pages] = page;
2328 spd->partial[spd->nr_pages].len = *len;
2329 spd->partial[spd->nr_pages].offset = offset;
2330 spd->nr_pages++;
2331
2332 return false;
2333}
2334
2335static bool __splice_segment(struct page *page, unsigned int poff,
2336 unsigned int plen, unsigned int *off,
2337 unsigned int *len,
2338 struct splice_pipe_desc *spd, bool linear,
2339 struct sock *sk,
2340 struct pipe_inode_info *pipe)
2341{
2342 if (!*len)
2343 return true;
2344
2345
2346 if (*off >= plen) {
2347 *off -= plen;
2348 return false;
2349 }
2350
2351
2352 poff += *off;
2353 plen -= *off;
2354 *off = 0;
2355
2356 do {
2357 unsigned int flen = min(*len, plen);
2358
2359 if (spd_fill_page(spd, pipe, page, &flen, poff,
2360 linear, sk))
2361 return true;
2362 poff += flen;
2363 plen -= flen;
2364 *len -= flen;
2365 } while (*len && plen);
2366
2367 return false;
2368}
2369
2370
2371
2372
2373
2374static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
2375 unsigned int *offset, unsigned int *len,
2376 struct splice_pipe_desc *spd, struct sock *sk)
2377{
2378 int seg;
2379 struct sk_buff *iter;
2380
2381
2382
2383
2384
2385
2386 if (__splice_segment(virt_to_page(skb->data),
2387 (unsigned long) skb->data & (PAGE_SIZE - 1),
2388 skb_headlen(skb),
2389 offset, len, spd,
2390 skb_head_is_locked(skb),
2391 sk, pipe))
2392 return true;
2393
2394
2395
2396
2397 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
2398 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
2399
2400 if (__splice_segment(skb_frag_page(f),
2401 skb_frag_off(f), skb_frag_size(f),
2402 offset, len, spd, false, sk, pipe))
2403 return true;
2404 }
2405
2406 skb_walk_frags(skb, iter) {
2407 if (*offset >= iter->len) {
2408 *offset -= iter->len;
2409 continue;
2410 }
2411
2412
2413
2414
2415 if (__skb_splice_bits(iter, pipe, offset, len, spd, sk))
2416 return true;
2417 }
2418
2419 return false;
2420}
2421
2422
2423
2424
2425
2426int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
2427 struct pipe_inode_info *pipe, unsigned int tlen,
2428 unsigned int flags)
2429{
2430 struct partial_page partial[MAX_SKB_FRAGS];
2431 struct page *pages[MAX_SKB_FRAGS];
2432 struct splice_pipe_desc spd = {
2433 .pages = pages,
2434 .partial = partial,
2435 .nr_pages_max = MAX_SKB_FRAGS,
2436 .ops = &nosteal_pipe_buf_ops,
2437 .spd_release = sock_spd_release,
2438 };
2439 int ret = 0;
2440
2441 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk);
2442
2443 if (spd.nr_pages)
2444 ret = splice_to_pipe(pipe, &spd);
2445
2446 return ret;
2447}
2448EXPORT_SYMBOL_GPL(skb_splice_bits);
2449
2450
2451int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
2452 int len)
2453{
2454 unsigned int orig_len = len;
2455 struct sk_buff *head = skb;
2456 unsigned short fragidx;
2457 int slen, ret;
2458
2459do_frag_list:
2460
2461
2462 while (offset < skb_headlen(skb) && len) {
2463 struct kvec kv;
2464 struct msghdr msg;
2465
2466 slen = min_t(int, len, skb_headlen(skb) - offset);
2467 kv.iov_base = skb->data + offset;
2468 kv.iov_len = slen;
2469 memset(&msg, 0, sizeof(msg));
2470 msg.msg_flags = MSG_DONTWAIT;
2471
2472 ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen);
2473 if (ret <= 0)
2474 goto error;
2475
2476 offset += ret;
2477 len -= ret;
2478 }
2479
2480
2481 if (!len)
2482 goto out;
2483
2484
2485 offset -= skb_headlen(skb);
2486
2487
2488 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
2489 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx];
2490
2491 if (offset < skb_frag_size(frag))
2492 break;
2493
2494 offset -= skb_frag_size(frag);
2495 }
2496
2497 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
2498 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx];
2499
2500 slen = min_t(size_t, len, skb_frag_size(frag) - offset);
2501
2502 while (slen) {
2503 ret = kernel_sendpage_locked(sk, skb_frag_page(frag),
2504 skb_frag_off(frag) + offset,
2505 slen, MSG_DONTWAIT);
2506 if (ret <= 0)
2507 goto error;
2508
2509 len -= ret;
2510 offset += ret;
2511 slen -= ret;
2512 }
2513
2514 offset = 0;
2515 }
2516
2517 if (len) {
2518
2519
2520 if (skb == head) {
2521 if (skb_has_frag_list(skb)) {
2522 skb = skb_shinfo(skb)->frag_list;
2523 goto do_frag_list;
2524 }
2525 } else if (skb->next) {
2526 skb = skb->next;
2527 goto do_frag_list;
2528 }
2529 }
2530
2531out:
2532 return orig_len - len;
2533
2534error:
2535 return orig_len == len ? ret : orig_len - len;
2536}
2537EXPORT_SYMBOL_GPL(skb_send_sock_locked);
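
/*
 * Added commentary: skb_send_sock_locked() transmits an skb's payload over
 * an already-locked socket by walking three regions in order: the linear
 * head via kernel_sendmsg_locked(), each page frag via
 * kernel_sendpage_locked(), and finally any skbs on the frag_list. It
 * returns the number of bytes sent, or the error if nothing was sent.
 */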
2538
2539
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
2552{
2553 int start = skb_headlen(skb);
2554 struct sk_buff *frag_iter;
2555 int i, copy;
2556
2557 if (offset > (int)skb->len - len)
2558 goto fault;
2559
2560 if ((copy = start - offset) > 0) {
2561 if (copy > len)
2562 copy = len;
2563 skb_copy_to_linear_data_offset(skb, offset, from, copy);
2564 if ((len -= copy) == 0)
2565 return 0;
2566 offset += copy;
2567 from += copy;
2568 }
2569
2570 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2571 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2572 int end;
2573
2574 WARN_ON(start > offset + len);
2575
2576 end = start + skb_frag_size(frag);
2577 if ((copy = end - offset) > 0) {
2578 u32 p_off, p_len, copied;
2579 struct page *p;
2580 u8 *vaddr;
2581
2582 if (copy > len)
2583 copy = len;
2584
2585 skb_frag_foreach_page(frag,
2586 skb_frag_off(frag) + offset - start,
2587 copy, p, p_off, p_len, copied) {
2588 vaddr = kmap_atomic(p);
2589 memcpy(vaddr + p_off, from + copied, p_len);
2590 kunmap_atomic(vaddr);
2591 }
2592
2593 if ((len -= copy) == 0)
2594 return 0;
2595 offset += copy;
2596 from += copy;
2597 }
2598 start = end;
2599 }
2600
2601 skb_walk_frags(skb, frag_iter) {
2602 int end;
2603
2604 WARN_ON(start > offset + len);
2605
2606 end = start + frag_iter->len;
2607 if ((copy = end - offset) > 0) {
2608 if (copy > len)
2609 copy = len;
2610 if (skb_store_bits(frag_iter, offset - start,
2611 from, copy))
2612 goto fault;
2613 if ((len -= copy) == 0)
2614 return 0;
2615 offset += copy;
2616 from += copy;
2617 }
2618 start = end;
2619 }
2620 if (!len)
2621 return 0;
2622
2623fault:
2624 return -EFAULT;
2625}
2626EXPORT_SYMBOL(skb_store_bits);
2627
2628
2629__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
2630 __wsum csum, const struct skb_checksum_ops *ops)
2631{
2632 int start = skb_headlen(skb);
2633 int i, copy = start - offset;
2634 struct sk_buff *frag_iter;
2635 int pos = 0;
2636
2637
2638 if (copy > 0) {
2639 if (copy > len)
2640 copy = len;
2641 csum = INDIRECT_CALL_1(ops->update, csum_partial_ext,
2642 skb->data + offset, copy, csum);
2643 if ((len -= copy) == 0)
2644 return csum;
2645 offset += copy;
2646 pos = copy;
2647 }
2648
2649 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2650 int end;
2651 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2652
2653 WARN_ON(start > offset + len);
2654
2655 end = start + skb_frag_size(frag);
2656 if ((copy = end - offset) > 0) {
2657 u32 p_off, p_len, copied;
2658 struct page *p;
2659 __wsum csum2;
2660 u8 *vaddr;
2661
2662 if (copy > len)
2663 copy = len;
2664
2665 skb_frag_foreach_page(frag,
2666 skb_frag_off(frag) + offset - start,
2667 copy, p, p_off, p_len, copied) {
2668 vaddr = kmap_atomic(p);
2669 csum2 = INDIRECT_CALL_1(ops->update,
2670 csum_partial_ext,
2671 vaddr + p_off, p_len, 0);
2672 kunmap_atomic(vaddr);
2673 csum = INDIRECT_CALL_1(ops->combine,
2674 csum_block_add_ext, csum,
2675 csum2, pos, p_len);
2676 pos += p_len;
2677 }
2678
2679 if (!(len -= copy))
2680 return csum;
2681 offset += copy;
2682 }
2683 start = end;
2684 }
2685
2686 skb_walk_frags(skb, frag_iter) {
2687 int end;
2688
2689 WARN_ON(start > offset + len);
2690
2691 end = start + frag_iter->len;
2692 if ((copy = end - offset) > 0) {
2693 __wsum csum2;
2694 if (copy > len)
2695 copy = len;
2696 csum2 = __skb_checksum(frag_iter, offset - start,
2697 copy, 0, ops);
2698 csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext,
2699 csum, csum2, pos, copy);
2700 if ((len -= copy) == 0)
2701 return csum;
2702 offset += copy;
2703 pos += copy;
2704 }
2705 start = end;
2706 }
2707 BUG_ON(len);
2708
2709 return csum;
2710}
2711EXPORT_SYMBOL(__skb_checksum);
2712
2713__wsum skb_checksum(const struct sk_buff *skb, int offset,
2714 int len, __wsum csum)
2715{
2716 const struct skb_checksum_ops ops = {
2717 .update = csum_partial_ext,
2718 .combine = csum_block_add_ext,
2719 };
2720
2721 return __skb_checksum(skb, offset, len, csum, &ops);
2722}
2723EXPORT_SYMBOL(skb_checksum);
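
/*
 * Added commentary: __skb_checksum() above folds the linear head, every
 * page frag and every frag_list skb into one checksum, using ops->combine
 * (csum_block_add_ext) with the running byte position so odd-length chunks
 * are combined correctly. skb_checksum() is the csum_partial flavour used
 * for the Internet checksum; crc32c users supply their own ops.
 */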
2724
2725
2726
2727__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
2728 u8 *to, int len, __wsum csum)
2729{
2730 int start = skb_headlen(skb);
2731 int i, copy = start - offset;
2732 struct sk_buff *frag_iter;
2733 int pos = 0;
2734
2735
2736 if (copy > 0) {
2737 if (copy > len)
2738 copy = len;
2739 csum = csum_partial_copy_nocheck(skb->data + offset, to,
2740 copy, csum);
2741 if ((len -= copy) == 0)
2742 return csum;
2743 offset += copy;
2744 to += copy;
2745 pos = copy;
2746 }
2747
2748 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2749 int end;
2750
2751 WARN_ON(start > offset + len);
2752
2753 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
2754 if ((copy = end - offset) > 0) {
2755 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2756 u32 p_off, p_len, copied;
2757 struct page *p;
2758 __wsum csum2;
2759 u8 *vaddr;
2760
2761 if (copy > len)
2762 copy = len;
2763
2764 skb_frag_foreach_page(frag,
2765 skb_frag_off(frag) + offset - start,
2766 copy, p, p_off, p_len, copied) {
2767 vaddr = kmap_atomic(p);
2768 csum2 = csum_partial_copy_nocheck(vaddr + p_off,
2769 to + copied,
2770 p_len, 0);
2771 kunmap_atomic(vaddr);
2772 csum = csum_block_add(csum, csum2, pos);
2773 pos += p_len;
2774 }
2775
2776 if (!(len -= copy))
2777 return csum;
2778 offset += copy;
2779 to += copy;
2780 }
2781 start = end;
2782 }
2783
2784 skb_walk_frags(skb, frag_iter) {
2785 __wsum csum2;
2786 int end;
2787
2788 WARN_ON(start > offset + len);
2789
2790 end = start + frag_iter->len;
2791 if ((copy = end - offset) > 0) {
2792 if (copy > len)
2793 copy = len;
2794 csum2 = skb_copy_and_csum_bits(frag_iter,
2795 offset - start,
2796 to, copy, 0);
2797 csum = csum_block_add(csum, csum2, pos);
2798 if ((len -= copy) == 0)
2799 return csum;
2800 offset += copy;
2801 to += copy;
2802 pos += copy;
2803 }
2804 start = end;
2805 }
2806 BUG_ON(len);
2807 return csum;
2808}
2809EXPORT_SYMBOL(skb_copy_and_csum_bits);
2810
2811__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
2812{
2813 __sum16 sum;
2814
2815 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
2816
2817 if (likely(!sum)) {
2818 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
2819 !skb->csum_complete_sw)
2820 netdev_rx_csum_fault(skb->dev, skb);
2821 }
2822 if (!skb_shared(skb))
2823 skb->csum_valid = !sum;
2824 return sum;
2825}
2826EXPORT_SYMBOL(__skb_checksum_complete_head);
2827
2828
2829
2830
2831
2832
2833
2834
2835
2836
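/*
 * Checksum the complete packet in software and fold it together with the
 * device-provided skb->csum.  A non-zero return means the packet checksum
 * does not verify.  On an unshared skb the freshly computed checksum is
 * cached (CHECKSUM_COMPLETE, csum_complete_sw) so later callers can reuse it.
 */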
2837__sum16 __skb_checksum_complete(struct sk_buff *skb)
2838{
2839 __wsum csum;
2840 __sum16 sum;
2841
2842 csum = skb_checksum(skb, 0, skb->len, 0);
2843
2844 sum = csum_fold(csum_add(skb->csum, csum));
2845
2846
2847
2848
2849
2850
2851
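	/* If the packet verifies here even though validation against the
	 * device-provided CHECKSUM_COMPLETE value failed earlier, the
	 * hardware checksum was wrong; netdev_rx_csum_fault() reports it.
	 */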
2852 if (likely(!sum)) {
2853 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
2854 !skb->csum_complete_sw)
2855 netdev_rx_csum_fault(skb->dev, skb);
2856 }
2857
2858 if (!skb_shared(skb)) {
2859
2860 skb->csum = csum;
2861 skb->ip_summed = CHECKSUM_COMPLETE;
2862 skb->csum_complete_sw = 1;
2863 skb->csum_valid = !sum;
2864 }
2865
2866 return sum;
2867}
2868EXPORT_SYMBOL(__skb_checksum_complete);
2869
2870static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum)
2871{
2872 net_warn_ratelimited(
2873 "%s: attempt to compute crc32c without libcrc32c.ko\n",
2874 __func__);
2875 return 0;
2876}
2877
2878static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2,
2879 int offset, int len)
2880{
2881 net_warn_ratelimited(
2882 "%s: attempt to compute crc32c without libcrc32c.ko\n",
2883 __func__);
2884 return 0;
2885}
2886
2887static const struct skb_checksum_ops default_crc32c_ops = {
2888 .update = warn_crc32c_csum_update,
2889 .combine = warn_crc32c_csum_combine,
2890};
2891
2892const struct skb_checksum_ops *crc32c_csum_stub __read_mostly =
2893 &default_crc32c_ops;
2894EXPORT_SYMBOL(crc32c_csum_stub);
2895
2896
2897
2898
2899
2900
2901
2902
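/*
 *	skb_zerocopy_headlen - how much of @from must be copied, not referenced
 *	@from: source buffer
 *
 *	Returns the number of leading bytes that skb_zerocopy() has to copy
 *	linearly because they cannot simply be referenced as page fragments.
 */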
2903unsigned int
2904skb_zerocopy_headlen(const struct sk_buff *from)
2905{
2906 unsigned int hlen = 0;
2907
2908 if (!from->head_frag ||
2909 skb_headlen(from) < L1_CACHE_BYTES ||
2910 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
2911 hlen = skb_headlen(from);
2912
2913 if (skb_has_frag_list(from))
2914 hlen = from->len;
2915
2916 return hlen;
2917}
2918EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
2919
2937
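/*
 *	skb_zerocopy - build @to out of @from without copying the bulk data
 *	@to: destination buffer
 *	@from: source buffer
 *	@len: number of bytes to take from @from
 *	@hlen: number of leading bytes to copy linearly
 *	       (see skb_zerocopy_headlen())
 *
 *	The remaining data is shared by taking references on the source pages.
 *	Returns 0 on success or a negative error, e.g. -ENOMEM if the source
 *	fragments could not be orphaned.
 */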
2938int
2939skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
2940{
2941 int i, j = 0;
2942 int plen = 0;
2943 int ret;
2944 struct page *page;
2945 unsigned int offset;
2946
2947 BUG_ON(!from->head_frag && !hlen);
2948
2949
2950 if (len <= skb_tailroom(to))
2951 return skb_copy_bits(from, 0, skb_put(to, len), len);
2952
2953 if (hlen) {
2954 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
2955 if (unlikely(ret))
2956 return ret;
2957 len -= hlen;
2958 } else {
2959 plen = min_t(int, skb_headlen(from), len);
2960 if (plen) {
2961 page = virt_to_head_page(from->head);
2962 offset = from->data - (unsigned char *)page_address(page);
2963 __skb_fill_page_desc(to, 0, page, offset, plen);
2964 get_page(page);
2965 j = 1;
2966 len -= plen;
2967 }
2968 }
2969
2970 to->truesize += len + plen;
2971 to->len += len + plen;
2972 to->data_len += len + plen;
2973
2974 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
2975 skb_tx_error(from);
2976 return -ENOMEM;
2977 }
2978 skb_zerocopy_clone(to, from, GFP_ATOMIC);
2979
2980 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
2981 int size;
2982
2983 if (!len)
2984 break;
2985 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
2986 size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]),
2987 len);
2988 skb_frag_size_set(&skb_shinfo(to)->frags[j], size);
2989 len -= size;
2990 skb_frag_ref(to, j);
2991 j++;
2992 }
2993 skb_shinfo(to)->nr_frags = j;
2994
2995 return 0;
2996}
2997EXPORT_SYMBOL_GPL(skb_zerocopy);
2998
2999void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
3000{
3001 __wsum csum;
3002 long csstart;
3003
3004 if (skb->ip_summed == CHECKSUM_PARTIAL)
3005 csstart = skb_checksum_start_offset(skb);
3006 else
3007 csstart = skb_headlen(skb);
3008
3009 BUG_ON(csstart > skb_headlen(skb));
3010
3011 skb_copy_from_linear_data(skb, to, csstart);
3012
3013 csum = 0;
3014 if (csstart != skb->len)
3015 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
3016 skb->len - csstart, 0);
3017
3018 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3019 long csstuff = csstart + skb->csum_offset;
3020
3021 *((__sum16 *)(to + csstuff)) = csum_fold(csum);
3022 }
3023}
3024EXPORT_SYMBOL(skb_copy_and_csum_dev);
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
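/*
 *	skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Removes and returns the buffer at the head of @list, or %NULL if the
 *	list is empty.  The list lock is taken, so this is safe to mix with
 *	the other locked queue operations.
 */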
3035struct sk_buff *skb_dequeue(struct sk_buff_head *list)
3036{
3037 unsigned long flags;
3038 struct sk_buff *result;
3039
3040 spin_lock_irqsave(&list->lock, flags);
3041 result = __skb_dequeue(list);
3042 spin_unlock_irqrestore(&list->lock, flags);
3043 return result;
3044}
3045EXPORT_SYMBOL(skb_dequeue);
3046
3047
3048
3049
3050
3051
3052
3053
3054
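/*
 *	skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Removes and returns the buffer at the tail of @list, or %NULL if the
 *	list is empty.  The list lock is taken while doing so.
 */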
3055struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
3056{
3057 unsigned long flags;
3058 struct sk_buff *result;
3059
3060 spin_lock_irqsave(&list->lock, flags);
3061 result = __skb_dequeue_tail(list);
3062 spin_unlock_irqrestore(&list->lock, flags);
3063 return result;
3064}
3065EXPORT_SYMBOL(skb_dequeue_tail);
3066
3067
3068
3069
3070
3071
3072
3073
3074
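/*
 *	skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Dequeues and frees every buffer currently on @list.  The list lock is
 *	taken for each removal.
 */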
3075void skb_queue_purge(struct sk_buff_head *list)
3076{
3077 struct sk_buff *skb;
3078 while ((skb = skb_dequeue(list)) != NULL)
3079 kfree_skb(skb);
3080}
3081EXPORT_SYMBOL(skb_queue_purge);
3082
3083
3084
3085
3086
3087
3088
3089
3090
3091
3092
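/*
 *	skb_rbtree_purge - empty an rb-tree of buffers
 *	@root: root of the tree to empty
 *
 *	Erases and frees every buffer queued on the rb-tree and returns the
 *	sum of their truesizes so the caller can adjust memory accounting.
 *	No locking is done here; the caller must hold the appropriate lock.
 */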
3093unsigned int skb_rbtree_purge(struct rb_root *root)
3094{
3095 struct rb_node *p = rb_first(root);
3096 unsigned int sum = 0;
3097
3098 while (p) {
3099 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
3100
3101 p = rb_next(p);
3102 rb_erase(&skb->rbnode, root);
3103 sum += skb->truesize;
3104 kfree_skb(skb);
3105 }
3106 return sum;
3107}
3108
3109
3110
3111
3112
3113
3114
3115
3116
3117
3118
3119
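/*
 *	skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queues @newsk at the head of @list.  The list lock is taken, so the
 *	call can safely be mixed with other locked queue operations.
 */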
3120void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
3121{
3122 unsigned long flags;
3123
3124 spin_lock_irqsave(&list->lock, flags);
3125 __skb_queue_head(list, newsk);
3126 spin_unlock_irqrestore(&list->lock, flags);
3127}
3128EXPORT_SYMBOL(skb_queue_head);
3129
3130
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140
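/*
 *	skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queues @newsk at the tail of @list under the list lock.
 */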
3141void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
3142{
3143 unsigned long flags;
3144
3145 spin_lock_irqsave(&list->lock, flags);
3146 __skb_queue_tail(list, newsk);
3147 spin_unlock_irqrestore(&list->lock, flags);
3148}
3149EXPORT_SYMBOL(skb_queue_tail);
3150
3151
3152
3153
3154
3155
3156
3157
3158
3159
3160
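/*
 *	skb_unlink - remove a buffer from a list
 *	@skb: buffer to remove
 *	@list: list the buffer is currently on
 *
 *	Removes @skb from @list under the list lock.  The buffer itself is
 *	not freed; that is left to the caller.
 */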
3161void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
3162{
3163 unsigned long flags;
3164
3165 spin_lock_irqsave(&list->lock, flags);
3166 __skb_unlink(skb, list);
3167 spin_unlock_irqrestore(&list->lock, flags);
3168}
3169EXPORT_SYMBOL(skb_unlink);
3170
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180
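/*
 *	skb_append - append a buffer after another one on a list
 *	@old: buffer to insert after
 *	@newsk: buffer to insert
 *	@list: list both buffers belong to
 *
 *	Places @newsk right after @old on @list under the list lock.
 */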
3181void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
3182{
3183 unsigned long flags;
3184
3185 spin_lock_irqsave(&list->lock, flags);
3186 __skb_queue_after(list, old, newsk);
3187 spin_unlock_irqrestore(&list->lock, flags);
3188}
3189EXPORT_SYMBOL(skb_append);
3190
3191static inline void skb_split_inside_header(struct sk_buff *skb,
3192 struct sk_buff *skb1,
3193 const u32 len, const int pos)
3194{
3195 int i;
3196
3197 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
3198 pos - len);
3199
3200 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
3201 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
3202
3203 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
3204 skb_shinfo(skb)->nr_frags = 0;
3205 skb1->data_len = skb->data_len;
3206 skb1->len += skb1->data_len;
3207 skb->data_len = 0;
3208 skb->len = len;
3209 skb_set_tail_pointer(skb, len);
3210}
3211
3212static inline void skb_split_no_header(struct sk_buff *skb,
3213 struct sk_buff *skb1,
3214 const u32 len, int pos)
3215{
3216 int i, k = 0;
3217 const int nfrags = skb_shinfo(skb)->nr_frags;
3218
3219 skb_shinfo(skb)->nr_frags = 0;
3220 skb1->len = skb1->data_len = skb->len - len;
3221 skb->len = len;
3222 skb->data_len = len - pos;
3223
3224 for (i = 0; i < nfrags; i++) {
3225 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
3226
3227 if (pos + size > len) {
3228 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
3229
3230 if (pos < len) {
3231
3232
3233
3234
3235
3236
3237
3238
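				/* This fragment straddles the split point:
				 * share the page between both buffers, then
				 * adjust offset/size so skb keeps the first
				 * (len - pos) bytes and skb1 gets the rest.
				 */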
3239 skb_frag_ref(skb, i);
3240 skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos);
3241 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
3242 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
3243 skb_shinfo(skb)->nr_frags++;
3244 }
3245 k++;
3246 } else
3247 skb_shinfo(skb)->nr_frags++;
3248 pos += size;
3249 }
3250 skb_shinfo(skb1)->nr_frags = k;
3251}
3252
3253
3254
3255
3256
3257
3258
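/*
 *	skb_split - split the skb at the given length
 *	@skb: the buffer to split; keeps the first @len bytes
 *	@skb1: the (empty) buffer that receives the remainder
 *	@len: number of bytes to keep in @skb
 */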
3259void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
3260{
3261 int pos = skb_headlen(skb);
3262
3263 skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags &
3264 SKBTX_SHARED_FRAG;
3265 skb_zerocopy_clone(skb1, skb, 0);
3266 if (len < pos)
3267 skb_split_inside_header(skb, skb1, len, pos);
3268 else
3269 skb_split_no_header(skb, skb1, len, pos);
3270}
3271EXPORT_SYMBOL(skb_split);
3272
3273
3274
3275
3276
3277static int skb_prepare_for_shift(struct sk_buff *skb)
3278{
3279 return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3280}
3281
3299
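/*
 *	skb_shift - shift paged data from @skb to the tail of @tgt
 *	@tgt: buffer the data is shifted into
 *	@skb: buffer the data is shifted out of
 *	@shiftlen: number of bytes to shift
 *
 *	Moves (or, when possible, merges) page fragments so that @shiftlen
 *	bytes move from the start of @skb's paged data to the end of @tgt.
 *	Returns the number of bytes actually shifted: either @shiftlen, or 0
 *	if the shift could not be performed.  @skb must have no linear data.
 */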
3300int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
3301{
3302 int from, to, merge, todo;
3303 skb_frag_t *fragfrom, *fragto;
3304
3305 BUG_ON(shiftlen > skb->len);
3306
3307 if (skb_headlen(skb))
3308 return 0;
3309 if (skb_zcopy(tgt) || skb_zcopy(skb))
3310 return 0;
3311
3312 todo = shiftlen;
3313 from = 0;
3314 to = skb_shinfo(tgt)->nr_frags;
3315 fragfrom = &skb_shinfo(skb)->frags[from];
3316
3317
3318
3319
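	/* Decide whether the first source fragment can simply be coalesced
	 * onto the last fragment already in @tgt; otherwise fragments are
	 * moved over wholesale further below.
	 */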
3320 if (!to ||
3321 !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
3322 skb_frag_off(fragfrom))) {
3323 merge = -1;
3324 } else {
3325 merge = to - 1;
3326
3327 todo -= skb_frag_size(fragfrom);
3328 if (todo < 0) {
3329 if (skb_prepare_for_shift(skb) ||
3330 skb_prepare_for_shift(tgt))
3331 return 0;
3332
3333
3334 fragfrom = &skb_shinfo(skb)->frags[from];
3335 fragto = &skb_shinfo(tgt)->frags[merge];
3336
3337 skb_frag_size_add(fragto, shiftlen);
3338 skb_frag_size_sub(fragfrom, shiftlen);
3339 skb_frag_off_add(fragfrom, shiftlen);
3340
3341 goto onlymerged;
3342 }
3343
3344 from++;
3345 }
3346
3347
3348 if ((shiftlen == skb->len) &&
3349 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
3350 return 0;
3351
3352 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
3353 return 0;
3354
3355 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
3356 if (to == MAX_SKB_FRAGS)
3357 return 0;
3358
3359 fragfrom = &skb_shinfo(skb)->frags[from];
3360 fragto = &skb_shinfo(tgt)->frags[to];
3361
3362 if (todo >= skb_frag_size(fragfrom)) {
3363 *fragto = *fragfrom;
3364 todo -= skb_frag_size(fragfrom);
3365 from++;
3366 to++;
3367
3368 } else {
3369 __skb_frag_ref(fragfrom);
3370 skb_frag_page_copy(fragto, fragfrom);
3371 skb_frag_off_copy(fragto, fragfrom);
3372 skb_frag_size_set(fragto, todo);
3373
3374 skb_frag_off_add(fragfrom, todo);
3375 skb_frag_size_sub(fragfrom, todo);
3376 todo = 0;
3377
3378 to++;
3379 break;
3380 }
3381 }
3382
3383
3384 skb_shinfo(tgt)->nr_frags = to;
3385
3386 if (merge >= 0) {
3387 fragfrom = &skb_shinfo(skb)->frags[0];
3388 fragto = &skb_shinfo(tgt)->frags[merge];
3389
3390 skb_frag_size_add(fragto, skb_frag_size(fragfrom));
3391 __skb_frag_unref(fragfrom);
3392 }
3393
3394
3395 to = 0;
3396 while (from < skb_shinfo(skb)->nr_frags)
3397 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
3398 skb_shinfo(skb)->nr_frags = to;
3399
3400 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
3401
3402onlymerged:
3403
3404
3405
3406 tgt->ip_summed = CHECKSUM_PARTIAL;
3407 skb->ip_summed = CHECKSUM_PARTIAL;
3408
3409
3410 skb->len -= shiftlen;
3411 skb->data_len -= shiftlen;
3412 skb->truesize -= shiftlen;
3413 tgt->len += shiftlen;
3414 tgt->data_len += shiftlen;
3415 tgt->truesize += shiftlen;
3416
3417 return shiftlen;
3418}
3419
3420
3421
3422
3423
3424
3425
3426
3427
3428
3429
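/*
 *	skb_prepare_seq_read - prepare a sequential read over skb data
 *	@skb: buffer to read from
 *	@from: lower offset of the window to read
 *	@to: upper offset of the window to read
 *	@st: state to initialize for use with skb_seq_read()
 */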
3430void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
3431 unsigned int to, struct skb_seq_state *st)
3432{
3433 st->lower_offset = from;
3434 st->upper_offset = to;
3435 st->root_skb = st->cur_skb = skb;
3436 st->frag_idx = st->stepped_offset = 0;
3437 st->frag_data = NULL;
3438}
3439EXPORT_SYMBOL(skb_prepare_seq_read);
3440
3465
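/*
 *	skb_seq_read - return the next contiguous block of skb data
 *	@consumed: number of bytes already consumed by the caller
 *	@data: returned pointer to the block
 *	@st: state initialized by skb_prepare_seq_read()
 *
 *	Returns the length of the block at *@data, or 0 once the configured
 *	window has been fully consumed.  Paged fragments are mapped with
 *	kmap_atomic(), so a caller that stops early must call
 *	skb_abort_seq_read() to drop the mapping.
 */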
3466unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
3467 struct skb_seq_state *st)
3468{
3469 unsigned int block_limit, abs_offset = consumed + st->lower_offset;
3470 skb_frag_t *frag;
3471
3472 if (unlikely(abs_offset >= st->upper_offset)) {
3473 if (st->frag_data) {
3474 kunmap_atomic(st->frag_data);
3475 st->frag_data = NULL;
3476 }
3477 return 0;
3478 }
3479
3480next_skb:
3481 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
3482
3483 if (abs_offset < block_limit && !st->frag_data) {
3484 *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
3485 return block_limit - abs_offset;
3486 }
3487
3488 if (st->frag_idx == 0 && !st->frag_data)
3489 st->stepped_offset += skb_headlen(st->cur_skb);
3490
3491 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
3492 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
3493 block_limit = skb_frag_size(frag) + st->stepped_offset;
3494
3495 if (abs_offset < block_limit) {
3496 if (!st->frag_data)
3497 st->frag_data = kmap_atomic(skb_frag_page(frag));
3498
3499 *data = (u8 *) st->frag_data + skb_frag_off(frag) +
3500 (abs_offset - st->stepped_offset);
3501
3502 return block_limit - abs_offset;
3503 }
3504
3505 if (st->frag_data) {
3506 kunmap_atomic(st->frag_data);
3507 st->frag_data = NULL;
3508 }
3509
3510 st->frag_idx++;
3511 st->stepped_offset += skb_frag_size(frag);
3512 }
3513
3514 if (st->frag_data) {
3515 kunmap_atomic(st->frag_data);
3516 st->frag_data = NULL;
3517 }
3518
3519 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
3520 st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
3521 st->frag_idx = 0;
3522 goto next_skb;
3523 } else if (st->cur_skb->next) {
3524 st->cur_skb = st->cur_skb->next;
3525 st->frag_idx = 0;
3526 goto next_skb;
3527 }
3528
3529 return 0;
3530}
3531EXPORT_SYMBOL(skb_seq_read);
3532
3533
3534
3535
3536
3537
3538
3539
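/*
 *	skb_abort_seq_read - abort a sequential read early
 *	@st: state from skb_prepare_seq_read()
 *
 *	Drops any fragment mapping still held by the sequential read state.
 */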
3540void skb_abort_seq_read(struct skb_seq_state *st)
3541{
3542 if (st->frag_data)
3543 kunmap_atomic(st->frag_data);
3544}
3545EXPORT_SYMBOL(skb_abort_seq_read);
3546
3547#define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
3548
3549static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
3550 struct ts_config *conf,
3551 struct ts_state *state)
3552{
3553 return skb_seq_read(offset, text, TS_SKB_CB(state));
3554}
3555
3556static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
3557{
3558 skb_abort_seq_read(TS_SKB_CB(state));
3559}
3560
3561
3562
3563
3564
3565
3566
3567
3568
3569
3570
3571
3572
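/*
 *	skb_find_text - search the skb data for a pattern
 *	@skb: buffer to search
 *	@from: offset to start searching at
 *	@to: offset to stop searching at
 *	@config: textsearch configuration (see textsearch_prepare())
 *
 *	Returns the offset of the first occurrence of the pattern within the
 *	given window, or UINT_MAX if it was not found.
 */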
3573unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
3574 unsigned int to, struct ts_config *config)
3575{
3576 struct ts_state state;
3577 unsigned int ret;
3578
3579 config->get_next_block = skb_ts_get_next_block;
3580 config->finish = skb_ts_finish;
3581
3582 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
3583
3584 ret = textsearch_find(config, &state);
3585 return (ret <= to - from ? ret : UINT_MAX);
3586}
3587EXPORT_SYMBOL(skb_find_text);
3588
3589int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
3590 int offset, size_t size)
3591{
3592 int i = skb_shinfo(skb)->nr_frags;
3593
3594 if (skb_can_coalesce(skb, i, page, offset)) {
3595 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
3596 } else if (i < MAX_SKB_FRAGS) {
3597 get_page(page);
3598 skb_fill_page_desc(skb, i, page, offset, size);
3599 } else {
3600 return -EMSGSIZE;
3601 }
3602
3603 return 0;
3604}
3605EXPORT_SYMBOL_GPL(skb_append_pagefrags);
3606
3607
3608
3609
3610
3611
3612
3613
3614
3615
3616
3617
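/*
 *	skb_pull_rcsum - pull skb and update receive checksum
 *	@skb: buffer to update
 *	@len: number of bytes to pull off the head
 *
 *	Like skb_pull(), but also keeps skb->csum consistent for
 *	CHECKSUM_COMPLETE buffers.  Returns the new skb->data pointer.
 */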
3618void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
3619{
3620 unsigned char *data = skb->data;
3621
3622 BUG_ON(len > skb->len);
3623 __skb_pull(skb, len);
3624 skb_postpull_rcsum(skb, data, len);
3625 return skb->data;
3626}
3627EXPORT_SYMBOL_GPL(skb_pull_rcsum);
3628
3629static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb)
3630{
3631 skb_frag_t head_frag;
3632 struct page *page;
3633
3634 page = virt_to_head_page(frag_skb->head);
3635 __skb_frag_set_page(&head_frag, page);
3636 skb_frag_off_set(&head_frag, frag_skb->data -
3637 (unsigned char *)page_address(page));
3638 skb_frag_size_set(&head_frag, skb_headlen(frag_skb));
3639 return head_frag;
3640}
3641
3642struct sk_buff *skb_segment_list(struct sk_buff *skb,
3643 netdev_features_t features,
3644 unsigned int offset)
3645{
3646 struct sk_buff *list_skb = skb_shinfo(skb)->frag_list;
3647 unsigned int tnl_hlen = skb_tnl_header_len(skb);
3648 unsigned int delta_truesize = 0;
3649 unsigned int delta_len = 0;
3650 struct sk_buff *tail = NULL;
3651 struct sk_buff *nskb;
3652
3653 skb_push(skb, -skb_network_offset(skb) + offset);
3654
3655 skb_shinfo(skb)->frag_list = NULL;
3656
3657 do {
3658 nskb = list_skb;
3659 list_skb = list_skb->next;
3660
3661 if (!tail)
3662 skb->next = nskb;
3663 else
3664 tail->next = nskb;
3665
3666 tail = nskb;
3667
3668 delta_len += nskb->len;
3669 delta_truesize += nskb->truesize;
3670
3671 skb_push(nskb, -skb_network_offset(nskb) + offset);
3672
3673 skb_release_head_state(nskb);
3674 __copy_skb_header(nskb, skb);
3675
3676 skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb));
3677 skb_copy_from_linear_data_offset(skb, -tnl_hlen,
3678 nskb->data - tnl_hlen,
3679 offset + tnl_hlen);
3680
3681 if (skb_needs_linearize(nskb, features) &&
3682 __skb_linearize(nskb))
3683 goto err_linearize;
3684
3685 } while (list_skb);
3686
3687 skb->truesize = skb->truesize - delta_truesize;
3688 skb->data_len = skb->data_len - delta_len;
3689 skb->len = skb->len - delta_len;
3690
3691 skb_gso_reset(skb);
3692
3693 skb->prev = tail;
3694
3695 if (skb_needs_linearize(skb, features) &&
3696 __skb_linearize(skb))
3697 goto err_linearize;
3698
3699 skb_get(skb);
3700
3701 return skb;
3702
3703err_linearize:
3704 kfree_skb_list(skb->next);
3705 skb->next = NULL;
3706 return ERR_PTR(-ENOMEM);
3707}
3708EXPORT_SYMBOL_GPL(skb_segment_list);
3709
3710int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
3711{
3712 if (unlikely(p->len + skb->len >= 65536))
3713 return -E2BIG;
3714
3715 if (NAPI_GRO_CB(p)->last == p)
3716 skb_shinfo(p)->frag_list = skb;
3717 else
3718 NAPI_GRO_CB(p)->last->next = skb;
3719
3720 skb_pull(skb, skb_gro_offset(skb));
3721
3722 NAPI_GRO_CB(p)->last = skb;
3723 NAPI_GRO_CB(p)->count++;
3724 p->data_len += skb->len;
3725 p->truesize += skb->truesize;
3726 p->len += skb->len;
3727
3728 NAPI_GRO_CB(skb)->same_flow = 1;
3729
3730 return 0;
3731}
3732
3733
3734
3735
3736
3737
3738
3739
3740
3741
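/*
 *	skb_segment - perform software GSO on a buffer
 *	@head_skb: buffer to segment
 *	@features: features of the egress device/path
 *
 *	Splits a GSO buffer into a list of MSS-sized segments (or into
 *	GSO_PARTIAL "super segments" when the device supports it), copying or
 *	referencing the original headers and data as appropriate.  Returns the
 *	segment list or an ERR_PTR() on failure.
 */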
3742struct sk_buff *skb_segment(struct sk_buff *head_skb,
3743 netdev_features_t features)
3744{
3745 struct sk_buff *segs = NULL;
3746 struct sk_buff *tail = NULL;
3747 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
3748 skb_frag_t *frag = skb_shinfo(head_skb)->frags;
3749 unsigned int mss = skb_shinfo(head_skb)->gso_size;
3750 unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
3751 struct sk_buff *frag_skb = head_skb;
3752 unsigned int offset = doffset;
3753 unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
3754 unsigned int partial_segs = 0;
3755 unsigned int headroom;
3756 unsigned int len = head_skb->len;
3757 __be16 proto;
3758 bool csum, sg;
3759 int nfrags = skb_shinfo(head_skb)->nr_frags;
3760 int err = -ENOMEM;
3761 int i = 0;
3762 int pos;
3763
3764 if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) &&
3765 (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) {
3766
3767
3768
3769
3770
3771
3772
3773
3774
3775
3776
3777
3778
3779 if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb))
3780 features &= ~NETIF_F_SG;
3781 }
3782
3783 __skb_push(head_skb, doffset);
3784 proto = skb_network_protocol(head_skb, NULL);
3785 if (unlikely(!proto))
3786 return ERR_PTR(-EINVAL);
3787
3788 sg = !!(features & NETIF_F_SG);
3789 csum = !!can_checksum_protocol(features, proto);
3790
3791 if (sg && csum && (mss != GSO_BY_FRAGS)) {
3792 if (!(features & NETIF_F_GSO_PARTIAL)) {
3793 struct sk_buff *iter;
3794 unsigned int frag_len;
3795
3796 if (!list_skb ||
3797 !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
3798 goto normal;
3799
3800
3801
3802
3803
3804
3805
3806
3807
3808 frag_len = list_skb->len;
3809 skb_walk_frags(head_skb, iter) {
3810 if (frag_len != iter->len && iter->next)
3811 goto normal;
3812 if (skb_headlen(iter) && !iter->head_frag)
3813 goto normal;
3814
3815 len -= iter->len;
3816 }
3817
3818 if (len != frag_len)
3819 goto normal;
3820 }
3821
3822
3823
3824
3825
3826 partial_segs = len / mss;
3827 if (partial_segs > 1)
3828 mss *= partial_segs;
3829 else
3830 partial_segs = 0;
3831 }
3832
3833normal:
3834 headroom = skb_headroom(head_skb);
3835 pos = skb_headlen(head_skb);
3836
3837 do {
3838 struct sk_buff *nskb;
3839 skb_frag_t *nskb_frag;
3840 int hsize;
3841 int size;
3842
3843 if (unlikely(mss == GSO_BY_FRAGS)) {
3844 len = list_skb->len;
3845 } else {
3846 len = head_skb->len - offset;
3847 if (len > mss)
3848 len = mss;
3849 }
3850
3851 hsize = skb_headlen(head_skb) - offset;
3852 if (hsize < 0)
3853 hsize = 0;
3854 if (hsize > len || !sg)
3855 hsize = len;
3856
3857 if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
3858 (skb_headlen(list_skb) == len || sg)) {
3859 BUG_ON(skb_headlen(list_skb) > len);
3860
3861 i = 0;
3862 nfrags = skb_shinfo(list_skb)->nr_frags;
3863 frag = skb_shinfo(list_skb)->frags;
3864 frag_skb = list_skb;
3865 pos += skb_headlen(list_skb);
3866
3867 while (pos < offset + len) {
3868 BUG_ON(i >= nfrags);
3869
3870 size = skb_frag_size(frag);
3871 if (pos + size > offset + len)
3872 break;
3873
3874 i++;
3875 pos += size;
3876 frag++;
3877 }
3878
3879 nskb = skb_clone(list_skb, GFP_ATOMIC);
3880 list_skb = list_skb->next;
3881
3882 if (unlikely(!nskb))
3883 goto err;
3884
3885 if (unlikely(pskb_trim(nskb, len))) {
3886 kfree_skb(nskb);
3887 goto err;
3888 }
3889
3890 hsize = skb_end_offset(nskb);
3891 if (skb_cow_head(nskb, doffset + headroom)) {
3892 kfree_skb(nskb);
3893 goto err;
3894 }
3895
3896 nskb->truesize += skb_end_offset(nskb) - hsize;
3897 skb_release_head_state(nskb);
3898 __skb_push(nskb, doffset);
3899 } else {
3900 nskb = __alloc_skb(hsize + doffset + headroom,
3901 GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
3902 NUMA_NO_NODE);
3903
3904 if (unlikely(!nskb))
3905 goto err;
3906
3907 skb_reserve(nskb, headroom);
3908 __skb_put(nskb, doffset);
3909 }
3910
3911 if (segs)
3912 tail->next = nskb;
3913 else
3914 segs = nskb;
3915 tail = nskb;
3916
3917 __copy_skb_header(nskb, head_skb);
3918
3919 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
3920 skb_reset_mac_len(nskb);
3921
3922 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
3923 nskb->data - tnl_hlen,
3924 doffset + tnl_hlen);
3925
3926 if (nskb->len == len + doffset)
3927 goto perform_csum_check;
3928
3929 if (!sg) {
3930 if (!csum) {
3931 if (!nskb->remcsum_offload)
3932 nskb->ip_summed = CHECKSUM_NONE;
3933 SKB_GSO_CB(nskb)->csum =
3934 skb_copy_and_csum_bits(head_skb, offset,
3935 skb_put(nskb,
3936 len),
3937 len, 0);
3938 SKB_GSO_CB(nskb)->csum_start =
3939 skb_headroom(nskb) + doffset;
3940 } else {
3941 skb_copy_bits(head_skb, offset,
3942 skb_put(nskb, len),
3943 len);
3944 }
3945 continue;
3946 }
3947
3948 nskb_frag = skb_shinfo(nskb)->frags;
3949
3950 skb_copy_from_linear_data_offset(head_skb, offset,
3951 skb_put(nskb, hsize), hsize);
3952
3953 skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
3954 SKBTX_SHARED_FRAG;
3955
3956 if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
3957 skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
3958 goto err;
3959
3960 while (pos < offset + len) {
3961 if (i >= nfrags) {
3962 i = 0;
3963 nfrags = skb_shinfo(list_skb)->nr_frags;
3964 frag = skb_shinfo(list_skb)->frags;
3965 frag_skb = list_skb;
3966 if (!skb_headlen(list_skb)) {
3967 BUG_ON(!nfrags);
3968 } else {
3969 BUG_ON(!list_skb->head_frag);
3970
3971
3972 i--;
3973 frag--;
3974 }
3975 if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
3976 skb_zerocopy_clone(nskb, frag_skb,
3977 GFP_ATOMIC))
3978 goto err;
3979
3980 list_skb = list_skb->next;
3981 }
3982
3983 if (unlikely(skb_shinfo(nskb)->nr_frags >=
3984 MAX_SKB_FRAGS)) {
3985 net_warn_ratelimited(
3986 "skb_segment: too many frags: %u %u\n",
3987 pos, mss);
3988 err = -EINVAL;
3989 goto err;
3990 }
3991
3992 *nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag;
3993 __skb_frag_ref(nskb_frag);
3994 size = skb_frag_size(nskb_frag);
3995
3996 if (pos < offset) {
3997 skb_frag_off_add(nskb_frag, offset - pos);
3998 skb_frag_size_sub(nskb_frag, offset - pos);
3999 }
4000
4001 skb_shinfo(nskb)->nr_frags++;
4002
4003 if (pos + size <= offset + len) {
4004 i++;
4005 frag++;
4006 pos += size;
4007 } else {
4008 skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
4009 goto skip_fraglist;
4010 }
4011
4012 nskb_frag++;
4013 }
4014
4015skip_fraglist:
4016 nskb->data_len = len - hsize;
4017 nskb->len += nskb->data_len;
4018 nskb->truesize += nskb->data_len;
4019
4020perform_csum_check:
4021 if (!csum) {
4022 if (skb_has_shared_frag(nskb) &&
4023 __skb_linearize(nskb))
4024 goto err;
4025
4026 if (!nskb->remcsum_offload)
4027 nskb->ip_summed = CHECKSUM_NONE;
4028 SKB_GSO_CB(nskb)->csum =
4029 skb_checksum(nskb, doffset,
4030 nskb->len - doffset, 0);
4031 SKB_GSO_CB(nskb)->csum_start =
4032 skb_headroom(nskb) + doffset;
4033 }
4034 } while ((offset += len) < head_skb->len);
4035
4036
4037
4038
4039
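	/* Remember the tail so callers can reach the last segment quickly. */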
4040 segs->prev = tail;
4041
4042 if (partial_segs) {
4043 struct sk_buff *iter;
4044 int type = skb_shinfo(head_skb)->gso_type;
4045 unsigned short gso_size = skb_shinfo(head_skb)->gso_size;
4046
4047
4048 type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL;
4049 type &= ~SKB_GSO_DODGY;
4050
4051
4052
4053
4054 for (iter = segs; iter; iter = iter->next) {
4055 skb_shinfo(iter)->gso_size = gso_size;
4056 skb_shinfo(iter)->gso_segs = partial_segs;
4057 skb_shinfo(iter)->gso_type = type;
4058 SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset;
4059 }
4060
4061 if (tail->len - doffset <= gso_size)
4062 skb_shinfo(tail)->gso_size = 0;
4063 else if (tail != segs)
4064 skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size);
4065 }
4066
4067
4068
4069
4070
4071 if (head_skb->destructor == sock_wfree) {
4072 swap(tail->truesize, head_skb->truesize);
4073 swap(tail->destructor, head_skb->destructor);
4074 swap(tail->sk, head_skb->sk);
4075 }
4076 return segs;
4077
4078err:
4079 kfree_skb_list(segs);
4080 return ERR_PTR(err);
4081}
4082EXPORT_SYMBOL_GPL(skb_segment);
4083
4084int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
4085{
4086 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
4087 unsigned int offset = skb_gro_offset(skb);
4088 unsigned int headlen = skb_headlen(skb);
4089 unsigned int len = skb_gro_len(skb);
4090 unsigned int delta_truesize;
4091 struct sk_buff *lp;
4092
4093 if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
4094 return -E2BIG;
4095
4096 lp = NAPI_GRO_CB(p)->last;
4097 pinfo = skb_shinfo(lp);
4098
4099 if (headlen <= offset) {
4100 skb_frag_t *frag;
4101 skb_frag_t *frag2;
4102 int i = skbinfo->nr_frags;
4103 int nr_frags = pinfo->nr_frags + i;
4104
4105 if (nr_frags > MAX_SKB_FRAGS)
4106 goto merge;
4107
4108 offset -= headlen;
4109 pinfo->nr_frags = nr_frags;
4110 skbinfo->nr_frags = 0;
4111
4112 frag = pinfo->frags + nr_frags;
4113 frag2 = skbinfo->frags + i;
4114 do {
4115 *--frag = *--frag2;
4116 } while (--i);
4117
4118 skb_frag_off_add(frag, offset);
4119 skb_frag_size_sub(frag, offset);
4120
4121
4122 delta_truesize = skb->truesize -
4123 SKB_TRUESIZE(skb_end_offset(skb));
4124
4125 skb->truesize -= skb->data_len;
4126 skb->len -= skb->data_len;
4127 skb->data_len = 0;
4128
4129 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
4130 goto done;
4131 } else if (skb->head_frag) {
4132 int nr_frags = pinfo->nr_frags;
4133 skb_frag_t *frag = pinfo->frags + nr_frags;
4134 struct page *page = virt_to_head_page(skb->head);
4135 unsigned int first_size = headlen - offset;
4136 unsigned int first_offset;
4137
4138 if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
4139 goto merge;
4140
4141 first_offset = skb->data -
4142 (unsigned char *)page_address(page) +
4143 offset;
4144
4145 pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
4146
4147 __skb_frag_set_page(frag, page);
4148 skb_frag_off_set(frag, first_offset);
4149 skb_frag_size_set(frag, first_size);
4150
4151 memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
4152
4153
4154 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
4155 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
4156 goto done;
4157 }
4158
4159merge:
4160 delta_truesize = skb->truesize;
4161 if (offset > headlen) {
4162 unsigned int eat = offset - headlen;
4163
4164 skb_frag_off_add(&skbinfo->frags[0], eat);
4165 skb_frag_size_sub(&skbinfo->frags[0], eat);
4166 skb->data_len -= eat;
4167 skb->len -= eat;
4168 offset = headlen;
4169 }
4170
4171 __skb_pull(skb, offset);
4172
4173 if (NAPI_GRO_CB(p)->last == p)
4174 skb_shinfo(p)->frag_list = skb;
4175 else
4176 NAPI_GRO_CB(p)->last->next = skb;
4177 NAPI_GRO_CB(p)->last = skb;
4178 __skb_header_release(skb);
4179 lp = p;
4180
4181done:
4182 NAPI_GRO_CB(p)->count++;
4183 p->data_len += len;
4184 p->truesize += delta_truesize;
4185 p->len += len;
4186 if (lp != p) {
4187 lp->data_len += len;
4188 lp->truesize += delta_truesize;
4189 lp->len += len;
4190 }
4191 NAPI_GRO_CB(skb)->same_flow = 1;
4192 return 0;
4193}
4194
4195#ifdef CONFIG_SKB_EXTENSIONS
4196#define SKB_EXT_ALIGN_VALUE 8
4197#define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE)
4198
4199static const u8 skb_ext_type_len[] = {
4200#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
4201 [SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info),
4202#endif
4203#ifdef CONFIG_XFRM
4204 [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path),
4205#endif
4206#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
4207 [TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext),
4208#endif
4209#if IS_ENABLED(CONFIG_MPTCP)
4210 [SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext),
4211#endif
4212};
4213
4214static __always_inline unsigned int skb_ext_total_length(void)
4215{
4216 return SKB_EXT_CHUNKSIZEOF(struct skb_ext) +
4217#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
4218 skb_ext_type_len[SKB_EXT_BRIDGE_NF] +
4219#endif
4220#ifdef CONFIG_XFRM
4221 skb_ext_type_len[SKB_EXT_SEC_PATH] +
4222#endif
4223#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
4224 skb_ext_type_len[TC_SKB_EXT] +
4225#endif
4226#if IS_ENABLED(CONFIG_MPTCP)
4227 skb_ext_type_len[SKB_EXT_MPTCP] +
4228#endif
4229 0;
4230}
4231
4232static void skb_extensions_init(void)
4233{
4234 BUILD_BUG_ON(SKB_EXT_NUM >= 8);
4235 BUILD_BUG_ON(skb_ext_total_length() > 255);
4236
4237 skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache",
4238 SKB_EXT_ALIGN_VALUE * skb_ext_total_length(),
4239 0,
4240 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4241 NULL);
4242}
4243#else
4244static void skb_extensions_init(void) {}
4245#endif
4246
4247void __init skb_init(void)
4248{
4249 skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache",
4250 sizeof(struct sk_buff),
4251 0,
4252 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4253 offsetof(struct sk_buff, cb),
4254 sizeof_field(struct sk_buff, cb),
4255 NULL);
4256 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
4257 sizeof(struct sk_buff_fclones),
4258 0,
4259 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4260 NULL);
4261 skb_extensions_init();
4262}
4263
4264static int
4265__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
4266 unsigned int recursion_level)
4267{
4268 int start = skb_headlen(skb);
4269 int i, copy = start - offset;
4270 struct sk_buff *frag_iter;
4271 int elt = 0;
4272
4273 if (unlikely(recursion_level >= 24))
4274 return -EMSGSIZE;
4275
4276 if (copy > 0) {
4277 if (copy > len)
4278 copy = len;
4279 sg_set_buf(sg, skb->data + offset, copy);
4280 elt++;
4281 if ((len -= copy) == 0)
4282 return elt;
4283 offset += copy;
4284 }
4285
4286 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4287 int end;
4288
4289 WARN_ON(start > offset + len);
4290
4291 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
4292 if ((copy = end - offset) > 0) {
4293 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4294 if (unlikely(elt && sg_is_last(&sg[elt - 1])))
4295 return -EMSGSIZE;
4296
4297 if (copy > len)
4298 copy = len;
4299 sg_set_page(&sg[elt], skb_frag_page(frag), copy,
4300 skb_frag_off(frag) + offset - start);
4301 elt++;
4302 if (!(len -= copy))
4303 return elt;
4304 offset += copy;
4305 }
4306 start = end;
4307 }
4308
4309 skb_walk_frags(skb, frag_iter) {
4310 int end, ret;
4311
4312 WARN_ON(start > offset + len);
4313
4314 end = start + frag_iter->len;
4315 if ((copy = end - offset) > 0) {
4316 if (unlikely(elt && sg_is_last(&sg[elt - 1])))
4317 return -EMSGSIZE;
4318
4319 if (copy > len)
4320 copy = len;
4321 ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
4322 copy, recursion_level + 1);
4323 if (unlikely(ret < 0))
4324 return ret;
4325 elt += ret;
4326 if ((len -= copy) == 0)
4327 return elt;
4328 offset += copy;
4329 }
4330 start = end;
4331 }
4332 BUG_ON(len);
4333 return elt;
4334}
4335
4336
4337
4338
4339
4340
4341
4342
4343
4344
4345
4346
4347
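/*
 *	skb_to_sgvec - map skb data into a scatterlist
 *	@skb: buffer to map
 *	@sg: scatterlist to fill (must be large enough)
 *	@offset: offset into the skb data to start at
 *	@len: number of bytes to map
 *
 *	Fills @sg from the skb's linear data, page fragments and frag_list and
 *	marks the final entry as the end of the scatterlist.  Returns the
 *	number of entries used, or a negative error (-EMSGSIZE if it ran out
 *	of room or recursed too deeply).
 */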
4348int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
4349{
4350 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
4351
4352 if (nsg <= 0)
4353 return nsg;
4354
4355 sg_mark_end(&sg[nsg - 1]);
4356
4357 return nsg;
4358}
4359EXPORT_SYMBOL_GPL(skb_to_sgvec);
4360
4361
4362
4363
4364
4365
4366
4367
4368
4369
4370
4371
4372
4373
4374
4375
4376
4377
4378
4379
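/*
 *	skb_to_sgvec_nomark - map skb data into a scatterlist, without end mark
 *	@skb: buffer to map
 *	@sg: scatterlist to fill
 *	@offset: offset into the skb data to start at
 *	@len: number of bytes to map
 *
 *	Same as skb_to_sgvec() except that the last entry is not marked as the
 *	end, so several buffers can be chained into one scatterlist by
 *	repeated calls.
 */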
4380int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
4381 int offset, int len)
4382{
4383 return __skb_to_sgvec(skb, sg, offset, len, 0);
4384}
4385EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
4386
4405
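/*
 *	skb_cow_data - make all data in the skb writable and find a trailer
 *	@skb: buffer to prepare
 *	@tailbits: amount of tailroom needed in the last buffer of the chain
 *	@trailer: returns the buffer that holds the tailroom
 *
 *	Pulls paged data into the linear area, replaces shared or cloned
 *	buffers on the frag_list and makes sure the last buffer has @tailbits
 *	of tailroom.  Returns the number of buffers in the resulting chain
 *	(at least 1) or a negative error.
 */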
4406int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
4407{
4408 int copyflag;
4409 int elt;
4410 struct sk_buff *skb1, **skb_p;
4411
4412
4413
4414
4415
4416 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
4417 !__pskb_pull_tail(skb, __skb_pagelen(skb)))
4418 return -ENOMEM;
4419
4420
4421 if (!skb_has_frag_list(skb)) {
4422
4423
4424
4425
4426
4427 if (skb_tailroom(skb) < tailbits &&
4428 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
4429 return -ENOMEM;
4430
4431
4432 *trailer = skb;
4433 return 1;
4434 }
4435
4436
4437
4438 elt = 1;
4439 skb_p = &skb_shinfo(skb)->frag_list;
4440 copyflag = 0;
4441
4442 while ((skb1 = *skb_p) != NULL) {
4443 int ntail = 0;
4444
4445
4446
4447
4448
4449 if (skb_shared(skb1))
4450 copyflag = 1;
4451
4452
4453
4454 if (skb1->next == NULL && tailbits) {
4455 if (skb_shinfo(skb1)->nr_frags ||
4456 skb_has_frag_list(skb1) ||
4457 skb_tailroom(skb1) < tailbits)
4458 ntail = tailbits + 128;
4459 }
4460
4461 if (copyflag ||
4462 skb_cloned(skb1) ||
4463 ntail ||
4464 skb_shinfo(skb1)->nr_frags ||
4465 skb_has_frag_list(skb1)) {
4466 struct sk_buff *skb2;
4467
4468
4469 if (ntail == 0)
4470 skb2 = skb_copy(skb1, GFP_ATOMIC);
4471 else
4472 skb2 = skb_copy_expand(skb1,
4473 skb_headroom(skb1),
4474 ntail,
4475 GFP_ATOMIC);
4476 if (unlikely(skb2 == NULL))
4477 return -ENOMEM;
4478
4479 if (skb1->sk)
4480 skb_set_owner_w(skb2, skb1->sk);
4481
4482
4483
4484
4485 skb2->next = skb1->next;
4486 *skb_p = skb2;
4487 kfree_skb(skb1);
4488 skb1 = skb2;
4489 }
4490 elt++;
4491 *trailer = skb1;
4492 skb_p = &skb1->next;
4493 }
4494
4495 return elt;
4496}
4497EXPORT_SYMBOL_GPL(skb_cow_data);
4498
4499static void sock_rmem_free(struct sk_buff *skb)
4500{
4501 struct sock *sk = skb->sk;
4502
4503 atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
4504}
4505
4506static void skb_set_err_queue(struct sk_buff *skb)
4507{
4508
4509
4510
4511 skb->pkt_type = PACKET_OUTGOING;
4512 BUILD_BUG_ON(PACKET_OUTGOING == 0);
4513}
4514
4515
4516
4517
4518int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
4519{
4520 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
4521 (unsigned int)READ_ONCE(sk->sk_rcvbuf))
4522 return -ENOMEM;
4523
4524 skb_orphan(skb);
4525 skb->sk = sk;
4526 skb->destructor = sock_rmem_free;
4527 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
4528 skb_set_err_queue(skb);
4529
4530
4531 skb_dst_force(skb);
4532
4533 skb_queue_tail(&sk->sk_error_queue, skb);
4534 if (!sock_flag(sk, SOCK_DEAD))
4535 sk->sk_error_report(sk);
4536 return 0;
4537}
4538EXPORT_SYMBOL(sock_queue_err_skb);
4539
4540static bool is_icmp_err_skb(const struct sk_buff *skb)
4541{
4542 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
4543 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6);
4544}
4545
4546struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
4547{
4548 struct sk_buff_head *q = &sk->sk_error_queue;
4549 struct sk_buff *skb, *skb_next = NULL;
4550 bool icmp_next = false;
4551 unsigned long flags;
4552
4553 spin_lock_irqsave(&q->lock, flags);
4554 skb = __skb_dequeue(q);
4555 if (skb && (skb_next = skb_peek(q))) {
4556 icmp_next = is_icmp_err_skb(skb_next);
4557 if (icmp_next)
4558 sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin;
4559 }
4560 spin_unlock_irqrestore(&q->lock, flags);
4561
4562 if (is_icmp_err_skb(skb) && !icmp_next)
4563 sk->sk_err = 0;
4564
4565 if (skb_next)
4566 sk->sk_error_report(sk);
4567
4568 return skb;
4569}
4570EXPORT_SYMBOL(sock_dequeue_err_skb);
4571
4572
4573
4574
4575
4576
4577
4578
4579
4580
4581
4582
4583
4584
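/*
 *	skb_clone_sk - create a clone that still references the socket
 *	@skb: buffer to clone
 *
 *	Returns a clone of @skb whose destructor keeps a reference on
 *	skb->sk (via sock_efree), or %NULL if the socket is already gone or
 *	the clone allocation failed.
 */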
4585struct sk_buff *skb_clone_sk(struct sk_buff *skb)
4586{
4587 struct sock *sk = skb->sk;
4588 struct sk_buff *clone;
4589
4590 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
4591 return NULL;
4592
4593 clone = skb_clone(skb, GFP_ATOMIC);
4594 if (!clone) {
4595 sock_put(sk);
4596 return NULL;
4597 }
4598
4599 clone->sk = sk;
4600 clone->destructor = sock_efree;
4601
4602 return clone;
4603}
4604EXPORT_SYMBOL(skb_clone_sk);
4605
4606static void __skb_complete_tx_timestamp(struct sk_buff *skb,
4607 struct sock *sk,
4608 int tstype,
4609 bool opt_stats)
4610{
4611 struct sock_exterr_skb *serr;
4612 int err;
4613
4614 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
4615
4616 serr = SKB_EXT_ERR(skb);
4617 memset(serr, 0, sizeof(*serr));
4618 serr->ee.ee_errno = ENOMSG;
4619 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
4620 serr->ee.ee_info = tstype;
4621 serr->opt_stats = opt_stats;
4622 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
4623 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
4624 serr->ee.ee_data = skb_shinfo(skb)->tskey;
4625 if (sk->sk_protocol == IPPROTO_TCP &&
4626 sk->sk_type == SOCK_STREAM)
4627 serr->ee.ee_data -= sk->sk_tskey;
4628 }
4629
4630 err = sock_queue_err_skb(sk, skb);
4631
4632 if (err)
4633 kfree_skb(skb);
4634}
4635
4636static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
4637{
4638 bool ret;
4639
4640 if (likely(sysctl_tstamp_allow_data || tsonly))
4641 return true;
4642
4643 read_lock_bh(&sk->sk_callback_lock);
4644 ret = sk->sk_socket && sk->sk_socket->file &&
4645 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW);
4646 read_unlock_bh(&sk->sk_callback_lock);
4647 return ret;
4648}
4649
4650void skb_complete_tx_timestamp(struct sk_buff *skb,
4651 struct skb_shared_hwtstamps *hwtstamps)
4652{
4653 struct sock *sk = skb->sk;
4654
4655 if (!skb_may_tx_timestamp(sk, false))
4656 goto err;
4657
4658
4659
4660
4661 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
4662 *skb_hwtstamps(skb) = *hwtstamps;
4663 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
4664 sock_put(sk);
4665 return;
4666 }
4667
4668err:
4669 kfree_skb(skb);
4670}
4671EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
4672
4673void __skb_tstamp_tx(struct sk_buff *orig_skb,
4674 struct skb_shared_hwtstamps *hwtstamps,
4675 struct sock *sk, int tstype)
4676{
4677 struct sk_buff *skb;
4678 bool tsonly, opt_stats = false;
4679
4680 if (!sk)
4681 return;
4682
4683 if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
4684 skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS)
4685 return;
4686
4687 tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
4688 if (!skb_may_tx_timestamp(sk, tsonly))
4689 return;
4690
4691 if (tsonly) {
4692#ifdef CONFIG_INET
4693 if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
4694 sk->sk_protocol == IPPROTO_TCP &&
4695 sk->sk_type == SOCK_STREAM) {
4696 skb = tcp_get_timestamping_opt_stats(sk, orig_skb);
4697 opt_stats = true;
4698 } else
4699#endif
4700 skb = alloc_skb(0, GFP_ATOMIC);
4701 } else {
4702 skb = skb_clone(orig_skb, GFP_ATOMIC);
4703 }
4704 if (!skb)
4705 return;
4706
4707 if (tsonly) {
4708 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags &
4709 SKBTX_ANY_TSTAMP;
4710 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
4711 }
4712
4713 if (hwtstamps)
4714 *skb_hwtstamps(skb) = *hwtstamps;
4715 else
4716 skb->tstamp = ktime_get_real();
4717
4718 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
4719}
4720EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
4721
4722void skb_tstamp_tx(struct sk_buff *orig_skb,
4723 struct skb_shared_hwtstamps *hwtstamps)
4724{
4725 return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk,
4726 SCM_TSTAMP_SND);
4727}
4728EXPORT_SYMBOL_GPL(skb_tstamp_tx);
4729
4730void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
4731{
4732 struct sock *sk = skb->sk;
4733 struct sock_exterr_skb *serr;
4734 int err = 1;
4735
4736 skb->wifi_acked_valid = 1;
4737 skb->wifi_acked = acked;
4738
4739 serr = SKB_EXT_ERR(skb);
4740 memset(serr, 0, sizeof(*serr));
4741 serr->ee.ee_errno = ENOMSG;
4742 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
4743
4744
4745
4746
4747 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
4748 err = sock_queue_err_skb(sk, skb);
4749 sock_put(sk);
4750 }
4751 if (err)
4752 kfree_skb(skb);
4753}
4754EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
4755
4756
4757
4758
4759
4760
4761
4762
4763
4764
4765
4766
4767
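/*
 *	skb_partial_csum_set - set up partial checksum offload on an skb
 *	@skb: buffer to update
 *	@start: offset from skb->data where checksumming should start
 *	@off: offset from @start where the checksum should be stored
 *
 *	Validates the offsets against the linear data and, if sane, switches
 *	the buffer to CHECKSUM_PARTIAL and sets the transport header.
 *	Returns %true on success, %false if the offsets are bogus.
 */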
4768bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
4769{
4770 u32 csum_end = (u32)start + (u32)off + sizeof(__sum16);
4771 u32 csum_start = skb_headroom(skb) + (u32)start;
4772
4773 if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) {
4774 net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n",
4775 start, off, skb_headroom(skb), skb_headlen(skb));
4776 return false;
4777 }
4778 skb->ip_summed = CHECKSUM_PARTIAL;
4779 skb->csum_start = csum_start;
4780 skb->csum_offset = off;
4781 skb_set_transport_header(skb, start);
4782 return true;
4783}
4784EXPORT_SYMBOL_GPL(skb_partial_csum_set);
4785
4786static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
4787 unsigned int max)
4788{
4789 if (skb_headlen(skb) >= len)
4790 return 0;
4791
4792
4793
4794
4795 if (max > skb->len)
4796 max = skb->len;
4797
4798 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
4799 return -ENOMEM;
4800
4801 if (skb_headlen(skb) < len)
4802 return -EPROTO;
4803
4804 return 0;
4805}
4806
4807#define MAX_TCP_HDR_LEN (15 * 4)
4808
4809static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
4810 typeof(IPPROTO_IP) proto,
4811 unsigned int off)
4812{
4813 int err;
4814
4815 switch (proto) {
4816 case IPPROTO_TCP:
4817 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
4818 off + MAX_TCP_HDR_LEN);
4819 if (!err && !skb_partial_csum_set(skb, off,
4820 offsetof(struct tcphdr,
4821 check)))
4822 err = -EPROTO;
4823 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;
4824
4825 case IPPROTO_UDP:
4826 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
4827 off + sizeof(struct udphdr));
4828 if (!err && !skb_partial_csum_set(skb, off,
4829 offsetof(struct udphdr,
4830 check)))
4831 err = -EPROTO;
4832 return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
4833 }
4834
4835 return ERR_PTR(-EPROTO);
4836}
4837
4838
4839
4840
4841#define MAX_IP_HDR_LEN 128
4842
4843static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
4844{
4845 unsigned int off;
4846 bool fragment;
4847 __sum16 *csum;
4848 int err;
4849
4850 fragment = false;
4851
4852 err = skb_maybe_pull_tail(skb,
4853 sizeof(struct iphdr),
4854 MAX_IP_HDR_LEN);
4855 if (err < 0)
4856 goto out;
4857
4858 if (ip_is_fragment(ip_hdr(skb)))
4859 fragment = true;
4860
4861 off = ip_hdrlen(skb);
4862
4863 err = -EPROTO;
4864
4865 if (fragment)
4866 goto out;
4867
4868 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
4869 if (IS_ERR(csum))
4870 return PTR_ERR(csum);
4871
4872 if (recalculate)
4873 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
4874 ip_hdr(skb)->daddr,
4875 skb->len - off,
4876 ip_hdr(skb)->protocol, 0);
4877 err = 0;
4878
4879out:
4880 return err;
4881}
4882
4883
4884
4885
4886#define MAX_IPV6_HDR_LEN 256
4887
4888#define OPT_HDR(type, skb, off) \
4889 (type *)(skb_network_header(skb) + (off))
4890
4891static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
4892{
4893 int err;
4894 u8 nexthdr;
4895 unsigned int off;
4896 unsigned int len;
4897 bool fragment;
4898 bool done;
4899 __sum16 *csum;
4900
4901 fragment = false;
4902 done = false;
4903
4904 off = sizeof(struct ipv6hdr);
4905
4906 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
4907 if (err < 0)
4908 goto out;
4909
4910 nexthdr = ipv6_hdr(skb)->nexthdr;
4911
4912 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
4913 while (off <= len && !done) {
4914 switch (nexthdr) {
4915 case IPPROTO_DSTOPTS:
4916 case IPPROTO_HOPOPTS:
4917 case IPPROTO_ROUTING: {
4918 struct ipv6_opt_hdr *hp;
4919
4920 err = skb_maybe_pull_tail(skb,
4921 off +
4922 sizeof(struct ipv6_opt_hdr),
4923 MAX_IPV6_HDR_LEN);
4924 if (err < 0)
4925 goto out;
4926
4927 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
4928 nexthdr = hp->nexthdr;
4929 off += ipv6_optlen(hp);
4930 break;
4931 }
4932 case IPPROTO_AH: {
4933 struct ip_auth_hdr *hp;
4934
4935 err = skb_maybe_pull_tail(skb,
4936 off +
4937 sizeof(struct ip_auth_hdr),
4938 MAX_IPV6_HDR_LEN);
4939 if (err < 0)
4940 goto out;
4941
4942 hp = OPT_HDR(struct ip_auth_hdr, skb, off);
4943 nexthdr = hp->nexthdr;
4944 off += ipv6_authlen(hp);
4945 break;
4946 }
4947 case IPPROTO_FRAGMENT: {
4948 struct frag_hdr *hp;
4949
4950 err = skb_maybe_pull_tail(skb,
4951 off +
4952 sizeof(struct frag_hdr),
4953 MAX_IPV6_HDR_LEN);
4954 if (err < 0)
4955 goto out;
4956
4957 hp = OPT_HDR(struct frag_hdr, skb, off);
4958
4959 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
4960 fragment = true;
4961
4962 nexthdr = hp->nexthdr;
4963 off += sizeof(struct frag_hdr);
4964 break;
4965 }
4966 default:
4967 done = true;
4968 break;
4969 }
4970 }
4971
4972 err = -EPROTO;
4973
4974 if (!done || fragment)
4975 goto out;
4976
4977 csum = skb_checksum_setup_ip(skb, nexthdr, off);
4978 if (IS_ERR(csum))
4979 return PTR_ERR(csum);
4980
4981 if (recalculate)
4982 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4983 &ipv6_hdr(skb)->daddr,
4984 skb->len - off, nexthdr, 0);
4985 err = 0;
4986
4987out:
4988 return err;
4989}
4990
4991
4992
4993
4994
4995
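/*
 *	skb_checksum_setup - set up checksumming offsets for an skb
 *	@skb: buffer to process
 *	@recalculate: whether to refresh the pseudo-header checksum as well
 *
 *	Parses the IPv4 or IPv6 headers (pulling them into the linear area if
 *	necessary) and configures CHECKSUM_PARTIAL for the TCP or UDP payload.
 *	Returns 0 on success or a negative error such as -EPROTO.
 */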
4996int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
4997{
4998 int err;
4999
5000 switch (skb->protocol) {
5001 case htons(ETH_P_IP):
5002 err = skb_checksum_setup_ipv4(skb, recalculate);
5003 break;
5004
5005 case htons(ETH_P_IPV6):
5006 err = skb_checksum_setup_ipv6(skb, recalculate);
5007 break;
5008
5009 default:
5010 err = -EPROTO;
5011 break;
5012 }
5013
5014 return err;
5015}
5016EXPORT_SYMBOL(skb_checksum_setup);
5017
5018
5019
5020
5021
5022
5023
5024
5025
5026
5027
5028
5029
5030
5031static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
5032 unsigned int transport_len)
5033{
5034 struct sk_buff *skb_chk;
5035 unsigned int len = skb_transport_offset(skb) + transport_len;
5036 int ret;
5037
5038 if (skb->len < len)
5039 return NULL;
5040 else if (skb->len == len)
5041 return skb;
5042
5043 skb_chk = skb_clone(skb, GFP_ATOMIC);
5044 if (!skb_chk)
5045 return NULL;
5046
5047 ret = pskb_trim_rcsum(skb_chk, len);
5048 if (ret) {
5049 kfree_skb(skb_chk);
5050 return NULL;
5051 }
5052
5053 return skb_chk;
5054}
5055
5056
5057
5058
5059
5060
5061
5062
5063
5064
5065
5066
5067
5068
5069
5070
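/*
 *	skb_checksum_trimmed - validate a checksum over a possibly padded skb
 *	@skb: buffer to check
 *	@transport_len: expected length from the transport header onwards
 *	@skb_chkf: checksum function to run on the (trimmed) buffer
 *
 *	Clones and trims the buffer to @transport_len if it carries trailing
 *	padding, then runs @skb_chkf over the transport payload.  Returns the
 *	buffer the checksum was verified on (which may be a clone the caller
 *	must free) or %NULL on failure.
 */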
5071struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
5072 unsigned int transport_len,
5073 __sum16(*skb_chkf)(struct sk_buff *skb))
5074{
5075 struct sk_buff *skb_chk;
5076 unsigned int offset = skb_transport_offset(skb);
5077 __sum16 ret;
5078
5079 skb_chk = skb_checksum_maybe_trim(skb, transport_len);
5080 if (!skb_chk)
5081 goto err;
5082
5083 if (!pskb_may_pull(skb_chk, offset))
5084 goto err;
5085
5086 skb_pull_rcsum(skb_chk, offset);
5087 ret = skb_chkf(skb_chk);
5088 skb_push_rcsum(skb_chk, offset);
5089
5090 if (ret)
5091 goto err;
5092
5093 return skb_chk;
5094
5095err:
5096 if (skb_chk && skb_chk != skb)
5097 kfree_skb(skb_chk);
5098
5099 return NULL;
5100
5101}
5102EXPORT_SYMBOL(skb_checksum_trimmed);
5103
5104void __skb_warn_lro_forwarding(const struct sk_buff *skb)
5105{
5106 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
5107 skb->dev->name);
5108}
5109EXPORT_SYMBOL(__skb_warn_lro_forwarding);
5110
5111void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
5112{
5113 if (head_stolen) {
5114 skb_release_head_state(skb);
5115 kmem_cache_free(skbuff_head_cache, skb);
5116 } else {
5117 __kfree_skb(skb);
5118 }
5119}
5120EXPORT_SYMBOL(kfree_skb_partial);
5121
5122
5123
5124
5125
5126
5127
5128
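/*
 *	skb_try_coalesce - try to merge @from into @to to save memory
 *	@to: buffer to extend
 *	@from: buffer to absorb
 *	@fragstolen: set to %true if @from's head was turned into a fragment
 *	@delta_truesize: how much @to's truesize grew
 *
 *	Either copies @from's data into @to's tailroom or moves its page
 *	fragments across.  Returns %true if the buffers were merged and @from
 *	can be freed by the caller, %false if they must stay separate.
 */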
5129bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
5130 bool *fragstolen, int *delta_truesize)
5131{
5132 struct skb_shared_info *to_shinfo, *from_shinfo;
5133 int i, delta, len = from->len;
5134
5135 *fragstolen = false;
5136
5137 if (skb_cloned(to))
5138 return false;
5139
5140 if (len <= skb_tailroom(to)) {
5141 if (len)
5142 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
5143 *delta_truesize = 0;
5144 return true;
5145 }
5146
5147 to_shinfo = skb_shinfo(to);
5148 from_shinfo = skb_shinfo(from);
5149 if (to_shinfo->frag_list || from_shinfo->frag_list)
5150 return false;
5151 if (skb_zcopy(to) || skb_zcopy(from))
5152 return false;
5153
5154 if (skb_headlen(from) != 0) {
5155 struct page *page;
5156 unsigned int offset;
5157
5158 if (to_shinfo->nr_frags +
5159 from_shinfo->nr_frags >= MAX_SKB_FRAGS)
5160 return false;
5161
5162 if (skb_head_is_locked(from))
5163 return false;
5164
5165 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
5166
5167 page = virt_to_head_page(from->head);
5168 offset = from->data - (unsigned char *)page_address(page);
5169
5170 skb_fill_page_desc(to, to_shinfo->nr_frags,
5171 page, offset, skb_headlen(from));
5172 *fragstolen = true;
5173 } else {
5174 if (to_shinfo->nr_frags +
5175 from_shinfo->nr_frags > MAX_SKB_FRAGS)
5176 return false;
5177
5178 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
5179 }
5180
5181 WARN_ON_ONCE(delta < len);
5182
5183 memcpy(to_shinfo->frags + to_shinfo->nr_frags,
5184 from_shinfo->frags,
5185 from_shinfo->nr_frags * sizeof(skb_frag_t));
5186 to_shinfo->nr_frags += from_shinfo->nr_frags;
5187
5188 if (!skb_cloned(from))
5189 from_shinfo->nr_frags = 0;
5190
5191
5192
5193
5194 for (i = 0; i < from_shinfo->nr_frags; i++)
5195 __skb_frag_ref(&from_shinfo->frags[i]);
5196
5197 to->truesize += delta;
5198 to->len += len;
5199 to->data_len += len;
5200
5201 *delta_truesize = delta;
5202 return true;
5203}
5204EXPORT_SYMBOL(skb_try_coalesce);
5205
5206
5207
5208
5209
5210
5211
5212
5213
5214
5215
5216
5217
5218
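/*
 *	skb_scrub_packet - scrub an skb before it crosses a device or netns
 *	@skb: buffer to clean
 *	@xnet: %true if the packet is crossing network namespaces
 *
 *	Removes state that must not leak to another device or namespace:
 *	dst, extensions, conntrack and trace state always; mark, timestamp
 *	and ipvs state only when @xnet is set.
 */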
5219void skb_scrub_packet(struct sk_buff *skb, bool xnet)
5220{
5221 skb->pkt_type = PACKET_HOST;
5222 skb->skb_iif = 0;
5223 skb->ignore_df = 0;
5224 skb_dst_drop(skb);
5225 skb_ext_reset(skb);
5226 nf_reset_ct(skb);
5227 nf_reset_trace(skb);
5228
5229#ifdef CONFIG_NET_SWITCHDEV
5230 skb->offload_fwd_mark = 0;
5231 skb->offload_l3_fwd_mark = 0;
5232#endif
5233
5234 if (!xnet)
5235 return;
5236
5237 ipvs_reset(skb);
5238 skb->mark = 0;
5239 skb->tstamp = 0;
5240}
5241EXPORT_SYMBOL_GPL(skb_scrub_packet);
5242
5243
5244
5245
5246
5247
5248
5249
5250
5251
5252
5253static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
5254{
5255 const struct skb_shared_info *shinfo = skb_shinfo(skb);
5256 unsigned int thlen = 0;
5257
5258 if (skb->encapsulation) {
5259 thlen = skb_inner_transport_header(skb) -
5260 skb_transport_header(skb);
5261
5262 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
5263 thlen += inner_tcp_hdrlen(skb);
5264 } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
5265 thlen = tcp_hdrlen(skb);
5266 } else if (unlikely(skb_is_gso_sctp(skb))) {
5267 thlen = sizeof(struct sctphdr);
5268 } else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
5269 thlen = sizeof(struct udphdr);
5270 }
5271
5272
5273
5274
5275 return thlen + shinfo->gso_size;
5276}
5277
5278
5279
5280
5281
5282
5283
5284
5285
5286
5287
5288static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
5289{
5290 unsigned int hdr_len = skb_transport_header(skb) -
5291 skb_network_header(skb);
5292
5293 return hdr_len + skb_gso_transport_seglen(skb);
5294}
5295
5296
5297
5298
5299
5300
5301
5302
5303
5304
5305static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
5306{
5307 unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
5308
5309 return hdr_len + skb_gso_transport_seglen(skb);
5310}
5311
5332
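/* Check whether each resulting GSO segment (headers plus per-segment
 * payload, as summarized in @seg_len) fits within @max_len.  For
 * GSO_BY_FRAGS buffers the frag_list members are measured individually.
 */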
5333static inline bool skb_gso_size_check(const struct sk_buff *skb,
5334 unsigned int seg_len,
5335 unsigned int max_len) {
5336 const struct skb_shared_info *shinfo = skb_shinfo(skb);
5337 const struct sk_buff *iter;
5338
5339 if (shinfo->gso_size != GSO_BY_FRAGS)
5340 return seg_len <= max_len;
5341
5342
5343 seg_len -= GSO_BY_FRAGS;
5344
5345 skb_walk_frags(skb, iter) {
5346 if (seg_len + skb_headlen(iter) > max_len)
5347 return false;
5348 }
5349
5350 return true;
5351}
5352
5353
5354
5355
5356
5357
5358
5359
5360
5361
5362
5363bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)
5364{
5365 return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
5366}
5367EXPORT_SYMBOL_GPL(skb_gso_validate_network_len);
5368
5369
5370
5371
5372
5373
5374
5375
5376
5377
5378bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)
5379{
5380 return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len);
5381}
5382EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
5383
5384static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
5385{
5386 int mac_len, meta_len;
5387 void *meta;
5388
5389 if (skb_cow(skb, skb_headroom(skb)) < 0) {
5390 kfree_skb(skb);
5391 return NULL;
5392 }
5393
5394 mac_len = skb->data - skb_mac_header(skb);
5395 if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) {
5396 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
5397 mac_len - VLAN_HLEN - ETH_TLEN);
5398 }
5399
5400 meta_len = skb_metadata_len(skb);
5401 if (meta_len) {
5402 meta = skb_metadata_end(skb) - meta_len;
5403 memmove(meta + VLAN_HLEN, meta, meta_len);
5404 }
5405
5406 skb->mac_header += VLAN_HLEN;
5407 return skb;
5408}
5409
5410struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
5411{
5412 struct vlan_hdr *vhdr;
5413 u16 vlan_tci;
5414
5415 if (unlikely(skb_vlan_tag_present(skb))) {
		/* vlan_tci is already set-up so leave this for another time */
5417 return skb;
5418 }
5419
5420 skb = skb_share_check(skb, GFP_ATOMIC);
5421 if (unlikely(!skb))
5422 goto err_free;
5423
5424 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short))))
5425 goto err_free;
5426
5427 vhdr = (struct vlan_hdr *)skb->data;
5428 vlan_tci = ntohs(vhdr->h_vlan_TCI);
5429 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
5430
5431 skb_pull_rcsum(skb, VLAN_HLEN);
5432 vlan_set_encap_proto(skb, vhdr);
5433
5434 skb = skb_reorder_vlan_header(skb);
5435 if (unlikely(!skb))
5436 goto err_free;
5437
5438 skb_reset_network_header(skb);
5439 skb_reset_transport_header(skb);
5440 skb_reset_mac_len(skb);
5441
5442 return skb;
5443
5444err_free:
5445 kfree_skb(skb);
5446 return NULL;
5447}
5448EXPORT_SYMBOL(skb_vlan_untag);
5449
5450int skb_ensure_writable(struct sk_buff *skb, int write_len)
5451{
5452 if (!pskb_may_pull(skb, write_len))
5453 return -ENOMEM;
5454
5455 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
5456 return 0;
5457
5458 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
5459}
5460EXPORT_SYMBOL(skb_ensure_writable);

/* remove VLAN header from packet and update csum accordingly.
 * expects a non skb_vlan_tag_present skb with a vlan tag payload
 */
5465int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
5466{
5467 struct vlan_hdr *vhdr;
5468 int offset = skb->data - skb_mac_header(skb);
5469 int err;
5470
5471 if (WARN_ONCE(offset,
5472 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n",
5473 offset)) {
5474 return -EINVAL;
5475 }
5476
5477 err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
5478 if (unlikely(err))
5479 return err;
5480
5481 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
5482
5483 vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
5484 *vlan_tci = ntohs(vhdr->h_vlan_TCI);
5485
5486 memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
5487 __skb_pull(skb, VLAN_HLEN);
5488
5489 vlan_set_encap_proto(skb, vhdr);
5490 skb->mac_header += VLAN_HLEN;
5491
5492 if (skb_network_offset(skb) < ETH_HLEN)
5493 skb_set_network_header(skb, ETH_HLEN);
5494
5495 skb_reset_mac_len(skb);
5496
5497 return err;
5498}
5499EXPORT_SYMBOL(__skb_vlan_pop);

/* Pop a vlan tag either from hwaccel or from payload.
 * Expects skb->data at mac header.
 */
5504int skb_vlan_pop(struct sk_buff *skb)
5505{
5506 u16 vlan_tci;
5507 __be16 vlan_proto;
5508 int err;
5509
5510 if (likely(skb_vlan_tag_present(skb))) {
5511 __vlan_hwaccel_clear_tag(skb);
5512 } else {
5513 if (unlikely(!eth_type_vlan(skb->protocol)))
5514 return 0;
5515
5516 err = __skb_vlan_pop(skb, &vlan_tci);
5517 if (err)
5518 return err;
5519 }
5520
5521 if (likely(!eth_type_vlan(skb->protocol)))
5522 return 0;
5523
5524 vlan_proto = skb->protocol;
5525 err = __skb_vlan_pop(skb, &vlan_tci);
5526 if (unlikely(err))
5527 return err;
5528
5529 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
5530 return 0;
5531}
5532EXPORT_SYMBOL(skb_vlan_pop);

/* Push a vlan tag either into hwaccel or into payload (if a hardware
 * accelerated tag is already present). Expects skb->data at mac header.
 */
5537int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
5538{
5539 if (skb_vlan_tag_present(skb)) {
5540 int offset = skb->data - skb_mac_header(skb);
5541 int err;
5542
5543 if (WARN_ONCE(offset,
5544 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n",
5545 offset)) {
5546 return -EINVAL;
5547 }
5548
5549 err = __vlan_insert_tag(skb, skb->vlan_proto,
5550 skb_vlan_tag_get(skb));
5551 if (err)
5552 return err;
5553
5554 skb->protocol = skb->vlan_proto;
5555 skb->mac_len += VLAN_HLEN;
5556
5557 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
5558 }
5559 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
5560 return 0;
5561}
5562EXPORT_SYMBOL(skb_vlan_push);

/* Update the ethertype of hdr and the skb csum value if required. */
5565static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr,
5566 __be16 ethertype)
5567{
5568 if (skb->ip_summed == CHECKSUM_COMPLETE) {
5569 __be16 diff[] = { ~hdr->h_proto, ethertype };
5570
5571 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
5572 }
5573
5574 hdr->h_proto = ethertype;
5575}
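
/**
 * skb_mpls_push() - push a new MPLS header after @mac_len bytes from the
 *                   start of the packet
 * @skb: buffer
 * @mpls_lse: MPLS label stack entry to push
 * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848)
 * @mac_len: length of the MAC header
 * @ethernet: flag to indicate if the resulting packet after skb_mpls_push is
 *            ethernet
 *
 * Expects skb->data at mac header.
 *
 * Returns 0 on success, -errno otherwise.
 */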
5592int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
5593 int mac_len, bool ethernet)
5594{
5595 struct mpls_shim_hdr *lse;
5596 int err;
5597
5598 if (unlikely(!eth_p_mpls(mpls_proto)))
5599 return -EINVAL;

	/* pushing an MPLS header onto an already encapsulated packet
	 * is not supported
	 */
5602 if (skb->encapsulation)
5603 return -EINVAL;
5604
5605 err = skb_cow_head(skb, MPLS_HLEN);
5606 if (unlikely(err))
5607 return err;
5608
5609 if (!skb->inner_protocol) {
5610 skb_set_inner_network_header(skb, skb_network_offset(skb));
5611 skb_set_inner_protocol(skb, skb->protocol);
5612 }
5613
5614 skb_push(skb, MPLS_HLEN);
5615 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
5616 mac_len);
5617 skb_reset_mac_header(skb);
5618 skb_set_network_header(skb, mac_len);
5619 skb_reset_mac_len(skb);
5620
5621 lse = mpls_hdr(skb);
5622 lse->label_stack_entry = mpls_lse;
5623 skb_postpush_rcsum(skb, lse, MPLS_HLEN);
5624
5625 if (ethernet && mac_len >= ETH_HLEN)
5626 skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto);
5627 skb->protocol = mpls_proto;
5628
5629 return 0;
5630}
5631EXPORT_SYMBOL_GPL(skb_mpls_push);
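
/**
 * skb_mpls_pop() - pop the outermost MPLS header
 * @skb: buffer
 * @next_proto: ethertype of the header after the popped MPLS header
 * @mac_len: length of the MAC header
 * @ethernet: flag to indicate if the packet is ethernet
 *
 * Expects skb->data at mac header.
 *
 * Returns 0 on success, -errno otherwise.
 */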
5645int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
5646 bool ethernet)
5647{
5648 int err;
5649
5650 if (unlikely(!eth_p_mpls(skb->protocol)))
5651 return 0;
5652
5653 err = skb_ensure_writable(skb, mac_len + MPLS_HLEN);
5654 if (unlikely(err))
5655 return err;
5656
5657 skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);
5658 memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
5659 mac_len);
5660
5661 __skb_pull(skb, MPLS_HLEN);
5662 skb_reset_mac_header(skb);
5663 skb_set_network_header(skb, mac_len);
5664
5665 if (ethernet && mac_len >= ETH_HLEN) {
5666 struct ethhdr *hdr;

		/* use mpls_hdr() to get the ethertype location, so that any
		 * VLAN tags between the MAC and MPLS headers are accounted for
		 */
5669 hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
5670 skb_mod_eth_type(skb, hdr, next_proto);
5671 }
5672 skb->protocol = next_proto;
5673
5674 return 0;
5675}
5676EXPORT_SYMBOL_GPL(skb_mpls_pop);
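
/**
 * skb_mpls_update_lse() - modify the outermost MPLS header and update csum
 * @skb: buffer
 * @mpls_lse: new MPLS label stack entry to update to
 *
 * Expects skb->data at mac header.
 *
 * Returns 0 on success, -errno otherwise.
 */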
5688int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse)
5689{
5690 int err;
5691
5692 if (unlikely(!eth_p_mpls(skb->protocol)))
5693 return -EINVAL;
5694
5695 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
5696 if (unlikely(err))
5697 return err;
5698
5699 if (skb->ip_summed == CHECKSUM_COMPLETE) {
5700 __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse };
5701
5702 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
5703 }
5704
5705 mpls_hdr(skb)->label_stack_entry = mpls_lse;
5706
5707 return 0;
5708}
5709EXPORT_SYMBOL_GPL(skb_mpls_update_lse);
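
/**
 * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header
 * @skb: buffer
 *
 * Expects skb->data at mac header.
 *
 * Returns 0 on success, -EINVAL if the packet is not MPLS or the TTL
 * would reach zero.
 */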
5720int skb_mpls_dec_ttl(struct sk_buff *skb)
5721{
5722 u32 lse;
5723 u8 ttl;
5724
5725 if (unlikely(!eth_p_mpls(skb->protocol)))
5726 return -EINVAL;
5727
5728 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);
5729 ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
5730 if (!--ttl)
5731 return -EINVAL;
5732
5733 lse &= ~MPLS_LS_TTL_MASK;
5734 lse |= ttl << MPLS_LS_TTL_SHIFT;
5735
5736 return skb_mpls_update_lse(skb, cpu_to_be32(lse));
5737}
5738EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl);
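
/**
 * alloc_skb_with_frags - allocate skb with page frags
 * @header_len: size of linear part
 * @data_len: needed length in frags
 * @max_page_order: max page order desired
 * @errcode: pointer to error code if any
 * @gfp_mask: allocation mask
 *
 * This can be used to allocate a paged skb, given a maximal order for frags.
 */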
5751struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
5752 unsigned long data_len,
5753 int max_page_order,
5754 int *errcode,
5755 gfp_t gfp_mask)
5756{
5757 int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
5758 unsigned long chunk;
5759 struct sk_buff *skb;
5760 struct page *page;
5761 int i;
5762
5763 *errcode = -EMSGSIZE;
	/* Note this test could be relaxed, if we succeed to allocate
	 * high order pages...
	 */
5767 if (npages > MAX_SKB_FRAGS)
5768 return NULL;
5769
5770 *errcode = -ENOBUFS;
5771 skb = alloc_skb(header_len, gfp_mask);
5772 if (!skb)
5773 return NULL;
5774
5775 skb->truesize += npages << PAGE_SHIFT;
5776
5777 for (i = 0; npages > 0; i++) {
5778 int order = max_page_order;
5779
5780 while (order) {
5781 if (npages >= 1 << order) {
5782 page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
5783 __GFP_COMP |
5784 __GFP_NOWARN,
5785 order);
5786 if (page)
5787 goto fill_page;
				/* Do not retry other high order allocations */
5789 order = 1;
5790 max_page_order = 0;
5791 }
5792 order--;
5793 }
5794 page = alloc_page(gfp_mask);
5795 if (!page)
5796 goto failure;
5797fill_page:
5798 chunk = min_t(unsigned long, data_len,
5799 PAGE_SIZE << order);
5800 skb_fill_page_desc(skb, i, page, 0, chunk);
5801 data_len -= chunk;
5802 npages -= 1 << order;
5803 }
5804 return skb;
5805
5806failure:
5807 kfree_skb(skb);
5808 return NULL;
5809}
5810EXPORT_SYMBOL(alloc_skb_with_frags);

/* carve out the first off bytes from skb when off < headlen */
5813static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
5814 const int headlen, gfp_t gfp_mask)
5815{
5816 int i;
5817 int size = skb_end_offset(skb);
5818 int new_hlen = headlen - off;
5819 u8 *data;
5820
5821 size = SKB_DATA_ALIGN(size);
5822
5823 if (skb_pfmemalloc(skb))
5824 gfp_mask |= __GFP_MEMALLOC;
5825 data = kmalloc_reserve(size +
5826 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
5827 gfp_mask, NUMA_NO_NODE, NULL);
5828 if (!data)
5829 return -ENOMEM;
5830
5831 size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy real data, and all frags */
5834 skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
5835 skb->len -= off;
5836
5837 memcpy((struct skb_shared_info *)(data + size),
5838 skb_shinfo(skb),
5839 offsetof(struct skb_shared_info,
5840 frags[skb_shinfo(skb)->nr_frags]));
5841 if (skb_cloned(skb)) {
		/* drop the old head gracefully */
5843 if (skb_orphan_frags(skb, gfp_mask)) {
5844 kfree(data);
5845 return -ENOMEM;
5846 }
5847 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
5848 skb_frag_ref(skb, i);
5849 if (skb_has_frag_list(skb))
5850 skb_clone_fraglist(skb);
5851 skb_release_data(skb);
5852 } else {
		/* not cloned: the existing frag references carry over to
		 * the copied shinfo, so just free the old head
		 */
5856 skb_free_head(skb);
5857 }
5858
5859 skb->head = data;
5860 skb->data = data;
5861 skb->head_frag = 0;
5862#ifdef NET_SKBUFF_DATA_USES_OFFSET
5863 skb->end = size;
5864#else
5865 skb->end = skb->head + size;
5866#endif
5867 skb_set_tail_pointer(skb, skb_headlen(skb));
5868 skb_headers_offset_update(skb, 0);
5869 skb->cloned = 0;
5870 skb->hdr_len = 0;
5871 skb->nohdr = 0;
5872 atomic_set(&skb_shinfo(skb)->dataref, 1);
5873
5874 return 0;
5875}
5876
5877static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);

/* Carve @eat bytes off the front of skb's frag_list; may recurse into
 * pskb_carve() for the list skb where the split point falls.
 */
5882static int pskb_carve_frag_list(struct sk_buff *skb,
5883 struct skb_shared_info *shinfo, int eat,
5884 gfp_t gfp_mask)
5885{
5886 struct sk_buff *list = shinfo->frag_list;
5887 struct sk_buff *clone = NULL;
5888 struct sk_buff *insp = NULL;
5889
5890 do {
5891 if (!list) {
5892 pr_err("Not enough bytes to eat. Want %d\n", eat);
5893 return -EFAULT;
5894 }
5895 if (list->len <= eat) {
			/* Eaten as whole. */
5897 eat -= list->len;
5898 list = list->next;
5899 insp = list;
5900 } else {
			/* Eaten partially. */
5902 if (skb_shared(list)) {
5903 clone = skb_clone(list, gfp_mask);
5904 if (!clone)
5905 return -ENOMEM;
5906 insp = list->next;
5907 list = clone;
5908 } else {
				/* This may be pulled without problems. */
5910 insp = list;
5911 }
5912 if (pskb_carve(list, eat, gfp_mask) < 0) {
5913 kfree_skb(clone);
5914 return -ENOMEM;
5915 }
5916 break;
5917 }
5918 } while (eat);

	/* Free pulled out fragments. */
5921 while ((list = shinfo->frag_list) != insp) {
5922 shinfo->frag_list = list->next;
5923 kfree_skb(list);
5924 }
5925
5926 if (clone) {
5927 clone->next = list;
5928 shinfo->frag_list = clone;
5929 }
5930 return 0;
5931}

/* carve off first len bytes from skb. Split line (off) is in the
 * non-linear part of skb
 */
5936static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
5937 int pos, gfp_t gfp_mask)
5938{
5939 int i, k = 0;
5940 int size = skb_end_offset(skb);
5941 u8 *data;
5942 const int nfrags = skb_shinfo(skb)->nr_frags;
5943 struct skb_shared_info *shinfo;
5944
5945 size = SKB_DATA_ALIGN(size);
5946
5947 if (skb_pfmemalloc(skb))
5948 gfp_mask |= __GFP_MEMALLOC;
5949 data = kmalloc_reserve(size +
5950 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
5951 gfp_mask, NUMA_NO_NODE, NULL);
5952 if (!data)
5953 return -ENOMEM;
5954
5955 size = SKB_WITH_OVERHEAD(ksize(data));
5956
5957 memcpy((struct skb_shared_info *)(data + size),
5958 skb_shinfo(skb), offsetof(struct skb_shared_info,
5959 frags[skb_shinfo(skb)->nr_frags]));
5960 if (skb_orphan_frags(skb, gfp_mask)) {
5961 kfree(data);
5962 return -ENOMEM;
5963 }
5964 shinfo = (struct skb_shared_info *)(data + size);
5965 for (i = 0; i < nfrags; i++) {
5966 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);
5967
5968 if (pos + fsize > off) {
5969 shinfo->frags[k] = skb_shinfo(skb)->frags[i];
5970
5971 if (pos < off) {
				/* Split frag: the carve point falls inside
				 * this frag. Keep the whole page but advance
				 * the frag offset and shrink its size so that
				 * only the bytes past @off remain.
				 */
5980 skb_frag_off_add(&shinfo->frags[0], off - pos);
5981 skb_frag_size_sub(&shinfo->frags[0], off - pos);
5982 }
5983 skb_frag_ref(skb, i);
5984 k++;
5985 }
5986 pos += fsize;
5987 }
5988 shinfo->nr_frags = k;
5989 if (skb_has_frag_list(skb))
5990 skb_clone_fraglist(skb);

	/* split line is in frag list */
5993 if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) {
		/* no frag references were taken above (k == 0), nothing to unref */
5995 if (skb_has_frag_list(skb))
5996 kfree_skb_list(skb_shinfo(skb)->frag_list);
5997 kfree(data);
5998 return -ENOMEM;
5999 }
6000 skb_release_data(skb);
6001
6002 skb->head = data;
6003 skb->head_frag = 0;
6004 skb->data = data;
6005#ifdef NET_SKBUFF_DATA_USES_OFFSET
6006 skb->end = size;
6007#else
6008 skb->end = skb->head + size;
6009#endif
6010 skb_reset_tail_pointer(skb);
6011 skb_headers_offset_update(skb, 0);
6012 skb->cloned = 0;
6013 skb->hdr_len = 0;
6014 skb->nohdr = 0;
6015 skb->len -= off;
6016 skb->data_len = skb->len;
6017 atomic_set(&skb_shinfo(skb)->dataref, 1);
6018 return 0;
6019}

/* remove len bytes from the beginning of the skb */
6022static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
6023{
6024 int headlen = skb_headlen(skb);
6025
6026 if (len < headlen)
6027 return pskb_carve_inside_header(skb, len, headlen, gfp);
6028 else
6029 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
6030}

/* Extract to_copy bytes starting at off from skb, and return this in
 * a new skb
 */
6035struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
6036 int to_copy, gfp_t gfp)
6037{
6038 struct sk_buff *clone = skb_clone(skb, gfp);
6039
6040 if (!clone)
6041 return NULL;
6042
6043 if (pskb_carve(clone, off, gfp) < 0 ||
6044 pskb_trim(clone, to_copy)) {
6045 kfree_skb(clone);
6046 return NULL;
6047 }
6048 return clone;
6049}
6050EXPORT_SYMBOL(pskb_extract);
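
/**
 * skb_condense - try to get rid of fragments/frag_list if possible
 * @skb: buffer
 *
 * Can be used to save memory before skb is added to a busy queue.
 * If packet has bytes in frags and enough tail room in skb->head,
 * pull all of them, so that we can free the frags right now and adjust
 * truesize.
 *
 * Notes:
 *	We do not reallocate skb->head thus can not fail.
 *	Caller must re-evaluate skb->truesize if needed.
 */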
6064void skb_condense(struct sk_buff *skb)
6065{
6066 if (skb->data_len) {
6067 if (skb->data_len > skb->end - skb->tail ||
6068 skb_cloned(skb))
6069 return;

		/* Nice, we can free page frag(s) right now */
6072 __pskb_pull_tail(skb, skb->data_len);
6073 }
	/* At this point, skb->truesize might be over estimated,
	 * because skb had a fragment, and fragments do not tell
	 * their truesize.
	 * When we pulled its content into skb->head, fragment
	 * was freed, but __pskb_pull_tail() could not possibly
	 * adjust skb truesize to acceptable value.
	 *
	 * We can adjust it if skb is fully linearized.
	 */
6081 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
6082}
6083
6084#ifdef CONFIG_SKB_EXTENSIONS
6085static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
6086{
6087 return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE);
6088}
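
/**
 * __skb_ext_alloc - allocate a new skb extensions storage
 * @flags: See kmalloc().
 *
 * Returns the newly allocated storage, or NULL on allocation failure.
 * The storage can later be attached to an skb via __skb_ext_set().
 */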
6099struct skb_ext *__skb_ext_alloc(gfp_t flags)
6100{
6101 struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags);
6102
6103 if (new) {
6104 memset(new->offset, 0, sizeof(new->offset));
6105 refcount_set(&new->refcnt, 1);
6106 }
6107
6108 return new;
6109}
6110
6111static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
6112 unsigned int old_active)
6113{
6114 struct skb_ext *new;
6115
6116 if (refcount_read(&old->refcnt) == 1)
6117 return old;
6118
6119 new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);
6120 if (!new)
6121 return NULL;
6122
6123 memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE);
6124 refcount_set(&new->refcnt, 1);
6125
6126#ifdef CONFIG_XFRM
6127 if (old_active & (1 << SKB_EXT_SEC_PATH)) {
6128 struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH);
6129 unsigned int i;
6130
6131 for (i = 0; i < sp->len; i++)
6132 xfrm_state_hold(sp->xvec[i]);
6133 }
6134#endif
6135 __skb_ext_put(old);
6136 return new;
6137}
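
/**
 * __skb_ext_set - attach the specified extension storage to this skb
 * @skb: buffer
 * @id: extension id
 * @ext: extension storage previously allocated via __skb_ext_alloc()
 *
 * Existing extensions, if any, are cleared.
 *
 * Returns the pointer to the extension.
 */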
6149void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
6150 struct skb_ext *ext)
6151{
6152 unsigned int newlen, newoff = SKB_EXT_CHUNKSIZEOF(*ext);
6153
6154 skb_ext_put(skb);
6155 newlen = newoff + skb_ext_type_len[id];
6156 ext->chunks = newlen;
6157 ext->offset[id] = newoff;
6158 skb->extensions = ext;
6159 skb->active_extensions = 1 << id;
6160 return skb_ext_get_ptr(ext, id);
6161}
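
/**
 * skb_ext_add - allocate space for given extension, COW if needed
 * @skb: buffer
 * @id: extension to allocate space for
 *
 * Allocates enough space for the given extension.
 * If the extension is already present, a pointer to that extension
 * is returned.
 *
 * If the skb was cloned, COW applies and the returned memory can be
 * modified without changing the extension space of cloned buffers.
 *
 * Returns pointer to the extension or NULL on allocation failure.
 */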
6177void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id)
6178{
6179 struct skb_ext *new, *old = NULL;
6180 unsigned int newlen, newoff;
6181
6182 if (skb->active_extensions) {
6183 old = skb->extensions;
6184
6185 new = skb_ext_maybe_cow(old, skb->active_extensions);
6186 if (!new)
6187 return NULL;
6188
6189 if (__skb_ext_exist(new, id))
6190 goto set_active;
6191
6192 newoff = new->chunks;
6193 } else {
6194 newoff = SKB_EXT_CHUNKSIZEOF(*new);
6195
6196 new = __skb_ext_alloc(GFP_ATOMIC);
6197 if (!new)
6198 return NULL;
6199 }
6200
6201 newlen = newoff + skb_ext_type_len[id];
6202 new->chunks = newlen;
6203 new->offset[id] = newoff;
6204set_active:
6205 skb->extensions = new;
6206 skb->active_extensions |= 1 << id;
6207 return skb_ext_get_ptr(new, id);
6208}
6209EXPORT_SYMBOL(skb_ext_add);
6210
6211#ifdef CONFIG_XFRM
6212static void skb_ext_put_sp(struct sec_path *sp)
6213{
6214 unsigned int i;
6215
6216 for (i = 0; i < sp->len; i++)
6217 xfrm_state_put(sp->xvec[i]);
6218}
6219#endif
6220
6221void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
6222{
6223 struct skb_ext *ext = skb->extensions;
6224
6225 skb->active_extensions &= ~(1 << id);
6226 if (skb->active_extensions == 0) {
6227 skb->extensions = NULL;
6228 __skb_ext_put(ext);
6229#ifdef CONFIG_XFRM
6230 } else if (id == SKB_EXT_SEC_PATH &&
6231 refcount_read(&ext->refcnt) == 1) {
6232 struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH);
6233
6234 skb_ext_put_sp(sp);
6235 sp->len = 0;
6236#endif
6237 }
6238}
6239EXPORT_SYMBOL(__skb_ext_del);
6240
6241void __skb_ext_put(struct skb_ext *ext)
6242{
	/* If this is the sole reference, no one else can bump the
	 * refcount again, so free the storage without an atomic op.
	 */
6246 if (refcount_read(&ext->refcnt) == 1)
6247 goto free_now;
6248
6249 if (!refcount_dec_and_test(&ext->refcnt))
6250 return;
6251free_now:
6252#ifdef CONFIG_XFRM
6253 if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH))
6254 skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH));
6255#endif
6256
6257 kmem_cache_free(skbuff_ext_cache, ext);
6258}
6259EXPORT_SYMBOL(__skb_ext_put);
6260#endif
6261