/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 */
#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/cache.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>

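/*
 * Values for skb->ip_summed:
 *
 * CHECKSUM_NONE:        checksum not yet verified (rx) or not computed (tx).
 * CHECKSUM_UNNECESSARY: the hardware already verified the checksum.
 * CHECKSUM_COMPLETE:    skb->csum holds the checksum over the whole packet.
 * CHECKSUM_PARTIAL:     the checksum must still be completed by hardware,
 *                       starting at csum_start, with the result stored at
 *                       csum_start + csum_offset.
 */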
#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

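/* Minimum truesize of one skb holding X bytes of data. */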
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
struct net_device;
struct scatterlist;
struct pipe_inode_info;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t		use;
	struct net_device	*physindev;
	struct net_device	*physoutdev;
	unsigned int		mask;
	unsigned long		data[32 / sizeof(unsigned long)];
};
#endif

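/* The first two members of sk_buff_head mirror the next/prev pointers of
 * struct sk_buff, so a queue head can stand in for an skb in list links. */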
struct sk_buff_head {
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

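/* Enough fragment slots to hold a 64KB frame split into page-sized chunks
 * without resorting to a frag_list, but never fewer than 16. */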
#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct {
		struct page *p;
	} page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
	__u32 size;
#else
	__u16 page_offset;
	__u16 size;
#endif
};

static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->size;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->size = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->size += delta;
}

static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->size -= delta;
}

#define HAVE_HW_TIME_STAMP
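/*
 * Hardware time stamps carried with a packet: hwtstamp as generated by the
 * device, syststamp the same event transformed to the system time base.
 */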
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
	ktime_t	syststamp;
};

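/*
 * Definitions for tx_flags in struct skb_shared_info: requests for hardware
 * or software transmit time stamping, "time stamping in progress" marking by
 * the driver, drivers that require a socket reference for time stamping,
 * zero-copy payloads whose frags reference user pages, and wifi ack status
 * reporting.
 */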
enum {
	SKBTX_HW_TSTAMP = 1 << 0,

	SKBTX_SW_TSTAMP = 1 << 1,

	SKBTX_IN_PROGRESS = 1 << 2,

	SKBTX_DRV_NEEDS_SK_REF = 1 << 3,

	SKBTX_DEV_ZEROCOPY = 1 << 4,

	SKBTX_WIFI_STATUS = 1 << 5,
};
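/*
 * Handle for zero-copy user buffers attached to an skb: callback(arg) is
 * invoked once the device is done with the data; desc tracks the userspace
 * buffer index.
 */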
struct ubuf_info {
	void (*callback)(void *);
	void *arg;
	unsigned long desc;
};

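/* This data is invariant across clones and lives at the end of the
 * header data, ie. at skb->end. */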
struct skb_shared_info {
	unsigned char	nr_frags;
	__u8		tx_flags;
	unsigned short	gso_size;
	unsigned short	gso_segs;
	unsigned short	gso_type;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	__be32		ip6_frag_id;

	atomic_t	dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor. */
	void		*destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};
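/*
 * skb_shared_info.dataref is split in two halves: the upper 16 bits count
 * references to the payload part of skb->data, the lower 16 bits count
 * references to the whole skb->data. Holding only a payload reference means
 * the holder does not care about modifications to the header part.
 */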
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)


enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
    defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
#endif
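/*
 * struct sk_buff - socket buffer descriptor. A brief orientation:
 *   @next/@prev: links while the skb sits on an sk_buff_head list
 *   @sk:         socket that owns the buffer, if any
 *   @dev:        device we arrived on / are leaving by
 *   @cb:         48 bytes of control-buffer scratch space, owned by the
 *                layer currently handling the skb
 *   @len:        total number of data bytes; @data_len of those live in
 *                paged fragments rather than in the linear area
 *   @head/@data/@tail/@end: bounds of the linear data buffer
 *   @truesize:   buffer size accounted to the owning socket
 *   @users:      reference count on the skb itself
 */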
struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	ktime_t			tstamp;

	struct sock		*sk;
	struct net_device	*dev;

	char			cb[48] __aligned(8);

	unsigned long		_skb_refdst;
#ifdef CONFIG_XFRM
	struct sec_path		*sp;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;
	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	kmemcheck_bitfield_begin(flags1);
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1,
				peeked:1,
				nf_trace:1;
	kmemcheck_bitfield_end(flags1);
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack	*nfct;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif

	int			skb_iif;
#ifdef CONFIG_NET_SCHED
	__u16			tc_index;
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;
#endif
#endif

	__u32			rxhash;

	__u16			queue_mapping;
	kmemcheck_bitfield_begin(flags2);
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif
	__u8			ooo_okay:1;
	__u8			l4_rxhash:1;
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;

	kmemcheck_bitfield_end(flags2);

#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif
	union {
		__u32		mark;
		__u32		dropcount;
	};

	__u16			vlan_tci;

	sk_buff_data_t		transport_header;
	sk_buff_data_t		network_header;
	sk_buff_data_t		mac_header;

	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	atomic_t		users;
};

#ifdef __KERNEL__

#include <linux/slab.h>

#include <asm/system.h>

#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)
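/*
 * skb_dst - return the skb's dst_entry. The low bit of _skb_refdst flags a
 * reference that was not counted (SKB_DST_NOREF); such a dst may only be
 * used inside an RCU read-side section, which the WARN_ON below checks.
 */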
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/*
 * skb_dst_set - set the skb's dst; the caller must already hold a reference
 * on @dst, which skb_dst_drop() will eventually release.
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}

extern void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst);

static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

extern void kfree_skb(struct sk_buff *skb);
extern void consume_skb(struct sk_buff *skb);
extern void __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone, int node);
extern struct sk_buff *build_skb(void *data);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1, NUMA_NO_NODE);
}

extern void skb_recycle(struct sk_buff *skb);
extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);

extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *__pskb_copy(struct sk_buff *skb,
				   int headroom, gfp_t gfp_mask);

extern int pskb_expand_head(struct sk_buff *skb,
			    int nhead, int ntail,
			    gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int skb_to_sgvec(struct sk_buff *skb,
			struct scatterlist *sg, int offset,
			int len);
extern int skb_cow_data(struct sk_buff *skb, int tailbits,
			struct sk_buff **trailer);
extern int skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	consume_skb(a)

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
				    int len, int odd, struct sk_buff *skb),
			void *from, int length);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

extern void skb_prepare_seq_read(struct sk_buff *skb,
				 unsigned int from, unsigned int to,
				 struct skb_seq_state *st);
extern unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
				 struct skb_seq_state *st);
extern void skb_abort_seq_read(struct skb_seq_state *st);

extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
				  unsigned int to, struct ts_config *config,
				  struct ts_state *state);

extern void __skb_get_rxhash(struct sk_buff *skb);
static inline __u32 skb_get_rxhash(struct sk_buff *skb)
{
	if (!skb->rxhash)
		__skb_get_rxhash(skb);

	return skb->rxhash;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}
#endif

#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}
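/*
 * skb_queue_empty - check if a queue is empty; returns true if the queue
 * holds no buffers.
 */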
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/*
 * skb_queue_is_last - check if skb is the last entry in the queue.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (struct sk_buff *)list;
}

/*
 * skb_queue_is_first - check if skb is the first entry in the queue.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (struct sk_buff *)list;
}

/*
 * skb_queue_next - return the next packet in the queue; the caller must
 * ensure that skb is not the last entry.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/*
 * skb_queue_prev - return the previous packet in the queue; the caller must
 * ensure that skb is not the first entry.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}

/*
 * skb_get - take an additional reference on the buffer and return it.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * skb_cloned - is the buffer a clone: true if the buffer was cloned and
 * therefore shares its data with another skb.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

/*
 * skb_header_cloned - is the header a clone: true if modifying the header
 * would require copying the data first.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/*
 * skb_header_release - release this skb's reference to the header part of
 * the data; the header may no longer be modified through this skb afterwards.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/*
 * skb_shared - is the buffer shared: true if more than one entity holds a
 * reference to this buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/*
 * skb_share_check - check if the buffer is shared and if so clone it; the
 * reference on the original skb is dropped and the clone, which is not
 * shared, is returned. NULL is returned on allocation failure.
 * May sleep if @pri allows it.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
					      gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/*
 * skb_unshare - make a private copy of a cloned buffer: if the skb is a
 * clone, copy the data and drop the reference to the old skb so the caller
 * owns a copy it may modify. NULL is returned on allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/*
 * skb_peek - return the buffer at the head of the queue without removing it,
 * or NULL if the queue is empty. No reference is taken; the caller must hold
 * the required locks to ensure the buffer stays valid.
 */
static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
	struct sk_buff *list = ((const struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/*
 * skb_peek_tail - like skb_peek(), but for the end of the queue.
 */
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
	struct sk_buff *list = ((const struct sk_buff *)list_)->prev;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/*
 * skb_queue_len - return the length of the queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/*
 * __skb_queue_head_init - initialize a queue without touching its spinlock;
 * only for queues whose lock is unused or initialized separately.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
		struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

/*
 * The "__skb_..." queue primitives below are the lock-free variants; the
 * caller must already hold the queue lock or otherwise serialize access.
 */
extern void	   skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/*
 * skb_queue_splice - join two lists: @list is spliced in at the front of
 * @head; @list itself is left untouched.
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/*
 * skb_queue_splice_init - like skb_queue_splice(), but @list is
 * reinitialised (emptied) afterwards.
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/*
 * skb_queue_splice_tail - join two lists: @list goes at the tail of @head.
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/*
 * skb_queue_splice_tail_init - like skb_queue_splice_tail(), but @list is
 * reinitialised (emptied) afterwards.
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/*
 * __skb_queue_after - queue a buffer just after a given buffer on the list;
 * lock-free, so the appropriate queue lock must already be held.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		       struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/*
 * __skb_queue_head - queue a buffer at the start of the list (lock-free).
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/*
 * __skb_queue_tail - queue a buffer at the end of the list (lock-free).
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}

/*
 * Remove an sk_buff from a list. Must be called atomically and with the
 * list known.
 */
extern void	   skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

/*
 * __skb_dequeue - remove and return the head of the list, or NULL if the
 * list is empty (lock-free).
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

/*
 * __skb_dequeue_tail - remove and return the tail of the list, or NULL if
 * the list is empty (lock-free).
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
	return len + skb_headlen(skb);
}

/*
 * __skb_fill_page_desc - initialise fragment @i of @skb to point at @size
 * bytes at offset @off within @page. Does not update nr_frags and takes no
 * reference on @page; the caller must do so if required.
 */
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page.p	  = page;
	frag->page_offset = off;
	skb_frag_size_set(frag, size);
}

/*
 * skb_fill_page_desc - as above, but also bumps nr_frags to i + 1.
 */
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	__skb_fill_page_desc(skb, i, page, off, size);
	skb_shinfo(skb)->nr_frags = i + 1;
}

extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
			    int off, int size);

#define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_has_frag_list(skb))
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}
#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
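
/*
 * A minimal transmit-path sketch of the data movers; hlen, dlen, header and
 * payload are illustrative names, not defined in this header:
 *
 *	struct sk_buff *skb = alloc_skb(hlen + dlen, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);				(reserve headroom)
 *	memcpy(skb_put(skb, dlen), payload, dlen);	(append payload)
 *	memcpy(skb_push(skb, hlen), header, hlen);	(prepend header)
 */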

/*
 * skb_headroom - bytes of free space at the head of the buffer.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/*
 * skb_tailroom - bytes of free space at the tail of the buffer (0 for
 * nonlinear buffers).
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/*
 * skb_reserve - adjust headroom of an empty buffer by moving both the data
 * and tail pointers forward by @len bytes.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != ~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb->transport_header = skb->data + offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb->network_header = skb->data + offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != NULL;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb->mac_header = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */

static inline void skb_mac_header_rebuild(struct sk_buff *skb)
{
	if (skb_mac_header_was_set(skb)) {
		const unsigned char *old_mac = skb_mac_header(skb);

		skb_set_mac_header(skb, -skb->mac_len);
		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
	}
}

static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
	return pskb_may_pull(skb, skb_network_offset(skb) + len);
}
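/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. Since an Ethernet header is 14 bytes, reserving NET_IP_ALIGN
 * (2) bytes of headroom before the frame makes the IP header land on a
 * 16-byte boundary. Architectures that handle unaligned accesses cheaply,
 * or whose DMA requires it, may override this to 0.
 */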
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif
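/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb), mostly so that protocols which need to expand headers in
 * place (tunnelling, for instance) can usually avoid an expensive
 * pskb_expand_head(). Using max(32, L1_CACHE_BYTES) keeps the padding
 * cache-line friendly.
 */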
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif

extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb_is_nonlinear(skb))) {
		WARN_ON(1);
		return;
	}
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

extern void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/*
 * pskb_trim_unique - trim a buffer that is known not to be cloned; the
 * trim therefore cannot fail, which the BUG_ON asserts.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

/*
 * skb_orphan - detach the buffer from its owning socket: the destructor is
 * called and the skb no longer charges its memory to the socket.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk		= NULL;
}

/*
 * __skb_queue_purge - empty a list, freeing every buffer on it (lock-free).
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

/*
 * __dev_alloc_skb - allocate an skbuff for receiving, with NET_SKB_PAD bytes
 * of headroom already reserved. Returns NULL on allocation failure.
 */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
	if (likely(skb))
		skb_reserve(skb, NET_SKB_PAD);
	return skb;
}

extern struct sk_buff *dev_alloc_skb(unsigned int length);

extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
		unsigned int length, gfp_t gfp_mask);

/*
 * netdev_alloc_skb - allocate an skbuff for rx on a specific device, using
 * GFP_ATOMIC; the _ip_align variants additionally reserve NET_IP_ALIGN
 * bytes of headroom so the IP header ends up aligned.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
					       unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}

/*
 * skb_frag_page - retrieve the page referred to by a paged fragment.
 */
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
	return frag->page.p;
}

static inline void __skb_frag_ref(skb_frag_t *frag)
{
	get_page(skb_frag_page(frag));
}

/*
 * skb_frag_ref - take an additional reference on the page backing the
 * @f'th paged fragment of @skb.
 */
static inline void skb_frag_ref(struct sk_buff *skb, int f)
{
	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}

static inline void __skb_frag_unref(skb_frag_t *frag)
{
	put_page(skb_frag_page(frag));
}

/*
 * skb_frag_unref - release a reference on the page backing the @f'th paged
 * fragment of @skb.
 */
static inline void skb_frag_unref(struct sk_buff *skb, int f)
{
	__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
}

/*
 * skb_frag_address - kernel virtual address of the fragment data; see
 * skb_frag_address_safe() for the variant that checks the page mapping.
 */
static inline void *skb_frag_address(const skb_frag_t *frag)
{
	return page_address(skb_frag_page(frag)) + frag->page_offset;
}

static inline void *skb_frag_address_safe(const skb_frag_t *frag)
{
	void *ptr = page_address(skb_frag_page(frag));
	if (unlikely(!ptr))
		return NULL;

	return ptr + frag->page_offset;
}

static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
	frag->page.p = page;
}

/*
 * skb_frag_set_page - point the @f'th paged fragment of @skb at @page.
 */
static inline void skb_frag_set_page(struct sk_buff *skb, int f,
				     struct page *page)
{
	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
}

/*
 * skb_frag_dma_map - map @size bytes of a paged fragment, starting at
 * @offset within the fragment, for streaming DMA in direction @dir.
 */
static inline dma_addr_t skb_frag_dma_map(struct device *dev,
					  const skb_frag_t *frag,
					  size_t offset, size_t size,
					  enum dma_data_direction dir)
{
	return dma_map_page(dev, skb_frag_page(frag),
			    frag->page_offset + offset, size, dir);
}

static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
					gfp_t gfp_mask)
{
	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
}

/*
 * skb_clone_writable - is the header of a clone writable: true if modifying
 * the header of a cloned skb does not require copying the data first.
 */
static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom < NET_SKB_PAD)
		headroom = NET_SKB_PAD;
	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/*
 * skb_cow - copy the buffer before writing to it: ensures at least @headroom
 * bytes of headroom and that the data is private to the caller, reallocating
 * if the buffer is cloned or short on headroom. Returns 0 on success or a
 * negative errno on allocation failure.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/*
 * skb_cow_head - like skb_cow(), but only copies if the *header* is cloned;
 * cheaper when only the headers will be modified.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}

/*
 * skb_padto - pad the buffer with zeroes up to a total length of @len bytes;
 * buffers already at least @len bytes long are left untouched. Returns 0 on
 * success or a negative errno on failure.
 */
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
							    copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline int skb_can_coalesce(struct sk_buff *skb, int i,
				   const struct page *page, int off)
{
	if (i) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == skb_frag_page(frag) &&
		       off == frag->page_offset + skb_frag_size(frag);
	}
	return 0;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/*
 * skb_linearize - convert a paged skb to a linear one; returns 0 on success,
 * -ENOMEM if memory could not be allocated.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/*
 * skb_linearize_cow - make sure the skb is linear *and* not a clone.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

/*
 * skb_postpull_rcsum - update the checksum after data has been pulled off
 * the front of the buffer: for CHECKSUM_COMPLETE packets the checksum of the
 * removed bytes is subtracted from skb->csum.
 */
static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}

unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/*
 * pskb_trim_rcsum - trim a received skb to @len bytes; a CHECKSUM_COMPLETE
 * value would no longer match the trimmed data, so it is downgraded to
 * CHECKSUM_NONE before trimming.
 */
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;					\
		     skb != (struct sk_buff *)(queue);				\
		     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)					\
		for (skb = (queue)->next, tmp = skb->next;			\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

#define skb_queue_walk_from(queue, skb)						\
		for (; skb != (struct sk_buff *)(queue);			\
		     skb = skb->next)

#define skb_queue_walk_from_safe(queue, skb, tmp)				\
		for (tmp = skb->next;						\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;					\
		     skb != (struct sk_buff *)(queue);				\
		     skb = skb->prev)

#define skb_queue_reverse_walk_safe(queue, skb, tmp)				\
		for (skb = (queue)->prev, tmp = skb->prev;			\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->prev)

#define skb_queue_reverse_walk_from_safe(queue, skb, tmp)			\
		for (tmp = skb->prev;						\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->prev)

static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
{
	frag->next = skb_shinfo(skb)->frag_list;
	skb_shinfo(skb)->frag_list = frag;
}

#define skb_walk_frags(skb, iter)	\
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)

extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
					   int *peeked, int *err);
extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
					 int noblock, int *err);
extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
				     struct poll_table_struct *wait);
extern int	       skb_copy_datagram_iovec(const struct sk_buff *from,
					       int offset, struct iovec *to,
					       int size);
extern int	       skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
							int hlen,
							struct iovec *iov);
extern int	       skb_copy_datagram_from_iovec(struct sk_buff *skb,
						    int offset,
						    const struct iovec *from,
						    int from_offset,
						    int len);
extern int	       skb_copy_datagram_const_iovec(const struct sk_buff *from,
						     int offset,
						     const struct iovec *to,
						     int to_offset,
						     int size);
extern void	       skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern void	       skb_free_datagram_locked(struct sock *sk,
						struct sk_buff *skb);
extern int	       skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
					 unsigned int flags);
extern __wsum	       skb_checksum(const struct sk_buff *skb, int offset,
				    int len, __wsum csum);
extern int	       skb_copy_bits(const struct sk_buff *skb, int offset,
				     void *to, int len);
extern int	       skb_store_bits(struct sk_buff *skb, int offset,
				      const void *from, int len);
extern __wsum	       skb_copy_and_csum_bits(const struct sk_buff *skb,
					      int offset, u8 *to, int len,
					      __wsum csum);
extern int	       skb_splice_bits(struct sk_buff *skb,
				       unsigned int offset,
				       struct pipe_inode_info *pipe,
				       unsigned int len,
				       unsigned int flags);
extern void	       skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void	       skb_split(struct sk_buff *skb,
				 struct sk_buff *skb1, const u32 len);
extern int	       skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
				 int shiftlen);

extern struct sk_buff *skb_segment(struct sk_buff *skb,
				   netdev_features_t features);

/*
 * skb_header_pointer - return a pointer to @len bytes at @offset; if they do
 * not all sit in the linear area they are copied into @buffer instead.
 * Returns NULL if the skb is too short.
 */
static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
{
	int hlen = skb_headlen(skb);

	if (hlen - offset >= len)
		return skb->data + offset;

	if (skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						   const int offset,
						   const void *from,
						   const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}

extern void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}

/*
 * skb_get_timestamp / skb_get_timestampns - convert the skb's ktime_t
 * timestamp into a timeval or timespec, typically for passing to user space.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct timeval *stamp)
{
	*stamp = ktime_to_timeval(skb->tstamp);
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
				       struct timespec *stamp)
{
	*stamp = ktime_to_timespec(skb->tstamp);
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return ktime_set(0, 0);
}

extern void skb_timestamping_init(void);

#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING

extern void skb_clone_tx_timestamp(struct sk_buff *skb);
extern bool skb_defer_rx_timestamp(struct sk_buff *skb);

#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */

static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	return false;
}

#endif /* CONFIG_NETWORK_PHY_TIMESTAMPING */

/*
 * skb_complete_tx_timestamp - deliver a cloned skb, together with its
 * hardware transmit time stamp, back to the originating socket.
 */
void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps);

/*
 * skb_tstamp_tx - queue a clone of @orig_skb with the given hardware time
 * stamps, or with a software time stamp if @hwtstamps is NULL, on the
 * socket's error queue so the application can retrieve them.
 */
extern void skb_tstamp_tx(struct sk_buff *orig_skb,
			  struct skb_shared_hwtstamps *hwtstamps);

static inline void sw_tx_timestamp(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
	    !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		skb_tstamp_tx(skb, NULL);
}

/*
 * skb_tx_timestamp - driver hook for transmit time stamping: should be
 * called just before the packet is handed to the hardware, i.e. after the
 * last point where software can touch it.
 */
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	skb_clone_tx_timestamp(skb);
	sw_tx_timestamp(skb);
}

/*
 * skb_complete_wifi_ack - report the wifi ack status of a transmitted skb
 * back to the socket (for senders that requested SKBTX_WIFI_STATUS).
 */
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);

extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
extern __sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return skb->ip_summed & CHECKSUM_UNNECESSARY;
}

/*
 * skb_checksum_complete - validate the packet checksum unless the hardware
 * already verified it (CHECKSUM_UNNECESSARY); returns 0 if the checksum is
 * valid, non-zero otherwise.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
	if (skb)
		atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(skb->nfct_reasm);
	skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

/* Copy netfilter state from src to dst, taking references; unlike nf_copy()
 * it does not drop any state dst may already hold. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->nfct = src->nfct;
	nf_conntrack_get(src->nfct);
	dst->nfctinfo = src->nfctinfo;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	dst->nfct_reasm = src->nfct_reasm;
	nf_conntrack_get_reasm(src->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	dst->nf_bridge = src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(dst->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(dst->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}

extern u16 __skb_tx_hash(const struct net_device *dev,
			 const struct sk_buff *skb,
			 unsigned int num_tx_queues);

#ifdef CONFIG_XFRM
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return skb->sp;
}
#else
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return NULL;
}
#endif

static inline int skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

static inline int skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set. */
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Forwarding cannot currently preserve CHECKSUM_COMPLETE. */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/*
 * skb_checksum_none_assert - assert that ip_summed is CHECKSUM_NONE;
 * documents places where a freshly received skb is expected to carry no
 * checksum state (checked only when DEBUG is defined).
 */
static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

/*
 * skb_is_recycleable - can this skb be reused for reception: it must be
 * linear, unshared, uncloned, not a zero-copy buffer and large enough for
 * the requested size; also fails if IRQs are disabled.
 */
static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
{
	if (irqs_disabled())
		return false;

	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
		return false;

	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
		return false;

	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
	if (skb_end_pointer(skb) - skb->head < skb_size)
		return false;

	if (skb_shared(skb) || skb_cloned(skb))
		return false;

	return true;
}
#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */