#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/cache.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>

/* Checksum states for skb->ip_summed */
#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

struct net_device;
struct scatterlist;
struct pipe_inode_info;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t use;
	struct net_device *physindev;
	struct net_device *physoutdev;
	unsigned int mask;
	unsigned long data[32 / sizeof(unsigned long)];
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list */
#if (65536/PAGE_SIZE + 2) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)
#endif

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
	__u32 size;
#else
	__u16 page_offset;
	__u16 size;
#endif
};

#define HAVE_HW_TIME_STAMP

/* hardware time stamps stored in the skb shared info */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
	ktime_t	syststamp;
};

/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* ensure the originating sk reference is available on driver level */
	SKBTX_DRV_NEEDS_SK_REF = 1 << 3,

	/* device driver supports TX zero-copy buffers */
	SKBTX_DEV_ZEROCOPY = 1 << 4,
};

/*
 * The callback notifies userspace to release buffers when skb DMA is done
 * in the lower device; the desc field is used to track the userspace
 * buffer index.
 */
struct ubuf_info {
	void (*callback)(void *);
	void *arg;
	unsigned long desc;
};

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	unsigned short	nr_frags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short	gso_type;
	__be32		ip6_frag_id;
	__u8		tx_flags;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;

	/*
	 * Warning : all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void		*destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)

enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
    defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
#endif

/*
 *	struct sk_buff - socket buffer
 *
 *	The core network packet structure: the packet payload lives in the
 *	buffer between skb->head and skb->end, and the fields below track
 *	the header offsets, checksum state, cloning/sharing state and
 *	per-layer control information.  See the inline helpers later in
 *	this header for the intended ways to manipulate it.
 */
struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	ktime_t			tstamp;

	struct sock		*sk;
	struct net_device	*dev;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	unsigned long		_skb_refdst;
#ifdef CONFIG_XFRM
	struct sec_path		*sp;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;
	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	kmemcheck_bitfield_begin(flags1);
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1,
				peeked:1,
				nf_trace:1;
	kmemcheck_bitfield_end(flags1);
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack	*nfct;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif

	int			skb_iif;
#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif

	__u32			rxhash;

	__u16			queue_mapping;
	kmemcheck_bitfield_begin(flags2);
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif
	__u8			ooo_okay:1;
	kmemcheck_bitfield_end(flags2);

	/* 0/13 bit hole */

#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif
	union {
		__u32		mark;
		__u32		dropcount;
	};

	__u16			vlan_tci;

	sk_buff_data_t		transport_header;
	sk_buff_data_t		network_header;
	sk_buff_data_t		mac_header;
	/* These elements must be at the end, see alloc_skb() for details. */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	atomic_t		users;
};
#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>

#include <asm/system.h>

/*
 * skb might have a dst attached, which is refcounted or not.
 * _skb_refdst low order bit is set if the refcount was _not_ taken.
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of reference taken or not.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check we still are in a
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop().
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}

extern void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst);

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

extern void kfree_skb(struct sk_buff *skb);
extern void consume_skb(struct sk_buff *skb);
extern void __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone, int node);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1, NUMA_NO_NODE);
}

extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);

extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *pskb_copy(struct sk_buff *skb,
				 gfp_t gfp_mask);
extern int pskb_expand_head(struct sk_buff *skb,
			    int nhead, int ntail,
			    gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int skb_to_sgvec(struct sk_buff *skb,
			struct scatterlist *sg, int offset,
			int len);
extern int skb_cow_data(struct sk_buff *skb, int tailbits,
			struct sk_buff **trailer);
extern int skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	consume_skb(a)

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
				    int len, int odd, struct sk_buff *skb),
			void *from, int length);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

extern void skb_prepare_seq_read(struct sk_buff *skb,
				 unsigned int from, unsigned int to,
				 struct skb_seq_state *st);
extern unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
				 struct skb_seq_state *st);
extern void skb_abort_seq_read(struct skb_seq_state *st);

extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
				  unsigned int to, struct ts_config *config,
				  struct ts_state *state);

extern __u32 __skb_get_rxhash(struct sk_buff *skb);
static inline __u32 skb_get_rxhash(struct sk_buff *skb)
{
	if (!skb->rxhash)
		skb->rxhash = __skb_get_rxhash(skb);

	return skb->rxhash;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (struct sk_buff *)list;
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb.  It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb.  It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant atomic
 * changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this operation.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt status or with spinlocks held pri must
 *	be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
					      gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
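
/*
 * Example (illustrative, not part of the original header): a receive
 * handler should take ownership before modifying an skb.  If the skb is
 * shared, skb_share_check() clones it and drops the original reference;
 * on clone failure it frees the skb and returns NULL, so the caller must
 * not touch the old pointer again.  NET_RX_DROP comes from
 * <linux/netdevice.h>; the surrounding handler is hypothetical:
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return NET_RX_DROP;
 */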

/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a
 *	clone the original buffer is returned. When called with a spinlock
 *	held or from interrupt state @pri must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is
 *	therefore volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff.  The same caveats as for skb_peek() apply: the
 *	buffer stays on the list and the caller must hold the appropriate
 *	locks.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is
 *	therefore volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object.  This allows to initialize the list
 *	aspects of an sk_buff_head without reinitializing things like
 *	the spinlock.  It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known to not be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only).
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
		struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}
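
/*
 * Example (illustrative): a private FIFO of buffers using the locked
 * queue helpers declared in this header.  The queue name is hypothetical;
 * the locked variants take list->lock internally:
 *
 *	struct sk_buff_head rxq;
 *
 *	skb_queue_head_init(&rxq);
 *	skb_queue_tail(&rxq, skb);	(enqueue at tail, takes rxq.lock)
 *	skb = skb_dequeue(&rxq);	(returns NULL when empty)
 *	skb_queue_purge(&rxq);		(frees anything left over)
 */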

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	__skb_queue_after - queue a buffer after a given buffer
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		       struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
extern void	   skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}


static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_shinfo(skb)->frags[i].size;
	return len + skb_headlen(skb);
}

static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page		  = page;
	frag->page_offset	  = off;
	frag->size		  = size;
	skb_shinfo(skb)->nr_frags = i + 1;
}

extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
			    int off, int size);

#define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_has_frag_list(skb))
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}
#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}
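
/*
 * Example (illustrative): skb_put() grows the data area at the tail and
 * returns a pointer to the newly added region, so filling a freshly
 * allocated linear skb looks like this (len and payload are assumed
 * names from the caller's context):
 *
 *	skb = alloc_skb(len, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	memcpy(skb_put(skb, len), payload, len);
 */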

extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
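
/*
 * Example (illustrative): before reading a header through skb->data,
 * make sure that many bytes are actually present in the linear area.
 * A minimal IPv4 sanity check might read as follows, assuming skb->data
 * is positioned at the IP header and ip_hdr() from <linux/ip.h>:
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;
 *	iph = ip_hdr(skb);
 *	if (!pskb_may_pull(skb, iph->ihl * 4))
 *		goto drop;
 */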

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff.
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}
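
/*
 * Example (illustrative): skb_reserve() is used on an empty buffer to
 * leave headroom for headers added later with skb_push(), e.g. a
 * transmit path reserving space for a link-layer header (hlen, len and
 * data are assumed names from the caller's context):
 *
 *	skb = alloc_skb(hlen + len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);
 *	memcpy(skb_put(skb, len), data, len);
 *	(later, skb_push(skb, hlen) prepends the header into the
 *	 reserved headroom)
 */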

static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != ~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb->transport_header = skb->data + offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb->network_header = skb->data + offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != NULL;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb->mac_header = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */

static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
	return pskb_may_pull(skb, skb_network_offset(skb) + len);
}

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies: it can be small if the
 * hardware handles it, or large if we have to take an exception and fix
 * it in software.
 *
 * Since an ethernet header is 14 bytes, network drivers often end up
 * with the IP header at an unaligned offset. The IP header can be
 * aligned by shifting the start of the packet by 2 bytes. Drivers
 * should do this with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is
 * now unaligned. On some architectures the cost of an unaligned DMA is
 * high and this cost outweighs the gains made by aligning the IP
 * header, so NET_IP_ALIGN may be overridden per architecture.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif
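
/*
 * Worked example: an Ethernet header is 14 bytes, so if the frame starts
 * on a 16-byte boundary the IP header lands at offset 14, which is only
 * 2-byte aligned.  Reserving NET_IP_ALIGN (2) bytes first makes the IP
 * header start at 2 + 14 = 16 bytes into the buffer, i.e. 4-byte
 * aligned:
 *
 *	skb_reserve(skb, NET_IP_ALIGN);
 */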

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data
 * when frames are pushed further down the stack (e.g. by tunnels, or
 * when a device prepends its own hard header).
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom; you should not reduce this.
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif

extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb_is_nonlinear(skb))) {
		WARN_ON(1);
		return;
	}
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

extern void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to
 *	out-of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk		= NULL;
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

/**
 *	__dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
	if (likely(skb))
		skb_reserve(skb, NET_SKB_PAD);
	return skb;
}

extern struct sk_buff *dev_alloc_skb(unsigned int length);

extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
		unsigned int length, gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}
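
/*
 * Example (illustrative): a typical driver receive path, assuming
 * hypothetical names for the received frame buffer and length;
 * eth_type_trans() is from <linux/etherdevice.h> and netif_rx() from
 * <linux/netdevice.h>:
 *
 *	skb = netdev_alloc_skb_ip_align(dev, frame_len);
 *	if (!skb)
 *		return;			(rx drop, retry later)
 *	memcpy(skb_put(skb, frame_len), rx_buf, frame_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */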

/**
 *	__netdev_alloc_page - allocate a page for ps-rx on a specific device
 *	@dev: network device to receive on
 *	@gfp_mask: alloc_pages_node mask
 *
 *	Allocate a new page. dev currently unused.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
{
	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, 0);
}

/**
 *	netdev_alloc_page - allocate a page for ps-rx on a specific device
 *	@dev: network device to receive on
 *
 *	Allocate a new page. dev currently unused.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct page *netdev_alloc_page(struct net_device *dev)
{
	return __netdev_alloc_page(dev, GFP_ATOMIC);
}

static inline void netdev_free_page(struct net_device *dev, struct page *page)
{
	__free_page(page);
}

/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not require the data to be copied.
 */
static inline int skb_clone_writable(struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom < NET_SKB_PAD)
		headroom = NET_SKB_PAD;
	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and the original skb is not changed.
 *
 *	The result is an skb with a writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 *	skb_cow_head - skb_cow but only making the head writable
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	This function is identical to skb_cow except that we replace the
 *	skb_cloned check by skb_header_cloned.  It should be used when
 *	you only need to push on some header and do not need to modify
 *	the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}
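
/*
 * Example (illustrative): a transmit path that needs to prepend or edit
 * a header must un-share the header area first.  VLAN_HLEN is from
 * <linux/if_vlan.h>; the surrounding transmit context is assumed:
 *
 *	if (skb_cow_head(skb, VLAN_HLEN))
 *		goto drop;
 *	(the header area is now private and has at least VLAN_HLEN
 *	 bytes of headroom for skb_push())
 */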

/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}
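
/*
 * Example (illustrative): Ethernet requires a 60-byte minimum frame
 * (before FCS), so a transmit routine typically pads short skbs.
 * ETH_ZLEN and NETDEV_TX_OK come from other headers; note that
 * skb_padto() frees the skb on failure, so the error path must not
 * touch it again:
 *
 *	if (skb_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;	(skb was already freed)
 */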

static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
							    copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline int skb_can_coalesce(struct sk_buff *skb, int i,
				   struct page *page, int off)
{
	if (i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == frag->page &&
		       off == frag->page_offset + frag->size;
	}
	return 0;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */
static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}

unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets is still valid after the operation.
 */
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;				\
		     skb != (struct sk_buff *)(queue);			\
		     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)				\
		for (skb = (queue)->next, tmp = skb->next;		\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->next)

#define skb_queue_walk_from(queue, skb)					\
		for (; skb != (struct sk_buff *)(queue);		\
		     skb = skb->next)

#define skb_queue_walk_from_safe(queue, skb, tmp)			\
		for (tmp = skb->next;					\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;				\
		     skb != (struct sk_buff *)(queue);			\
		     skb = skb->prev)

#define skb_queue_reverse_walk_safe(queue, skb, tmp)			\
		for (skb = (queue)->prev, tmp = skb->prev;		\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->prev)

#define skb_queue_reverse_walk_from_safe(queue, skb, tmp)		\
		for (tmp = skb->prev;					\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->prev)
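
/*
 * Example (illustrative): walking a queue while possibly unlinking
 * entries requires the _safe variant and the queue lock.  The queue
 * pointer q and the should_drop() predicate are hypothetical:
 *
 *	spin_lock_irqsave(&q->lock, flags);
 *	skb_queue_walk_safe(q, skb, tmp) {
 *		if (should_drop(skb)) {
 *			__skb_unlink(skb, q);
 *			kfree_skb(skb);
 *		}
 *	}
 *	spin_unlock_irqrestore(&q->lock, flags);
 */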

static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
{
	frag->next = skb_shinfo(skb)->frag_list;
	skb_shinfo(skb)->frag_list = frag;
}

#define skb_walk_frags(skb, iter)	\
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)

extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
					   int *peeked, int *err);
extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
					 int noblock, int *err);
extern unsigned int datagram_poll(struct file *file, struct socket *sock,
				  struct poll_table_struct *wait);
extern int skb_copy_datagram_iovec(const struct sk_buff *from,
				   int offset, struct iovec *to,
				   int size);
extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
					    int hlen,
					    struct iovec *iov);
extern int skb_copy_datagram_from_iovec(struct sk_buff *skb,
					int offset,
					const struct iovec *from,
					int from_offset,
					int len);
extern int skb_copy_datagram_const_iovec(const struct sk_buff *from,
					 int offset,
					 const struct iovec *to,
					 int to_offset,
					 int size);
extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern void skb_free_datagram_locked(struct sock *sk,
				     struct sk_buff *skb);
extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
			     unsigned int flags);
extern __wsum skb_checksum(const struct sk_buff *skb, int offset,
			   int len, __wsum csum);
extern int skb_copy_bits(const struct sk_buff *skb, int offset,
			 void *to, int len);
extern int skb_store_bits(struct sk_buff *skb, int offset,
			  const void *from, int len);
extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb,
				     int offset, u8 *to, int len,
				     __wsum csum);
extern int skb_splice_bits(struct sk_buff *skb,
			   unsigned int offset,
			   struct pipe_inode_info *pipe,
			   unsigned int len,
			   unsigned int flags);
extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void skb_split(struct sk_buff *skb,
		      struct sk_buff *skb1, const u32 len);
extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
		     int shiftlen);

extern struct sk_buff *skb_segment(struct sk_buff *skb, u32 features);

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
{
	int hlen = skb_headlen(skb);

	if (hlen - offset >= len)
		return skb->data + offset;

	if (skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}
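
/*
 * Example (illustrative): skb_header_pointer() returns a pointer into
 * the linear data when possible and otherwise copies the bytes into the
 * caller's buffer, so it also works on non-linear skbs.  Reading the
 * TCP ports at an assumed transport offset thoff might look like:
 *
 *	struct tcphdr _tcph, *th;
 *
 *	th = skb_header_pointer(skb, thoff, sizeof(_tcph), &_tcph);
 *	if (!th)
 *		goto drop;
 *	sport = th->source;
 */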

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}

extern void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and
 *	stores it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct timeval *stamp)
{
	*stamp = ktime_to_timeval(skb->tstamp);
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
				       struct timespec *stamp)
{
	*stamp = ktime_to_timespec(skb->tstamp);
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return ktime_set(0, 0);
}

extern void skb_timestamping_init(void);

#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING

extern void skb_clone_tx_timestamp(struct sk_buff *skb);
extern bool skb_defer_rx_timestamp(struct sk_buff *skb);

#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */

static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	return false;
}

#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */

/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb back to the stack, with
 * or without a timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps, may be NULL if not available
 */
void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps);

/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb:	the original outgoing packet
 * @hwtstamps:	hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket.  Errors are silently ignored.
 */
extern void skb_tstamp_tx(struct sk_buff *orig_skb,
			struct skb_shared_hwtstamps *hwtstamps);

static inline void sw_tx_timestamp(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
	    !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		skb_tstamp_tx(skb, NULL);
}

/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * @skb: A socket buffer.
 */
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	skb_clone_tx_timestamp(skb);
	sw_tx_timestamp(skb);
}
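
/*
 * Example (illustrative): a driver's transmit routine calls
 * skb_tx_timestamp() just before handing the skb to hardware, so
 * software timestamps are taken as close to the wire as possible.
 * The descriptor handling and NETDEV_TX_OK return are the usual
 * driver context, assumed here:
 *
 *	skb_tx_timestamp(skb);
 *	(write the hardware descriptor, ring the doorbell)
 *	return NETDEV_TX_OK;
 */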

extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
extern __sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return skb->ip_summed & CHECKSUM_UNNECESSARY;
}

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify that checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is correct.  In particular, this function will return zero
 *	if skb->ip_summed is CHECKSUM_UNNECESSARY, which indicates that the
 *	hardware has already verified the correctness of the checksum.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}
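
/*
 * Example (illustrative): a receive path verifying a full packet
 * checksum, trusting hardware validation when ip_summed says it has
 * already been done:
 *
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;
 *	(zero means the hardware already validated the checksum, or
 *	 the checksum computed over the data came out correct)
 */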

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
	if (skb)
		atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(skb->nfct_reasm);
	skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

/* Note: This doesn't put any conntrack and bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->nfct = src->nfct;
	nf_conntrack_get(src->nfct);
	dst->nfctinfo = src->nfctinfo;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	dst->nfct_reasm = src->nfct_reasm;
	nf_conntrack_get_reasm(src->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	dst->nf_bridge  = src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(dst->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(dst->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}

extern u16 __skb_tx_hash(const struct net_device *dev,
			 const struct sk_buff *skb,
			 unsigned int num_tx_queues);

#ifdef CONFIG_XFRM
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return skb->sp;
}
#else
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return NULL;
}
#endif

static inline int skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

static inline int skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set. */
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one.  Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper, to document places where we make this assertion.
 */
static inline void skb_checksum_none_assert(struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */