/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/cache.h>

#include <asm/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>

/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))
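
/*
 * Illustrative arithmetic (a sketch, assuming SMP_CACHE_BYTES == 64;
 * not part of the original header): SKB_DATA_ALIGN(1500) rounds a
 * 1500-byte request up to 1536, so the struct skb_shared_info that
 * lives at skb->end stays cache-line aligned.  Conversely,
 * SKB_WITH_OVERHEAD(4096) is one page minus the aligned size of
 * struct skb_shared_info: the largest payload whose allocation still
 * fits in a single page.
 */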

/* A. Checksumming of received packets by device.
 *
 *	NONE: device failed to checksum this packet.
 *		skb->csum is undefined.
 *
 *	UNNECESSARY: device parsed the packet and verified the checksum.
 *		skb->csum is undefined.
 *
 *	COMPLETE: the most generic way. The device supplied the checksum
 *	    of the _whole_ packet as delivered by the NIC in skb->csum.
 *	    Upper layers can use it (together with the pseudo header sum)
 *	    to verify any complete Internet checksum. This works for any
 *	    protocol, but is only possible on devices that do not parse
 *	    protocols themselves.
 *
 * B. Checksumming on output.
 *
 *	NONE: the skb was already checksummed by the protocol, or no
 *	    checksum is required.
 *
 *	PARTIAL: the device is required to checksum the packet as seen by
 *	    hard_start_xmit() from skb->csum_start up to the end, and to
 *	    record the checksum at offset skb->csum_start + skb->csum_offset.
 *
 * Device drivers advertise the kinds of checksumming they support via
 * the NETIF_F_* feature flags; see linux/netdevice.h.
 */
struct net_device;
struct scatterlist;
struct pipe_inode_info;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t		use;
	struct net_device	*physindev;
	struct net_device	*physoutdev;
	unsigned int		mask;
	unsigned long		data[32 / sizeof(unsigned long)];
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list */
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
	__u32 size;
#else
	__u16 page_offset;
	__u16 size;
#endif
};

#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since arbitrary point in time
 * @syststamp:	hwtstamp transformed to system time base
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp.  syststamp and tstamp can be compared against each
 * other in arbitrary combinations; the accuracy of such a comparison
 * is limited by the accuracy of the transformation into the system
 * time base, which depends on the device driver and its underlying
 * hardware.  hwtstamps can only be compared against other hwtstamps
 * from the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
	ktime_t	syststamp;
};

/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* ensure the originating sk reference is available on driver level */
	SKBTX_DRV_NEEDS_SK_REF = 1 << 3,
};

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	unsigned short	nr_frags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short	gso_type;
	__be32		ip6_frag_id;
	__u8		tx_flags;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;

	/*
	 * Warning : all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until the driver has released the skb.
	 */
	void		*destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)


enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
    defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
#endif

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@tstamp: Time we arrived
 *	@sk: Socket we are owned by
 *	@dev: Device we arrived on/are leaving by
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@_skb_refdst: destination entry (with norefcount bit)
 *	@sp: the security path, used for xfrm
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@hdr_len: writable header length of cloned skb
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
 *	@priority: Packet queueing priority
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@ip_summed: Driver fed us an IP checksum
 *	@nohdr: Payload reference only, must not modify header
 *	@nfctinfo: Relationship of this skb to the connection
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ipvs_property: skbuff is owned by ipvs
 *	@peeked: this packet has been seen already, so stats have been
 *		done for it, don't do them again
 *	@nf_trace: netfilter packet trace flag
 *	@protocol: Packet protocol from driver
 *	@destructor: Destruct function
 *	@nfct: Associated connection, if any
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@skb_iif: ifindex of device we arrived on
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@rxhash: the packet hash computed on receive
 *	@queue_mapping: Queue mapping for multiqueue devices
 *	@ndisc_nodetype: router type (from link layer)
 *	@ooo_okay: allow the mapping of a socket to a queue to be changed
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 *	@mark: Generic packet mark
 *	@dropcount: total number of sk_receive_queue overflows
 *	@vlan_tci: vlan tag control information
 *	@transport_header: Transport layer header
 *	@network_header: Network layer header
 *	@mac_header: Link layer header
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@truesize: Buffer size
 *	@users: User count - see {datagram,tcp}.c
 */
struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	ktime_t			tstamp;

	struct sock		*sk;
	struct net_device	*dev;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	unsigned long		_skb_refdst;
#ifdef CONFIG_XFRM
	struct sec_path		*sp;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;
	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	kmemcheck_bitfield_begin(flags1);
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1,
				peeked:1,
				nf_trace:1;
	kmemcheck_bitfield_end(flags1);
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack	*nfct;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif

	int			skb_iif;
#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif

	__u32			rxhash;

	kmemcheck_bitfield_begin(flags2);
	__u16			queue_mapping:16;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2,
				deliver_no_wcard:1;
#else
	__u8			deliver_no_wcard:1;
#endif
	__u8			ooo_okay:1;
	kmemcheck_bitfield_end(flags2);

#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif
	union {
		__u32		mark;
		__u32		dropcount;
	};

	__u16			vlan_tci;

	sk_buff_data_t		transport_header;
	sk_buff_data_t		network_header;
	sk_buff_data_t		mac_header;
	/* These elements must be at the end, see alloc_skb() for details.  */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	atomic_t		users;
};
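
/*
 * Illustrative note (a sketch, not from the original header): skb->len
 * counts every byte in the buffer, skb->data_len only those in the page
 * fragments and the frag_list, so the linear part is
 * skb_headlen(skb) == skb->len - skb->data_len.  For example, a
 * 1500-byte frame with a 64-byte linear header and the rest in one page
 * fragment has len == 1500, data_len == 1436, skb_headlen() == 64.
 */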

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>

#include <asm/system.h>

/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if refcount was _not_ taken
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of reference taken or not.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check we still are in a
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}

extern void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst);

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

extern void kfree_skb(struct sk_buff *skb);
extern void consume_skb(struct sk_buff *skb);
extern void __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone, int node);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1, NUMA_NO_NODE);
}
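
/*
 * Usage sketch (illustrative only, not part of this header's API): a
 * typical transmit path reserves headroom for protocol headers up
 * front, then fills the payload.  "hdr_room", "payload" and
 * "payload_len" are hypothetical.
 *
 *	struct sk_buff *skb = alloc_skb(hdr_room + payload_len, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hdr_room);		// empty headroom for headers
 *	memcpy(skb_put(skb, payload_len),	// extend tail, copy payload
 *	       payload, payload_len);
 *	...
 *	kfree_skb(skb);				// drop on error paths
 */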

extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);

extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *pskb_copy(struct sk_buff *skb,
				 gfp_t gfp_mask);
extern int	       pskb_expand_head(struct sk_buff *skb,
					int nhead, int ntail,
					gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int	       skb_to_sgvec(struct sk_buff *skb,
				    struct scatterlist *sg, int offset,
				    int len);
extern int	       skb_cow_data(struct sk_buff *skb, int tailbits,
				    struct sk_buff **trailer);
extern int	       skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	consume_skb(a)

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
			int len, int odd, struct sk_buff *skb),
			void *from, int length);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

extern void	      skb_prepare_seq_read(struct sk_buff *skb,
					   unsigned int from, unsigned int to,
					   struct skb_seq_state *st);
extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
				   struct skb_seq_state *st);
extern void	      skb_abort_seq_read(struct skb_seq_state *st);

extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
				    unsigned int to, struct ts_config *config,
				    struct ts_state *state);

extern __u32 __skb_get_rxhash(struct sk_buff *skb);
static inline __u32 skb_get_rxhash(struct sk_buff *skb)
{
	if (!skb->rxhash)
		skb->rxhash = __skb_get_rxhash(skb);

	return skb->rxhash;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (struct sk_buff *)list;
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb.  It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb.  It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant
 * atomic changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this operation.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt status or with spinlocks held pri must
 *	be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
					      gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy things. The normal one is hands off,
 *	but it does need a copy to be made if the data is to be changed.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	__skb_queue_head_init - initialize non-spinlocked sk_buff head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object.  This allows to initialize the list
 *	aspects of an sk_buff_head without reinitializing things like
 *	the spinlock.  It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known to not be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers should need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
					     struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	__skb_queue_after - queue a buffer at the list head
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		       struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}
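
/*
 * Usage sketch (illustrative only): a private FIFO built from these
 * primitives.  The locked variants skb_queue_tail()/skb_dequeue() take
 * list->lock themselves, so no extra locking is needed; "rxq" and
 * "process()" are hypothetical.
 *
 *	static struct sk_buff_head rxq;
 *
 *	skb_queue_head_init(&rxq);		// once, at setup
 *	skb_queue_tail(&rxq, skb);		// producer side
 *	while ((skb = skb_dequeue(&rxq)))	// consumer side
 *		process(skb);
 */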

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
extern void	   skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_shinfo(skb)->frags[i].size;
	return len + skb_headlen(skb);
}

static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page		  = page;
	frag->page_offset	  = off;
	frag->size		  = size;
	skb_shinfo(skb)->nr_frags = i + 1;
}

extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
			    int off, int size);

#define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_has_frag_list(skb))
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}
#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
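
/*
 * Usage sketch (illustrative only, assumes linux/ip.h for struct iphdr):
 * make sure the bytes about to be parsed are in the linear area before
 * touching them.  A nonlinear skb may keep the header in a page
 * fragment, so dereferencing skb->data without this check could read
 * the wrong bytes.
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;				// truncated packet
 *	iph = (struct iphdr *)skb->data;		// now known linear
 */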

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != ~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb->transport_header = skb->data + offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb->network_header = skb->data + offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != NULL;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb->mac_header = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
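
/*
 * Usage sketch (illustrative only): a receive path typically records
 * each header offset before pulling the header it just parsed, so later
 * code can locate the headers no matter where skb->data points.
 * "ip_hdrlen" here stands in for the parsed IP header length.
 *
 *	skb_reset_mac_header(skb);	// skb->data is at the MAC header
 *	__skb_pull(skb, ETH_HLEN);
 *	skb_reset_network_header(skb);	// now at the IP header
 *	__skb_pull(skb, ip_hdrlen);
 *	skb_reset_transport_header(skb);	// now at TCP/UDP
 */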
1364
1365static inline int skb_checksum_start_offset(const struct sk_buff *skb)
1366{
1367 return skb->csum_start - skb_headroom(skb);
1368}
1369
1370static inline int skb_transport_offset(const struct sk_buff *skb)
1371{
1372 return skb_transport_header(skb) - skb->data;
1373}
1374
1375static inline u32 skb_network_header_len(const struct sk_buff *skb)
1376{
1377 return skb->transport_header - skb->network_header;
1378}
1379
1380static inline int skb_network_offset(const struct sk_buff *skb)
1381{
1382 return skb_network_header(skb) - skb->data;
1383}
1384
1385static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
1386{
1387 return pskb_may_pull(skb, skb_network_offset(skb) + len);
1388}

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 32 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom, you should not reduce this.
 *
 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
 * to reduce the average number of cache lines per packet.
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif

extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb->data_len)) {
		WARN_ON(1);
		return;
	}
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

extern void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk		= NULL;
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

/**
 *	__dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
	if (likely(skb))
		skb_reserve(skb, NET_SKB_PAD);
	return skb;
}

extern struct sk_buff *dev_alloc_skb(unsigned int length);

extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
		unsigned int length, gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}
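
/*
 * Usage sketch (illustrative only): a driver rx path commonly uses the
 * IP-aligned variant so the network header lands on a 4-byte boundary
 * after the 14-byte Ethernet header.  "RX_BUF_LEN" and "frame_len" are
 * hypothetical.
 *
 *	skb = netdev_alloc_skb_ip_align(dev, RX_BUF_LEN);
 *	if (!skb)
 *		return -ENOMEM;
 *	// DMA the received frame into skb->data, then:
 *	skb_put(skb, frame_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_receive_skb(skb);
 */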

/**
 *	__netdev_alloc_page - allocate a page for ps-rx on a specific device
 *	@dev: network device to receive on
 *	@gfp_mask: alloc_pages_node mask
 *
 *	Allocate a new page. dev currently unused.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
{
	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, 0);
}

/**
 *	netdev_alloc_page - allocate a page for ps-rx on a specific device
 *	@dev: network device to receive on
 *
 *	Allocate a new page. dev currently unused.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct page *netdev_alloc_page(struct net_device *dev)
{
	return __netdev_alloc_page(dev, GFP_ATOMIC);
}

static inline void netdev_free_page(struct net_device *dev, struct page *page)
{
	__free_page(page);
}

/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not require the data to be copied.
 */
static inline int skb_clone_writable(struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom < NET_SKB_PAD)
		headroom = NET_SKB_PAD;
	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and original skb is not changed.
 *
 *	The result is skb with writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 *	skb_cow_head - skb_cow but only making the head writable
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	This function is identical to skb_cow except that we replace the
 *	skb_cloned check by skb_header_cloned.  It should be used when
 *	you only need to push on some header and do not need to modify
 *	the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}
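
/*
 * Usage sketch (illustrative only, assumes skb->data sits at the MAC
 * header and linux/etherdevice.h for eth_hdr()): code about to rewrite
 * the header of a possibly-cloned skb takes a private copy first.
 * "new_dest_mac" is hypothetical.
 *
 *	if (skb_cow_head(skb, 0))
 *		goto drop;			// reallocation failed
 *	// the header area is now private and writable
 *	memcpy(eth_hdr(skb)->h_dest, new_dest_mac, ETH_ALEN);
 */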

/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
							    copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline int skb_can_coalesce(struct sk_buff *skb, int i,
				   struct page *page, int off)
{
	if (i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == frag->page &&
		       off == frag->page_offset + frag->size;
	}
	return 0;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */
static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}

unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets is still valid after the operation.
 */
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;					\
		     prefetch(skb->next), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)					\
		for (skb = (queue)->next, tmp = skb->next;			\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

#define skb_queue_walk_from(queue, skb)						\
		for (; prefetch(skb->next), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->next)

#define skb_queue_walk_from_safe(queue, skb, tmp)				\
		for (tmp = skb->next;						\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;					\
		     prefetch(skb->prev), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->prev)
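
/*
 * Usage sketch (illustrative only): the _safe variant must be used when
 * buffers may be unlinked during the walk, since it reads the next
 * pointer before the loop body runs.  The caller holds the queue lock
 * as appropriate; "done_with()" is a hypothetical predicate.
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_queue_walk_safe(queue, skb, tmp) {
 *		if (done_with(skb)) {
 *			__skb_unlink(skb, queue);
 *			kfree_skb(skb);
 *		}
 *	}
 */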

static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
{
	frag->next = skb_shinfo(skb)->frag_list;
	skb_shinfo(skb)->frag_list = frag;
}

#define skb_walk_frags(skb, iter)	\
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)

extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
					   int *peeked, int *err);
extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
					 int noblock, int *err);
extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
				     struct poll_table_struct *wait);
extern int	       skb_copy_datagram_iovec(const struct sk_buff *from,
					       int offset, struct iovec *to,
					       int size);
extern int	       skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
							int hlen,
							struct iovec *iov);
extern int	       skb_copy_datagram_from_iovec(struct sk_buff *skb,
						    int offset,
						    const struct iovec *from,
						    int from_offset,
						    int len);
extern int	       skb_copy_datagram_const_iovec(const struct sk_buff *from,
						     int offset,
						     const struct iovec *to,
						     int to_offset,
						     int size);
extern void	       skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern void	       skb_free_datagram_locked(struct sock *sk,
						struct sk_buff *skb);
extern int	       skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
					 unsigned int flags);
extern __wsum	       skb_checksum(const struct sk_buff *skb, int offset,
				    int len, __wsum csum);
extern int	       skb_copy_bits(const struct sk_buff *skb, int offset,
				     void *to, int len);
extern int	       skb_store_bits(struct sk_buff *skb, int offset,
				      const void *from, int len);
extern __wsum	       skb_copy_and_csum_bits(const struct sk_buff *skb,
					      int offset, u8 *to, int len,
					      __wsum csum);
extern int	       skb_splice_bits(struct sk_buff *skb,
				       unsigned int offset,
				       struct pipe_inode_info *pipe,
				       unsigned int len,
				       unsigned int flags);
extern void	       skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void	       skb_split(struct sk_buff *skb,
				 struct sk_buff *skb1, const u32 len);
extern int	       skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
				 int shiftlen);

extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
{
	int hlen = skb_headlen(skb);

	if (hlen - offset >= len)
		return skb->data + offset;

	if (skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}
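
/*
 * Usage sketch (illustrative only, assumes linux/udp.h): the function
 * either returns a pointer straight into the linear area or copies the
 * requested bytes into the caller's buffer, so parsers need not care
 * whether the header is paged.  "offset" is the header's byte offset.
 *
 *	struct udphdr _uh, *uh;
 *
 *	uh = skb_header_pointer(skb, offset, sizeof(_uh), &_uh);
 *	if (!uh)
 *		goto drop;		// packet shorter than the header
 *	dport = uh->dest;
 */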

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}

extern void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and stores
 *	it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct timeval *stamp)
{
	*stamp = ktime_to_timeval(skb->tstamp);
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
				       struct timespec *stamp)
{
	*stamp = ktime_to_timespec(skb->tstamp);
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return ktime_set(0, 0);
}

extern void skb_timestamping_init(void);

#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING

extern void skb_clone_tx_timestamp(struct sk_buff *skb);
extern bool skb_defer_rx_timestamp(struct sk_buff *skb);

#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */

static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	return false;
}

#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */

/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb back to the stack, with
 * or without a timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps, may be NULL if not available
 */
void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps);

/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb:	the original outgoing packet
 * @hwtstamps:	hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket.  Errors are silently ignored.
 */
extern void skb_tstamp_tx(struct sk_buff *orig_skb,
			  struct skb_shared_hwtstamps *hwtstamps);

static inline void sw_tx_timestamp(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
	    !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		skb_tstamp_tx(skb, NULL);
}

/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC Drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * @skb: A socket buffer.
 */
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	skb_clone_tx_timestamp(skb);
	sw_tx_timestamp(skb);
}

extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
extern __sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return skb->ip_summed & CHECKSUM_UNNECESSARY;
}

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify that checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is correct.  In particular, this function will return zero
 *	if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
 *	hardware has already verified the correctness of the checksum.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}
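
/*
 * Usage sketch (illustrative only): a receive path validating a
 * full-packet Internet checksum trusts CHECKSUM_UNNECESSARY and
 * otherwise computes and folds the sum, which is exactly the split
 * skb_checksum_complete() encapsulates:
 *
 *	if (skb_csum_unnecessary(skb))
 *		return 0;			// hardware verified it
 *	if (__skb_checksum_complete(skb))
 *		goto csum_error;		// bad checksum
 */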

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
	if (skb)
		atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(skb->nfct_reasm);
	skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

/* Note: This doesn't put any conntrack and bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->nfct = src->nfct;
	nf_conntrack_get(src->nfct);
	dst->nfctinfo = src->nfctinfo;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	dst->nfct_reasm = src->nfct_reasm;
	nf_conntrack_get_reasm(src->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	dst->nf_bridge  = src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(dst->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(dst->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}

extern u16 __skb_tx_hash(const struct net_device *dev,
			 const struct sk_buff *skb,
			 unsigned int num_tx_queues);

#ifdef CONFIG_XFRM
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return skb->sp;
}
#else
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return NULL;
}
#endif

static inline int skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

static inline int skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set. */
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one.  Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper, to document places where we make this assertion.
 */
static inline void skb_checksum_none_assert(struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */