/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/cache.h>

#include <asm/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>

/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* A. Checksumming of received packets by device.
 *
 *	NONE: device failed to checksum this packet.
 *		skb->csum is undefined.
 *
 *	UNNECESSARY: device parsed the packet and validated the TCP/UDP
 *		checksum in hardware.
 *		skb->csum is undefined.
 *
 *	COMPLETE: the most generic way. The device supplied the checksum
 *		of the whole packet, as seen by netif_rx(), in skb->csum.
 *		NOTE: even if a device supports only some protocols but is
 *		able to produce skb->csum, it MUST use COMPLETE, not
 *		UNNECESSARY.
 *
 *	PARTIAL: identical to the case for output below.  This may occur
 *		on a packet received directly from another Linux OS, e.g.,
 *		a virtualised Linux kernel on the same host.  The packet
 *		can be treated in the same way as UNNECESSARY, except that
 *		on output (i.e., forwarding) the checksum must be filled
 *		in by the OS or the hardware.
 *
 * B. Checksumming on output.
 *
 *	NONE: the skb is checksummed by the protocol, or a checksum is
 *		not required.
 *
 *	PARTIAL: the device is required to checksum the packet as seen by
 *		hard_start_xmit() from skb->csum_start to the end, and to
 *		record the checksum at skb->csum_start + skb->csum_offset.
 *
 *	A device must advertise its checksumming capabilities in
 *	dev->features, set up at device setup time, e.g.:
 *
 *	NETIF_F_HW_CSUM   - the device can checksum everything.
 *	NETIF_F_NO_CSUM   - loopback or reliable single-hop media.
 *	NETIF_F_IP_CSUM   - the device can checksum TCP/UDP over IPv4 only.
 *	NETIF_F_IPV6_CSUM - as above, but for TCP/UDP over IPv6.
 */
struct net_device;
struct scatterlist;
struct pipe_inode_info;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t		use;
	struct net_device	*physindev;
	struct net_device	*physoutdev;
	unsigned int		mask;
	unsigned long		data[32 / sizeof(unsigned long)];
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list */
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct page	*page;
	__u32		page_offset;
	__u32		size;
};

#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since arbitrary point in time
 * @syststamp:	hwtstamp transformed to system time base
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp.  The relation between the different kinds of time
 * stamps is as follows:
 *
 * syststamp and tstamp can be compared against each other in
 * arbitrary combinations.  The accuracy of such a comparison is
 * limited by the accuracy of the transformation into the system time
 * base, which depends on the device driver and its underlying
 * hardware.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info.  Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
	ktime_t	syststamp;
};

/**
 * union skb_shared_tx - hardware time stamping flags
 * @hardware:		generate hardware time stamp
 * @software:		generate software time stamp
 * @in_progress:	device driver is going to provide
 *			hardware time stamp
 * @flags:		all shared_tx flags
 *
 * These flags are attached to packets as part of the
 * &skb_shared_info.  Use skb_tx() to get a pointer.
 */
union skb_shared_tx {
	struct {
		__u8	hardware:1,
			software:1,
			in_progress:1;
	};
	__u8 flags;
};

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	atomic_t	dataref;
	unsigned short	nr_frags;
	unsigned short	gso_size;
#ifdef CONFIG_HAS_DMA
	dma_addr_t	dma_head;
#endif
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short	gso_type;
	__be32		ip6_frag_id;
	union skb_shared_tx tx_flags;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	skb_frag_t	frags[MAX_SKB_FRAGS];
#ifdef CONFIG_HAS_DMA
	dma_addr_t	dma_maps[MAX_SKB_FRAGS];
#endif
	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void		*destructor_arg;
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)


enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@sk: Socket we are owned by
 *	@tstamp: Time we arrived
 *	@dev: Device we arrived on/are leaving by
 *	@_skb_dst: destination entry
 *	@sp: the security path, used for xfrm
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@hdr_len: writable header length of cloned skb
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
 *	@priority: Packet queueing priority
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@ip_summed: Driver fed us an IP checksum
 *	@nohdr: Payload reference only, must not modify header
 *	@nfctinfo: Relationship of this skb to the connection
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ipvs_property: skbuff is owned by ipvs
 *	@peeked: this packet has been seen already, so stats have been
 *		done for it, don't do them again
 *	@nf_trace: netfilter packet trace flag
 *	@protocol: Packet protocol from driver
 *	@destructor: Destruct function
 *	@nfct: Associated connection, if any
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@iif: ifindex of device we arrived on
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@queue_mapping: Queue mapping for multiqueue devices
 *	@ndisc_nodetype: router type (from link layer)
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 *	@mark: Generic packet mark
 *	@vlan_tci: vlan tag control information
 *	@transport_header: Transport layer header
 *	@network_header: Network layer header
 *	@mac_header: Link layer header
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@truesize: Buffer size
 *	@users: User count - see {datagram,tcp}.c
 */
struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	struct sock		*sk;
	ktime_t			tstamp;
	struct net_device	*dev;

	unsigned long		_skb_dst;
#ifdef CONFIG_XFRM
	struct sec_path		*sp;
#endif
	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48];

	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;
	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	kmemcheck_bitfield_begin(flags1);
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1,
				peeked:1,
				nf_trace:1;
	__be16			protocol:16;
	kmemcheck_bitfield_end(flags1);

	void			(*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack	*nfct;
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif

	int			iif;
#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif

	kmemcheck_bitfield_begin(flags2);
	__u16			queue_mapping:16;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif
	kmemcheck_bitfield_end(flags2);

	/* 0/14 bit hole */

#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif

	__u32			mark;

	__u16			vlan_tci;

	sk_buff_data_t		transport_header;
	sk_buff_data_t		network_header;
	sk_buff_data_t		mac_header;
	/* These elements must be at the end, see alloc_skb() for details.  */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	atomic_t		users;
};

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>

#include <asm/system.h>

#ifdef CONFIG_HAS_DMA
#include <linux/dma-mapping.h>
extern int skb_dma_map(struct device *dev, struct sk_buff *skb,
		       enum dma_data_direction dir);
extern void skb_dma_unmap(struct device *dev, struct sk_buff *skb,
			  enum dma_data_direction dir);
#endif

static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	return (struct dst_entry *)skb->_skb_dst;
}

static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_dst = (unsigned long)dst;
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

extern void kfree_skb(struct sk_buff *skb);
extern void consume_skb(struct sk_buff *skb);
extern void __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone, int node);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, -1);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1, -1);
}
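
/*
 * Illustrative sketch (not part of the API): a typical caller allocates,
 * reserves headroom for protocol headers, then appends payload.  The
 * identifiers hlen, plen and payload below are hypothetical:
 *
 *	struct sk_buff *skb = alloc_skb(hlen + plen, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);			    // headroom for headers
 *	memcpy(skb_put(skb, plen), payload, plen);  // append plen bytes
 */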

extern int skb_recycle_check(struct sk_buff *skb, int skb_size);

extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *pskb_copy(struct sk_buff *skb,
				 gfp_t gfp_mask);
extern int pskb_expand_head(struct sk_buff *skb,
			    int nhead, int ntail,
			    gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int skb_to_sgvec(struct sk_buff *skb,
			struct scatterlist *sg, int offset,
			int len);
extern int skb_cow_data(struct sk_buff *skb, int tailbits,
			struct sk_buff **trailer);
extern int skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	consume_skb(a)
#define dev_consume_skb(a)	kfree_skb_clean(a)
extern void skb_over_panic(struct sk_buff *skb, int len,
			   void *here);
extern void skb_under_panic(struct sk_buff *skb, int len,
			    void *here);

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
				   int getfrag(void *from, char *to, int offset,
					       int len, int odd, struct sk_buff *skb),
				   void *from, int length);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

extern void skb_prepare_seq_read(struct sk_buff *skb,
				 unsigned int from, unsigned int to,
				 struct skb_seq_state *st);
extern unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
				 struct skb_seq_state *st);
extern void skb_abort_seq_read(struct skb_seq_state *st);

extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
				  unsigned int to, struct ts_config *config,
				  struct ts_state *state);

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

static inline union skb_shared_tx *skb_tx(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->tx_flags;
}

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return (skb->next == (struct sk_buff *)list);
}

/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return (skb->prev == (struct sk_buff *)list);
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb.  It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb.  It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant
 * atomic changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt status or with spinlocks held pri must
 *	be GFP_ATOMIC.
 *
 *	NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
					      gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object.  This allows to initialize the list
 *	aspects of an sk_buff_head without reinitializing things like
 *	the spinlock.  It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known to not be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers should need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}
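
/*
 * Illustrative sketch (not part of the API): a private FIFO of buffers.
 * The identifiers rxq and skb below are hypothetical:
 *
 *	struct sk_buff_head rxq;
 *	struct sk_buff *skb;
 *
 *	skb_queue_head_init(&rxq);
 *	skb_queue_tail(&rxq, skb);		    // enqueue at the tail
 *	while ((skb = skb_dequeue(&rxq)) != NULL)   // FIFO drain
 *		kfree_skb(skb);
 */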

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
					     struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	do not run any hooks :-)
 */
extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	__skb_queue_after - queue a buffer at the list head
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		       struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
extern void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_shinfo(skb)->frags[i].size;
	return len + skb_headlen(skb);
}

static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page	  = page;
	frag->page_offset = off;
	frag->size	  = size;
	skb_shinfo(skb)->nr_frags = i + 1;
}
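
/*
 * Illustrative sketch (not part of the API): attaching a page fragment.
 * skb_fill_page_desc() only records the fragment; the caller must still
 * account for the added bytes.  The identifiers page and frag_len below
 * are hypothetical:
 *
 *	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, 0, frag_len);
 *	skb->len      += frag_len;
 *	skb->data_len += frag_len;
 *	skb->truesize += frag_len;
 *
 * skb_add_rx_frag(), declared below, performs this accounting for you.
 */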

extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
			    int off, int size);

#define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_has_frags(skb))
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}
#else
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif

/*
 *	Add data to an sk_buff
 */
extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
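
/*
 * Illustrative sketch (not part of the API): before reading a header out
 * of skb->data, make sure that many bytes are linear.  Assuming an IPv4
 * packet whose network header has already been set:
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;	// truncated packet; "drop" is hypothetical
 *	iph = ip_hdr(skb);	// now safe to dereference the header
 */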

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != ~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb->transport_header = skb->data + offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb->network_header = skb->data + offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != NULL;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb->mac_header = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}
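
/*
 * Illustrative sketch (not part of the API): a receive path typically
 * records header positions as it parses.  Assuming an Ethernet frame at
 * skb->data:
 *
 *	skb_reset_mac_header(skb);	// MAC header starts at skb->data
 *	skb_pull(skb, ETH_HLEN);	// skip the 14-byte Ethernet header
 *	skb_reset_network_header(skb);	// IP header now starts at skb->data
 *
 * Later layers can then use skb_network_header() and skb_network_offset()
 * regardless of how skb->data has moved since.
 */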

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 32 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom, you should not reduce this.
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	32
#endif

extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb->data_len)) {
		WARN_ON(1);
		return;
	}
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

extern void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk		= NULL;
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

/**
 *	__dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
	if (likely(skb))
		skb_reserve(skb, NET_SKB_PAD);
	return skb;
}

extern struct sk_buff *dev_alloc_skb(unsigned int length);

extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
		unsigned int length, gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask);

/**
 *	netdev_alloc_page - allocate a page for ps-rx on a specific device
 *	@dev: network device to receive on
 *
 *	Allocate a new page node local to the specified device.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct page *netdev_alloc_page(struct net_device *dev)
{
	return __netdev_alloc_page(dev, GFP_ATOMIC);
}

static inline void netdev_free_page(struct net_device *dev, struct page *page)
{
	__free_page(page);
}

/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not require the data to be copied.
 */
static inline int skb_clone_writable(struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom < NET_SKB_PAD)
		headroom = NET_SKB_PAD;
	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and original skb is not changed.
 *
 *	The result is skb with writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 *	skb_cow_head - skb_cow but only making the head writable
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	This function is identical to skb_cow except that we replace the
 *	skb_cloned check by skb_header_cloned.  It should be used when
 *	you only need to push on some header and do not need to modify
 *	the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}

/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}
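
/*
 * Illustrative sketch (not part of the API): Ethernet requires a minimum
 * frame size, so a transmit path may pad short packets before handing
 * them to the hardware:
 *
 *	if (skb_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;	// skb was freed on error
 */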

static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
						      copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline int skb_can_coalesce(struct sk_buff *skb, int i,
				   struct page *page, int off)
{
	if (i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == frag->page &&
		       off == frag->page_offset + frag->size;
	}
	return 0;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */
static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}

unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets is still valid after the operation.
 */
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;					\
		     prefetch(skb->next), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)					\
		for (skb = (queue)->next, tmp = skb->next;			\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

#define skb_queue_walk_from(queue, skb)						\
		for (; prefetch(skb->next), (skb != (struct sk_buff *)(queue));\
		     skb = skb->next)

#define skb_queue_walk_from_safe(queue, skb, tmp)				\
		for (tmp = skb->next;						\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;					\
		     prefetch(skb->prev), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->prev)
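
/*
 * Illustrative sketch (not part of the API): walking a queue under its
 * lock.  Use the _safe variant when the loop may unlink buffers.  The
 * identifiers q, flags and done_with() below are hypothetical:
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	spin_lock_irqsave(&q->lock, flags);
 *	skb_queue_walk_safe(q, skb, tmp) {
 *		if (done_with(skb)) {
 *			__skb_unlink(skb, q);
 *			kfree_skb(skb);
 *		}
 *	}
 *	spin_unlock_irqrestore(&q->lock, flags);
 */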

static inline bool skb_has_frags(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
{
	frag->next = skb_shinfo(skb)->frag_list;
	skb_shinfo(skb)->frag_list = frag;
}

#define skb_walk_frags(skb, iter)	\
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)

extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
					   int *peeked, int *err);
extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
					 int noblock, int *err);
extern unsigned int datagram_poll(struct file *file, struct socket *sock,
				  struct poll_table_struct *wait);
extern int skb_copy_datagram_iovec(const struct sk_buff *from,
				   int offset, struct iovec *to,
				   int size);
extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
					    int hlen,
					    struct iovec *iov);
extern int skb_copy_datagram_from_iovec(struct sk_buff *skb,
					int offset,
					const struct iovec *from,
					int from_offset,
					int len);
extern int skb_copy_datagram_const_iovec(const struct sk_buff *from,
					 int offset,
					 const struct iovec *to,
					 int to_offset,
					 int size);
extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern void skb_free_datagram_locked(struct sock *sk,
				     struct sk_buff *skb);
extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
			     unsigned int flags);
extern __wsum skb_checksum(const struct sk_buff *skb, int offset,
			   int len, __wsum csum);
extern int skb_copy_bits(const struct sk_buff *skb, int offset,
			 void *to, int len);
extern int skb_store_bits(struct sk_buff *skb, int offset,
			  const void *from, int len);
extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb,
				     int offset, u8 *to, int len,
				     __wsum csum);
extern int skb_splice_bits(struct sk_buff *skb,
			   unsigned int offset,
			   struct pipe_inode_info *pipe,
			   unsigned int len,
			   unsigned int flags);
extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void skb_split(struct sk_buff *skb,
		      struct sk_buff *skb1, const u32 len);
extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
		     int shiftlen);

extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
{
	int hlen = skb_headlen(skb);

	if (hlen - offset >= len)
		return skb->data + offset;

	if (skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}
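
/*
 * Illustrative sketch (not part of the API): skb_header_pointer() reads a
 * header that may or may not be linear, copying into a stack buffer only
 * when necessary.  The identifier offset below is hypothetical:
 *
 *	struct tcphdr _tcph;
 *	const struct tcphdr *th;
 *
 *	th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
 *	if (th == NULL)
 *		goto drop;	// packet too short; "drop" is hypothetical
 */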

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}

extern void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and stores
 *	it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct timeval *stamp)
{
	*stamp = ktime_to_timeval(skb->tstamp);
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
				       struct timespec *stamp)
{
	*stamp = ktime_to_timespec(skb->tstamp);
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return ktime_set(0, 0);
}

/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb:	the original outgoing packet
 * @hwtstamps:	hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket.  Errors are silently ignored.
 */
extern void skb_tstamp_tx(struct sk_buff *orig_skb,
			  struct skb_shared_hwtstamps *hwtstamps);

extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
extern __sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return skb->ip_summed & CHECKSUM_UNNECESSARY;
}

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify that checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is okay.  For example, this function finds the checksum of an
 *	entire packet by summing chunks of data that have been already
 *	summed.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}
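
/*
 * Illustrative sketch (not part of the API): a receive path verifying a
 * full packet checksum, e.g. for ICMP, where a non-zero result means the
 * packet is corrupt:
 *
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;	// "csum_error" is hypothetical
 */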

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
	if (skb)
		atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
	nf_conntrack_put_reasm(skb->nfct_reasm);
	skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

/* Note: This doesn't put any conntrack and bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->nfct = src->nfct;
	nf_conntrack_get(src->nfct);
	dst->nfctinfo = src->nfctinfo;
	dst->nfct_reasm = src->nfct_reasm;
	nf_conntrack_get_reasm(src->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	dst->nf_bridge = src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(dst->nfct);
	nf_conntrack_put_reasm(dst->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}
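
/*
 * Illustrative sketch (not part of the API): the receive queue is stored
 * off by one so that zero can mean "not recorded".  A driver records the
 * queue an skb arrived on; a consumer checks before reading it.  The
 * identifiers rxq_index and rxq below are hypothetical:
 *
 *	skb_record_rx_queue(skb, rxq_index);
 *	...
 *	if (skb_rx_queue_recorded(skb))
 *		rxq = skb_get_rx_queue(skb);
 */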

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}

extern u16 skb_tx_hash(const struct net_device *dev,
		       const struct sk_buff *skb);

#ifdef CONFIG_XFRM
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return skb->sp;
}
#else
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return NULL;
}
#endif

static inline int skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

static inline int skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set. */
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one.  Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */