#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/cache.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>

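/* Values for skb->ip_summed: how much checksum work has been done.
 *
 * CHECKSUM_NONE:	device failed to checksum this packet; the checksum
 *			is not yet verified.
 * CHECKSUM_UNNECESSARY: the hardware already verified the checksum.
 * CHECKSUM_COMPLETE:	on receive, skb->csum holds the checksum of the
 *			complete packet as computed by the hardware.
 * CHECKSUM_PARTIAL:	on transmit, the device is expected to checksum the
 *			packet as specified by csum_start and csum_offset.
 */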
#define CHECKSUM_NONE 0
#define CHECKSUM_UNNECESSARY 1
#define CHECKSUM_COMPLETE 2
#define CHECKSUM_PARTIAL 3

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

struct net_device;
struct scatterlist;
struct pipe_inode_info;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t use;
	struct net_device *physindev;
	struct net_device *physoutdev;
	unsigned int mask;
	unsigned long data[32 / sizeof(unsigned long)];
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list. Since
 * GRO uses frags we allocate at least 16 regardless of page size.
 */
#if (65536/PAGE_SIZE + 2) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)
#endif

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct {
		struct page *p;
	} page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
	__u32 size;
#else
	__u16 page_offset;
	__u16 size;
#endif
};

static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->size;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->size = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->size += delta;
}

static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->size -= delta;
}

#define HAVE_HW_TIME_STAMP

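/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since arbitrary point in time
 * @syststamp:	hwtstamp transformed to system time base
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp. hwtstamps can only be compared against other hwtstamps
 * from the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */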
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
	ktime_t	syststamp;
};

/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* ensure the originating sk reference is available on driver level */
	SKBTX_DRV_NEEDS_SK_REF = 1 << 3,

	/* device driver supports TX zero-copy buffers */
	SKBTX_DEV_ZEROCOPY = 1 << 4,
};

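/*
 * The callback notifies userspace to release buffers when skb DMA is done in
 * lower device, the skb last reference should be 0 when calling this.
 * The desc is used to track userspace buffer index.
 */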
struct ubuf_info {
	void (*callback)(void *);
	void *arg;
	unsigned long desc;
};

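/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */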
struct skb_shared_info {
	unsigned short	nr_frags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short	gso_type;
	__be32		ip6_frag_id;
	__u8		tx_flags;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;

	/*
	 * Warning : all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void		*destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

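/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */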
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)


enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
    defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
#endif

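/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@tstamp: Time we arrived
 *	@sk: Socket we are owned by
 *	@dev: Device we arrived on/are leaving by
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@_skb_refdst: destination entry (with norefcount bit)
 *	@sp: the security path, used for xfrm
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@hdr_len: writable header length of cloned skb
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
 *	@priority: Packet queueing priority
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@ip_summed: Driver fed us an IP checksum
 *	@nohdr: Payload reference only, must not modify header
 *	@nfctinfo: Relationship of this skb to the connection
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ipvs_property: skbuff is owned by ipvs
 *	@peeked: this packet has been seen already, so stats have been
 *		done for it, don't do them again
 *	@nf_trace: netfilter packet trace flag
 *	@protocol: Packet protocol from driver
 *	@destructor: Destruct function
 *	@nfct: Associated connection, if any
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@skb_iif: ifindex of device we arrived on
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@rxhash: the packet hash computed on receive
 *	@queue_mapping: Queue mapping for multiqueue devices
 *	@ndisc_nodetype: router type (from link layer)
 *	@ooo_okay: allow the mapping of a socket to a queue to be changed
 *	@l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport
 *		ports.
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 *	@mark: Generic packet mark
 *	@dropcount: total number of sk_receive_queue overflows
 *	@vlan_tci: vlan tag control information
 *	@transport_header: Transport layer header
 *	@network_header: Network layer header
 *	@mac_header: Link layer header
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@truesize: Buffer size
 *	@users: User count - see {datagram,tcp}.c
 */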
struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	ktime_t			tstamp;

	struct sock		*sk;
	struct net_device	*dev;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	unsigned long		_skb_refdst;
#ifdef CONFIG_XFRM
	struct	sec_path	*sp;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;
	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	kmemcheck_bitfield_begin(flags1);
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1,
				peeked:1,
				nf_trace:1;
	kmemcheck_bitfield_end(flags1);
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack	*nfct;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif

	int			skb_iif;
#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif

	__u32			rxhash;

	__u16			queue_mapping;
	kmemcheck_bitfield_begin(flags2);
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif
	__u8			ooo_okay:1;
	__u8			l4_rxhash:1;
	kmemcheck_bitfield_end(flags2);

#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif
	union {
		__u32		mark;
		__u32		dropcount;
	};

	__u16			vlan_tci;

	sk_buff_data_t		transport_header;
	sk_buff_data_t		network_header;
	sk_buff_data_t		mac_header;
	/* These elements must be at the end, see alloc_skb() for details.  */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	atomic_t		users;
};

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>

#include <asm/system.h>

/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if refcount was _not_ taken
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

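/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of reference taken or not.
 */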
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check we still are in a
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

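/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */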
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}

extern void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst);

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

extern void kfree_skb(struct sk_buff *skb);
extern void consume_skb(struct sk_buff *skb);
extern void __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone, int node);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1, NUMA_NO_NODE);
}

extern void skb_recycle(struct sk_buff *skb);
extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);

extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *pskb_copy(struct sk_buff *skb,
				 gfp_t gfp_mask);
extern int pskb_expand_head(struct sk_buff *skb,
			    int nhead, int ntail,
			    gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int skb_to_sgvec(struct sk_buff *skb,
			struct scatterlist *sg, int offset,
			int len);
extern int skb_cow_data(struct sk_buff *skb, int tailbits,
			struct sk_buff **trailer);
extern int skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	consume_skb(a)

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
			int len, int odd, struct sk_buff *skb),
			void *from, int length);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

extern void skb_prepare_seq_read(struct sk_buff *skb,
				 unsigned int from, unsigned int to,
				 struct skb_seq_state *st);
extern unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
				 struct skb_seq_state *st);
extern void skb_abort_seq_read(struct skb_seq_state *st);

extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
				  unsigned int to, struct ts_config *config,
				  struct ts_state *state);

extern void __skb_get_rxhash(struct sk_buff *skb);
static inline __u32 skb_get_rxhash(struct sk_buff *skb)
{
	if (!skb->rxhash)
		__skb_get_rxhash(skb);

	return skb->rxhash;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

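/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */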
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (struct sk_buff *)list;
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb.  It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb.  It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant
 * cloning in skb_clone()
 */

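/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */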
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

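/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt status or with spinlocks held pri must
 *	be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */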
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
					      gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

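/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC
 *
 *	%NULL is returned on a memory allocation failure.
 */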
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}

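/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */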
static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
	struct sk_buff *list = ((const struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
	struct sk_buff *list = ((const struct sk_buff *)list_)->prev;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object.  This allows to initialize the list
 *	aspects of an sk_buff_head without reinitializing things like
 *	the spinlock.  It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known to not be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers should need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
		struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	__skb_queue_after - queue a buffer at the list head
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		       struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known..
 */
extern void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}


static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
	return len + skb_headlen(skb);
}

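/**
 * __skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data with @page
 * @size: the length of the data
 *
 * Initialises the @i'th fragment of @skb to point to &size bytes at
 * offset @off within @page.
 *
 * Does not take any additional reference on the fragment.
 */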
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page.p	  = page;
	frag->page_offset = off;
	skb_frag_size_set(frag, size);
}

/**
 * skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data with @page
 * @size: the length of the data
 *
 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
 * @skb to point to &size bytes at offset @off within @page. In
 * addition updates @skb such that @i is the last fragment.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	__skb_fill_page_desc(skb, i, page, off, size);
	skb_shinfo(skb)->nr_frags = i + 1;
}

extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
			    int off, int size);

#define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_has_frag_list(skb))
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}
#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

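/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */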
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != ~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb->transport_header = skb->data + offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb->network_header = skb->data + offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != NULL;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb->mac_header = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */

static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
	return pskb_may_pull(skb, skb_network_offset(skb) + len);
}

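/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */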
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

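/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow, e.g. while forwarding through a tunnel.
 * NET_SKB_PAD is chosen as max(32, L1_CACHE_BYTES) so the reserved area is
 * both cache-line friendly and large enough for common link-layer headers.
 */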
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif

extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb_is_nonlinear(skb))) {
		WARN_ON(1);
		return;
	}
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

extern void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk		= NULL;
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

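/**
 *	__dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */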
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
	if (likely(skb))
		skb_reserve(skb, NET_SKB_PAD);
	return skb;
}

extern struct sk_buff *dev_alloc_skb(unsigned int length);

extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
		unsigned int length, gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}

/**
 *	__netdev_alloc_page - allocate a page for ps-rx on a specific device
 *	@dev: network device to receive on
 *	@gfp_mask: alloc_pages_node mask
 *
 *	Allocate a new page. dev currently unused.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
{
	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, 0);
}

/**
 *	netdev_alloc_page - allocate a page for ps-rx on a specific device
 *	@dev: network device to receive on
 *
 *	Allocate a new page. dev currently unused.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct page *netdev_alloc_page(struct net_device *dev)
{
	return __netdev_alloc_page(dev, GFP_ATOMIC);
}

static inline void netdev_free_page(struct net_device *dev, struct page *page)
{
	__free_page(page);
}

/**
 * skb_frag_page - retrieve the page refered to by a paged fragment
 * @frag: the paged fragment
 *
 * Returns the &struct page associated with @frag.
 */
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
	return frag->page.p;
}

/**
 * __skb_frag_ref - take an addition reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Takes an additional reference on the paged fragment @frag.
 */
static inline void __skb_frag_ref(skb_frag_t *frag)
{
	get_page(skb_frag_page(frag));
}

/**
 * skb_frag_ref - take an addition reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset.
 *
 * Takes an additional reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_ref(struct sk_buff *skb, int f)
{
	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}

/**
 * __skb_frag_unref - release a reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Releases a reference on the paged fragment @frag.
 */
static inline void __skb_frag_unref(skb_frag_t *frag)
{
	put_page(skb_frag_page(frag));
}

/**
 * skb_frag_unref - release a reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset
 *
 * Releases a reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_unref(struct sk_buff *skb, int f)
{
	__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
}

/**
 * skb_frag_address - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. The page must already
 * be mapped.
 */
static inline void *skb_frag_address(const skb_frag_t *frag)
{
	return page_address(skb_frag_page(frag)) + frag->page_offset;
}

/**
 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. Checks that the page
 * is mapped and returns %NULL otherwise.
 */
static inline void *skb_frag_address_safe(const skb_frag_t *frag)
{
	void *ptr = page_address(skb_frag_page(frag));
	if (unlikely(!ptr))
		return NULL;

	return ptr + frag->page_offset;
}

/**
 * __skb_frag_set_page - sets the page contained in a paged fragment
 * @frag: the paged fragment
 * @page: the page to set
 *
 * Sets the fragment @frag to contain @page.
 */
static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
	frag->page.p = page;
}

/**
 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
 * @skb: the buffer
 * @f: the fragment offset
 * @page: the page to set
 *
 * Sets the @f'th fragment of @skb to contain @page.
 */
static inline void skb_frag_set_page(struct sk_buff *skb, int f,
				     struct page *page)
{
	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
}

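/**
 * skb_frag_dma_map - maps a paged fragment via the DMA API
 * @dev: the device to map the fragment to
 * @frag: the paged fragment to map
 * @offset: the offset within the fragment (starting at the
 *          fragment's own offset)
 * @size: the number of bytes to map
 * @dir: the direction of the mapping
 *
 * Maps the page associated with @frag to @dev.
 */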
static inline dma_addr_t skb_frag_dma_map(struct device *dev,
					  const skb_frag_t *frag,
					  size_t offset, size_t size,
					  enum dma_data_direction dir)
{
	return dma_map_page(dev, skb_frag_page(frag),
			    frag->page_offset + offset, size, dir);
}

/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not requires the data to be copied.
 */
static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom < NET_SKB_PAD)
		headroom = NET_SKB_PAD;
	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and original skb is not changed.
 *
 *	The result is skb with writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 *	skb_cow_head - skb_cow but only making the head writable
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	This function is identical to skb_cow except that we replace the
 *	skb_cloned check by skb_header_cloned.  It should be used when
 *	you only need to push on some header and do not need to modify
 *	the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}

/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
						      copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline int skb_can_coalesce(struct sk_buff *skb, int i,
				   const struct page *page, int off)
{
	if (i) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == skb_frag_page(frag) &&
		       off == frag->page_offset + skb_frag_size(frag);
	}
	return 0;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linarize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */
static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}

unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets are still valid after the operation.
 */
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;				\
		     skb != (struct sk_buff *)(queue);			\
		     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)				\
		for (skb = (queue)->next, tmp = skb->next;		\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->next)

#define skb_queue_walk_from(queue, skb)					\
		for (; skb != (struct sk_buff *)(queue);		\
		     skb = skb->next)

#define skb_queue_walk_from_safe(queue, skb, tmp)			\
		for (tmp = skb->next;					\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb)				\
		for (skb = (queue)->prev;				\
		     skb != (struct sk_buff *)(queue);			\
		     skb = skb->prev)

#define skb_queue_reverse_walk_safe(queue, skb, tmp)			\
		for (skb = (queue)->prev, tmp = skb->prev;		\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->prev)

#define skb_queue_reverse_walk_from_safe(queue, skb, tmp)		\
		for (tmp = skb->prev;					\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->prev)

static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
{
	frag->next = skb_shinfo(skb)->frag_list;
	skb_shinfo(skb)->frag_list = frag;
}

#define skb_walk_frags(skb, iter)	\
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)

extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
					   int *peeked, int *err);
extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
					 int noblock, int *err);
extern unsigned int datagram_poll(struct file *file, struct socket *sock,
				  struct poll_table_struct *wait);
extern int skb_copy_datagram_iovec(const struct sk_buff *from,
				   int offset, struct iovec *to,
				   int size);
extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
					    int hlen,
					    struct iovec *iov);
extern int skb_copy_datagram_from_iovec(struct sk_buff *skb,
					int offset,
					const struct iovec *from,
					int from_offset,
					int len);
extern int skb_copy_datagram_const_iovec(const struct sk_buff *from,
					 int offset,
					 const struct iovec *to,
					 int to_offset,
					 int size);
extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern void skb_free_datagram_locked(struct sock *sk,
				     struct sk_buff *skb);
extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
			     unsigned int flags);
extern __wsum skb_checksum(const struct sk_buff *skb, int offset,
			   int len, __wsum csum);
extern int skb_copy_bits(const struct sk_buff *skb, int offset,
			 void *to, int len);
extern int skb_store_bits(struct sk_buff *skb, int offset,
			  const void *from, int len);
extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb,
				     int offset, u8 *to, int len,
				     __wsum csum);
extern int skb_splice_bits(struct sk_buff *skb,
			   unsigned int offset,
			   struct pipe_inode_info *pipe,
			   unsigned int len,
			   unsigned int flags);
extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void skb_split(struct sk_buff *skb,
		      struct sk_buff *skb1, const u32 len);
extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
		     int shiftlen);

extern struct sk_buff *skb_segment(struct sk_buff *skb, u32 features);

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
{
	int hlen = skb_headlen(skb);

	if (hlen - offset >= len)
		return skb->data + offset;

	if (skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}

extern void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and stores
 *	it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct timeval *stamp)
{
	*stamp = ktime_to_timeval(skb->tstamp);
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
				       struct timespec *stamp)
{
	*stamp = ktime_to_timespec(skb->tstamp);
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return ktime_set(0, 0);
}

extern void skb_timestamping_init(void);

#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING

extern void skb_clone_tx_timestamp(struct sk_buff *skb);
extern bool skb_defer_rx_timestamp(struct sk_buff *skb);

#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */

static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	return false;
}

#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */

/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb back to the stack, with
 * or without a timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps, may be NULL if not available
 *
 */
void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps);

/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb:	the original outgoing packet
 * @hwtstamps:	hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket.  Errors are silently ignored.
 */
extern void skb_tstamp_tx(struct sk_buff *orig_skb,
			  struct skb_shared_hwtstamps *hwtstamps);

static inline void sw_tx_timestamp(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
	    !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		skb_tstamp_tx(skb, NULL);
}

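/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC Drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * @skb: A socket buffer.
 */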
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	skb_clone_tx_timestamp(skb);
	sw_tx_timestamp(skb);
}

extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
extern __sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return skb->ip_summed & CHECKSUM_UNNECESSARY;
}

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify that checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is correct.  In particular, this function will return zero
 *	if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
 *	hardware has already verified the correctness of the checksum.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
	if (skb)
		atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(skb->nfct_reasm);
	skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

/* Note: This doesn't put any conntrack and bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->nfct = src->nfct;
	nf_conntrack_get(src->nfct);
	dst->nfctinfo = src->nfctinfo;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	dst->nfct_reasm = src->nfct_reasm;
	nf_conntrack_get_reasm(src->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	dst->nf_bridge	= src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(dst->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(dst->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}

extern u16 __skb_tx_hash(const struct net_device *dev,
			 const struct sk_buff *skb,
			 unsigned int num_tx_queues);

#ifdef CONFIG_XFRM
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return skb->sp;
}
#else
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return NULL;
}
#endif

static inline int skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

static inline int skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set. */
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one.  Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper, to document places where we make this assertion.
 */
static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
{
	if (irqs_disabled())
		return false;

	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
		return false;

	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
		return false;

	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
	if (skb_end_pointer(skb) - skb->head < skb_size)
		return false;

	if (skb_shared(skb) || skb_cloned(skb))
		return false;

	return true;
}
#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */