/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/cache.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>

/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

/* A. Checksumming of received packets by device.
 *
 *	NONE: device failed to checksum this packet.
 *		skb->csum is undefined.
 *
 *	UNNECESSARY: device parsed the packet and the would-be TCP/UDP
 *		checksum is known to be correct.
 *		skb->csum is undefined.
 *
 *	COMPLETE: the most generic way. Device supplied checksum of _all_
 *	    the packet as seen by netif_rx() in skb->csum.
 *	    NOTE: Even if device supports only some protocols, but
 *	    is able to produce some skb->csum, it MUST use COMPLETE,
 *	    not UNNECESSARY.
 *
 *	PARTIAL: identical to the case for output below.  This may occur
 *	    on a packet received directly from another Linux OS, e.g.,
 *	    a virtualised Linux kernel on the same host.  The packet can
 *	    be treated in the same way as UNNECESSARY except that on
 *	    output (i.e., forwarding) the checksum must be filled in
 *	    by the caller.
 *
 * B. Checksumming on output.
 *
 *	NONE: skb is checksummed by protocol or csum is not required.
 *
 *	PARTIAL: device is required to csum packet as seen by hard_start_xmit
 *	    from skb->csum_start to the end and to record the checksum
 *	    at skb->csum_start + skb->csum_offset.
 *
 *	Device must show its capabilities in dev->features, set up at device
 *	setup time, e.g. NETIF_F_HW_CSUM (device can checksum everything),
 *	or NETIF_F_IP_CSUM / NETIF_F_IPV6_CSUM (TCP/UDP over IPv4/IPv6 only).
 */
108struct net_device;
109struct scatterlist;
110struct pipe_inode_info;
111
112#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
113struct nf_conntrack {
114 atomic_t use;
115};
116#endif
117
118#ifdef CONFIG_BRIDGE_NETFILTER
119struct nf_bridge_info {
120 atomic_t use;
121 unsigned int mask;
122 struct net_device *physindev;
123 struct net_device *physoutdev;
124 unsigned long data[32 / sizeof(unsigned long)];
125};
126#endif
127
128struct sk_buff_head {
129
130 struct sk_buff *next;
131 struct sk_buff *prev;
132
133 __u32 qlen;
134 spinlock_t lock;
135};
136
137struct sk_buff;
138
/* To allow 64K frame to be packed as single skb without frag_list we
 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
 * buffers which do not start on a page boundary.
 *
 * Since GRO uses frags we allocate at least 16 regardless of page
 * size.
 */
#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif
151
152typedef struct skb_frag_struct skb_frag_t;
153
154struct skb_frag_struct {
155 struct {
156 struct page *p;
157 } page;
158#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
159 __u32 page_offset;
160 __u32 size;
161#else
162 __u16 page_offset;
163 __u16 size;
164#endif
165};
166
167static inline unsigned int skb_frag_size(const skb_frag_t *frag)
168{
169 return frag->size;
170}
171
172static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
173{
174 frag->size = size;
175}
176
177static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
178{
179 frag->size += delta;
180}
181
182static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
183{
184 frag->size -= delta;
185}
186
187#define HAVE_HW_TIME_STAMP
/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since arbitrary point in time
 * @syststamp:	hwtstamp transformed to system time base
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp. The relation between the different kinds of time
 * stamps is as follows:
 *
 * syststamp and tstamp can be compared against each other in
 * different ways. They are in nanoseconds since a base time. The
 * difference between their bases is the time when the HW time stamp
 * was taken.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
	ktime_t	syststamp;
};
216
/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* device driver supports TX zero-copy buffers */
	SKBTX_DEV_ZEROCOPY = 1 << 3,

	/* generate wifi status information (where possible) */
	SKBTX_WIFI_STATUS = 1 << 4,
};
234
/*
 * The callback notifies userspace to release buffers when skb DMA is done in
 * lower device, the skb last reference should be 0 when calling this.
 * The zerocopy_success argument is true if zero copy transmit occurred,
 * false on data copy or out of memory error caused by data copy attempt.
 * The ctx field is used to track device context.
 * The desc field is used to track userspace buffer index.
 */
struct ubuf_info {
	void (*callback)(struct ubuf_info *, bool zerocopy_success);
	void *ctx;
	unsigned long desc;
};
248
/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	unsigned char	nr_frags;
	__u8		tx_flags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short	gso_type;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	__be32		ip6_frag_id;

	/*
	 * Warning : all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void *		destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};
275
/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
289
290
291enum {
292 SKB_FCLONE_UNAVAILABLE,
293 SKB_FCLONE_ORIG,
294 SKB_FCLONE_CLONE,
295};
296
297enum {
298 SKB_GSO_TCPV4 = 1 << 0,
299 SKB_GSO_UDP = 1 << 1,
300
301
302 SKB_GSO_DODGY = 1 << 2,
303
304
305 SKB_GSO_TCP_ECN = 1 << 3,
306
307 SKB_GSO_TCPV6 = 1 << 4,
308
309 SKB_GSO_FCOE = 1 << 5,
310};
311
312#if BITS_PER_LONG > 32
313#define NET_SKBUFF_DATA_USES_OFFSET 1
314#endif
315
316#ifdef NET_SKBUFF_DATA_USES_OFFSET
317typedef unsigned int sk_buff_data_t;
318#else
319typedef unsigned char *sk_buff_data_t;
320#endif
321
322#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
323 defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
324#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
325#endif
/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@tstamp: Time we arrived
 *	@sk: Socket we are owned by
 *	@dev: Device we arrived on/are leaving by
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@_skb_refdst: destination entry (with norefcount bit)
 *	@sp: the security path, used for xfrm
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@hdr_len: writable header length of cloned skb
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
 *	@priority: Packet queueing priority
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@ip_summed: Driver fed us an IP checksum
 *	@nohdr: Payload reference only, must not modify header
 *	@nfctinfo: Relationship of this skb to the connection
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ipvs_property: skbuff is owned by ipvs
 *	@peeked: this packet has been seen already, so stats have been
 *		done for it, don't do them again
 *	@nf_trace: netfilter packet trace flag
 *	@protocol: Packet protocol from driver
 *	@destructor: Destruct function
 *	@nfct: Associated connection, if any
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@skb_iif: ifindex of device we arrived on
 *	@rxhash: the packet hash computed on receive
 *	@vlan_tci: vlan tag control information
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@queue_mapping: Queue mapping for multiqueue devices
 *	@ndisc_nodetype: router type (from link layer)
 *	@pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
 *	@ooo_okay: allow the mapping of a socket to a queue to be changed
 *	@l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport
 *		ports.
 *	@wifi_acked_valid: wifi_acked was set
 *	@wifi_acked: whether frame was acked on wifi or not
 *	@no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
 *	@head_frag: skb head was allocated from a page fragment
 *	@encapsulation: indicates the inner headers in the skbuff are valid
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 *	@mark: Generic packet mark
 *	@dropcount: total number of sk_receive_queue overflows
 *	@inner_transport_header: Inner transport layer header (encapsulation)
 *	@inner_network_header: Network layer header (encapsulation)
 *	@transport_header: Transport layer header
 *	@network_header: Network layer header
 *	@mac_header: Link layer header
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@truesize: Buffer size
 *	@users: User count - see {datagram,tcp}.c
 */
392struct sk_buff {
393
394 struct sk_buff *next;
395 struct sk_buff *prev;
396
397 ktime_t tstamp;
398
399 struct sock *sk;
400 struct net_device *dev;
401
402
403
404
405
406
407
408 char cb[48] __aligned(8);
409
410 unsigned long _skb_refdst;
411#ifdef CONFIG_XFRM
412 struct sec_path *sp;
413#endif
414 unsigned int len,
415 data_len;
416 __u16 mac_len,
417 hdr_len;
418 union {
419 __wsum csum;
420 struct {
421 __u16 csum_start;
422 __u16 csum_offset;
423 };
424 };
425 __u32 priority;
426 kmemcheck_bitfield_begin(flags1);
427 __u8 local_df:1,
428 cloned:1,
429 ip_summed:2,
430 nohdr:1,
431 nfctinfo:3;
432 __u8 pkt_type:3,
433 fclone:2,
434 ipvs_property:1,
435 peeked:1,
436 nf_trace:1;
437 kmemcheck_bitfield_end(flags1);
438 __be16 protocol;
439
440 void (*destructor)(struct sk_buff *skb);
441#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
442 struct nf_conntrack *nfct;
443#endif
444#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
445 struct sk_buff *nfct_reasm;
446#endif
447#ifdef CONFIG_BRIDGE_NETFILTER
448 struct nf_bridge_info *nf_bridge;
449#endif
450
451 int skb_iif;
452
453 __u32 rxhash;
454
455 __u16 vlan_tci;
456
457#ifdef CONFIG_NET_SCHED
458 __u16 tc_index;
459#ifdef CONFIG_NET_CLS_ACT
460 __u16 tc_verd;
461#endif
462#endif
463
464 __u16 queue_mapping;
465 kmemcheck_bitfield_begin(flags2);
466#ifdef CONFIG_IPV6_NDISC_NODETYPE
467 __u8 ndisc_nodetype:2;
468#endif
469 __u8 pfmemalloc:1;
470 __u8 ooo_okay:1;
471 __u8 l4_rxhash:1;
472 __u8 wifi_acked_valid:1;
473 __u8 wifi_acked:1;
474 __u8 no_fcs:1;
475 __u8 head_frag:1;
476
477
478
479
480
481 __u8 encapsulation:1;
482
483 kmemcheck_bitfield_end(flags2);
484
485#ifdef CONFIG_NET_DMA
486 dma_cookie_t dma_cookie;
487#endif
488#ifdef CONFIG_NETWORK_SECMARK
489 __u32 secmark;
490#endif
491 union {
492 __u32 mark;
493 __u32 dropcount;
494 __u32 avail_size;
495 };
496
497 sk_buff_data_t inner_transport_header;
498 sk_buff_data_t inner_network_header;
499 sk_buff_data_t transport_header;
500 sk_buff_data_t network_header;
501 sk_buff_data_t mac_header;
502
503 sk_buff_data_t tail;
504 sk_buff_data_t end;
505 unsigned char *head,
506 *data;
507 unsigned int truesize;
508 atomic_t users;
509};
510
511#ifdef __KERNEL__
512
513
514
515#include <linux/slab.h>
516
517
518#define SKB_ALLOC_FCLONE 0x01
519#define SKB_ALLOC_RX 0x02
520
521
522static inline bool skb_pfmemalloc(const struct sk_buff *skb)
523{
524 return unlikely(skb->pfmemalloc);
525}
526
527
528
529
530
531#define SKB_DST_NOREF 1UL
532#define SKB_DST_PTRMASK ~(SKB_DST_NOREF)
533
/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of reference taken or not.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check we still are in a
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}
550
/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}
563
564extern void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst);
565
566
567
568
569
570static inline bool skb_dst_is_noref(const struct sk_buff *skb)
571{
572 return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
573}
574
575static inline struct rtable *skb_rtable(const struct sk_buff *skb)
576{
577 return (struct rtable *)skb_dst(skb);
578}
579
580extern void kfree_skb(struct sk_buff *skb);
581extern void skb_tx_error(struct sk_buff *skb);
582extern void consume_skb(struct sk_buff *skb);
583extern void __kfree_skb(struct sk_buff *skb);
584extern struct kmem_cache *skbuff_head_cache;
585
586extern void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
587extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
588 bool *fragstolen, int *delta_truesize);
589
590extern struct sk_buff *__alloc_skb(unsigned int size,
591 gfp_t priority, int flags, int node);
592extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
593static inline struct sk_buff *alloc_skb(unsigned int size,
594 gfp_t priority)
595{
596 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
597}
598
599static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
600 gfp_t priority)
601{
602 return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
603}
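/*
 * Usage sketch (illustrative, not part of this header): a minimal
 * allocate/initialise/release sequence. The length len and the payload
 * buffer are hypothetical; error paths normally use kfree_skb() and
 * successful completion consume_skb().
 *
 *	struct sk_buff *skb = alloc_skb(len + NET_SKB_PAD, GFP_KERNEL);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, NET_SKB_PAD);
 *	memcpy(skb_put(skb, len), payload, len);
 *	...
 *	consume_skb(skb);
 */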
604
605extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
606extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
607extern struct sk_buff *skb_clone(struct sk_buff *skb,
608 gfp_t priority);
609extern struct sk_buff *skb_copy(const struct sk_buff *skb,
610 gfp_t priority);
611extern struct sk_buff *__pskb_copy(struct sk_buff *skb,
612 int headroom, gfp_t gfp_mask);
613
614extern int pskb_expand_head(struct sk_buff *skb,
615 int nhead, int ntail,
616 gfp_t gfp_mask);
617extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
618 unsigned int headroom);
619extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
620 int newheadroom, int newtailroom,
621 gfp_t priority);
622extern int skb_to_sgvec(struct sk_buff *skb,
623 struct scatterlist *sg, int offset,
624 int len);
625extern int skb_cow_data(struct sk_buff *skb, int tailbits,
626 struct sk_buff **trailer);
627extern int skb_pad(struct sk_buff *skb, int pad);
628#define dev_kfree_skb(a) consume_skb(a)
629
630extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
631 int getfrag(void *from, char *to, int offset,
632 int len,int odd, struct sk_buff *skb),
633 void *from, int length);
634
635struct skb_seq_state {
636 __u32 lower_offset;
637 __u32 upper_offset;
638 __u32 frag_idx;
639 __u32 stepped_offset;
640 struct sk_buff *root_skb;
641 struct sk_buff *cur_skb;
642 __u8 *frag_data;
643};
644
645extern void skb_prepare_seq_read(struct sk_buff *skb,
646 unsigned int from, unsigned int to,
647 struct skb_seq_state *st);
648extern unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
649 struct skb_seq_state *st);
650extern void skb_abort_seq_read(struct skb_seq_state *st);
651
652extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
653 unsigned int to, struct ts_config *config,
654 struct ts_state *state);
655
656extern void __skb_get_rxhash(struct sk_buff *skb);
657static inline __u32 skb_get_rxhash(struct sk_buff *skb)
658{
659 if (!skb->l4_rxhash)
660 __skb_get_rxhash(skb);
661
662 return skb->rxhash;
663}
664
665#ifdef NET_SKBUFF_DATA_USES_OFFSET
666static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
667{
668 return skb->head + skb->end;
669}
670
671static inline unsigned int skb_end_offset(const struct sk_buff *skb)
672{
673 return skb->end;
674}
675#else
676static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
677{
678 return skb->end;
679}
680
681static inline unsigned int skb_end_offset(const struct sk_buff *skb)
682{
683 return skb->end - skb->head;
684}
685#endif
686
687
688#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
689
690static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
691{
692 return &skb_shinfo(skb)->hwtstamps;
693}
694
/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}
705
706
707
708
709
710
711
712
713static inline bool skb_queue_is_last(const struct sk_buff_head *list,
714 const struct sk_buff *skb)
715{
716 return skb->next == (struct sk_buff *)list;
717}
718
719
720
721
722
723
724
725
726static inline bool skb_queue_is_first(const struct sk_buff_head *list,
727 const struct sk_buff *skb)
728{
729 return skb->prev == (struct sk_buff *)list;
730}
731
732
733
734
735
736
737
738
739
740static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
741 const struct sk_buff *skb)
742{
743
744
745
746 BUG_ON(skb_queue_is_last(list, skb));
747 return skb->next;
748}
749
750
751
752
753
754
755
756
757
758static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
759 const struct sk_buff *skb)
760{
761
762
763
764 BUG_ON(skb_queue_is_first(list, skb));
765 return skb->prev;
766}
767
768
769
770
771
772
773
774
775static inline struct sk_buff *skb_get(struct sk_buff *skb)
776{
777 atomic_inc(&skb->users);
778 return skb;
779}
780
/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}
799
800
801
802
803
804
805
806
807static inline int skb_header_cloned(const struct sk_buff *skb)
808{
809 int dataref;
810
811 if (!skb->cloned)
812 return 0;
813
814 dataref = atomic_read(&skb_shinfo(skb)->dataref);
815 dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
816 return dataref != 1;
817}
818
819
820
821
822
823
824
825
826
827static inline void skb_header_release(struct sk_buff *skb)
828{
829 BUG_ON(skb->nohdr);
830 skb->nohdr = 1;
831 atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
832}
833
834
835
836
837
838
839
840
841static inline int skb_shared(const struct sk_buff *skb)
842{
843 return atomic_read(&skb->users) != 1;
844}
845
/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt status or with spinlocks held pri must
 *	be GFP_ATOMIC.
 *
 *	NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);

		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
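/*
 * Usage sketch (illustrative, not part of this header): protocol receive
 * handlers usually call skb_share_check() before modifying a buffer that
 * may also be referenced elsewhere. The handler name example_rcv() is
 * hypothetical.
 *
 *	static int example_rcv(struct sk_buff *skb)
 *	{
 *		skb = skb_share_check(skb, GFP_ATOMIC);
 *		if (!skb)
 *			return NET_RX_DROP;
 *		(the buffer is now safe to modify)
 *		...
 *	}
 */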
873
/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
920{
921 struct sk_buff *skb = list_->next;
922
923 if (skb == (struct sk_buff *)list_)
924 skb = NULL;
925 return skb;
926}
927
928
929
930
931
932
933
934
935
936
937static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
938 const struct sk_buff_head *list_)
939{
940 struct sk_buff *next = skb->next;
941
942 if (next == (struct sk_buff *)list_)
943 next = NULL;
944 return next;
945}
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
961{
962 struct sk_buff *skb = list_->prev;
963
964 if (skb == (struct sk_buff *)list_)
965 skb = NULL;
966 return skb;
967
968}
969
970
971
972
973
974
975
976static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
977{
978 return list_->qlen;
979}
980
981
982
983
984
985
986
987
988
989
990
991static inline void __skb_queue_head_init(struct sk_buff_head *list)
992{
993 list->prev = list->next = (struct sk_buff *)list;
994 list->qlen = 0;
995}
996
997
998
999
1000
1001
1002
1003
1004
1005static inline void skb_queue_head_init(struct sk_buff_head *list)
1006{
1007 spin_lock_init(&list->lock);
1008 __skb_queue_head_init(list);
1009}
1010
1011static inline void skb_queue_head_init_class(struct sk_buff_head *list,
1012 struct lock_class_key *class)
1013{
1014 skb_queue_head_init(list);
1015 lockdep_set_class(&list->lock, class);
1016}
1017
1018
1019
1020
1021
1022
1023
1024extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
1025static inline void __skb_insert(struct sk_buff *newsk,
1026 struct sk_buff *prev, struct sk_buff *next,
1027 struct sk_buff_head *list)
1028{
1029 newsk->next = next;
1030 newsk->prev = prev;
1031 next->prev = prev->next = newsk;
1032 list->qlen++;
1033}
1034
1035static inline void __skb_queue_splice(const struct sk_buff_head *list,
1036 struct sk_buff *prev,
1037 struct sk_buff *next)
1038{
1039 struct sk_buff *first = list->next;
1040 struct sk_buff *last = list->prev;
1041
1042 first->prev = prev;
1043 prev->next = first;
1044
1045 last->next = next;
1046 next->prev = last;
1047}
1048
1049
1050
1051
1052
1053
1054static inline void skb_queue_splice(const struct sk_buff_head *list,
1055 struct sk_buff_head *head)
1056{
1057 if (!skb_queue_empty(list)) {
1058 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
1059 head->qlen += list->qlen;
1060 }
1061}
1062
1063
1064
1065
1066
1067
1068
1069
1070static inline void skb_queue_splice_init(struct sk_buff_head *list,
1071 struct sk_buff_head *head)
1072{
1073 if (!skb_queue_empty(list)) {
1074 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
1075 head->qlen += list->qlen;
1076 __skb_queue_head_init(list);
1077 }
1078}
1079
1080
1081
1082
1083
1084
1085static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
1086 struct sk_buff_head *head)
1087{
1088 if (!skb_queue_empty(list)) {
1089 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
1090 head->qlen += list->qlen;
1091 }
1092}
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
1103 struct sk_buff_head *head)
1104{
1105 if (!skb_queue_empty(list)) {
1106 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
1107 head->qlen += list->qlen;
1108 __skb_queue_head_init(list);
1109 }
1110}
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123static inline void __skb_queue_after(struct sk_buff_head *list,
1124 struct sk_buff *prev,
1125 struct sk_buff *newsk)
1126{
1127 __skb_insert(newsk, prev, prev->next, list);
1128}
1129
1130extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
1131 struct sk_buff_head *list);
1132
1133static inline void __skb_queue_before(struct sk_buff_head *list,
1134 struct sk_buff *next,
1135 struct sk_buff *newsk)
1136{
1137 __skb_insert(newsk, next->prev, next, list);
1138}
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
1151static inline void __skb_queue_head(struct sk_buff_head *list,
1152 struct sk_buff *newsk)
1153{
1154 __skb_queue_after(list, (struct sk_buff *)list, newsk);
1155}
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
1168static inline void __skb_queue_tail(struct sk_buff_head *list,
1169 struct sk_buff *newsk)
1170{
1171 __skb_queue_before(list, (struct sk_buff *)list, newsk);
1172}
1173
1174
1175
1176
1177
1178extern void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
1179static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
1180{
1181 struct sk_buff *next, *prev;
1182
1183 list->qlen--;
1184 next = skb->next;
1185 prev = skb->prev;
1186 skb->next = skb->prev = NULL;
1187 next->prev = prev;
1188 prev->next = next;
1189}
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
1200static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
1201{
1202 struct sk_buff *skb = skb_peek(list);
1203 if (skb)
1204 __skb_unlink(skb, list);
1205 return skb;
1206}
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
1217static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
1218{
1219 struct sk_buff *skb = skb_peek_tail(list);
1220 if (skb)
1221 __skb_unlink(skb, list);
1222 return skb;
1223}
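/*
 * Usage sketch (illustrative, not part of this header): a simple
 * producer/consumer around a private sk_buff_head. The locked helpers
 * skb_queue_tail()/skb_dequeue() take list->lock internally; rxq and
 * process() are hypothetical.
 *
 *	struct sk_buff_head rxq;
 *
 *	skb_queue_head_init(&rxq);
 *	...
 *	skb_queue_tail(&rxq, skb);			(producer)
 *	...
 *	while ((skb = skb_dequeue(&rxq)) != NULL)	(consumer)
 *		process(skb);
 */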
1224
1225
1226static inline bool skb_is_nonlinear(const struct sk_buff *skb)
1227{
1228 return skb->data_len;
1229}
1230
1231static inline unsigned int skb_headlen(const struct sk_buff *skb)
1232{
1233 return skb->len - skb->data_len;
1234}
1235
1236static inline int skb_pagelen(const struct sk_buff *skb)
1237{
1238 int i, len = 0;
1239
1240 for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
1241 len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
1242 return len + skb_headlen(skb);
1243}
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
1259 struct page *page, int off, int size)
1260{
1261 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272 if (page->pfmemalloc && !page->mapping)
1273 skb->pfmemalloc = true;
1274 frag->page.p = page;
1275 frag->page_offset = off;
1276 skb_frag_size_set(frag, size);
1277}
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
1294 struct page *page, int off, int size)
1295{
1296 __skb_fill_page_desc(skb, i, page, off, size);
1297 skb_shinfo(skb)->nr_frags = i + 1;
1298}
1299
1300extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
1301 int off, int size, unsigned int truesize);
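/*
 * Usage sketch (illustrative, not part of this header): a driver that
 * receives data into pages can attach them as fragments instead of
 * copying; skb_add_rx_frag() also accounts the added bytes in skb->len,
 * skb->data_len and skb->truesize. page, offset and frag_len below are
 * hypothetical.
 *
 *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
 *			frag_len, PAGE_SIZE);
 */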
1302
1303#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags)
1304#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frag_list(skb))
1305#define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
1306
1307#ifdef NET_SKBUFF_DATA_USES_OFFSET
1308static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
1309{
1310 return skb->head + skb->tail;
1311}
1312
1313static inline void skb_reset_tail_pointer(struct sk_buff *skb)
1314{
1315 skb->tail = skb->data - skb->head;
1316}
1317
1318static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
1319{
1320 skb_reset_tail_pointer(skb);
1321 skb->tail += offset;
1322}
1323#else
1324static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
1325{
1326 return skb->tail;
1327}
1328
1329static inline void skb_reset_tail_pointer(struct sk_buff *skb)
1330{
1331 skb->tail = skb->data;
1332}
1333
1334static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
1335{
1336 skb->tail = skb->data + offset;
1337}
1338
1339#endif
1340
1341
1342
1343
1344extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
1345static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
1346{
1347 unsigned char *tmp = skb_tail_pointer(skb);
1348 SKB_LINEAR_ASSERT(skb);
1349 skb->tail += len;
1350 skb->len += len;
1351 return tmp;
1352}
1353
1354extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
1355static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
1356{
1357 skb->data -= len;
1358 skb->len += len;
1359 return skb->data;
1360}
1361
1362extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
1363static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
1364{
1365 skb->len -= len;
1366 BUG_ON(skb->len < skb->data_len);
1367 return skb->data += len;
1368}
1369
1370static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
1371{
1372 return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
1373}
1374
1375extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
1376
1377static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
1378{
1379 if (len > skb_headlen(skb) &&
1380 !__pskb_pull_tail(skb, len - skb_headlen(skb)))
1381 return NULL;
1382 skb->len -= len;
1383 return skb->data += len;
1384}
1385
1386static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
1387{
1388 return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
1389}
1390
1391static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
1392{
1393 if (likely(len <= skb_headlen(skb)))
1394 return 1;
1395 if (unlikely(len > skb->len))
1396 return 0;
1397 return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
1398}
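/*
 * Usage sketch (illustrative, not part of this header): before
 * dereferencing a header, make sure it lies in the linear area.
 * pskb_may_pull() pulls fragment data into the linear part if needed;
 * failure means the packet is too short. The drop label and iph variable
 * are hypothetical.
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;
 *	iph = ip_hdr(skb);
 */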
1399
1400
1401
1402
1403
1404
1405
1406static inline unsigned int skb_headroom(const struct sk_buff *skb)
1407{
1408 return skb->data - skb->head;
1409}
1410
1411
1412
1413
1414
1415
1416
1417static inline int skb_tailroom(const struct sk_buff *skb)
1418{
1419 return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
1420}
1421
1422
1423
1424
1425
1426
1427
1428
1429static inline int skb_availroom(const struct sk_buff *skb)
1430{
1431 return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len;
1432}
1433
/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}
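/*
 * Usage sketch (illustrative, not part of this header): the canonical way
 * to build an outgoing packet is to reserve headroom first, copy the
 * payload with skb_put(), then prepend headers with skb_push(). The
 * variables payload, payload_len and eth are hypothetical.
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *	memcpy(skb_put(skb, payload_len), payload, payload_len);
 *	eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
 */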
1447
1448static inline void skb_reset_inner_headers(struct sk_buff *skb)
1449{
1450 skb->inner_network_header = skb->network_header;
1451 skb->inner_transport_header = skb->transport_header;
1452}
1453
1454static inline void skb_reset_mac_len(struct sk_buff *skb)
1455{
1456 skb->mac_len = skb->network_header - skb->mac_header;
1457}
1458
1459#ifdef NET_SKBUFF_DATA_USES_OFFSET
1460static inline unsigned char *skb_inner_transport_header(const struct sk_buff
1461 *skb)
1462{
1463 return skb->head + skb->inner_transport_header;
1464}
1465
1466static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
1467{
1468 skb->inner_transport_header = skb->data - skb->head;
1469}
1470
1471static inline void skb_set_inner_transport_header(struct sk_buff *skb,
1472 const int offset)
1473{
1474 skb_reset_inner_transport_header(skb);
1475 skb->inner_transport_header += offset;
1476}
1477
1478static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
1479{
1480 return skb->head + skb->inner_network_header;
1481}
1482
1483static inline void skb_reset_inner_network_header(struct sk_buff *skb)
1484{
1485 skb->inner_network_header = skb->data - skb->head;
1486}
1487
1488static inline void skb_set_inner_network_header(struct sk_buff *skb,
1489 const int offset)
1490{
1491 skb_reset_inner_network_header(skb);
1492 skb->inner_network_header += offset;
1493}
1494
1495static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
1496{
1497 return skb->head + skb->transport_header;
1498}
1499
1500static inline void skb_reset_transport_header(struct sk_buff *skb)
1501{
1502 skb->transport_header = skb->data - skb->head;
1503}
1504
1505static inline void skb_set_transport_header(struct sk_buff *skb,
1506 const int offset)
1507{
1508 skb_reset_transport_header(skb);
1509 skb->transport_header += offset;
1510}
1511
1512static inline unsigned char *skb_network_header(const struct sk_buff *skb)
1513{
1514 return skb->head + skb->network_header;
1515}
1516
1517static inline void skb_reset_network_header(struct sk_buff *skb)
1518{
1519 skb->network_header = skb->data - skb->head;
1520}
1521
1522static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
1523{
1524 skb_reset_network_header(skb);
1525 skb->network_header += offset;
1526}
1527
1528static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
1529{
1530 return skb->head + skb->mac_header;
1531}
1532
1533static inline int skb_mac_header_was_set(const struct sk_buff *skb)
1534{
1535 return skb->mac_header != ~0U;
1536}
1537
1538static inline void skb_reset_mac_header(struct sk_buff *skb)
1539{
1540 skb->mac_header = skb->data - skb->head;
1541}
1542
1543static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
1544{
1545 skb_reset_mac_header(skb);
1546 skb->mac_header += offset;
1547}
1548
1549#else
1550static inline unsigned char *skb_inner_transport_header(const struct sk_buff
1551 *skb)
1552{
1553 return skb->inner_transport_header;
1554}
1555
1556static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
1557{
1558 skb->inner_transport_header = skb->data;
1559}
1560
1561static inline void skb_set_inner_transport_header(struct sk_buff *skb,
1562 const int offset)
1563{
1564 skb->inner_transport_header = skb->data + offset;
1565}
1566
1567static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
1568{
1569 return skb->inner_network_header;
1570}
1571
1572static inline void skb_reset_inner_network_header(struct sk_buff *skb)
1573{
1574 skb->inner_network_header = skb->data;
1575}
1576
1577static inline void skb_set_inner_network_header(struct sk_buff *skb,
1578 const int offset)
1579{
1580 skb->inner_network_header = skb->data + offset;
1581}
1582
1583static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
1584{
1585 return skb->transport_header;
1586}
1587
1588static inline void skb_reset_transport_header(struct sk_buff *skb)
1589{
1590 skb->transport_header = skb->data;
1591}
1592
1593static inline void skb_set_transport_header(struct sk_buff *skb,
1594 const int offset)
1595{
1596 skb->transport_header = skb->data + offset;
1597}
1598
1599static inline unsigned char *skb_network_header(const struct sk_buff *skb)
1600{
1601 return skb->network_header;
1602}
1603
1604static inline void skb_reset_network_header(struct sk_buff *skb)
1605{
1606 skb->network_header = skb->data;
1607}
1608
1609static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
1610{
1611 skb->network_header = skb->data + offset;
1612}
1613
1614static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
1615{
1616 return skb->mac_header;
1617}
1618
1619static inline int skb_mac_header_was_set(const struct sk_buff *skb)
1620{
1621 return skb->mac_header != NULL;
1622}
1623
1624static inline void skb_reset_mac_header(struct sk_buff *skb)
1625{
1626 skb->mac_header = skb->data;
1627}
1628
1629static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
1630{
1631 skb->mac_header = skb->data + offset;
1632}
1633#endif
1634
1635static inline void skb_mac_header_rebuild(struct sk_buff *skb)
1636{
1637 if (skb_mac_header_was_set(skb)) {
1638 const unsigned char *old_mac = skb_mac_header(skb);
1639
1640 skb_set_mac_header(skb, -skb->mac_len);
1641 memmove(skb_mac_header(skb), old_mac, skb->mac_len);
1642 }
1643}
1644
1645static inline int skb_checksum_start_offset(const struct sk_buff *skb)
1646{
1647 return skb->csum_start - skb_headroom(skb);
1648}
1649
1650static inline int skb_transport_offset(const struct sk_buff *skb)
1651{
1652 return skb_transport_header(skb) - skb->data;
1653}
1654
1655static inline u32 skb_network_header_len(const struct sk_buff *skb)
1656{
1657 return skb->transport_header - skb->network_header;
1658}
1659
1660static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
1661{
1662 return skb->inner_transport_header - skb->inner_network_header;
1663}
1664
1665static inline int skb_network_offset(const struct sk_buff *skb)
1666{
1667 return skb_network_header(skb) - skb->data;
1668}
1669
1670static inline int skb_inner_network_offset(const struct sk_buff *skb)
1671{
1672 return skb_inner_network_header(skb) - skb->data;
1673}
1674
1675static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
1676{
1677 return pskb_may_pull(skb, skb_network_offset(skb) + len);
1678}
1679
/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724#ifndef NET_SKB_PAD
1725#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
1726#endif
1727
1728extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
1729
1730static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
1731{
1732 if (unlikely(skb_is_nonlinear(skb))) {
1733 WARN_ON(1);
1734 return;
1735 }
1736 skb->len = len;
1737 skb_set_tail_pointer(skb, len);
1738}
1739
1740extern void skb_trim(struct sk_buff *skb, unsigned int len);
1741
1742static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
1743{
1744 if (skb->data_len)
1745 return ___pskb_trim(skb, len);
1746 __skb_trim(skb, len);
1747 return 0;
1748}
1749
1750static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
1751{
1752 return (len < skb->len) ? __pskb_trim(skb, len) : 0;
1753}
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
1765{
1766 int err = pskb_trim(skb, len);
1767 BUG_ON(err);
1768}
1769
/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk		= NULL;
}
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
1796{
1797 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
1798 return 0;
1799 return skb_copy_ubufs(skb, gfp_mask);
1800}
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810extern void skb_queue_purge(struct sk_buff_head *list);
1811static inline void __skb_queue_purge(struct sk_buff_head *list)
1812{
1813 struct sk_buff *skb;
1814 while ((skb = __skb_dequeue(list)) != NULL)
1815 kfree_skb(skb);
1816}
1817
1818extern void *netdev_alloc_frag(unsigned int fragsz);
1819
1820extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
1821 unsigned int length,
1822 gfp_t gfp_mask);
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
1838 unsigned int length)
1839{
1840 return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
1841}
1842
1843
1844static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
1845 gfp_t gfp_mask)
1846{
1847 return __netdev_alloc_skb(NULL, length, gfp_mask);
1848}
1849
1850
1851static inline struct sk_buff *dev_alloc_skb(unsigned int length)
1852{
1853 return netdev_alloc_skb(NULL, length);
1854}
1855
1856
1857static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
1858 unsigned int length, gfp_t gfp)
1859{
1860 struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
1861
1862 if (NET_IP_ALIGN && skb)
1863 skb_reserve(skb, NET_IP_ALIGN);
1864 return skb;
1865}
1866
1867static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
1868 unsigned int length)
1869{
1870 return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
1871}
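/*
 * Usage sketch (illustrative, not part of this header): a common driver
 * receive path allocates with the IP header alignment already applied,
 * copies the frame and hands it to the stack. rx_buf and pkt_len are
 * hypothetical.
 *
 *	skb = netdev_alloc_skb_ip_align(dev, pkt_len);
 *	if (!skb)
 *		return;		(drop and count the failure)
 *	memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */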
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883static inline struct page *__skb_alloc_pages(gfp_t gfp_mask,
1884 struct sk_buff *skb,
1885 unsigned int order)
1886{
1887 struct page *page;
1888
1889 gfp_mask |= __GFP_COLD;
1890
1891 if (!(gfp_mask & __GFP_NOMEMALLOC))
1892 gfp_mask |= __GFP_MEMALLOC;
1893
1894 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
1895 if (skb && page && page->pfmemalloc)
1896 skb->pfmemalloc = true;
1897
1898 return page;
1899}
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910static inline struct page *__skb_alloc_page(gfp_t gfp_mask,
1911 struct sk_buff *skb)
1912{
1913 return __skb_alloc_pages(gfp_mask, skb, 0);
1914}
1915
1916
1917
1918
1919
1920
1921static inline void skb_propagate_pfmemalloc(struct page *page,
1922 struct sk_buff *skb)
1923{
1924 if (page && page->pfmemalloc)
1925 skb->pfmemalloc = true;
1926}
1927
1928
1929
1930
1931
1932
1933
1934static inline struct page *skb_frag_page(const skb_frag_t *frag)
1935{
1936 return frag->page.p;
1937}
1938
1939
1940
1941
1942
1943
1944
1945static inline void __skb_frag_ref(skb_frag_t *frag)
1946{
1947 get_page(skb_frag_page(frag));
1948}
1949
1950
1951
1952
1953
1954
1955
1956
1957static inline void skb_frag_ref(struct sk_buff *skb, int f)
1958{
1959 __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
1960}
1961
1962
1963
1964
1965
1966
1967
1968static inline void __skb_frag_unref(skb_frag_t *frag)
1969{
1970 put_page(skb_frag_page(frag));
1971}
1972
1973
1974
1975
1976
1977
1978
1979
1980static inline void skb_frag_unref(struct sk_buff *skb, int f)
1981{
1982 __skb_frag_unref(&skb_shinfo(skb)->frags[f]);
1983}
1984
1985
1986
1987
1988
1989
1990
1991
1992static inline void *skb_frag_address(const skb_frag_t *frag)
1993{
1994 return page_address(skb_frag_page(frag)) + frag->page_offset;
1995}
1996
1997
1998
1999
2000
2001
2002
2003
2004static inline void *skb_frag_address_safe(const skb_frag_t *frag)
2005{
2006 void *ptr = page_address(skb_frag_page(frag));
2007 if (unlikely(!ptr))
2008 return NULL;
2009
2010 return ptr + frag->page_offset;
2011}
2012
2013
2014
2015
2016
2017
2018
2019
2020static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
2021{
2022 frag->page.p = page;
2023}
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033static inline void skb_frag_set_page(struct sk_buff *skb, int f,
2034 struct page *page)
2035{
2036 __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
2037}
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050static inline dma_addr_t skb_frag_dma_map(struct device *dev,
2051 const skb_frag_t *frag,
2052 size_t offset, size_t size,
2053 enum dma_data_direction dir)
2054{
2055 return dma_map_page(dev, skb_frag_page(frag),
2056 frag->page_offset + offset, size, dir);
2057}
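/*
 * Usage sketch (illustrative, not part of this header): a transmit path
 * maps the linear part with dma_map_single() and each fragment with
 * skb_frag_dma_map(). dev is the driver's struct device, the unmap label
 * and descriptor handling are hypothetical and omitted.
 *
 *	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 *		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 *		dma_addr_t dma;
 *
 *		dma = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
 *				       DMA_TO_DEVICE);
 *		if (dma_mapping_error(dev, dma))
 *			goto unmap;
 *	}
 */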
2058
2059static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
2060 gfp_t gfp_mask)
2061{
2062 return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
2063}
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
2074{
2075 return !skb_header_cloned(skb) &&
2076 skb_headroom(skb) + len <= skb->hdr_len;
2077}
2078
2079static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
2080 int cloned)
2081{
2082 int delta = 0;
2083
2084 if (headroom > skb_headroom(skb))
2085 delta = headroom - skb_headroom(skb);
2086
2087 if (delta || cloned)
2088 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
2089 GFP_ATOMIC);
2090 return 0;
2091}
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
2106{
2107 return __skb_cow(skb, headroom, skb_cloned(skb));
2108}
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
2121{
2122 return __skb_cow(skb, headroom, skb_header_cloned(skb));
2123}
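/*
 * Usage sketch (illustrative, not part of this header): code that rewrites
 * headers in place (e.g. encapsulation helpers) calls skb_cow_head() first
 * so that a cloned or too-small header is reallocated before it is
 * written. needed_headroom and the drop label are hypothetical.
 *
 *	if (skb_cow_head(skb, needed_headroom) < 0)
 *		goto drop;
 *	(header bytes may now be modified safely)
 */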
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136static inline int skb_padto(struct sk_buff *skb, unsigned int len)
2137{
2138 unsigned int size = skb->len;
2139 if (likely(size >= len))
2140 return 0;
2141 return skb_pad(skb, len - size);
2142}
2143
2144static inline int skb_add_data(struct sk_buff *skb,
2145 char __user *from, int copy)
2146{
2147 const int off = skb->len;
2148
2149 if (skb->ip_summed == CHECKSUM_NONE) {
2150 int err = 0;
2151 __wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
2152 copy, 0, &err);
2153 if (!err) {
2154 skb->csum = csum_block_add(skb->csum, csum, off);
2155 return 0;
2156 }
2157 } else if (!copy_from_user(skb_put(skb, copy), from, copy))
2158 return 0;
2159
2160 __skb_trim(skb, off);
2161 return -EFAULT;
2162}
2163
2164static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
2165 const struct page *page, int off)
2166{
2167 if (i) {
2168 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
2169
2170 return page == skb_frag_page(frag) &&
2171 off == frag->page_offset + skb_frag_size(frag);
2172 }
2173 return false;
2174}
2175
2176static inline int __skb_linearize(struct sk_buff *skb)
2177{
2178 return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
2179}
2180
2181
2182
2183
2184
2185
2186
2187
2188static inline int skb_linearize(struct sk_buff *skb)
2189{
2190 return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
2191}
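/*
 * Usage sketch (illustrative, not part of this header): code that needs a
 * flat view of the whole packet (skb->data covering all skb->len bytes)
 * linearizes first and bails out on allocation failure. The drop label is
 * hypothetical.
 *
 *	if (skb_linearize(skb))
 *		goto drop;
 */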
2192
2193
2194
2195
2196
2197
2198
2199
2200static inline int skb_linearize_cow(struct sk_buff *skb)
2201{
2202 return skb_is_nonlinear(skb) || skb_cloned(skb) ?
2203 __skb_linearize(skb) : 0;
2204}
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217static inline void skb_postpull_rcsum(struct sk_buff *skb,
2218 const void *start, unsigned int len)
2219{
2220 if (skb->ip_summed == CHECKSUM_COMPLETE)
2221 skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
2222}
2223
2224unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
2236{
2237 if (likely(len >= skb->len))
2238 return 0;
2239 if (skb->ip_summed == CHECKSUM_COMPLETE)
2240 skb->ip_summed = CHECKSUM_NONE;
2241 return __pskb_trim(skb, len);
2242}
2243
2244#define skb_queue_walk(queue, skb) \
2245 for (skb = (queue)->next; \
2246 skb != (struct sk_buff *)(queue); \
2247 skb = skb->next)
2248
2249#define skb_queue_walk_safe(queue, skb, tmp) \
2250 for (skb = (queue)->next, tmp = skb->next; \
2251 skb != (struct sk_buff *)(queue); \
2252 skb = tmp, tmp = skb->next)
2253
2254#define skb_queue_walk_from(queue, skb) \
2255 for (; skb != (struct sk_buff *)(queue); \
2256 skb = skb->next)
2257
2258#define skb_queue_walk_from_safe(queue, skb, tmp) \
2259 for (tmp = skb->next; \
2260 skb != (struct sk_buff *)(queue); \
2261 skb = tmp, tmp = skb->next)
2262
2263#define skb_queue_reverse_walk(queue, skb) \
2264 for (skb = (queue)->prev; \
2265 skb != (struct sk_buff *)(queue); \
2266 skb = skb->prev)
2267
2268#define skb_queue_reverse_walk_safe(queue, skb, tmp) \
2269 for (skb = (queue)->prev, tmp = skb->prev; \
2270 skb != (struct sk_buff *)(queue); \
2271 skb = tmp, tmp = skb->prev)
2272
2273#define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \
2274 for (tmp = skb->prev; \
2275 skb != (struct sk_buff *)(queue); \
2276 skb = tmp, tmp = skb->prev)
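/*
 * Usage sketch (illustrative, not part of this header): the _safe walk
 * variants must be used when the current entry may be unlinked inside the
 * loop body, e.g. when purging selected buffers from a private queue.
 * Lock handling is omitted; queue and should_drop() are hypothetical.
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_queue_walk_safe(&queue, skb, tmp) {
 *		if (should_drop(skb)) {
 *			__skb_unlink(skb, &queue);
 *			kfree_skb(skb);
 *		}
 *	}
 */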
2277
2278static inline bool skb_has_frag_list(const struct sk_buff *skb)
2279{
2280 return skb_shinfo(skb)->frag_list != NULL;
2281}
2282
2283static inline void skb_frag_list_init(struct sk_buff *skb)
2284{
2285 skb_shinfo(skb)->frag_list = NULL;
2286}
2287
2288static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
2289{
2290 frag->next = skb_shinfo(skb)->frag_list;
2291 skb_shinfo(skb)->frag_list = frag;
2292}
2293
2294#define skb_walk_frags(skb, iter) \
2295 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
2296
2297extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
2298 int *peeked, int *off, int *err);
2299extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
2300 int noblock, int *err);
2301extern unsigned int datagram_poll(struct file *file, struct socket *sock,
2302 struct poll_table_struct *wait);
2303extern int skb_copy_datagram_iovec(const struct sk_buff *from,
2304 int offset, struct iovec *to,
2305 int size);
2306extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
2307 int hlen,
2308 struct iovec *iov);
2309extern int skb_copy_datagram_from_iovec(struct sk_buff *skb,
2310 int offset,
2311 const struct iovec *from,
2312 int from_offset,
2313 int len);
2314extern int skb_copy_datagram_const_iovec(const struct sk_buff *from,
2315 int offset,
2316 const struct iovec *to,
2317 int to_offset,
2318 int size);
2319extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
2320extern void skb_free_datagram_locked(struct sock *sk,
2321 struct sk_buff *skb);
2322extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
2323 unsigned int flags);
2324extern __wsum skb_checksum(const struct sk_buff *skb, int offset,
2325 int len, __wsum csum);
2326extern int skb_copy_bits(const struct sk_buff *skb, int offset,
2327 void *to, int len);
2328extern int skb_store_bits(struct sk_buff *skb, int offset,
2329 const void *from, int len);
2330extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb,
2331 int offset, u8 *to, int len,
2332 __wsum csum);
2333extern int skb_splice_bits(struct sk_buff *skb,
2334 unsigned int offset,
2335 struct pipe_inode_info *pipe,
2336 unsigned int len,
2337 unsigned int flags);
2338extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
2339extern void skb_split(struct sk_buff *skb,
2340 struct sk_buff *skb1, const u32 len);
2341extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
2342 int shiftlen);
2343
2344extern struct sk_buff *skb_segment(struct sk_buff *skb,
2345 netdev_features_t features);
2346
2347static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
2348 int len, void *buffer)
2349{
2350 int hlen = skb_headlen(skb);
2351
2352 if (hlen - offset >= len)
2353 return skb->data + offset;
2354
2355 if (skb_copy_bits(skb, offset, buffer, len) < 0)
2356 return NULL;
2357
2358 return buffer;
2359}
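/*
 * Usage sketch (illustrative, not part of this header): skb_header_pointer()
 * hides whether a header sits in the linear area or in fragments; it
 * returns a pointer into the skb when possible and otherwise copies into
 * the caller's buffer. _udph and the drop label are hypothetical.
 *
 *	struct udphdr _udph;
 *	const struct udphdr *uh;
 *
 *	uh = skb_header_pointer(skb, skb_transport_offset(skb),
 *				sizeof(_udph), &_udph);
 *	if (!uh)
 *		goto drop;
 */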
2360
2361static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
2362 void *to,
2363 const unsigned int len)
2364{
2365 memcpy(to, skb->data, len);
2366}
2367
2368static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
2369 const int offset, void *to,
2370 const unsigned int len)
2371{
2372 memcpy(to, skb->data + offset, len);
2373}
2374
2375static inline void skb_copy_to_linear_data(struct sk_buff *skb,
2376 const void *from,
2377 const unsigned int len)
2378{
2379 memcpy(skb->data, from, len);
2380}
2381
2382static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
2383 const int offset,
2384 const void *from,
2385 const unsigned int len)
2386{
2387 memcpy(skb->data + offset, from, len);
2388}
2389
2390extern void skb_init(void);
2391
2392static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
2393{
2394 return skb->tstamp;
2395}
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406static inline void skb_get_timestamp(const struct sk_buff *skb,
2407 struct timeval *stamp)
2408{
2409 *stamp = ktime_to_timeval(skb->tstamp);
2410}
2411
2412static inline void skb_get_timestampns(const struct sk_buff *skb,
2413 struct timespec *stamp)
2414{
2415 *stamp = ktime_to_timespec(skb->tstamp);
2416}
2417
2418static inline void __net_timestamp(struct sk_buff *skb)
2419{
2420 skb->tstamp = ktime_get_real();
2421}
2422
2423static inline ktime_t net_timedelta(ktime_t t)
2424{
2425 return ktime_sub(ktime_get_real(), t);
2426}
2427
2428static inline ktime_t net_invalid_timestamp(void)
2429{
2430 return ktime_set(0, 0);
2431}
2432
2433extern void skb_timestamping_init(void);
2434
2435#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
2436
2437extern void skb_clone_tx_timestamp(struct sk_buff *skb);
2438extern bool skb_defer_rx_timestamp(struct sk_buff *skb);
2439
2440#else
2441
2442static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
2443{
2444}
2445
2446static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
2447{
2448 return false;
2449}
2450
2451#endif
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463
2464
2465void skb_complete_tx_timestamp(struct sk_buff *skb,
2466 struct skb_shared_hwtstamps *hwtstamps);
2467
2468
2469
2470
2471
2472
2473
2474
2475
2476
2477
2478
2479extern void skb_tstamp_tx(struct sk_buff *orig_skb,
2480 struct skb_shared_hwtstamps *hwtstamps);
2481
2482static inline void sw_tx_timestamp(struct sk_buff *skb)
2483{
2484 if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
2485 !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
2486 skb_tstamp_tx(skb, NULL);
2487}
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497static inline void skb_tx_timestamp(struct sk_buff *skb)
2498{
2499 skb_clone_tx_timestamp(skb);
2500 sw_tx_timestamp(skb);
2501}
2502
2503
2504
2505
2506
2507
2508
2509
2510void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
2511
2512extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
2513extern __sum16 __skb_checksum_complete(struct sk_buff *skb);
2514
2515static inline int skb_csum_unnecessary(const struct sk_buff *skb)
2516{
2517 return skb->ip_summed & CHECKSUM_UNNECESSARY;
2518}
2519
2520
2521
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534
2535
2536static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
2537{
2538 return skb_csum_unnecessary(skb) ?
2539 0 : __skb_checksum_complete(skb);
2540}
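/*
 * Usage sketch (illustrative, not part of this header): a protocol receive
 * path accepts CHECKSUM_UNNECESSARY as-is and otherwise verifies the
 * packet via skb_checksum_complete(), which folds CHECKSUM_COMPLETE or
 * computes the checksum. The csum_error label is hypothetical.
 *
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;
 *	(checksum is valid, or was not needed)
 */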
2541
2542#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2543extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
2544static inline void nf_conntrack_put(struct nf_conntrack *nfct)
2545{
2546 if (nfct && atomic_dec_and_test(&nfct->use))
2547 nf_conntrack_destroy(nfct);
2548}
2549static inline void nf_conntrack_get(struct nf_conntrack *nfct)
2550{
2551 if (nfct)
2552 atomic_inc(&nfct->use);
2553}
2554#endif
2555#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
2556static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
2557{
2558 if (skb)
2559 atomic_inc(&skb->users);
2560}
2561static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
2562{
2563 if (skb)
2564 kfree_skb(skb);
2565}
2566#endif
2567#ifdef CONFIG_BRIDGE_NETFILTER
2568static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
2569{
2570 if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
2571 kfree(nf_bridge);
2572}
2573static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
2574{
2575 if (nf_bridge)
2576 atomic_inc(&nf_bridge->use);
2577}
2578#endif
2579static inline void nf_reset(struct sk_buff *skb)
2580{
2581#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2582 nf_conntrack_put(skb->nfct);
2583 skb->nfct = NULL;
2584#endif
2585#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
2586 nf_conntrack_put_reasm(skb->nfct_reasm);
2587 skb->nfct_reasm = NULL;
2588#endif
2589#ifdef CONFIG_BRIDGE_NETFILTER
2590 nf_bridge_put(skb->nf_bridge);
2591 skb->nf_bridge = NULL;
2592#endif
2593}
2594
2595
2596static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
2597{
2598#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2599 dst->nfct = src->nfct;
2600 nf_conntrack_get(src->nfct);
2601 dst->nfctinfo = src->nfctinfo;
2602#endif
2603#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
2604 dst->nfct_reasm = src->nfct_reasm;
2605 nf_conntrack_get_reasm(src->nfct_reasm);
2606#endif
2607#ifdef CONFIG_BRIDGE_NETFILTER
2608 dst->nf_bridge = src->nf_bridge;
2609 nf_bridge_get(src->nf_bridge);
2610#endif
2611}
2612
2613static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
2614{
2615#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2616 nf_conntrack_put(dst->nfct);
2617#endif
2618#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
2619 nf_conntrack_put_reasm(dst->nfct_reasm);
2620#endif
2621#ifdef CONFIG_BRIDGE_NETFILTER
2622 nf_bridge_put(dst->nf_bridge);
2623#endif
2624 __nf_copy(dst, src);
2625}
2626
2627#ifdef CONFIG_NETWORK_SECMARK
2628static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
2629{
2630 to->secmark = from->secmark;
2631}
2632
2633static inline void skb_init_secmark(struct sk_buff *skb)
2634{
2635 skb->secmark = 0;
2636}
2637#else
2638static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
2639{ }
2640
2641static inline void skb_init_secmark(struct sk_buff *skb)
2642{ }
2643#endif
2644
2645static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
2646{
2647 skb->queue_mapping = queue_mapping;
2648}
2649
2650static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
2651{
2652 return skb->queue_mapping;
2653}
2654
2655static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
2656{
2657 to->queue_mapping = from->queue_mapping;
2658}
2659
2660static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
2661{
2662 skb->queue_mapping = rx_queue + 1;
2663}
2664
2665static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
2666{
2667 return skb->queue_mapping - 1;
2668}
2669
2670static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
2671{
2672 return skb->queue_mapping != 0;
2673}
2674
2675extern u16 __skb_tx_hash(const struct net_device *dev,
2676 const struct sk_buff *skb,
2677 unsigned int num_tx_queues);
2678
2679#ifdef CONFIG_XFRM
2680static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
2681{
2682 return skb->sp;
2683}
2684#else
2685static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
2686{
2687 return NULL;
2688}
2689#endif
2690
2691static inline bool skb_is_gso(const struct sk_buff *skb)
2692{
2693 return skb_shinfo(skb)->gso_size;
2694}
2695
2696static inline bool skb_is_gso_v6(const struct sk_buff *skb)
2697{
2698 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
2699}
2700
2701extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);
2702
2703static inline bool skb_warn_if_lro(const struct sk_buff *skb)
2704{
2705
2706
2707 const struct skb_shared_info *shinfo = skb_shinfo(skb);
2708
2709 if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
2710 unlikely(shinfo->gso_type == 0)) {
2711 __skb_warn_lro_forwarding(skb);
2712 return true;
2713 }
2714 return false;
2715}
2716
2717static inline void skb_forward_csum(struct sk_buff *skb)
2718{
2719
2720 if (skb->ip_summed == CHECKSUM_COMPLETE)
2721 skb->ip_summed = CHECKSUM_NONE;
2722}
2723
2724
2725
2726
2727
2728
2729
2730
2731
2732static inline void skb_checksum_none_assert(const struct sk_buff *skb)
2733{
2734#ifdef DEBUG
2735 BUG_ON(skb->ip_summed != CHECKSUM_NONE);
2736#endif
2737}
2738
2739bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750static inline bool skb_head_is_locked(const struct sk_buff *skb)
2751{
2752 return !skb->head_frag || skb_cloned(skb);
2753}
2754#endif
2755#endif
2756