/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/cache.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <net/flow_keys.h>

/* Checksum state of an skb (skb->ip_summed):
 *
 * CHECKSUM_NONE:	 no checksum has been computed or verified yet.
 * CHECKSUM_UNNECESSARY: the hardware already verified the checksum.
 * CHECKSUM_COMPLETE:	 the hardware computed the checksum of the complete
 *			 packet and stored it in skb->csum.
 * CHECKSUM_PARTIAL:	 the checksum must still be computed on transmit,
 *			 using csum_start/csum_offset.
 */
#define CHECKSUM_NONE 0
#define CHECKSUM_UNNECESSARY 1
#define CHECKSUM_COMPLETE 2
#define CHECKSUM_PARTIAL 3

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
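
/* Illustrative sketch (not part of the original header): SKB_TRUESIZE()
 * gives the real memory cost of an skb holding X bytes, so code that
 * charges buffers to a socket typically does something like:
 *
 *	skb->truesize = SKB_TRUESIZE(len);
 *	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
 */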

struct net_device;
struct scatterlist;
struct pipe_inode_info;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t		use;
	unsigned int		mask;
	struct net_device	*physindev;
	struct net_device	*physoutdev;
	unsigned long		data[32 / sizeof(unsigned long)];
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow a 64KB frame to be packed as a single skb without frag_list,
 * enough page fragments must be available.
 */
#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct {
		struct page *p;
	} page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
	__u32 size;
#else
	__u16 page_offset;
	__u16 size;
#endif
};

static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->size;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->size = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->size += delta;
}

static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->size -= delta;
}
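
/* Illustrative sketch (not part of the original header): after appending
 * "copy" bytes to the i'th paged fragment, callers keep the fragment and
 * skb length fields in sync roughly like this:
 *
 *	skb_frag_size_add(&skb_shinfo(skb)->frags[i], copy);
 *	skb->len      += copy;
 *	skb->data_len += copy;
 */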

#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since arbitrary point in time
 * @syststamp:	hwtstamp transformed to system time base
 */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
	ktime_t	syststamp;
};

/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* device driver supports TX zero-copy buffers */
	SKBTX_DEV_ZEROCOPY = 1 << 3,

	/* generate wifi status information (where possible) */
	SKBTX_WIFI_STATUS = 1 << 4,

	/* This indicates at least one fragment might be overwritten
	 * (as in vmsplice(), sendfile() ...).
	 * If we need to compute a TX checksum, we'll need to copy
	 * all frags to avoid possible bad checksum.
	 */
	SKBTX_SHARED_FRAG = 1 << 5,
};

/*
 * The callback notifies userspace to release buffers when skb DMA is done in
 * lower device, the skb last reference should be 0 when calling this.
 * The zerocopy_success argument is true if zero copy transmit occurred,
 * false on data copy or out of memory error caused by data copy attempt.
 * The ctx field is used to track device context.
 * The desc field is used to track userspace buffer index.
 */
struct ubuf_info {
	void (*callback)(struct ubuf_info *, bool zerocopy_success);
	void *ctx;
	unsigned long desc;
};

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	unsigned char	nr_frags;
	__u8		tx_flags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short	gso_type;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	__be32		ip6_frag_id;

	/*
	 * Warning : all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void		*destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)


enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,

	SKB_GSO_GRE = 1 << 6,

	SKB_GSO_UDP_TUNNEL = 1 << 7,

	SKB_GSO_MPLS = 1 << 8,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
    defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
#endif

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@tstamp: Time we arrived
 *	@sk: Socket we are owned by
 *	@dev: Device we arrived on/are leaving by
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@_skb_refdst: destination entry (with norefcount bit)
 *	@sp: the security path, used for xfrm
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@hdr_len: writable header length of cloned skb
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
 *	@priority: Packet queueing priority
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@ip_summed: Driver fed us an IP checksum
 *	@nohdr: Payload reference only, must not modify header
 *	@nfctinfo: Relationship of this skb to the connection
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ipvs_property: skbuff is owned by ipvs
 *	@peeked: this packet has been seen already, so stats have been
 *		done for it, don't do them again
 *	@nf_trace: netfilter packet trace flag
 *	@protocol: Packet protocol from driver
 *	@destructor: Destruct function
 *	@nfct: Associated connection, if any
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@skb_iif: ifindex of device we arrived on
 *	@rxhash: the packet hash computed on receive
 *	@vlan_proto: vlan encapsulation protocol
 *	@vlan_tci: vlan tag control information
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@queue_mapping: Queue mapping for multiqueue devices
 *	@ndisc_nodetype: router type (from link layer)
 *	@pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
 *	@ooo_okay: allow the mapping of a socket to a queue to be changed
 *	@l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport
 *		ports.
 *	@wifi_acked_valid: wifi_acked was set
 *	@wifi_acked: whether frame was acked on wifi or not
 *	@no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
 *	@napi_id: id of the NAPI struct this skb came from
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 *	@mark: Generic packet mark
 *	@dropcount: total number of sk_receive_queue overflows
 *	@inner_protocol: Protocol (encapsulation)
 *	@inner_transport_header: Inner transport layer header (encapsulation)
 *	@inner_network_header: Network layer header (encapsulation)
 *	@inner_mac_header: Link layer header (encapsulation)
 *	@transport_header: Transport layer header
 *	@network_header: Network layer header
 *	@mac_header: Link layer header
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@truesize: Buffer size
 *	@users: User count - see {datagram,tcp}.c
 */
struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	ktime_t			tstamp;

	struct sock		*sk;
	struct net_device	*dev;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	unsigned long		_skb_refdst;
#ifdef CONFIG_XFRM
	struct sec_path		*sp;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;
	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	kmemcheck_bitfield_begin(flags1);
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1,
				peeked:1,
				nf_trace:1;
	kmemcheck_bitfield_end(flags1);
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack	*nfct;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif

	int			skb_iif;

	__u32			rxhash;

	__be16			vlan_proto;
	__u16			vlan_tci;

#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif

	__u16			queue_mapping;
	kmemcheck_bitfield_begin(flags2);
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif
	__u8			pfmemalloc:1;
	__u8			ooo_okay:1;
	__u8			l4_rxhash:1;
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;
	__u8			no_fcs:1;
	__u8			head_frag:1;
	/* Set when the inner (encapsulated) headers below are valid. */
	__u8			encapsulation:1;
	kmemcheck_bitfield_end(flags2);

#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
	union {
		unsigned int	napi_id;
		dma_cookie_t	dma_cookie;
	};
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif
	union {
		__u32		mark;
		__u32		dropcount;
		__u32		reserved_tailroom;
	};

	__be16			inner_protocol;
	__u16			inner_transport_header;
	__u16			inner_network_header;
	__u16			inner_mac_header;
	__u16			transport_header;
	__u16			network_header;
	__u16			mac_header;
	/* These elements must be at the end, see alloc_skb() for details. */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	atomic_t		users;
};

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>


#define SKB_ALLOC_FCLONE	0x01
#define SKB_ALLOC_RX		0x02

/* Returns true if the skb was allocated from PFMEMALLOC reserves */
static inline bool skb_pfmemalloc(const struct sk_buff *skb)
{
	return unlikely(skb->pfmemalloc);
}

/*
 * skb might have a dst attached, refcounted or not.
 * _skb_refdst low order bit is set if refcount was _not_ taken
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of reference taken or not.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check we still are in a
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}

extern void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
				bool force);

/**
 * skb_dst_set_noref - sets skb dst, without taking reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * If dst entry is cached, we do not take reference and dst_release
 * will be avoided by refdst_drop. If dst entry is not cached, we take
 * reference, so that last dst_release can destroy the dst immediately.
 */
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
	__skb_dst_set_noref(skb, dst, false);
}

/**
 * skb_dst_set_noref_force - sets skb dst, without taking reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * No reference is taken and no dst_release will be called. While for
 * cached dsts deferred reclaim is a basic feature, for entries that are
 * not cached it is the caller's job to guarantee that the last dst_release
 * for the provided dst happens when nobody uses it, eg. after an RCU grace
 * period.
 */
static inline void skb_dst_set_noref_force(struct sk_buff *skb,
					   struct dst_entry *dst)
{
	__skb_dst_set_noref(skb, dst, true);
}

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

extern void kfree_skb(struct sk_buff *skb);
extern void kfree_skb_list(struct sk_buff *segs);
extern void skb_tx_error(struct sk_buff *skb);
extern void consume_skb(struct sk_buff *skb);
extern void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;

extern void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
			     bool *fragstolen, int *delta_truesize);

extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int flags, int node);
extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}
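
/* Illustrative sketch (not part of the original header): the classic
 * allocate/reserve/fill pattern, with "hlen" and "dlen" standing in for
 * the caller's header and data sizes:
 *
 *	struct sk_buff *skb = alloc_skb(hlen + dlen, GFP_KERNEL);
 *
 *	if (!skb)
 *		return -ENOBUFS;
 *	skb_reserve(skb, hlen);				(leave headroom)
 *	memcpy(skb_put(skb, dlen), data, dlen);		(append payload)
 */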

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
}

extern struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
static inline struct sk_buff *alloc_skb_head(gfp_t priority)
{
	return __alloc_skb_head(priority, -1);
}

extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *__pskb_copy(struct sk_buff *skb,
				   int headroom, gfp_t gfp_mask);

extern int pskb_expand_head(struct sk_buff *skb,
			    int nhead, int ntail,
			    gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int skb_to_sgvec(struct sk_buff *skb,
			struct scatterlist *sg, int offset,
			int len);
extern int skb_cow_data(struct sk_buff *skb, int tailbits,
			struct sk_buff **trailer);
extern int skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	consume_skb(a)

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
			int len, int odd, struct sk_buff *skb),
			void *from, int length);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

extern void skb_prepare_seq_read(struct sk_buff *skb,
				 unsigned int from, unsigned int to,
				 struct skb_seq_state *st);
extern unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
				 struct skb_seq_state *st);
extern void skb_abort_seq_read(struct skb_seq_state *st);

extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
				  unsigned int to, struct ts_config *config,
				  struct ts_state *state);

extern void __skb_get_rxhash(struct sk_buff *skb);
static inline __u32 skb_get_rxhash(struct sk_buff *skb)
{
	if (!skb->l4_rxhash)
		__skb_get_rxhash(skb);

	return skb->rxhash;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end - skb->head;
}
#endif

#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (struct sk_buff *)list;
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb.  It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb.  It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);

	if (skb_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this call.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt status or with spinlocks held pri must
 *	be GFP_ATOMIC.
 *
 *	NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);

		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
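
/* Illustrative sketch (not part of the original header): a receive path
 * that may modify the skb first makes sure it holds the only reference:
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return NET_RX_DROP;
 *	(... safe to modify skb metadata here ...)
 */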

/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->next;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	skb_peek_next - peek skb following the given one from a queue
 *	@skb: skb to start from
 *	@list_: list to peek at
 *
 *	Returns %NULL when the end of the list is met or a pointer to the
 *	next element. The reference count is not incremented and the
 *	reference is therefore volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
					    const struct sk_buff_head *list_)
{
	struct sk_buff *next = skb->next;

	if (next == (struct sk_buff *)list_)
		next = NULL;
	return next;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->prev;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;

}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length; callers that need
 *	the spinlock initialized should use skb_queue_head_init() instead.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only).
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}
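
/* Illustrative sketch (not part of the original header): a typical
 * producer/consumer use of an sk_buff_head (skb_queue_tail() and
 * skb_dequeue(), declared below, take the queue lock themselves):
 *
 *	struct sk_buff_head q;
 *
 *	skb_queue_head_init(&q);
 *	skb_queue_tail(&q, skb);		(producer)
 *	skb = skb_dequeue(&q);			(consumer, NULL if empty)
 */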

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
					     struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	do not take the list lock; the caller must hold it.
 */
extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	__skb_queue_after - queue a buffer at the list head
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		       struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
extern void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

static inline bool skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
	return len + skb_headlen(skb);
}

/**
 * __skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data with @page
 * @size: the length of the data
 *
 * Initialises the @i'th fragment of @skb to point to &size bytes at
 * offset @off within @page.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	/*
	 * Propagate page->pfmemalloc to the skb if we can. The problem is
	 * that not all callers have unique ownership of the page. If
	 * pfmemalloc is set, we check the mapping as a mapping implies
	 * page->index is set (index and pfmemalloc share space).
	 * If it's a valid mapping, we cannot use page->pfmemalloc but we
	 * do not lose pfmemalloc information as the pages would not be
	 * allocated using __GFP_MEMALLOC.
	 */
	frag->page.p		  = page;
	frag->page_offset	  = off;
	skb_frag_size_set(frag, size);

	page = compound_head(page);
	if (page->pfmemalloc && !page->mapping)
		skb->pfmemalloc	= true;
}

/**
 * skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data with @page
 * @size: the length of the data
 *
 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
 * @skb to point to &size bytes at offset @off within @page. In
 * addition updates @skb such that @i is the last fragment.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	__skb_fill_page_desc(skb, i, page, off, size);
	skb_shinfo(skb)->nr_frags = i + 1;
}

extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
			    int off, int size, unsigned int truesize);

#define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_has_frag_list(skb))
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}

#else
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif

/*
 *	Add data to an sk_buff
 */
extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
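
/* Illustrative sketch (not part of the original header): a parser makes
 * sure a header is linear before touching it, then advances skb->data:
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;			(packet too short)
 *	iph = (struct iphdr *)skb->data;
 *	__skb_pull(skb, iph->ihl * 4);		(skip the IP header)
 */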

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_availroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 *	allocated by sk_stream_alloc()
 */
static inline int skb_availroom(const struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb))
		return 0;

	return skb->end - skb->tail - skb->reserved_tailroom;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

static inline void skb_reset_inner_headers(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->mac_header;
	skb->inner_network_header = skb->network_header;
	skb->inner_transport_header = skb->transport_header;
}

static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}

static inline unsigned char *skb_inner_transport_header(const struct sk_buff
							*skb)
{
	return skb->head + skb->inner_transport_header;
}

static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
	skb->inner_transport_header = skb->data - skb->head;
}

static inline void skb_set_inner_transport_header(struct sk_buff *skb,
						  const int offset)
{
	skb_reset_inner_transport_header(skb);
	skb->inner_transport_header += offset;
}

static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_network_header;
}

static inline void skb_reset_inner_network_header(struct sk_buff *skb)
{
	skb->inner_network_header = skb->data - skb->head;
}

static inline void skb_set_inner_network_header(struct sk_buff *skb,
						const int offset)
{
	skb_reset_inner_network_header(skb);
	skb->inner_network_header += offset;
}

static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_mac_header;
}

static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->data - skb->head;
}

static inline void skb_set_inner_mac_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_inner_mac_header(skb);
	skb->inner_mac_header += offset;
}

static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
{
	return skb->transport_header != (typeof(skb->transport_header))~0U;
}

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != (typeof(skb->mac_header))~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

static inline void skb_probe_transport_header(struct sk_buff *skb,
					      const int offset_hint)
{
	struct flow_keys keys;

	if (skb_transport_header_was_set(skb))
		return;
	else if (skb_flow_dissect(skb, &keys))
		skb_set_transport_header(skb, keys.thoff);
	else
		skb_set_transport_header(skb, offset_hint);
}
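
/* Illustrative sketch (not part of the original header): receive-side
 * demultiplexing records header offsets as it walks the packet:
 *
 *	skb_reset_network_header(skb);		(skb->data at the L3 header)
 *	...
 *	skb_pull(skb, ip_hdrlen);		(ip_hdrlen: caller's value)
 *	skb_reset_transport_header(skb);	(skb->data now at L4 header)
 */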

static inline void skb_mac_header_rebuild(struct sk_buff *skb)
{
	if (skb_mac_header_was_set(skb)) {
		const unsigned char *old_mac = skb_mac_header(skb);

		skb_set_mac_header(skb, -skb->mac_len);
		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
	}
}

static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
{
	return skb->inner_transport_header - skb->inner_network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline int skb_inner_network_offset(const struct sk_buff *skb)
{
	return skb_inner_network_header(skb) - skb->data;
}

static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
	return pskb_may_pull(skb, skb_network_offset(skb) + len);
}

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * frames arrive with a protocol stack that needs more headroom than the
 * hardware provided (e.g. tunnels, bridging).
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom, you should not reduce this.
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif

extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb_is_nonlinear(skb))) {
		WARN_ON(1);
		return;
	}
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

extern void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor) {
		skb->destructor(skb);
		skb->destructor = NULL;
		skb->sk		= NULL;
	} else {
		BUG_ON(skb->sk);
	}
}

/**
 *	skb_orphan_frags - orphan the frags contained in a buffer
 *	@skb: buffer to orphan frags from
 *	@gfp_mask: allocation mask for replacement pages
 *
 *	Copy userspace (zerocopy) fragments into kernel pages so the
 *	original user buffers can be released. Returns 0 on success or a
 *	negative error.
 */
static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
#define NETDEV_FRAG_PAGE_MAX_SIZE  (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
#define NETDEV_PAGECNT_MAX_BIAS	   NETDEV_FRAG_PAGE_MAX_SIZE

extern void *netdev_alloc_frag(unsigned int fragsz);

extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
					  unsigned int length,
					  gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
					       unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

/* legacy helper around __netdev_alloc_skb() */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	return __netdev_alloc_skb(NULL, length, gfp_mask);
}

/* legacy helper around netdev_alloc_skb() */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return netdev_alloc_skb(NULL, length);
}


static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}
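
/* Illustrative sketch (not part of the original header): an RX refill
 * path in a NIC driver, using the IP-aligning allocator above ("bufsz"
 * stands in for the driver's receive buffer size):
 *
 *	skb = netdev_alloc_skb_ip_align(netdev, bufsz);
 *	if (unlikely(!skb))
 *		break;			(count the failure, retry later)
 */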

/*
 *	__skb_alloc_pages - allocate pages, propagating pfmemalloc to @skb
 *	@gfp_mask: allocation mask; __GFP_MEMALLOC is added unless
 *		__GFP_NOMEMALLOC was given
 *	@skb: skb to mark pfmemalloc on, may be %NULL
 *	@order: size of the allocation
 *
 *	Returns the allocated page or %NULL.
 */
static inline struct page *__skb_alloc_pages(gfp_t gfp_mask,
					     struct sk_buff *skb,
					     unsigned int order)
{
	struct page *page;

	gfp_mask |= __GFP_COLD;

	if (!(gfp_mask & __GFP_NOMEMALLOC))
		gfp_mask |= __GFP_MEMALLOC;

	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
	if (skb && page && page->pfmemalloc)
		skb->pfmemalloc = true;

	return page;
}

/**
 *	__skb_alloc_page - allocate a page for ps-rx for a given skb
 *	@gfp_mask: alloc_pages_node mask
 *	@skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used
 *
 *	Returns the allocated page or %NULL.
 */
static inline struct page *__skb_alloc_page(gfp_t gfp_mask,
					    struct sk_buff *skb)
{
	return __skb_alloc_pages(gfp_mask, skb, 0);
}

/**
 *	skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
 *	@page: The page that was allocated from skb_alloc_page
 *	@skb: The skb that may need pfmemalloc set
 */
static inline void skb_propagate_pfmemalloc(struct page *page,
					     struct sk_buff *skb)
{
	if (page && page->pfmemalloc)
		skb->pfmemalloc = true;
}

/**
 * skb_frag_page - retrieve the page referred to by a paged fragment
 * @frag: the paged fragment
 *
 * Returns the &struct page associated with @frag.
 */
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
	return frag->page.p;
}

/**
 * __skb_frag_ref - take an addition reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Takes an additional reference on the paged fragment @frag.
 */
static inline void __skb_frag_ref(skb_frag_t *frag)
{
	get_page(skb_frag_page(frag));
}

/**
 * skb_frag_ref - take an addition reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset.
 *
 * Takes an additional reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_ref(struct sk_buff *skb, int f)
{
	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}

/**
 * __skb_frag_unref - release a reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Releases a reference on the paged fragment @frag.
 */
static inline void __skb_frag_unref(skb_frag_t *frag)
{
	put_page(skb_frag_page(frag));
}

/**
 * skb_frag_unref - release a reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset
 *
 * Releases a reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_unref(struct sk_buff *skb, int f)
{
	__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
}

/**
 * skb_frag_address - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. The page must already
 * be mapped.
 */
static inline void *skb_frag_address(const skb_frag_t *frag)
{
	return page_address(skb_frag_page(frag)) + frag->page_offset;
}

/**
 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. Checks that the page
 * is mapped and returns %NULL otherwise.
 */
static inline void *skb_frag_address_safe(const skb_frag_t *frag)
{
	void *ptr = page_address(skb_frag_page(frag));
	if (unlikely(!ptr))
		return NULL;

	return ptr + frag->page_offset;
}

/**
 * __skb_frag_set_page - sets the page contained in a paged fragment
 * @frag: the paged fragment
 * @page: the page to set
 *
 * Sets the fragment @frag to contain @page.
 */
static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
	frag->page.p = page;
}

/**
 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
 * @skb: the buffer
 * @f: the fragment offset
 * @page: the page to set
 *
 * Sets the @f'th fragment of @skb to contain @page.
 */
static inline void skb_frag_set_page(struct sk_buff *skb, int f,
				     struct page *page)
{
	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
}

/**
 * skb_frag_dma_map - maps a paged fragment via the DMA API
 * @dev: the device to map the fragment to
 * @frag: the paged fragment to map
 * @offset: the offset within the fragment (starting at the
 *          fragment's own offset)
 * @size: the number of bytes to map
 * @dir: the direction of the mapping
 *
 * Maps the page associated with @frag to @device.
 */
static inline dma_addr_t skb_frag_dma_map(struct device *dev,
					  const skb_frag_t *frag,
					  size_t offset, size_t size,
					  enum dma_data_direction dir)
{
	return dma_map_page(dev, skb_frag_page(frag),
			    frag->page_offset + offset, size, dir);
}
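
/* Illustrative sketch (not part of the original header): mapping every
 * fragment of an skb for device DMA, as a transmit path might do:
 *
 *	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 *		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 *		dma_addr_t dma;
 *
 *		dma = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
 *				       DMA_TO_DEVICE);
 *		if (dma_mapping_error(dev, dma))
 *			goto unmap_and_drop;
 *	}
 */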

static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
					gfp_t gfp_mask)
{
	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
}

/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not requires the data to be copied.
 */
static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and original skb is not changed.
 *
 *	The result is skb with writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 *	skb_cow_head - skb_cow but only making the head writable
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	This function is identical to skb_cow except that we replace the
 *	skb_cloned check by skb_header_cloned.  It should be used when
 *	you only need to push on some header and do not need to modify
 *	the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}

/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
							    copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
				    const struct page *page, int off)
{
	if (i) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == skb_frag_page(frag) &&
		       off == frag->page_offset + skb_frag_size(frag);
	}
	return false;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linarize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 * skb_has_shared_frag - can any frag be overwritten
 * @skb: buffer to test
 *
 * Return true if the skb has at least one frag that might be modified
 * by an external entity (as in vmsplice()/sendfile())
 */
static inline bool skb_has_shared_frag(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) &&
	       skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */
static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}

unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets are still valid after the operation.
 */
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;					\
		     skb != (struct sk_buff *)(queue);				\
		     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)					\
		for (skb = (queue)->next, tmp = skb->next;			\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

#define skb_queue_walk_from(queue, skb)						\
		for (; skb != (struct sk_buff *)(queue);			\
		     skb = skb->next)

#define skb_queue_walk_from_safe(queue, skb, tmp)				\
		for (tmp = skb->next;						\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;					\
		     skb != (struct sk_buff *)(queue);				\
		     skb = skb->prev)

#define skb_queue_reverse_walk_safe(queue, skb, tmp)				\
		for (skb = (queue)->prev, tmp = skb->prev;			\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->prev)

#define skb_queue_reverse_walk_from_safe(queue, skb, tmp)			\
		for (tmp = skb->prev;						\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->prev)
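
/* Illustrative sketch (not part of the original header): walking a queue
 * while unlinking some entries (the _safe variant keeps a lookahead
 * pointer so the current skb may be removed; should_drop() stands in for
 * the caller's own test):
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_queue_walk_safe(&q, skb, tmp) {
 *		if (should_drop(skb)) {
 *			__skb_unlink(skb, &q);
 *			kfree_skb(skb);
 *		}
 *	}
 */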

static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
{
	frag->next = skb_shinfo(skb)->frag_list;
	skb_shinfo(skb)->frag_list = frag;
}

#define skb_walk_frags(skb, iter)	\
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)

extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
					   int *peeked, int *off, int *err);
extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
					 int noblock, int *err);
extern unsigned int datagram_poll(struct file *file, struct socket *sock,
				  struct poll_table_struct *wait);
extern int skb_copy_datagram_iovec(const struct sk_buff *from,
				   int offset, struct iovec *to,
				   int size);
extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
					    int hlen,
					    struct iovec *iov);
extern int skb_copy_datagram_from_iovec(struct sk_buff *skb,
					int offset,
					const struct iovec *from,
					int from_offset,
					int len);
extern int zerocopy_sg_from_iovec(struct sk_buff *skb,
				  const struct iovec *frm,
				  int offset,
				  size_t count);
extern int skb_copy_datagram_const_iovec(const struct sk_buff *from,
					 int offset,
					 const struct iovec *to,
					 int to_offset,
					 int size);
extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern void skb_free_datagram_locked(struct sock *sk,
				     struct sk_buff *skb);
extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
			     unsigned int flags);
extern __wsum skb_checksum(const struct sk_buff *skb, int offset,
			   int len, __wsum csum);
extern int skb_copy_bits(const struct sk_buff *skb, int offset,
			 void *to, int len);
extern int skb_store_bits(struct sk_buff *skb, int offset,
			  const void *from, int len);
extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb,
				     int offset, u8 *to, int len,
				     __wsum csum);
extern int skb_splice_bits(struct sk_buff *skb,
			   unsigned int offset,
			   struct pipe_inode_info *pipe,
			   unsigned int len,
			   unsigned int flags);
extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void skb_split(struct sk_buff *skb,
		      struct sk_buff *skb1, const u32 len);
extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
		     int shiftlen);
extern void skb_scrub_packet(struct sk_buff *skb, bool xnet);

extern struct sk_buff *skb_segment(struct sk_buff *skb,
				   netdev_features_t features);

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
{
	int hlen = skb_headlen(skb);

	if (hlen - offset >= len)
		return skb->data + offset;

	if (skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}
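
/* Illustrative sketch (not part of the original header): the usual way to
 * read a header that may span fragments - point into the linear area when
 * possible, otherwise copy into a stack buffer:
 *
 *	struct udphdr _uh;
 *	const struct udphdr *uh;
 *
 *	uh = skb_header_pointer(skb, offset, sizeof(_uh), &_uh);
 *	if (!uh)
 *		goto drop;		(packet shorter than the header)
 */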

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}

extern void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and stores
 *	it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct timeval *stamp)
{
	*stamp = ktime_to_timeval(skb->tstamp);
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
				       struct timespec *stamp)
{
	*stamp = ktime_to_timespec(skb->tstamp);
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return ktime_set(0, 0);
}

extern void skb_timestamping_init(void);

#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING

extern void skb_clone_tx_timestamp(struct sk_buff *skb);
extern bool skb_defer_rx_timestamp(struct sk_buff *skb);

#else

static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	return false;
}

#endif

/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb back to the stack, with
 * or without a timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps, may be NULL if not available
 *
 */
void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps);

/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb:	the original outgoing packet
 * @hwtstamps:	hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket.  Errors are silently ignored.
 */
extern void skb_tstamp_tx(struct sk_buff *orig_skb,
			  struct skb_shared_hwtstamps *hwtstamps);

static inline void sw_tx_timestamp(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
	    !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		skb_tstamp_tx(skb, NULL);
}

/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC Drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * @skb: A socket buffer.
 */
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	skb_clone_tx_timestamp(skb);
	sw_tx_timestamp(skb);
}

/**
 * skb_complete_wifi_ack - deliver skb with wifi status
 *
 * @skb: the original outgoing packet
 * @acked: ack status
 *
 */
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);

extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
extern __sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return skb->ip_summed & CHECKSUM_UNNECESSARY;
}

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify that checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is okay.  For example, if a driver receives an IP packet
 *	verified by hardware (i.e. ip_summed is CHECKSUM_UNNECESSARY), then
 *	the function returns zero.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}
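
/* Illustrative sketch (not part of the original header): a protocol
 * receive routine verifying the checksum before handing data up:
 *
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;	(bad checksum, drop and count it)
 */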

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
	if (skb)
		atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(skb->nfct_reasm);
	skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

static inline void nf_reset_trace(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
	skb->nf_trace = 0;
#endif
}

/* Note: This doesn't put any conntrack and bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->nfct = src->nfct;
	nf_conntrack_get(src->nfct);
	dst->nfctinfo = src->nfctinfo;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	dst->nfct_reasm = src->nfct_reasm;
	nf_conntrack_get_reasm(src->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	dst->nf_bridge  = src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(dst->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(dst->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}

extern u16 __skb_tx_hash(const struct net_device *dev,
			 const struct sk_buff *skb,
			 unsigned int num_tx_queues);

#ifdef CONFIG_XFRM
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return skb->sp;
}
#else
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return NULL;
}
#endif

/* Keeps track of mac header offset relative to skb->head.
 * It is useful for TSO of Tunneling protocol. e.g. GRE.
 * For non-tunnel skb it points to skb_mac_header() and for
 * tunnel skb it points to outer mac header */
struct skb_gso_cb {
	int	mac_offset;
};
#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb)

static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
{
	return (skb_mac_header(inner_skb) - inner_skb->head) -
		SKB_GSO_CB(inner_skb)->mac_offset;
}

static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
{
	int new_headroom, headroom;
	int ret;

	headroom = skb_headroom(skb);
	ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
	if (ret)
		return ret;

	new_headroom = skb_headroom(skb);
	SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
	return 0;
}

static inline bool skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

static inline bool skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set. */
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one.  Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper, to document places where we make this assertion.
 */
static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

u32 __skb_get_poff(const struct sk_buff *skb);

/**
 * skb_head_is_locked - Determine if the skb->head is locked down
 * @skb: skb to check
 *
 * The head on skbs build around a head frag can be removed if they are
 * not cloned. This function returns true if the skb head is locked down
 * due to either being allocated via kmalloc, or by being a clone with
 * multiple references to the head.
 */
static inline bool skb_head_is_locked(const struct sk_buff *skb)
{
	return !skb->head_frag || skb_cloned(skb);
}
#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */