/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/rbtree.h>
#include <linux/socket.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/sched.h>
#include <net/flow_dissector.h>
#include <linux/splice.h>
#include <linux/in6.h>
#include <net/flow.h>

/* Checksumming of received packets by device (skb->ip_summed):
 *
 * CHECKSUM_NONE:
 *   Device did nothing, or failed to verify the checksum of the packet.
 *   skb->csum is undefined; the stack must verify checksums itself.
 *
 * CHECKSUM_UNNECESSARY:
 *   The device verified the checksum(s) of the packet for the protocols
 *   it recognizes. skb->csum_level indicates how many consecutive
 *   checksums were verified (up to SKB_MAX_CSUM_LEVEL), which matters
 *   for tunneled packets carrying more than one checksum.
 *
 * CHECKSUM_COMPLETE:
 *   The device computed the complete (one's complement) checksum over
 *   the whole packet as seen by netif_rx() and stored it in skb->csum.
 *   This is the most generic form of receive checksum offload.
 *
 * CHECKSUM_PARTIAL:
 *   On receive this means the same as CHECKSUM_UNNECESSARY: it is used
 *   for packets looped back from a local entity (e.g. a virtual device)
 *   whose checksum was never filled in.
 *
 * Checksumming on transmit:
 *
 * CHECKSUM_NONE:
 *   The skb was already checksummed by the protocol, or checksumming is
 *   not required.
 *
 * CHECKSUM_PARTIAL:
 *   The driver (or hardware) is required to checksum the packet. The
 *   checksum is computed from skb->csum_start to the end of the packet
 *   and stored at skb->csum_start + skb->csum_offset.
 *
 * CHECKSUM_UNNECESSARY and CHECKSUM_COMPLETE are not valid on transmit.
 */
/* Don't change these without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

/* Maximum value in skb->csum_level */
#define SKB_MAX_CSUM_LEVEL	3

#define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
236
237struct net_device;
238struct scatterlist;
239struct pipe_inode_info;
240struct iov_iter;
241struct napi_struct;
242
243#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
244struct nf_conntrack {
245 atomic_t use;
246};
247#endif
248
249#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
250struct nf_bridge_info {
251 atomic_t use;
252 enum {
253 BRNF_PROTO_UNCHANGED,
254 BRNF_PROTO_8021Q,
255 BRNF_PROTO_PPPOE
256 } orig_proto:8;
257 u8 pkt_otherhost:1;
258 u8 in_prerouting:1;
259 u8 bridged_dnat:1;
260 __u16 frag_max_size;
261 struct net_device *physindev;
262
263
264 struct net_device *physoutdev;
265 union {
266
267 __be32 ipv4_daddr;
268 struct in6_addr ipv6_daddr;
269
270
271
272
273
274 char neigh_header[8];
275 };
276};
277#endif
278
279struct sk_buff_head {
280
281 struct sk_buff *next;
282 struct sk_buff *prev;
283
284 __u32 qlen;
285 spinlock_t lock;
286};
287
288struct sk_buff;
289
290
291
292
293
294
295
296
297#if (65536/PAGE_SIZE + 1) < 16
298#define MAX_SKB_FRAGS 16UL
299#else
300#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
301#endif
302extern int sysctl_max_skb_frags;
303
304typedef struct skb_frag_struct skb_frag_t;
305
306struct skb_frag_struct {
307 struct {
308 struct page *p;
309 } page;
310#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
311 __u32 page_offset;
312 __u32 size;
313#else
314 __u16 page_offset;
315 __u16 size;
316#endif
317};
318
319static inline unsigned int skb_frag_size(const skb_frag_t *frag)
320{
321 return frag->size;
322}
323
324static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
325{
326 frag->size = size;
327}
328
329static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
330{
331 frag->size += delta;
332}
333
334static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
335{
336 frag->size -= delta;
337}
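
/* Usage sketch (an illustrative addition, not from the original header):
 * summing the paged fragments with the accessors above; together with
 * skb_headlen() this reproduces skb->len for an skb without a frag_list:
 *
 *	unsigned int i, paged = 0;
 *
 *	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 *		paged += skb_frag_size(&skb_shinfo(skb)->frags[i]);
 *	// here paged + skb_headlen(skb) == skb->len
 */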

#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since arbitrary point in time
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to &skb_shared_info.hwtstamps, which is
 * shared between all clones.
 */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
};

/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp when queueing packet to NIC */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* device driver supports TX zero-copy buffers */
	SKBTX_DEV_ZEROCOPY = 1 << 3,

	/* generate wifi status information (where possible) */
	SKBTX_WIFI_STATUS = 1 << 4,

	/* This indicates at least one fragment might be overwritten
	 * (as in vmsplice(), sendfile() ...)
	 * If we need to compute a TX checksum, we'll need to copy
	 * all frags to avoid possible bad checksum
	 */
	SKBTX_SHARED_FRAG = 1 << 5,

	/* generate software time stamp when entering packet scheduling */
	SKBTX_SCHED_TSTAMP = 1 << 6,

	/* generate software timestamp on peer data acknowledgment */
	SKBTX_ACK_TSTAMP = 1 << 7,
};

#define SKBTX_ANY_SW_TSTAMP	(SKBTX_SW_TSTAMP    | \
				 SKBTX_SCHED_TSTAMP | \
				 SKBTX_ACK_TSTAMP)
#define SKBTX_ANY_TSTAMP	(SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)

/*
 * The callback notifies userspace to release buffers when skb DMA is done in
 * lower device, the skb last reference should be 0 when calling this.
 * The zerocopy_success argument is true if zero copy transmit occurred,
 * false on data copy or out of memory error caused by data copy attempt.
 * The ctx field is used to track device context.
 * The desc field is used to track userspace buffer index.
 */
struct ubuf_info {
	void (*callback)(struct ubuf_info *, bool zerocopy_success);
	void *ctx;
	unsigned long desc;
};

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	unsigned char	nr_frags;
	__u8		tx_flags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short	gso_type;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	u32		tskey;
	__be32		ip6_frag_id;

	/*
	 * Warning : all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void		*destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)


enum {
	SKB_FCLONE_UNAVAILABLE,	/* skb has no fclone (from head_cache) */
	SKB_FCLONE_ORIG,	/* orig skb (from fclone_cache) */
	SKB_FCLONE_CLONE,	/* companion fclone skb (from fclone_cache) */
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,

	SKB_GSO_GRE = 1 << 6,

	SKB_GSO_GRE_CSUM = 1 << 7,

	SKB_GSO_IPIP = 1 << 8,

	SKB_GSO_SIT = 1 << 9,

	SKB_GSO_UDP_TUNNEL = 1 << 10,

	SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,

	SKB_GSO_TUNNEL_REMCSUM = 1 << 12,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

/**
 * struct skb_mstamp - multi resolution time stamps
 * @stamp_us: timestamp in us resolution
 * @stamp_jiffies: timestamp in jiffies
 */
struct skb_mstamp {
	union {
		u64		v64;
		struct {
			u32	stamp_us;
			u32	stamp_jiffies;
		};
	};
};

/**
 * skb_mstamp_get - get current timestamp
 * @cl: place to store timestamps
 */
static inline void skb_mstamp_get(struct skb_mstamp *cl)
{
	u64 val = local_clock();

	do_div(val, NSEC_PER_USEC);
	cl->stamp_us = (u32)val;
	cl->stamp_jiffies = (u32)jiffies;
}

/**
 * skb_mstamp_us_delta - compute the difference in usec between two skb_mstamp
 * @t1: pointer to newest sample
 * @t0: pointer to oldest sample
 */
static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
				      const struct skb_mstamp *t0)
{
	s32 delta_us = t1->stamp_us - t0->stamp_us;
	u32 delta_jiffies = t1->stamp_jiffies - t0->stamp_jiffies;

	/* If the us delta went backwards, or the jiffies delta is too
	 * large for exact us accounting, fall back to the coarser
	 * jiffies resolution.
	 */
	if (delta_us <= 0 ||
	    delta_jiffies >= (INT_MAX / (USEC_PER_SEC / HZ)))

		delta_us = jiffies_to_usecs(delta_jiffies);

	return delta_us;
}

static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
				    const struct skb_mstamp *t0)
{
	s32 diff = t1->stamp_jiffies - t0->stamp_jiffies;

	if (!diff)
		diff = t1->stamp_us - t0->stamp_us;
	return diff > 0;
}
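
/* Usage sketch (an illustrative addition, not from the original header):
 * measuring an RTT-style interval with the multi resolution stamps above,
 * where sent_stamp is a hypothetical struct skb_mstamp saved at transmit
 * time:
 *
 *	struct skb_mstamp now;
 *	u32 rtt_us;
 *
 *	skb_mstamp_get(&now);
 *	rtt_us = skb_mstamp_us_delta(&now, &sent_stamp);
 */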

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@tstamp: Time we arrived/left
 *	@rbnode: RB tree node, alternative to next/prev for netem/tcp
 *	@sk: Socket we are owned by
 *	@dev: Device we arrived on/are leaving by
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@_skb_refdst: destination entry (with norefcount bit)
 *	@sp: the security path, used for xfrm
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@hdr_len: writable header length of cloned skb
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
 *	@priority: Packet queueing priority
 *	@ignore_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@ip_summed: Driver fed us an IP checksum
 *	@nohdr: Payload reference only, must not modify header
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ipvs_property: skbuff is owned by ipvs
 *	@peeked: this packet has been seen already, so stats have been
 *		done for it, don't do them again
 *	@nf_trace: netfilter packet trace flag
 *	@protocol: Packet protocol from driver
 *	@destructor: Destruct function
 *	@nfct: Associated connection, if any
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@skb_iif: ifindex of device we arrived on
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@hash: the packet hash
 *	@queue_mapping: Queue mapping for multiqueue devices
 *	@xmit_more: More SKBs are pending for this queue
 *	@ndisc_nodetype: router type (from link layer)
 *	@ooo_okay: allow the mapping of a socket to a queue to be changed
 *	@l4_hash: indicate hash is a canonical 4-tuple hash over transport
 *		ports.
 *	@sw_hash: indicates hash was computed in software stack
 *	@wifi_acked_valid: wifi_acked was set
 *	@wifi_acked: whether frame was acked on wifi or not
 *	@no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
 *	@napi_id: id of the NAPI struct this skb came from
 *	@secmark: security marking
 *	@offload_fwd_mark: forwarding offload mark
 *	@mark: Generic packet mark
 *	@vlan_proto: vlan encapsulation protocol
 *	@vlan_tci: vlan tag control information
 *	@inner_protocol: Protocol (encapsulation)
 *	@inner_transport_header: Inner transport layer header (encapsulation)
 *	@inner_network_header: Network layer header (encapsulation)
 *	@inner_mac_header: Link layer header (encapsulation)
 *	@transport_header: Transport layer header
 *	@network_header: Network layer header
 *	@mac_header: Link layer header
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@truesize: Buffer size
 *	@users: User count - see {datagram,tcp}.c
 */
struct sk_buff {
	union {
		struct {
			/* These two members must be first. */
			struct sk_buff		*next;
			struct sk_buff		*prev;

			union {
				ktime_t		tstamp;
				struct skb_mstamp skb_mstamp;
			};
		};
		struct rb_node	rbnode; /* used in netem & tcp stack */
	};
	struct sock		*sk;
	struct net_device	*dev;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	unsigned long		_skb_refdst;
	void			(*destructor)(struct sk_buff *skb);
#ifdef CONFIG_XFRM
	struct	sec_path	*sp;
#endif
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack	*nfct;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	struct nf_bridge_info	*nf_bridge;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;

	/* Following fields are _not_ copied in __copy_skb_header()
	 * Note that queue_mapping is here mostly to fill a hole.
	 */
	kmemcheck_bitfield_begin(flags1);
	__u16			queue_mapping;
	__u8			cloned:1,
				nohdr:1,
				fclone:2,
				peeked:1,
				head_frag:1,
				xmit_more:1;
	/* one bit hole */
	kmemcheck_bitfield_end(flags1);

	/* fields enclosed in headers_start/headers_end are copied
	 * using a single memcpy() in __copy_skb_header()
	 */
	/* private: */
	__u32			headers_start[0];
	/* public: */

/* if you move pkt_type around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_TYPE_MAX	(7 << 5)
#else
#define PKT_TYPE_MAX	7
#endif
#define PKT_TYPE_OFFSET()	offsetof(struct sk_buff, __pkt_type_offset)

	__u8			__pkt_type_offset[0];
	__u8			pkt_type:3;
	__u8			pfmemalloc:1;
	__u8			ignore_df:1;
	__u8			nfctinfo:3;

	__u8			nf_trace:1;
	__u8			ip_summed:2;
	__u8			ooo_okay:1;
	__u8			l4_hash:1;
	__u8			sw_hash:1;
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;

	__u8			no_fcs:1;
	/* Indicates the inner headers are valid in the skbuff. */
	__u8			encapsulation:1;
	__u8			encap_hdr_csum:1;
	__u8			csum_valid:1;
	__u8			csum_complete_sw:1;
	__u8			csum_level:2;
	__u8			csum_bad:1;

#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif
	__u8			ipvs_property:1;
	__u8			inner_protocol_type:1;
	__u8			remcsum_offload:1;
	/* bit hole */

#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif

	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	int			skb_iif;
	__u32			hash;
	__be16			vlan_proto;
	__u16			vlan_tci;
#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
	union {
		unsigned int	napi_id;
		unsigned int	sender_cpu;
	};
#endif
	union {
#ifdef CONFIG_NETWORK_SECMARK
		__u32		secmark;
#endif
#ifdef CONFIG_NET_SWITCHDEV
		__u32		offload_fwd_mark;
#endif
	};

	union {
		__u32		mark;
		__u32		reserved_tailroom;
	};

	union {
		__be16		inner_protocol;
		__u8		inner_ipproto;
	};

	__u16			inner_transport_header;
	__u16			inner_network_header;
	__u16			inner_mac_header;

	__be16			protocol;
	__u16			transport_header;
	__u16			network_header;
	__u16			mac_header;

	/* private: */
	__u32			headers_end[0];
	/* public: */

	/* These elements must be at the end, see alloc_skb() for details. */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	atomic_t		users;
};

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>


#define SKB_ALLOC_FCLONE	0x01
#define SKB_ALLOC_RX		0x02
#define SKB_ALLOC_NAPI		0x04

/* Returns true if the skb was allocated from PFMEMALLOC reserves */
static inline bool skb_pfmemalloc(const struct sk_buff *skb)
{
	return unlikely(skb->pfmemalloc);
}

/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if refcount was _not_ taken
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of reference taken or not.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check we still are in a
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}

/**
 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * No reference is taken and no dst_release will be called. While for
 * cached dsts deferred reclaim is a basic feature, for entries that are
 * not cached it is caller's job to guarantee that last dst_release for
 * provided dst happens when nobody uses it, eg. after a RCU grace period.
 */
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
}

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

void kfree_skb(struct sk_buff *skb);
void kfree_skb_list(struct sk_buff *segs);
void skb_tx_error(struct sk_buff *skb);
void consume_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;

void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
		      bool *fragstolen, int *delta_truesize);

struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
			    int node);
struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}

struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
				     unsigned long data_len,
				     int max_page_order,
				     int *errcode,
				     gfp_t gfp_mask);

/* Layout of fast clones : [skb1][skb2][fclone_ref] */
struct sk_buff_fclones {
	struct sk_buff	skb1;

	struct sk_buff	skb2;

	atomic_t	fclone_ref;
};

/**
 *	skb_fclone_busy - check if fclone is busy
 *	@sk: socket
 *	@skb: buffer
 *
 * Returns true if skb is a fast clone, and its clone is not freed.
 * Some drivers call skb_orphan() in their ndo_start_xmit(),
 * so we also check that this didnt happen.
 */
static inline bool skb_fclone_busy(const struct sock *sk,
				   const struct sk_buff *skb)
{
	const struct sk_buff_fclones *fclones;

	fclones = container_of(skb, struct sk_buff_fclones, skb1);

	return skb->fclone == SKB_FCLONE_ORIG &&
	       atomic_read(&fclones->fclone_ref) > 1 &&
	       fclones->skb2.sk == sk;
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
}

struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
static inline struct sk_buff *alloc_skb_head(gfp_t priority)
{
	return __alloc_skb_head(priority, -1);
}

struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone);
static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
					  gfp_t gfp_mask)
{
	return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
}
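
/* A quick orientation (an illustrative addition, not from the original
 * header): skb_clone() shares the data buffer and only duplicates the
 * sk_buff metadata, while skb_copy() duplicates both, so the copy is
 * freely writable:
 *
 *	struct sk_buff *ro = skb_clone(skb, GFP_ATOMIC); // shared payload
 *	struct sk_buff *rw = skb_copy(skb, GFP_ATOMIC);  // private payload
 *
 * Both return NULL on allocation failure.
 */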

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
				     unsigned int headroom);
struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
				int newtailroom, gfp_t priority);
int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
			int offset, int len);
int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
		 int len);
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
int skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	consume_skb(a)

int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length);

int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
			 int offset, size_t size);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st);
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st);
void skb_abort_seq_read(struct skb_seq_state *st);

unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config);
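
/* Usage sketch for the sequential reader above (an illustrative addition,
 * not from the original header): iterate the bytes of a possibly
 * non-linear skb without linearizing it.
 *
 *	struct skb_seq_state st;
 *	const u8 *data;
 *	unsigned int consumed = 0, avail;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((avail = skb_seq_read(consumed, &data, &st)) != 0) {
 *		// process avail bytes at data
 *		consumed += avail;
 *	}
 *	// skb_abort_seq_read(&st) is only needed when stopping early
 */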

/*
 * Packet hash types specify the type of hash in skb_set_hash().
 *
 * Hash types refer to the protocol layer addresses which are used to
 * construct a packet's hash. The hashes are used to differentiate or identify
 * flows of the protocol layer for the hash.
 *
 * Properties of hashes:
 *
 * 1) Two packets in different flows have different hash values
 * 2) Two packets in the same flow should have the same hash value
 *
 * A hash at a higher layer is considered to be more specific. A driver should
 * set the most specific hash possible.
 *
 * A driver cannot indicate a more specific hash than the layer at which a hash
 * was computed. For instance an L3 hash cannot be set as an L4 hash.
 *
 * A driver may indicate a hash level which is less specific than the
 * actual layer the hash was computed on. For instance, a hash computed
 * at L4 may be considered an L3 hash. This should only be done if the
 * driver can't unambiguously determine that the HW computed the hash at
 * the higher layer.
 */
enum pkt_hash_types {
	PKT_HASH_TYPE_NONE,	/* Undefined type */
	PKT_HASH_TYPE_L2,	/* Input: src_MAC, dest_MAC */
	PKT_HASH_TYPE_L3,	/* Input: src_IP, dst_IP */
	PKT_HASH_TYPE_L4,	/* Input: src_IP, dst_IP, src_port, dst_port */
};

static inline void skb_clear_hash(struct sk_buff *skb)
{
	skb->hash = 0;
	skb->sw_hash = 0;
	skb->l4_hash = 0;
}

static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
{
	if (!skb->l4_hash)
		skb_clear_hash(skb);
}

static inline void
__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
{
	skb->l4_hash = is_l4;
	skb->sw_hash = is_sw;
	skb->hash = hash;
}

static inline void
skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
{
	/* Used by drivers to set hash from HW */
	__skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
}

static inline void
__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
{
	__skb_set_hash(skb, hash, true, is_l4);
}

void __skb_get_hash(struct sk_buff *skb);
u32 skb_get_poff(const struct sk_buff *skb);
u32 __skb_get_poff(const struct sk_buff *skb, void *data,
		   const struct flow_keys *keys, int hlen);
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
			    void *data, int hlen_proto);

static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
					int thoff, u8 ip_proto)
{
	return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
}

void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count);

bool __skb_flow_dissect(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container,
			void *data, __be16 proto, int nhoff, int hlen,
			unsigned int flags);

static inline bool skb_flow_dissect(const struct sk_buff *skb,
				    struct flow_dissector *flow_dissector,
				    void *target_container, unsigned int flags)
{
	return __skb_flow_dissect(skb, flow_dissector, target_container,
				  NULL, 0, 0, 0, flags);
}

static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
					      struct flow_keys *flow,
					      unsigned int flags)
{
	memset(flow, 0, sizeof(*flow));
	return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
				  NULL, 0, 0, 0, flags);
}

static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
						  void *data, __be16 proto,
						  int nhoff, int hlen,
						  unsigned int flags)
{
	memset(flow, 0, sizeof(*flow));
	return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
				  data, proto, nhoff, hlen, flags);
}

static inline __u32 skb_get_hash(struct sk_buff *skb)
{
	if (!skb->l4_hash && !skb->sw_hash)
		__skb_get_hash(skb);

	return skb->hash;
}

__u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6);

static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
{
	if (!skb->l4_hash && !skb->sw_hash) {
		struct flow_keys keys;
		__u32 hash = __get_hash_from_flowi6(fl6, &keys);

		__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
	}

	return skb->hash;
}

__u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl);

static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4)
{
	if (!skb->l4_hash && !skb->sw_hash) {
		struct flow_keys keys;
		__u32 hash = __get_hash_from_flowi4(fl4, &keys);

		__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
	}

	return skb->hash;
}

__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);

static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
	return skb->hash;
}

static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
{
	to->hash = from->hash;
	to->sw_hash = from->sw_hash;
	to->l4_hash = from->l4_hash;
}
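
/* Example (an illustrative addition, not from the original header): a
 * driver's RX path reporting a hardware-computed 4-tuple hash so the
 * stack can skip software flow dissection in skb_get_hash().
 * rx_desc_hash stands in for whatever value the NIC descriptor carries:
 *
 *	skb_set_hash(skb, rx_desc_hash, PKT_HASH_TYPE_L4);
 */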

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end - skb->head;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (const struct sk_buff *) list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (const struct sk_buff *) list;
}

/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (const struct sk_buff *) list;
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb.  It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb.  It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this.
 *	Note : Check if you can use __skb_header_release() instead.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	__skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Variant of skb_header_release() assuming skb is private to caller.
 *	We can avoid one atomic operation.
 */
static inline void __skb_header_release(struct sk_buff *skb)
{
	skb->nohdr = 1;
	atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt status or with spinlocks held pri must
 *	be GFP_ATOMIC.
 *
 *	NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);

		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet thats being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);

		/* Free our shared copy */
		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->next;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	skb_peek_next - peek skb following the given one from a queue
 *	@skb: skb to start from
 *	@list_: list to peek at
 *
 *	Returns %NULL when the end of the list is met or a pointer to the
 *	next element. The reference count is not incremented and the
 *	reference is therefore volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
		const struct sk_buff_head *list_)
{
	struct sk_buff *next = skb->next;

	if (next == (struct sk_buff *)list_)
		next = NULL;
	return next;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->prev;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object.  This allows to initialize the list
 *	aspects of an sk_buff_head without reinitializing things like
 *	the spinlock.  It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known to not be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers should need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
		struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
		struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	__skb_queue_after - queue a buffer at the list head
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
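
/* Putting the queue primitives together (an illustrative sketch, not part
 * of the original header): a private FIFO owned by one context, so the
 * lockless __-variants are safe to use:
 *
 *	struct sk_buff_head q;
 *	struct sk_buff *skb;
 *
 *	__skb_queue_head_init(&q);
 *	__skb_queue_tail(&q, skb);		// enqueue
 *	while ((skb = __skb_dequeue(&q)) != NULL)
 *		kfree_skb(skb);			// drain
 *
 * Use skb_queue_tail()/skb_dequeue() instead when the queue is shared,
 * since those take q.lock internally.
 */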


static inline bool skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
	return len + skb_headlen(skb);
}

/**
 * __skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data with @page
 * @size: the length of the data
 *
 * Initialises the @i'th fragment of @skb to point to &size bytes at
 * offset @off within @page.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	/*
	 * Propagate page pfmemalloc to the skb if we can. The problem is
	 * that not all pages can be mapped by user space, so look at the
	 * compound head page's pfmemalloc state.
	 */
	frag->page.p		  = page;
	frag->page_offset	  = off;
	skb_frag_size_set(frag, size);

	page = compound_head(page);
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc	= true;
}

/**
 * skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data with @page
 * @size: the length of the data
 *
 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
 * @skb to point to @size bytes at offset @off within @page. In
 * addition updates @skb such that @i is the last fragment.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	__skb_fill_page_desc(skb, i, page, off, size);
	skb_shinfo(skb)->nr_frags = i + 1;
}

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize);

#define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_has_frag_list(skb))
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
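
/* Defensive-parsing sketch (an illustrative addition, not from the
 * original header): before touching a header through skb->data, make
 * sure that many bytes are present in the linear area. pskb_may_pull()
 * linearizes them on demand and can reallocate the head, so re-derive
 * any cached header pointers afterwards:
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;
 *	// skb->data now safely covers a minimal IPv4 header
 */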

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_availroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 *	allocated by sk_stream_alloc()
 */
static inline int skb_availroom(const struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb))
		return 0;

	return skb->end - skb->tail - skb->reserved_tailroom;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

/**
 *	skb_tailroom_reserve - adjust reserved_tailroom
 *	@skb: buffer to alter
 *	@mtu: maximum amount of headlen permitted
 *	@needed_tailroom: minimum amount of reserved_tailroom
 *
 *	Set reserved_tailroom so that headlen can be as large as possible but
 *	not larger than mtu and tailroom cannot be smaller than
 *	needed_tailroom.
 *	The required headroom should already have been reserved before using
 *	this function.
 */
static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
					unsigned int needed_tailroom)
{
	SKB_LINEAR_ASSERT(skb);
	if (mtu < skb_tailroom(skb) - needed_tailroom)
		/* use at most mtu */
		skb->reserved_tailroom = skb_tailroom(skb) - mtu;
	else
		/* use up to all available space */
		skb->reserved_tailroom = needed_tailroom;
}
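
/* The canonical build pattern with the helpers above (an illustrative
 * sketch, not from the original header): allocate with extra headroom,
 * reserve it, then append payload. LL_LEN, PAYLOAD_LEN and payload are
 * placeholders for real sizes and data:
 *
 *	struct sk_buff *skb = alloc_skb(LL_LEN + PAYLOAD_LEN, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_LEN);		// headroom for lower headers
 *	memcpy(skb_put(skb, PAYLOAD_LEN), payload, PAYLOAD_LEN);
 *	// later, skb_push(skb, LL_LEN) claims the reserved headroom
 */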

#define ENCAP_TYPE_ETHER	0
#define ENCAP_TYPE_IPPROTO	1

static inline void skb_set_inner_protocol(struct sk_buff *skb,
					  __be16 protocol)
{
	skb->inner_protocol = protocol;
	skb->inner_protocol_type = ENCAP_TYPE_ETHER;
}

static inline void skb_set_inner_ipproto(struct sk_buff *skb,
					 __u8 ipproto)
{
	skb->inner_ipproto = ipproto;
	skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
}

static inline void skb_reset_inner_headers(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->mac_header;
	skb->inner_network_header = skb->network_header;
	skb->inner_transport_header = skb->transport_header;
}

static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}

static inline unsigned char *skb_inner_transport_header(const struct sk_buff
							*skb)
{
	return skb->head + skb->inner_transport_header;
}

static inline int skb_inner_transport_offset(const struct sk_buff *skb)
{
	return skb_inner_transport_header(skb) - skb->data;
}

static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
	skb->inner_transport_header = skb->data - skb->head;
}

static inline void skb_set_inner_transport_header(struct sk_buff *skb,
						  const int offset)
{
	skb_reset_inner_transport_header(skb);
	skb->inner_transport_header += offset;
}

static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_network_header;
}

static inline void skb_reset_inner_network_header(struct sk_buff *skb)
{
	skb->inner_network_header = skb->data - skb->head;
}

static inline void skb_set_inner_network_header(struct sk_buff *skb,
						const int offset)
{
	skb_reset_inner_network_header(skb);
	skb->inner_network_header += offset;
}

static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_mac_header;
}

static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->data - skb->head;
}

static inline void skb_set_inner_mac_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_inner_mac_header(skb);
	skb->inner_mac_header += offset;
}

static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
{
	return skb->transport_header != (typeof(skb->transport_header))~0U;
}

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != (typeof(skb->mac_header))~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

static inline void skb_pop_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->network_header;
}

static inline void skb_probe_transport_header(struct sk_buff *skb,
					      const int offset_hint)
{
	struct flow_keys keys;

	if (skb_transport_header_was_set(skb))
		return;
	else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
		skb_set_transport_header(skb, keys.control.thoff);
	else
		skb_set_transport_header(skb, offset_hint);
}

static inline void skb_mac_header_rebuild(struct sk_buff *skb)
{
	if (skb_mac_header_was_set(skb)) {
		const unsigned char *old_mac = skb_mac_header(skb);

		skb_set_mac_header(skb, -skb->mac_len);
		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
	}
}

static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}

static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
{
	return skb->head + skb->csum_start;
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
{
	return skb->inner_transport_header - skb->inner_network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline int skb_inner_network_offset(const struct sk_buff *skb)
{
	return skb_inner_network_header(skb) - skb->data;
}

static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
	return pskb_may_pull(skb, skb_network_offset(skb) + len);
}
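
/* Offset bookkeeping sketch (an illustrative addition, not from the
 * original header): in an RX path, after the link layer header has been
 * pulled, record the network header and derive the transport header from
 * the IP header length:
 *
 *	skb_reset_network_header(skb);		// IP starts at skb->data
 *	skb_set_transport_header(skb, ip_hdr(skb)->ihl * 4);
 *
 * ip_hdr() is the usual cast of skb_network_header() to struct iphdr *,
 * declared in <linux/ip.h>.
 */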

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the general case, when the header grows by
 * more than the reserved room, a reallocation (and copy) is needed.
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom, you should not reduce this.
 *
 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
 * to reduce average number of cache lines per packet.
 * get_rps_cpus() for example only access one 64 bytes aligned block :
 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif

int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb_is_nonlinear(skb))) {
		WARN_ON(1);
		return;
	}
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor) {
		skb->destructor(skb);
		skb->destructor = NULL;
		skb->sk		= NULL;
	} else {
		BUG_ON(skb->sk);
	}
}

/**
 *	skb_orphan_frags - orphan the frags contained in a buffer
 *	@skb: buffer to orphan frags from
 *	@gfp_mask: allocation mask for replacement pages
 *
 *	For each frag in the SKB which needs a destructor (i.e. has an
 *	owner) create a copy of that frag and release the original page by
 *	calling the destructor.
 */
static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

void *netdev_alloc_frag(unsigned int fragsz);

struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
				   gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
					       unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

/* legacy helper around __netdev_alloc_skb() */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	return __netdev_alloc_skb(NULL, length, gfp_mask);
}

/* legacy helper around netdev_alloc_skb() */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return netdev_alloc_skb(NULL, length);
}


static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}
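
/* Driver RX sketch (an illustrative addition, not from the original
 * header): allocate a receive buffer with the IP header aligned, copy
 * RX_LEN bytes of frame data in (RX_LEN and rx_buf are placeholders),
 * and hand it to the stack:
 *
 *	struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, RX_LEN);
 *
 *	if (!skb)
 *		return;				// drop, out of memory
 *	memcpy(skb_put(skb, RX_LEN), rx_buf, RX_LEN);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 *
 * eth_type_trans() and netif_rx() come from <linux/etherdevice.h> and
 * <linux/netdevice.h> respectively.
 */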

static inline void skb_free_frag(void *addr)
{
	__free_page_frag(addr);
}

void *napi_alloc_frag(unsigned int fragsz);
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
				 unsigned int length, gfp_t gfp_mask);
static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
					     unsigned int length)
{
	return __napi_alloc_skb(napi, length, GFP_ATOMIC);
}
void napi_consume_skb(struct sk_buff *skb, int budget);

void __kfree_skb_flush(void);
void __kfree_skb_defer(struct sk_buff *skb);

/**
 * __dev_alloc_pages - allocate page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 * @order: size of the allocation
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
					     unsigned int order)
{
	/* This piece of code contains several assumptions.
	 * 1.  This is for device Rx, therefor a cold page is preferred.
	 * 2.  The expectation is the user wants a compound page.
	 * 3.  If requesting a order 0 page it will not be compound
	 *     due to the check to see if order has a value in prep_new_page
	 * 4.  __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
	 *     code in gfp_to_alloc_flags that should be enforcing this.
	 */
	gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC;

	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
}

static inline struct page *dev_alloc_pages(unsigned int order)
{
	return __dev_alloc_pages(GFP_ATOMIC, order);
}

/**
 * __dev_alloc_page - allocate a page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
{
	return __dev_alloc_pages(gfp_mask, 0);
}

static inline struct page *dev_alloc_page(void)
{
	return __dev_alloc_page(GFP_ATOMIC);
}

/**
 *	skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
 *	@page: The page that was allocated from skb_alloc_page
 *	@skb: The skb that may need pfmemalloc set
 */
static inline void skb_propagate_pfmemalloc(struct page *page,
					    struct sk_buff *skb)
{
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc = true;
}

/**
 * skb_frag_page - retrieve the page referred to by a paged fragment
 * @frag: the paged fragment
 *
 * Returns the &struct page associated with @frag.
 */
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
	return frag->page.p;
}

/**
 * __skb_frag_ref - take an addition reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Takes an additional reference on the paged fragment @frag.
 */
static inline void __skb_frag_ref(skb_frag_t *frag)
{
	get_page(skb_frag_page(frag));
}

/**
 * skb_frag_ref - take an addition reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset.
 *
 * Takes an additional reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_ref(struct sk_buff *skb, int f)
{
	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}

/**
 * __skb_frag_unref - release a reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Releases a reference on the paged fragment @frag.
 */
static inline void __skb_frag_unref(skb_frag_t *frag)
{
	put_page(skb_frag_page(frag));
}

/**
 * skb_frag_unref - release a reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset
 *
 * Releases a reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_unref(struct sk_buff *skb, int f)
{
	__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
}

/**
 * skb_frag_address - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. The page must already
 * be mapped.
 */
static inline void *skb_frag_address(const skb_frag_t *frag)
{
	return page_address(skb_frag_page(frag)) + frag->page_offset;
}

/**
 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. Checks that the page
 * is mapped and returns %NULL otherwise.
 */
static inline void *skb_frag_address_safe(const skb_frag_t *frag)
{
	void *ptr = page_address(skb_frag_page(frag));
	if (unlikely(!ptr))
		return NULL;

	return ptr + frag->page_offset;
}

/**
 * __skb_frag_set_page - sets the page contained in a paged fragment
 * @frag: the paged fragment
 * @page: the page to set
 *
 * Sets the fragment @frag to contain @page.
 */
static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
	frag->page.p = page;
}

/**
 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
 * @skb: the buffer
 * @f: the fragment offset
 * @page: the page to set
 *
 * Sets the @f'th fragment of @skb to contain @page.
 */
static inline void skb_frag_set_page(struct sk_buff *skb, int f,
				     struct page *page)
{
	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
}

bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);

/**
 * skb_frag_dma_map - maps a paged fragment via the DMA API
 * @dev: the device to map the fragment to
 * @frag: the paged fragment to map
 * @offset: the offset within the fragment (starting at the
 *          fragment's own offset)
 * @size: the number of bytes to map
 * @dir: the direction of the mapping (%PCI_DMA_*)
 *
 * Maps the page associated with @frag to @device.
 */
static inline dma_addr_t skb_frag_dma_map(struct device *dev,
					  const skb_frag_t *frag,
					  size_t offset, size_t size,
					  enum dma_data_direction dir)
{
	return dma_map_page(dev, skb_frag_page(frag),
			    frag->page_offset + offset, size, dir);
}
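
/* TX mapping sketch (an illustrative addition, not from the original
 * header): a driver mapping every paged fragment of an skb for device DMA
 * before posting descriptors; dma_dev stands in for the driver's
 * struct device:
 *
 *	unsigned int f;
 *
 *	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
 *		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
 *		dma_addr_t addr = skb_frag_dma_map(dma_dev, frag, 0,
 *						   skb_frag_size(frag),
 *						   DMA_TO_DEVICE);
 *
 *		if (dma_mapping_error(dma_dev, addr))
 *			goto unwind;	// unmap what was mapped so far
 *		// post addr/len to a TX descriptor here
 *	}
 */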

static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
					gfp_t gfp_mask)
{
	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
}


static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
						  gfp_t gfp_mask)
{
	return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
}


/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not requires the data to be copied.
 */
static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int skb_try_make_writable(struct sk_buff *skb,
					unsigned int write_len)
{
	return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
	       pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and original skb is not changed.
 *
 *	The result is skb with writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 *	skb_cow_head - skb_cow but only making the head writable
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	This function is identical to skb_cow except that we replace the
 *	skb_cloned check by skb_header_cloned.  It should be used when
 *	you only need to push on some header and do not need to modify
 *	the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}

/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

/**
 *	skb_put_padto - increase size and pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;

	if (unlikely(size < len)) {
		len -= size;
		if (skb_pad(skb, len))
			return -ENOMEM;
		__skb_put(skb, len);
	}
	return 0;
}

static inline int skb_add_data(struct sk_buff *skb,
			       struct iov_iter *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		__wsum csum = 0;
		if (csum_and_copy_from_iter(skb_put(skb, copy), copy,
					    &csum, from) == copy) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (copy_from_iter(skb_put(skb, copy), copy, from) == copy)
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
				    const struct page *page, int off)
{
	if (i) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == skb_frag_page(frag) &&
		       off == frag->page_offset + skb_frag_size(frag);
	}
	return false;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linarize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 * skb_has_shared_frag - can any frag be overwritten
 * @skb: buffer to test
 *
 * Return true if the skb has at least one frag that might be modified
 * by an external entity (as in vmsplice()/sendfile())
 */
static inline bool skb_has_shared_frag(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) &&
	       skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */
static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
	else if (skb->ip_summed == CHECKSUM_PARTIAL &&
		 skb_checksum_start_offset(skb) < 0)
		skb->ip_summed = CHECKSUM_NONE;
}

unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

static inline void skb_postpush_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	/* For performing the reverse operation to skb_postpull_rcsum(),
	 * we can instead of ...
	 *
	 *   skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
	 *
	 * ... just fold the new bytes directly into the existing
	 * checksum, which is equivalent and saves a csum_add():
	 */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_partial(start, len, skb->csum);
}

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets is still valid after the operation.
 */
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;				\
		     skb != (struct sk_buff *)(queue);			\
		     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)				\
		for (skb = (queue)->next, tmp = skb->next;		\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->next)

#define skb_queue_walk_from(queue, skb)					\
		for (; skb != (struct sk_buff *)(queue);		\
		     skb = skb->next)

#define skb_queue_walk_from_safe(queue, skb, tmp)			\
		for (tmp = skb->next;					\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;				\
		     skb != (struct sk_buff *)(queue);			\
		     skb = skb->prev)

#define skb_queue_reverse_walk_safe(queue, skb, tmp)			\
		for (skb = (queue)->prev, tmp = skb->prev;		\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->prev)

#define skb_queue_reverse_walk_from_safe(queue, skb, tmp)		\
		for (tmp = skb->prev;					\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->prev)

static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

#define skb_walk_frags(skb, iter)	\
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)

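/* Walker usage sketch (an illustrative addition, not from the original
 * header): count the buffers on a queue while holding its lock, and use
 * the _safe variant whenever the loop body may unlink the current entry.
 * should_drop() is a placeholder predicate:
 *
 *	struct sk_buff *skb, *tmp;
 *	unsigned int n = 0;
 *
 *	skb_queue_walk(&q, skb)
 *		n++;
 *
 *	skb_queue_walk_safe(&q, skb, tmp) {
 *		if (should_drop(skb)) {
 *			__skb_unlink(skb, &q);
 *			kfree_skb(skb);
 *		}
 *	}
 */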
2927
2928int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
2929 const struct sk_buff *skb);
2930struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
2931 int *peeked, int *off, int *err,
2932 struct sk_buff **last);
2933struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
2934 int *peeked, int *off, int *err);
2935struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
2936 int *err);
2937unsigned int datagram_poll(struct file *file, struct socket *sock,
2938 struct poll_table_struct *wait);
2939int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
2940 struct iov_iter *to, int size);
2941static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
2942 struct msghdr *msg, int size)
2943{
2944 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
2945}
2946int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
2947 struct msghdr *msg);
2948int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
2949 struct iov_iter *from, int len);
2950int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
2951void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
2952void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb);
2953int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
2954int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
2955int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
2956__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
2957 int len, __wsum csum);
2958ssize_t skb_socket_splice(struct sock *sk,
2959 struct pipe_inode_info *pipe,
2960 struct splice_pipe_desc *spd);
2961int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
2962 struct pipe_inode_info *pipe, unsigned int len,
2963 unsigned int flags,
2964 ssize_t (*splice_cb)(struct sock *,
2965 struct pipe_inode_info *,
2966 struct splice_pipe_desc *));
2967void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
2968unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
2969int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
2970 int len, int hlen);
2971void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
2972int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
2973void skb_scrub_packet(struct sk_buff *skb, bool xnet);
2974unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
2975struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
2976struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
2977int skb_ensure_writable(struct sk_buff *skb, int write_len);
2978int skb_vlan_pop(struct sk_buff *skb);
2979int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);

static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
{
	return copy_from_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
}

static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
{
	return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
}
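
/* memcpy_from_msg()/memcpy_to_msg() return 0 on success and -EFAULT on a
 * short copy. A minimal sendmsg()-path sketch (struct my_hdr is a
 * hypothetical protocol header):
 *
 *	struct my_hdr hdr;
 *
 *	if (memcpy_from_msg(&hdr, msg, sizeof(hdr)))
 *		return -EFAULT;
 */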

struct skb_checksum_ops {
	__wsum (*update)(const void *mem, int len, __wsum wsum);
	__wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
};

__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
		      __wsum csum, const struct skb_checksum_ops *ops);
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
		    __wsum csum);

static inline void * __must_check
__skb_header_pointer(const struct sk_buff *skb, int offset,
		     int len, void *data, int hlen, void *buffer)
{
	if (hlen - offset >= len)
		return data + offset;

	if (!skb ||
	    skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}

static inline void * __must_check
skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
{
	return __skb_header_pointer(skb, offset, len, skb->data,
				    skb_headlen(skb), buffer);
}
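
/* Typical use of skb_header_pointer(): read a header that may not be
 * contiguous in the linear area. A minimal sketch for peeking at a TCP
 * header:
 *
 *	struct tcphdr _th;
 *	const struct tcphdr *th;
 *
 *	th = skb_header_pointer(skb, skb_transport_offset(skb),
 *				sizeof(_th), &_th);
 *	if (!th)
 *		return;		// packet too short
 */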

/**
 *	skb_needs_linearize - check if we need to linearize a given skb
 *			      depending on the given device features.
 *	@skb: socket buffer to check
 *	@features: net device features
 *
 *	Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG.
 */
static inline bool skb_needs_linearize(struct sk_buff *skb,
				       netdev_features_t features)
{
	return skb_is_nonlinear(skb) &&
	       ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
		(skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
}
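
/* The four copy helpers below touch only the linear buffer: the caller must
 * keep offset + len within skb_headlen(skb), falling back to skb_copy_bits()
 * or skb_store_bits() when the range may extend into paged data.
 */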

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}

void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and stores
 *	it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct timeval *stamp)
{
	*stamp = ktime_to_timeval(skb->tstamp);
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
				       struct timespec *stamp)
{
	*stamp = ktime_to_timespec(skb->tstamp);
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return ktime_set(0, 0);
}

struct sk_buff *skb_clone_sk(struct sk_buff *skb);

#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING

void skb_clone_tx_timestamp(struct sk_buff *skb);
bool skb_defer_rx_timestamp(struct sk_buff *skb);

#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */

static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	return false;
}

#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */

/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb back to the stack with a
 * timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps
 */
void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps);

void __skb_tstamp_tx(struct sk_buff *orig_skb,
		     struct skb_shared_hwtstamps *hwtstamps,
		     struct sock *sk, int tstype);

/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb:	the original outgoing packet
 * @hwtstamps:	hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket.  Errors are silently ignored.
 */
void skb_tstamp_tx(struct sk_buff *orig_skb,
		   struct skb_shared_hwtstamps *hwtstamps);

static inline void sw_tx_timestamp(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
	    !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		skb_tstamp_tx(skb, NULL);
}

/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC Drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * Specifically, one should make absolutely sure that this function is
 * called before TX completion of this packet can trigger.  Otherwise
 * the packet could potentially already be freed.
 *
 * @skb: A socket buffer.
 */
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	skb_clone_tx_timestamp(skb);
	sw_tx_timestamp(skb);
}
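
/* Call-site sketch for skb_tx_timestamp() in a driver's ndo_start_xmit()
 * (the driver names are hypothetical):
 *
 *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
 *					  struct net_device *dev)
 *	{
 *		...
 *		skb_tx_timestamp(skb);
 *		// hand the skb to the hardware only after this point
 *		...
 *	}
 */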

/**
 * skb_complete_wifi_ack - deliver skb with wifi status
 *
 * @skb: the original outgoing packet
 * @acked: ack status
 */
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
__sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
		skb->csum_valid ||
		(skb->ip_summed == CHECKSUM_PARTIAL &&
		 skb_checksum_start_offset(skb) >= 0));
}

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP,
 *	this function can be used to verify that checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is correct.  In particular, this function will return zero
 *	if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
 *	hardware has already verified the correctness of the checksum.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}
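
/* For the helpers below: skb->csum_level counts additional consecutive
 * checksums (outermost first) that the device validated on an encapsulated
 * packet; the decrement consumes one validated level as each header is
 * pulled during decapsulation.
 */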

static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level == 0)
			skb->ip_summed = CHECKSUM_NONE;
		else
			skb->csum_level--;
	}
}

static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
			skb->csum_level++;
	} else if (skb->ip_summed == CHECKSUM_NONE) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = 0;
	}
}

static inline void __skb_mark_checksum_bad(struct sk_buff *skb)
{
	/* Mark current checksum as bad (typically called from GRO
	 * path). In the case that ip_summed is CHECKSUM_NONE
	 * this must be the first checksum encountered in the packet.
	 * When ip_summed is CHECKSUM_UNNECESSARY, this is the first
	 * checksum after the last one validated. For UDP, a zero
	 * checksum can not be marked as bad.
	 */
	if (skb->ip_summed == CHECKSUM_NONE ||
	    skb->ip_summed == CHECKSUM_UNNECESSARY)
		skb->csum_bad = 1;
}

/* Check if we need to perform checksum complete validation.
 *
 * Returns true if checksum complete is needed, false otherwise
 * (either checksum is unnecessary or zero checksum is allowed).
 */
static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
						  bool zero_okay,
						  __sum16 check)
{
	if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
		skb->csum_valid = 1;
		__skb_decr_checksum_unnecessary(skb);
		return false;
	}

	return true;
}

/* For small packets <= CHECKSUM_BREAK, perform checksum complete directly
 * during checksum init rather than deferring it.
 */
#define CHECKSUM_BREAK 76

/* Unset checksum-complete
 *
 * Unset checksum complete can be done when packet is being modified
 * (uncompressed for instance) and checksum-complete value is
 * invalidated.
 */
static inline void skb_checksum_complete_unset(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/* Validate (init) checksum based on checksum complete.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete. In the latter
 *	case the ip_summed will not be CHECKSUM_UNNECESSARY and the pseudo
 *	checksum is stored in skb->csum for use in __skb_checksum_complete
 *   non-zero: value of invalid checksum
 */
static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
						       bool complete,
						       __wsum psum)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!csum_fold(csum_add(psum, skb->csum))) {
			skb->csum_valid = 1;
			return 0;
		}
	} else if (skb->csum_bad) {
		/* ip_summed == CHECKSUM_NONE in this case */
		return (__force __sum16)1;
	}

	skb->csum = psum;

	if (complete || skb->len <= CHECKSUM_BREAK) {
		__sum16 csum;

		csum = __skb_checksum_complete(skb);
		skb->csum_valid = !csum;
		return csum;
	}

	return 0;
}

static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
{
	return 0;
}

/* Perform checksum validate (init). Note that this is a macro since we only
 * want to calculate the pseudo header, which is an input function, if
 * necessary. First we try to validate without any computation (checksum
 * unnecessary) and then calculate based on checksum complete, calling the
 * function to compute the pseudo header.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete
 *   non-zero: value of invalid checksum
 */
#define __skb_checksum_validate(skb, proto, complete,			\
				zero_okay, check, compute_pseudo)	\
({									\
	__sum16 __ret = 0;						\
	skb->csum_valid = 0;						\
	if (__skb_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_checksum_validate_complete(skb,		\
				complete, compute_pseudo(skb, proto));	\
	__ret;								\
})

#define skb_checksum_init(skb, proto, compute_pseudo)			\
	__skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)

#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \
	__skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)

#define skb_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)

#define skb_checksum_validate_zero_check(skb, proto, check,		\
					 compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)

#define skb_checksum_simple_validate(skb)				\
	__skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
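
/* Example use of skb_checksum_init() (a sketch mirroring the TCP receive
 * path): initialize checksum state for an incoming IPv4 TCP packet using
 * its pseudo-header helper:
 *
 *	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
 *		goto csum_error;
 */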

static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
{
	return (skb->ip_summed == CHECKSUM_NONE &&
		skb->csum_valid && !skb->csum_bad);
}

static inline void __skb_checksum_convert(struct sk_buff *skb,
					  __sum16 check, __wsum pseudo)
{
	skb->csum = ~pseudo;
	skb->ip_summed = CHECKSUM_COMPLETE;
}

#define skb_checksum_try_convert(skb, proto, check, compute_pseudo)	\
do {									\
	if (__skb_checksum_convert_check(skb))				\
		__skb_checksum_convert(skb, check,			\
				       compute_pseudo(skb, proto));	\
} while (0)
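
/* skb_checksum_try_convert() lets a caller that has just validated a
 * checksum by other means (e.g. a UDP tunnel GRO path) synthesize an
 * equivalent CHECKSUM_COMPLETE value from the pseudo header, so later
 * consumers can reuse it instead of recomputing over the packet.
 */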

static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
					      u16 start, u16 offset)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
	skb->csum_offset = offset - start;
}

/* Update skb and packet state to reflect a remote checksum offload
 * operation. When called, ptr indicates the starting point for skb->csum
 * when ip_summed is CHECKSUM_COMPLETE. If we have to create a checksum
 * complete value here, skb_postpull_rcsum() is done first so that
 * skb->csum starts at ptr.
 */
static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
				       int start, int offset, bool nopartial)
{
	__wsum delta;

	if (!nopartial) {
		skb_remcsum_adjust_partial(skb, ptr, start, offset);
		return;
	}

	if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
		__skb_checksum_complete(skb);
		skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
	}

	delta = remcsum_adjust(ptr, skb->csum, start, offset);

	/* Adjust skb->csum since we changed the packet */
	skb->csum = csum_add(skb->csum, delta);
}

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

static inline void nf_reset_trace(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
	skb->nf_trace = 0;
#endif
}

/* Note: This doesn't put any conntrack and bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
			     bool copy)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->nfct = src->nfct;
	nf_conntrack_get(src->nfct);
	if (copy)
		dst->nfctinfo = src->nfctinfo;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	dst->nf_bridge = src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
	if (copy)
		dst->nf_trace = src->nf_trace;
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(dst->nfct);
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src, true);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline bool skb_irq_freeable(const struct sk_buff *skb)
{
	return !skb->destructor &&
#if IS_ENABLED(CONFIG_XFRM)
	       !skb->sp &&
#endif
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	       !skb->nfct &&
#endif
	       !skb->_skb_refdst &&
	       !skb_has_frag_list(skb);
}

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

/* The rx queue is stored as rx_queue + 1 so that a queue_mapping of zero
 * can mean "not recorded" (see skb_rx_queue_recorded()).
 */
static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}

static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
	return skb->sp;
#else
	return NULL;
#endif
}

/* Keeps track of the mac header offset relative to skb->head. It is useful
 * for TSO of tunneling protocols, e.g. GRE. For a non-tunnel skb it points
 * to skb_mac_header(); for a tunnel skb it points to the outer mac header.
 * encap_level tracks the level of encapsulation of network headers.
 */
struct skb_gso_cb {
	int	mac_offset;
	int	encap_level;
	__wsum	csum;
	__u16	csum_start;
};
#define SKB_SGO_CB_OFFSET	32
#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))

static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
{
	return (skb_mac_header(inner_skb) - inner_skb->head) -
	       SKB_GSO_CB(inner_skb)->mac_offset;
}

static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
{
	int new_headroom, headroom;
	int ret;

	headroom = skb_headroom(skb);
	ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
	if (ret)
		return ret;

	new_headroom = skb_headroom(skb);
	SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
	return 0;
}

static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
{
	/* Do not update partial checksums if remote checksum
	 * offload is enabled.
	 */
	if (skb->remcsum_offload)
		return;

	SKB_GSO_CB(skb)->csum = res;
	SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
}

/* Compute the checksum for a gso segment. First compute the checksum value
 * from the start of the transport header to SKB_GSO_CB(skb)->csum_start,
 * and then add in SKB_GSO_CB(skb)->csum (checksum from csum_start to end of
 * packet). The cb csum and csum_start are then updated to reflect the
 * checksum of the resultant packet starting from the transport header --
 * the res argument is normally zero or the unfolded complement of the
 * checksum of a pseudo header.
 */
static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
{
	unsigned char *csum_start = skb_transport_header(skb);
	int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
	__wsum partial = SKB_GSO_CB(skb)->csum;

	SKB_GSO_CB(skb)->csum = res;
	SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;

	return csum_fold(csum_partial(csum_start, plen, partial));
}

static inline bool skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set.
	 */
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one.  Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * Fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper, to document places where we make this assertion.
 */
static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
				     unsigned int transport_len,
				     __sum16(*skb_chkf)(struct sk_buff *skb));

/**
 * skb_head_is_locked - Determine if the skb->head is locked down
 * @skb: skb to check
 *
 * The head on skbs built around a head frag can be removed if they are
 * not cloned. This function returns true if the skb head is locked down
 * due to either being allocated via kmalloc, or by being a clone with
 * multiple references to the head.
 */
static inline bool skb_head_is_locked(const struct sk_buff *skb)
{
	return !skb->head_frag || skb_cloned(skb);
}

/**
 * skb_gso_network_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_network_seglen is used to determine the real size of the
 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
 *
 * The MAC/L2 header is not accounted for.
 */
static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) -
			       skb_network_header(skb);
	return hdr_len + skb_gso_transport_seglen(skb);
}

/* Local Checksum Offload.
 * Compute the outer checksum based on the assumption that the
 * inner checksum will be offloaded later.
 * See Documentation/networking/checksum-offloads.txt for
 * an explanation of how this works.
 * Fill in the outer checksum adjustment (e.g. with the sum of the
 * outer pseudo-header) before calling.
 * Also ensure that the inner checksum is in the linear data area.
 */
static inline __wsum lco_csum(struct sk_buff *skb)
{
	unsigned char *csum_start = skb_checksum_start(skb);
	unsigned char *l4_hdr = skb_transport_header(skb);
	__wsum partial;

	/* Start with complement of inner checksum */
	partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
						    skb->csum_offset));

	/* Add in checksum of our headers (incl. outer checksum
	 * adjustment filled in by caller) and return result.
	 */
	return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
}
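
/* Sketch of LCO in an outer-UDP transmit path (mirrors the CHECKSUM_PARTIAL
 * case of udp_set_csum(); "uh" is the outer UDP header, zeroed first so its
 * checksum field does not contribute to the sum):
 *
 *	uh->check = 0;
 *	uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
 *	if (uh->check == 0)
 *		uh->check = CSUM_MANGLED_0;
 */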

#endif
#endif	/* _LINUX_SKBUFF_H */