14#ifndef _LINUX_SKBUFF_H
15#define _LINUX_SKBUFF_H
16
17#include <linux/kernel.h>
18#include <linux/kmemcheck.h>
19#include <linux/compiler.h>
20#include <linux/time.h>
21#include <linux/bug.h>
22#include <linux/cache.h>
23#include <linux/rbtree.h>
24#include <linux/socket.h>
25
26#include <linux/atomic.h>
27#include <asm/types.h>
28#include <linux/spinlock.h>
29#include <linux/net.h>
30#include <linux/textsearch.h>
31#include <net/checksum.h>
32#include <linux/rcupdate.h>
33#include <linux/hrtimer.h>
34#include <linux/dma-mapping.h>
35#include <linux/netdev_features.h>
36#include <linux/sched.h>
37#include <net/flow_dissector.h>
38#include <linux/splice.h>
39#include <linux/in6.h>
40#include <net/flow.h>
41
216#define CHECKSUM_NONE 0
217#define CHECKSUM_UNNECESSARY 1
218#define CHECKSUM_COMPLETE 2
219#define CHECKSUM_PARTIAL 3
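/*
 * Condensed summary of the ip_summed values above (the full discussion lives
 * in the kernel's checksum-offload documentation):
 *
 *   CHECKSUM_NONE        - no checksum has been verified or computed yet;
 *                          the protocol must check it in software on receive.
 *   CHECKSUM_UNNECESSARY - the device already verified the checksum(s);
 *                          csum_level counts additional consecutive checksums
 *                          that were verified (up to SKB_MAX_CSUM_LEVEL).
 *   CHECKSUM_COMPLETE    - the device supplied the checksum of the complete
 *                          packet (from skb->data) in skb->csum; the stack
 *                          validates against it.
 *   CHECKSUM_PARTIAL     - the checksum still has to be computed on transmit;
 *                          csum_start/csum_offset tell the device (or the
 *                          software fallback) where to write it.
 */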
220
221
222#define SKB_MAX_CSUM_LEVEL 3
223
224#define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES)
225#define SKB_WITH_OVERHEAD(X) \
226 ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
227#define SKB_MAX_ORDER(X, ORDER) \
228 SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
229#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0))
230#define SKB_MAX_ALLOC (SKB_MAX_ORDER(0, 2))
231
232
233#define SKB_TRUESIZE(X) ((X) + \
234 SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \
235 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
236
237struct net_device;
238struct scatterlist;
239struct pipe_inode_info;
240struct iov_iter;
241struct napi_struct;
242
243#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
244struct nf_conntrack {
245 atomic_t use;
246};
247#endif
248
249#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
250struct nf_bridge_info {
251 atomic_t use;
252 enum {
253 BRNF_PROTO_UNCHANGED,
254 BRNF_PROTO_8021Q,
255 BRNF_PROTO_PPPOE
256 } orig_proto:8;
257 u8 pkt_otherhost:1;
258 u8 in_prerouting:1;
259 u8 bridged_dnat:1;
260 __u16 frag_max_size;
261 struct net_device *physindev;
262
263
264 struct net_device *physoutdev;
265 union {
266
267 __be32 ipv4_daddr;
268 struct in6_addr ipv6_daddr;
269
270
271
272
273
274 char neigh_header[8];
275 };
276};
277#endif
278
279struct sk_buff_head {
280
281 struct sk_buff *next;
282 struct sk_buff *prev;
283
284 __u32 qlen;
285 spinlock_t lock;
286};
287
288struct sk_buff;
289
297#if (65536/PAGE_SIZE + 1) < 16
298#define MAX_SKB_FRAGS 16UL
299#else
300#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
301#endif
302extern int sysctl_max_skb_frags;
303
304typedef struct skb_frag_struct skb_frag_t;
305
306struct skb_frag_struct {
307 struct {
308 struct page *p;
309 } page;
310#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
311 __u32 page_offset;
312 __u32 size;
313#else
314 __u16 page_offset;
315 __u16 size;
316#endif
317};
318
319static inline unsigned int skb_frag_size(const skb_frag_t *frag)
320{
321 return frag->size;
322}
323
324static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
325{
326 frag->size = size;
327}
328
329static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
330{
331 frag->size += delta;
332}
333
334static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
335{
336 frag->size -= delta;
337}
338
339#define HAVE_HW_TIME_STAMP
340
355struct skb_shared_hwtstamps {
356 ktime_t hwtstamp;
357};
358
359
360enum {
361
362 SKBTX_HW_TSTAMP = 1 << 0,
363
364
365 SKBTX_SW_TSTAMP = 1 << 1,
366
367
368 SKBTX_IN_PROGRESS = 1 << 2,
369
370
371 SKBTX_DEV_ZEROCOPY = 1 << 3,
372
373
374 SKBTX_WIFI_STATUS = 1 << 4,
375
376
377
378
379
380
381 SKBTX_SHARED_FRAG = 1 << 5,
382
383
384 SKBTX_SCHED_TSTAMP = 1 << 6,
385};
386
387#define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \
388 SKBTX_SCHED_TSTAMP)
389#define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)
390
399struct ubuf_info {
400 void (*callback)(struct ubuf_info *, bool zerocopy_success);
401 void *ctx;
402 unsigned long desc;
403};
404
405
406
407
408struct skb_shared_info {
409 unsigned char nr_frags;
410 __u8 tx_flags;
411 unsigned short gso_size;
412
413 unsigned short gso_segs;
414 unsigned short gso_type;
415 struct sk_buff *frag_list;
416 struct skb_shared_hwtstamps hwtstamps;
417 u32 tskey;
418 __be32 ip6_frag_id;
419
420
421
422
423 atomic_t dataref;
424
425
426
427 void * destructor_arg;
428
429
430 skb_frag_t frags[MAX_SKB_FRAGS];
431};
432
444#define SKB_DATAREF_SHIFT 16
445#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
446
447
448enum {
449 SKB_FCLONE_UNAVAILABLE,
450 SKB_FCLONE_ORIG,
451 SKB_FCLONE_CLONE,
452};
453
454enum {
455 SKB_GSO_TCPV4 = 1 << 0,
456 SKB_GSO_UDP = 1 << 1,
457
458
459 SKB_GSO_DODGY = 1 << 2,
460
461
462 SKB_GSO_TCP_ECN = 1 << 3,
463
464 SKB_GSO_TCP_FIXEDID = 1 << 4,
465
466 SKB_GSO_TCPV6 = 1 << 5,
467
468 SKB_GSO_FCOE = 1 << 6,
469
470 SKB_GSO_GRE = 1 << 7,
471
472 SKB_GSO_GRE_CSUM = 1 << 8,
473
474 SKB_GSO_IPXIP4 = 1 << 9,
475
476 SKB_GSO_IPXIP6 = 1 << 10,
477
478 SKB_GSO_UDP_TUNNEL = 1 << 11,
479
480 SKB_GSO_UDP_TUNNEL_CSUM = 1 << 12,
481
482 SKB_GSO_PARTIAL = 1 << 13,
483
484 SKB_GSO_TUNNEL_REMCSUM = 1 << 14,
485};
486
487#if BITS_PER_LONG > 32
488#define NET_SKBUFF_DATA_USES_OFFSET 1
489#endif
490
491#ifdef NET_SKBUFF_DATA_USES_OFFSET
492typedef unsigned int sk_buff_data_t;
493#else
494typedef unsigned char *sk_buff_data_t;
495#endif
496
497
498
499
500
501
502struct skb_mstamp {
503 union {
504 u64 v64;
505 struct {
506 u32 stamp_us;
507 u32 stamp_jiffies;
508 };
509 };
510};
511
512
513
514
515
516static inline void skb_mstamp_get(struct skb_mstamp *cl)
517{
518 u64 val = local_clock();
519
520 do_div(val, NSEC_PER_USEC);
521 cl->stamp_us = (u32)val;
522 cl->stamp_jiffies = (u32)jiffies;
523}
524
525
526
527
528
529
530static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
531 const struct skb_mstamp *t0)
532{
533 s32 delta_us = t1->stamp_us - t0->stamp_us;
534 u32 delta_jiffies = t1->stamp_jiffies - t0->stamp_jiffies;
535
536
537
538
539 if (delta_us <= 0 ||
540 delta_jiffies >= (INT_MAX / (USEC_PER_SEC / HZ)))
541
542 delta_us = jiffies_to_usecs(delta_jiffies);
543
544 return delta_us;
545}
546
547static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
548 const struct skb_mstamp *t0)
549{
550 s32 diff = t1->stamp_jiffies - t0->stamp_jiffies;
551
552 if (!diff)
553 diff = t1->stamp_us - t0->stamp_us;
554 return diff > 0;
555}
556
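/*
 * struct sk_buff is the socket buffer: the per-packet metadata object used
 * throughout the network stack.  A condensed reminder of the most commonly
 * used fields (the authoritative, field-by-field documentation is in the
 * upstream kernel source):
 *
 *   next/prev  - list linkage while the skb sits on an sk_buff_head queue
 *   sk, dev    - owning socket and the device the packet arrived on/leaves by
 *   len        - total number of packet bytes; data_len is the part held in
 *                paged fragments or the frag_list, so the linear ("head")
 *                part is len - data_len (see skb_headlen())
 *   truesize   - total buffer memory charged to the owning socket
 *   users      - reference count on this struct sk_buff itself; references on
 *                the shared data live in skb_shinfo(skb)->dataref
 *   head/data/tail/end - layout of the linear buffer: headroom is
 *                data - head, tailroom is end - tail (tail/end may be stored
 *                as offsets, see sk_buff_data_t)
 */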
626struct sk_buff {
627 union {
628 struct {
629
630 struct sk_buff *next;
631 struct sk_buff *prev;
632
633 union {
634 ktime_t tstamp;
635 struct skb_mstamp skb_mstamp;
636 };
637 };
638 struct rb_node rbnode;
639 };
640 struct sock *sk;
641 struct net_device *dev;
642
643
644
645
646
647
648
649 char cb[48] __aligned(8);
650
651 unsigned long _skb_refdst;
652 void (*destructor)(struct sk_buff *skb);
653#ifdef CONFIG_XFRM
654 struct sec_path *sp;
655#endif
656#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
657 struct nf_conntrack *nfct;
658#endif
659#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
660 struct nf_bridge_info *nf_bridge;
661#endif
662 unsigned int len,
663 data_len;
664 __u16 mac_len,
665 hdr_len;
666
667
668
669
670 kmemcheck_bitfield_begin(flags1);
671 __u16 queue_mapping;
672 __u8 cloned:1,
673 nohdr:1,
674 fclone:2,
675 peeked:1,
676 head_frag:1,
677 xmit_more:1;
678
679 kmemcheck_bitfield_end(flags1);
680
681
682
683
684
685 __u32 headers_start[0];
686
687
688
689#ifdef __BIG_ENDIAN_BITFIELD
690#define PKT_TYPE_MAX (7 << 5)
691#else
692#define PKT_TYPE_MAX 7
693#endif
694#define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset)
695
696 __u8 __pkt_type_offset[0];
697 __u8 pkt_type:3;
698 __u8 pfmemalloc:1;
699 __u8 ignore_df:1;
700 __u8 nfctinfo:3;
701
702 __u8 nf_trace:1;
703 __u8 ip_summed:2;
704 __u8 ooo_okay:1;
705 __u8 l4_hash:1;
706 __u8 sw_hash:1;
707 __u8 wifi_acked_valid:1;
708 __u8 wifi_acked:1;
709
710 __u8 no_fcs:1;
711
712 __u8 encapsulation:1;
713 __u8 encap_hdr_csum:1;
714 __u8 csum_valid:1;
715 __u8 csum_complete_sw:1;
716 __u8 csum_level:2;
717 __u8 csum_bad:1;
718
719#ifdef CONFIG_IPV6_NDISC_NODETYPE
720 __u8 ndisc_nodetype:2;
721#endif
722 __u8 ipvs_property:1;
723 __u8 inner_protocol_type:1;
724 __u8 remcsum_offload:1;
725
726
727#ifdef CONFIG_NET_SCHED
728 __u16 tc_index;
729#ifdef CONFIG_NET_CLS_ACT
730 __u16 tc_verd;
731#endif
732#endif
733
734 union {
735 __wsum csum;
736 struct {
737 __u16 csum_start;
738 __u16 csum_offset;
739 };
740 };
741 __u32 priority;
742 int skb_iif;
743 __u32 hash;
744 __be16 vlan_proto;
745 __u16 vlan_tci;
746#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
747 union {
748 unsigned int napi_id;
749 unsigned int sender_cpu;
750 };
751#endif
752 union {
753#ifdef CONFIG_NETWORK_SECMARK
754 __u32 secmark;
755#endif
756#ifdef CONFIG_NET_SWITCHDEV
757 __u32 offload_fwd_mark;
758#endif
759 };
760
761 union {
762 __u32 mark;
763 __u32 reserved_tailroom;
764 };
765
766 union {
767 __be16 inner_protocol;
768 __u8 inner_ipproto;
769 };
770
771 __u16 inner_transport_header;
772 __u16 inner_network_header;
773 __u16 inner_mac_header;
774
775 __be16 protocol;
776 __u16 transport_header;
777 __u16 network_header;
778 __u16 mac_header;
779
780
781 __u32 headers_end[0];
782
783
784
785 sk_buff_data_t tail;
786 sk_buff_data_t end;
787 unsigned char *head,
788 *data;
789 unsigned int truesize;
790 atomic_t users;
791};
792
793#ifdef __KERNEL__
794
795
796
797#include <linux/slab.h>
798
799
800#define SKB_ALLOC_FCLONE 0x01
801#define SKB_ALLOC_RX 0x02
802#define SKB_ALLOC_NAPI 0x04
803
804
805static inline bool skb_pfmemalloc(const struct sk_buff *skb)
806{
807 return unlikely(skb->pfmemalloc);
808}
809
810
811
812
813
814#define SKB_DST_NOREF 1UL
815#define SKB_DST_PTRMASK ~(SKB_DST_NOREF)
816
823static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
824{
825
826
827
828 WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
829 !rcu_read_lock_held() &&
830 !rcu_read_lock_bh_held());
831 return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
832}
833
842static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
843{
844 skb->_skb_refdst = (unsigned long)dst;
845}
846
857static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
858{
859 WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
860 skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
861}
862
863
864
865
866
867static inline bool skb_dst_is_noref(const struct sk_buff *skb)
868{
869 return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
870}
871
872static inline struct rtable *skb_rtable(const struct sk_buff *skb)
873{
874 return (struct rtable *)skb_dst(skb);
875}
876
877void kfree_skb(struct sk_buff *skb);
878void kfree_skb_list(struct sk_buff *segs);
879void skb_tx_error(struct sk_buff *skb);
880void consume_skb(struct sk_buff *skb);
881void __kfree_skb(struct sk_buff *skb);
882extern struct kmem_cache *skbuff_head_cache;
883
884void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
885bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
886 bool *fragstolen, int *delta_truesize);
887
888struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
889 int node);
890struct sk_buff *__build_skb(void *data, unsigned int frag_size);
891struct sk_buff *build_skb(void *data, unsigned int frag_size);
892static inline struct sk_buff *alloc_skb(unsigned int size,
893 gfp_t priority)
894{
895 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
896}
897
898struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
899 unsigned long data_len,
900 int max_page_order,
901 int *errcode,
902 gfp_t gfp_mask);
903
904
905struct sk_buff_fclones {
906 struct sk_buff skb1;
907
908 struct sk_buff skb2;
909
910 atomic_t fclone_ref;
911};
912
921static inline bool skb_fclone_busy(const struct sock *sk,
922 const struct sk_buff *skb)
923{
924 const struct sk_buff_fclones *fclones;
925
926 fclones = container_of(skb, struct sk_buff_fclones, skb1);
927
928 return skb->fclone == SKB_FCLONE_ORIG &&
929 atomic_read(&fclones->fclone_ref) > 1 &&
930 fclones->skb2.sk == sk;
931}
932
933static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
934 gfp_t priority)
935{
936 return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
937}
938
939struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
940static inline struct sk_buff *alloc_skb_head(gfp_t priority)
941{
942 return __alloc_skb_head(priority, -1);
943}
944
945struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
946int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
947struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
948struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
949struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
950 gfp_t gfp_mask, bool fclone);
951static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
952 gfp_t gfp_mask)
953{
954 return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
955}
956
957int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
958struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
959 unsigned int headroom);
960struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
961 int newtailroom, gfp_t priority);
962int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
963 int offset, int len);
964int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
965 int len);
966int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
967int skb_pad(struct sk_buff *skb, int pad);
968#define dev_kfree_skb(a) consume_skb(a)
969
970int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
971 int getfrag(void *from, char *to, int offset,
972 int len, int odd, struct sk_buff *skb),
973 void *from, int length);
974
975int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
976 int offset, size_t size);
977
978struct skb_seq_state {
979 __u32 lower_offset;
980 __u32 upper_offset;
981 __u32 frag_idx;
982 __u32 stepped_offset;
983 struct sk_buff *root_skb;
984 struct sk_buff *cur_skb;
985 __u8 *frag_data;
986};
987
988void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
989 unsigned int to, struct skb_seq_state *st);
990unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
991 struct skb_seq_state *st);
992void skb_abort_seq_read(struct skb_seq_state *st);
993
994unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
995 unsigned int to, struct ts_config *config);
996
1023enum pkt_hash_types {
1024 PKT_HASH_TYPE_NONE,
1025 PKT_HASH_TYPE_L2,
1026 PKT_HASH_TYPE_L3,
1027 PKT_HASH_TYPE_L4,
1028};
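/*
 * PKT_HASH_TYPE_L4 means the hash covers addresses and ports (a full flow
 * hash, suitable for steering), L3 covers addresses only, L2 is taken from
 * link-layer fields.  When no usable hardware hash is present,
 * skb_get_hash() below computes a software flow hash on demand and marks it
 * with sw_hash/l4_hash accordingly.
 */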
1029
1030static inline void skb_clear_hash(struct sk_buff *skb)
1031{
1032 skb->hash = 0;
1033 skb->sw_hash = 0;
1034 skb->l4_hash = 0;
1035}
1036
1037static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
1038{
1039 if (!skb->l4_hash)
1040 skb_clear_hash(skb);
1041}
1042
1043static inline void
1044__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
1045{
1046 skb->l4_hash = is_l4;
1047 skb->sw_hash = is_sw;
1048 skb->hash = hash;
1049}
1050
1051static inline void
1052skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
1053{
1054
1055 __skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
1056}
1057
1058static inline void
1059__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
1060{
1061 __skb_set_hash(skb, hash, true, is_l4);
1062}
1063
1064void __skb_get_hash(struct sk_buff *skb);
1065u32 __skb_get_hash_symmetric(struct sk_buff *skb);
1066u32 skb_get_poff(const struct sk_buff *skb);
1067u32 __skb_get_poff(const struct sk_buff *skb, void *data,
1068 const struct flow_keys *keys, int hlen);
1069__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
1070 void *data, int hlen_proto);
1071
1072static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
1073 int thoff, u8 ip_proto)
1074{
1075 return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
1076}
1077
1078void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
1079 const struct flow_dissector_key *key,
1080 unsigned int key_count);
1081
1082bool __skb_flow_dissect(const struct sk_buff *skb,
1083 struct flow_dissector *flow_dissector,
1084 void *target_container,
1085 void *data, __be16 proto, int nhoff, int hlen,
1086 unsigned int flags);
1087
1088static inline bool skb_flow_dissect(const struct sk_buff *skb,
1089 struct flow_dissector *flow_dissector,
1090 void *target_container, unsigned int flags)
1091{
1092 return __skb_flow_dissect(skb, flow_dissector, target_container,
1093 NULL, 0, 0, 0, flags);
1094}
1095
1096static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
1097 struct flow_keys *flow,
1098 unsigned int flags)
1099{
1100 memset(flow, 0, sizeof(*flow));
1101 return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
1102 NULL, 0, 0, 0, flags);
1103}
1104
1105static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
1106 void *data, __be16 proto,
1107 int nhoff, int hlen,
1108 unsigned int flags)
1109{
1110 memset(flow, 0, sizeof(*flow));
1111 return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
1112 data, proto, nhoff, hlen, flags);
1113}
1114
1115static inline __u32 skb_get_hash(struct sk_buff *skb)
1116{
1117 if (!skb->l4_hash && !skb->sw_hash)
1118 __skb_get_hash(skb);
1119
1120 return skb->hash;
1121}
1122
1123__u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6);
1124
1125static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
1126{
1127 if (!skb->l4_hash && !skb->sw_hash) {
1128 struct flow_keys keys;
1129 __u32 hash = __get_hash_from_flowi6(fl6, &keys);
1130
1131 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
1132 }
1133
1134 return skb->hash;
1135}
1136
1137__u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl);
1138
1139static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4)
1140{
1141 if (!skb->l4_hash && !skb->sw_hash) {
1142 struct flow_keys keys;
1143 __u32 hash = __get_hash_from_flowi4(fl4, &keys);
1144
1145 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
1146 }
1147
1148 return skb->hash;
1149}
1150
1151__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
1152
1153static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
1154{
1155 return skb->hash;
1156}
1157
1158static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
1159{
1160 to->hash = from->hash;
1161 to->sw_hash = from->sw_hash;
1162 to->l4_hash = from->l4_hash;
1163};
1164
1165#ifdef NET_SKBUFF_DATA_USES_OFFSET
1166static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
1167{
1168 return skb->head + skb->end;
1169}
1170
1171static inline unsigned int skb_end_offset(const struct sk_buff *skb)
1172{
1173 return skb->end;
1174}
1175#else
1176static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
1177{
1178 return skb->end;
1179}
1180
1181static inline unsigned int skb_end_offset(const struct sk_buff *skb)
1182{
1183 return skb->end - skb->head;
1184}
1185#endif
1186
1187
1188#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
1189
1190static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
1191{
1192 return &skb_shinfo(skb)->hwtstamps;
1193}
1194
1195
1196
1197
1198
1199
1200
1201static inline int skb_queue_empty(const struct sk_buff_head *list)
1202{
1203 return list->next == (const struct sk_buff *) list;
1204}
1205
1206
1207
1208
1209
1210
1211
1212
1213static inline bool skb_queue_is_last(const struct sk_buff_head *list,
1214 const struct sk_buff *skb)
1215{
1216 return skb->next == (const struct sk_buff *) list;
1217}
1218
1219
1220
1221
1222
1223
1224
1225
1226static inline bool skb_queue_is_first(const struct sk_buff_head *list,
1227 const struct sk_buff *skb)
1228{
1229 return skb->prev == (const struct sk_buff *) list;
1230}
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
1241 const struct sk_buff *skb)
1242{
1243
1244
1245
1246 BUG_ON(skb_queue_is_last(list, skb));
1247 return skb->next;
1248}
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
1259 const struct sk_buff *skb)
1260{
1261
1262
1263
1264 BUG_ON(skb_queue_is_first(list, skb));
1265 return skb->prev;
1266}
1267
1268
1269
1270
1271
1272
1273
1274
1275static inline struct sk_buff *skb_get(struct sk_buff *skb)
1276{
1277 atomic_inc(&skb->users);
1278 return skb;
1279}
1280
1294static inline int skb_cloned(const struct sk_buff *skb)
1295{
1296 return skb->cloned &&
1297 (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
1298}
1299
1300static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
1301{
1302 might_sleep_if(gfpflags_allow_blocking(pri));
1303
1304 if (skb_cloned(skb))
1305 return pskb_expand_head(skb, 0, 0, pri);
1306
1307 return 0;
1308}
1309
1310
1311
1312
1313
1314
1315
1316
1317static inline int skb_header_cloned(const struct sk_buff *skb)
1318{
1319 int dataref;
1320
1321 if (!skb->cloned)
1322 return 0;
1323
1324 dataref = atomic_read(&skb_shinfo(skb)->dataref);
1325 dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
1326 return dataref != 1;
1327}
1328
1329static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
1330{
1331 might_sleep_if(gfpflags_allow_blocking(pri));
1332
1333 if (skb_header_cloned(skb))
1334 return pskb_expand_head(skb, 0, 0, pri);
1335
1336 return 0;
1337}
1338
1348static inline void skb_header_release(struct sk_buff *skb)
1349{
1350 BUG_ON(skb->nohdr);
1351 skb->nohdr = 1;
1352 atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
1353}
1354
1355
1356
1357
1358
1359
1360
1361
1362static inline void __skb_header_release(struct sk_buff *skb)
1363{
1364 skb->nohdr = 1;
1365 atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
1366}
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376static inline int skb_shared(const struct sk_buff *skb)
1377{
1378 return atomic_read(&skb->users) != 1;
1379}
1380
1394static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
1395{
1396 might_sleep_if(gfpflags_allow_blocking(pri));
1397 if (skb_shared(skb)) {
1398 struct sk_buff *nskb = skb_clone(skb, pri);
1399
1400 if (likely(nskb))
1401 consume_skb(skb);
1402 else
1403 kfree_skb(skb);
1404 skb = nskb;
1405 }
1406 return skb;
1407}
1408
1409
1429static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
1430 gfp_t pri)
1431{
1432 might_sleep_if(gfpflags_allow_blocking(pri));
1433 if (skb_cloned(skb)) {
1434 struct sk_buff *nskb = skb_copy(skb, pri);
1435
1436
1437 if (likely(nskb))
1438 consume_skb(skb);
1439 else
1440 kfree_skb(skb);
1441 skb = nskb;
1442 }
1443 return skb;
1444}
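/*
 * skb_shared() (users > 1) means several owners hold a pointer to the same
 * struct sk_buff; skb_cloned() means the packet data is shared between
 * clones.  skb_share_check() only guarantees a private struct sk_buff (the
 * data may still be shared), while skb_unshare() also gives the caller a
 * private copy of the data.  Typical pattern in a receive handler
 * (illustrative sketch):
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return;		clone failed, the original was freed
 *	...safe to modify the skb metadata from here on...
 */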
1445
1459static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
1460{
1461 struct sk_buff *skb = list_->next;
1462
1463 if (skb == (struct sk_buff *)list_)
1464 skb = NULL;
1465 return skb;
1466}
1467
1477static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
1478 const struct sk_buff_head *list_)
1479{
1480 struct sk_buff *next = skb->next;
1481
1482 if (next == (struct sk_buff *)list_)
1483 next = NULL;
1484 return next;
1485}
1486
1500static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
1501{
1502 struct sk_buff *skb = list_->prev;
1503
1504 if (skb == (struct sk_buff *)list_)
1505 skb = NULL;
1506 return skb;
1508}
1509
1510
1511
1512
1513
1514
1515
1516static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
1517{
1518 return list_->qlen;
1519}
1520
1531static inline void __skb_queue_head_init(struct sk_buff_head *list)
1532{
1533 list->prev = list->next = (struct sk_buff *)list;
1534 list->qlen = 0;
1535}
1536
1545static inline void skb_queue_head_init(struct sk_buff_head *list)
1546{
1547 spin_lock_init(&list->lock);
1548 __skb_queue_head_init(list);
1549}
1550
1551static inline void skb_queue_head_init_class(struct sk_buff_head *list,
1552 struct lock_class_key *class)
1553{
1554 skb_queue_head_init(list);
1555 lockdep_set_class(&list->lock, class);
1556}
1557
1558
1559
1560
1561
1562
1563
1564void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
1565 struct sk_buff_head *list);
1566static inline void __skb_insert(struct sk_buff *newsk,
1567 struct sk_buff *prev, struct sk_buff *next,
1568 struct sk_buff_head *list)
1569{
1570 newsk->next = next;
1571 newsk->prev = prev;
1572 next->prev = prev->next = newsk;
1573 list->qlen++;
1574}
1575
1576static inline void __skb_queue_splice(const struct sk_buff_head *list,
1577 struct sk_buff *prev,
1578 struct sk_buff *next)
1579{
1580 struct sk_buff *first = list->next;
1581 struct sk_buff *last = list->prev;
1582
1583 first->prev = prev;
1584 prev->next = first;
1585
1586 last->next = next;
1587 next->prev = last;
1588}
1589
1590
1591
1592
1593
1594
1595static inline void skb_queue_splice(const struct sk_buff_head *list,
1596 struct sk_buff_head *head)
1597{
1598 if (!skb_queue_empty(list)) {
1599 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
1600 head->qlen += list->qlen;
1601 }
1602}
1603
1604
1605
1606
1607
1608
1609
1610
1611static inline void skb_queue_splice_init(struct sk_buff_head *list,
1612 struct sk_buff_head *head)
1613{
1614 if (!skb_queue_empty(list)) {
1615 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
1616 head->qlen += list->qlen;
1617 __skb_queue_head_init(list);
1618 }
1619}
1620
1621
1622
1623
1624
1625
1626static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
1627 struct sk_buff_head *head)
1628{
1629 if (!skb_queue_empty(list)) {
1630 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
1631 head->qlen += list->qlen;
1632 }
1633}
1634
1643static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
1644 struct sk_buff_head *head)
1645{
1646 if (!skb_queue_empty(list)) {
1647 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
1648 head->qlen += list->qlen;
1649 __skb_queue_head_init(list);
1650 }
1651}
1652
1664static inline void __skb_queue_after(struct sk_buff_head *list,
1665 struct sk_buff *prev,
1666 struct sk_buff *newsk)
1667{
1668 __skb_insert(newsk, prev, prev->next, list);
1669}
1670
1671void skb_append(struct sk_buff *old, struct sk_buff *newsk,
1672 struct sk_buff_head *list);
1673
1674static inline void __skb_queue_before(struct sk_buff_head *list,
1675 struct sk_buff *next,
1676 struct sk_buff *newsk)
1677{
1678 __skb_insert(newsk, next->prev, next, list);
1679}
1680
1691void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
1692static inline void __skb_queue_head(struct sk_buff_head *list,
1693 struct sk_buff *newsk)
1694{
1695 __skb_queue_after(list, (struct sk_buff *)list, newsk);
1696}
1697
1708void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
1709static inline void __skb_queue_tail(struct sk_buff_head *list,
1710 struct sk_buff *newsk)
1711{
1712 __skb_queue_before(list, (struct sk_buff *)list, newsk);
1713}
1714
1715
1716
1717
1718
1719void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
1720static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
1721{
1722 struct sk_buff *next, *prev;
1723
1724 list->qlen--;
1725 next = skb->next;
1726 prev = skb->prev;
1727 skb->next = skb->prev = NULL;
1728 next->prev = prev;
1729 prev->next = next;
1730}
1731
1740struct sk_buff *skb_dequeue(struct sk_buff_head *list);
1741static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
1742{
1743 struct sk_buff *skb = skb_peek(list);
1744 if (skb)
1745 __skb_unlink(skb, list);
1746 return skb;
1747}
1748
1757struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
1758static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
1759{
1760 struct sk_buff *skb = skb_peek_tail(list);
1761 if (skb)
1762 __skb_unlink(skb, list);
1763 return skb;
1764}
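/*
 * Example (illustrative sketch): a private queue used with the locked
 * helpers declared above; "some_skb" stands for a buffer the caller owns:
 *
 *	struct sk_buff_head q;
 *	struct sk_buff *skb;
 *
 *	skb_queue_head_init(&q);
 *	skb_queue_tail(&q, some_skb);		append under q.lock
 *	while ((skb = skb_dequeue(&q)) != NULL)
 *		kfree_skb(skb);			drain the queue
 */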
1765
1766
1767static inline bool skb_is_nonlinear(const struct sk_buff *skb)
1768{
1769 return skb->data_len;
1770}
1771
1772static inline unsigned int skb_headlen(const struct sk_buff *skb)
1773{
1774 return skb->len - skb->data_len;
1775}
1776
1777static inline int skb_pagelen(const struct sk_buff *skb)
1778{
1779 int i, len = 0;
1780
1781 for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
1782 len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
1783 return len + skb_headlen(skb);
1784}
1785
1799static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
1800 struct page *page, int off, int size)
1801{
1802 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1803
1804
1805
1806
1807
1808
1809 frag->page.p = page;
1810 frag->page_offset = off;
1811 skb_frag_size_set(frag, size);
1812
1813 page = compound_head(page);
1814 if (page_is_pfmemalloc(page))
1815 skb->pfmemalloc = true;
1816}
1817
1832static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
1833 struct page *page, int off, int size)
1834{
1835 __skb_fill_page_desc(skb, i, page, off, size);
1836 skb_shinfo(skb)->nr_frags = i + 1;
1837}
1838
1839void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
1840 int size, unsigned int truesize);
1841
1842void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
1843 unsigned int truesize);
1844
1845#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags)
1846#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frag_list(skb))
1847#define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
1848
1849#ifdef NET_SKBUFF_DATA_USES_OFFSET
1850static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
1851{
1852 return skb->head + skb->tail;
1853}
1854
1855static inline void skb_reset_tail_pointer(struct sk_buff *skb)
1856{
1857 skb->tail = skb->data - skb->head;
1858}
1859
1860static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
1861{
1862 skb_reset_tail_pointer(skb);
1863 skb->tail += offset;
1864}
1865
1866#else
1867static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
1868{
1869 return skb->tail;
1870}
1871
1872static inline void skb_reset_tail_pointer(struct sk_buff *skb)
1873{
1874 skb->tail = skb->data;
1875}
1876
1877static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
1878{
1879 skb->tail = skb->data + offset;
1880}
1881
1882#endif
1883
1884
1885
1886
1887unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
1888unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
1889static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
1890{
1891 unsigned char *tmp = skb_tail_pointer(skb);
1892 SKB_LINEAR_ASSERT(skb);
1893 skb->tail += len;
1894 skb->len += len;
1895 return tmp;
1896}
1897
1898unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
1899static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
1900{
1901 skb->data -= len;
1902 skb->len += len;
1903 return skb->data;
1904}
1905
1906unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
1907static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
1908{
1909 skb->len -= len;
1910 BUG_ON(skb->len < skb->data_len);
1911 return skb->data += len;
1912}
1913
1914static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
1915{
1916 return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
1917}
1918
1919unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
1920
1921static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
1922{
1923 if (len > skb_headlen(skb) &&
1924 !__pskb_pull_tail(skb, len - skb_headlen(skb)))
1925 return NULL;
1926 skb->len -= len;
1927 return skb->data += len;
1928}
1929
1930static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
1931{
1932 return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
1933}
1934
1935static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
1936{
1937 if (likely(len <= skb_headlen(skb)))
1938 return 1;
1939 if (unlikely(len > skb->len))
1940 return 0;
1941 return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
1942}
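/*
 * Example (illustrative sketch): the usual way to parse a header that might
 * not be in the linear area yet; "struct my_hdr" is a made-up protocol
 * header:
 *
 *	if (!pskb_may_pull(skb, sizeof(struct my_hdr)))
 *		goto drop;			packet too short
 *	hdr = (struct my_hdr *)skb->data;
 *
 * After a successful pskb_may_pull() the first sizeof(struct my_hdr) bytes
 * are guaranteed to be in the linear data and may be dereferenced directly.
 */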
1943
1944
1945
1946
1947
1948
1949
1950static inline unsigned int skb_headroom(const struct sk_buff *skb)
1951{
1952 return skb->data - skb->head;
1953}
1954
1955
1956
1957
1958
1959
1960
1961static inline int skb_tailroom(const struct sk_buff *skb)
1962{
1963 return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
1964}
1965
1966
1967
1968
1969
1970
1971
1972
1973static inline int skb_availroom(const struct sk_buff *skb)
1974{
1975 if (skb_is_nonlinear(skb))
1976 return 0;
1977
1978 return skb->end - skb->tail - skb->reserved_tailroom;
1979}
1980
1989static inline void skb_reserve(struct sk_buff *skb, int len)
1990{
1991 skb->data += len;
1992 skb->tail += len;
1993}
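/*
 * Example (illustrative sketch): the classic pattern for building a packet,
 * with hdr_len/data_len/payload assumed to be provided by the caller:
 *
 *	skb = alloc_skb(hdr_len + data_len, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hdr_len);			leave headroom
 *	memcpy(skb_put(skb, data_len), payload, data_len);
 *	...protocol headers are later prepended with skb_push()...
 */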
1994
2007static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
2008 unsigned int needed_tailroom)
2009{
2010 SKB_LINEAR_ASSERT(skb);
2011 if (mtu < skb_tailroom(skb) - needed_tailroom)
2012
2013 skb->reserved_tailroom = skb_tailroom(skb) - mtu;
2014 else
2015
2016 skb->reserved_tailroom = needed_tailroom;
2017}
2018
2019#define ENCAP_TYPE_ETHER 0
2020#define ENCAP_TYPE_IPPROTO 1
2021
2022static inline void skb_set_inner_protocol(struct sk_buff *skb,
2023 __be16 protocol)
2024{
2025 skb->inner_protocol = protocol;
2026 skb->inner_protocol_type = ENCAP_TYPE_ETHER;
2027}
2028
2029static inline void skb_set_inner_ipproto(struct sk_buff *skb,
2030 __u8 ipproto)
2031{
2032 skb->inner_ipproto = ipproto;
2033 skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
2034}
2035
2036static inline void skb_reset_inner_headers(struct sk_buff *skb)
2037{
2038 skb->inner_mac_header = skb->mac_header;
2039 skb->inner_network_header = skb->network_header;
2040 skb->inner_transport_header = skb->transport_header;
2041}
2042
2043static inline void skb_reset_mac_len(struct sk_buff *skb)
2044{
2045 skb->mac_len = skb->network_header - skb->mac_header;
2046}
2047
2048static inline unsigned char *skb_inner_transport_header(const struct sk_buff
2049 *skb)
2050{
2051 return skb->head + skb->inner_transport_header;
2052}
2053
2054static inline int skb_inner_transport_offset(const struct sk_buff *skb)
2055{
2056 return skb_inner_transport_header(skb) - skb->data;
2057}
2058
2059static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
2060{
2061 skb->inner_transport_header = skb->data - skb->head;
2062}
2063
2064static inline void skb_set_inner_transport_header(struct sk_buff *skb,
2065 const int offset)
2066{
2067 skb_reset_inner_transport_header(skb);
2068 skb->inner_transport_header += offset;
2069}
2070
2071static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
2072{
2073 return skb->head + skb->inner_network_header;
2074}
2075
2076static inline void skb_reset_inner_network_header(struct sk_buff *skb)
2077{
2078 skb->inner_network_header = skb->data - skb->head;
2079}
2080
2081static inline void skb_set_inner_network_header(struct sk_buff *skb,
2082 const int offset)
2083{
2084 skb_reset_inner_network_header(skb);
2085 skb->inner_network_header += offset;
2086}
2087
2088static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
2089{
2090 return skb->head + skb->inner_mac_header;
2091}
2092
2093static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
2094{
2095 skb->inner_mac_header = skb->data - skb->head;
2096}
2097
2098static inline void skb_set_inner_mac_header(struct sk_buff *skb,
2099 const int offset)
2100{
2101 skb_reset_inner_mac_header(skb);
2102 skb->inner_mac_header += offset;
2103}
2104static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
2105{
2106 return skb->transport_header != (typeof(skb->transport_header))~0U;
2107}
2108
2109static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
2110{
2111 return skb->head + skb->transport_header;
2112}
2113
2114static inline void skb_reset_transport_header(struct sk_buff *skb)
2115{
2116 skb->transport_header = skb->data - skb->head;
2117}
2118
2119static inline void skb_set_transport_header(struct sk_buff *skb,
2120 const int offset)
2121{
2122 skb_reset_transport_header(skb);
2123 skb->transport_header += offset;
2124}
2125
2126static inline unsigned char *skb_network_header(const struct sk_buff *skb)
2127{
2128 return skb->head + skb->network_header;
2129}
2130
2131static inline void skb_reset_network_header(struct sk_buff *skb)
2132{
2133 skb->network_header = skb->data - skb->head;
2134}
2135
2136static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
2137{
2138 skb_reset_network_header(skb);
2139 skb->network_header += offset;
2140}
2141
2142static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
2143{
2144 return skb->head + skb->mac_header;
2145}
2146
2147static inline int skb_mac_header_was_set(const struct sk_buff *skb)
2148{
2149 return skb->mac_header != (typeof(skb->mac_header))~0U;
2150}
2151
2152static inline void skb_reset_mac_header(struct sk_buff *skb)
2153{
2154 skb->mac_header = skb->data - skb->head;
2155}
2156
2157static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
2158{
2159 skb_reset_mac_header(skb);
2160 skb->mac_header += offset;
2161}
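/*
 * Example (illustrative sketch): recording header offsets while walking a
 * received Ethernet frame by hand (drivers normally let eth_type_trans()
 * do this); 14 is the Ethernet header length:
 *
 *	skb_reset_mac_header(skb);
 *	skb_pull(skb, 14);		advance past the link-layer header
 *	skb_reset_network_header(skb);
 */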
2162
2163static inline void skb_pop_mac_header(struct sk_buff *skb)
2164{
2165 skb->mac_header = skb->network_header;
2166}
2167
2168static inline void skb_probe_transport_header(struct sk_buff *skb,
2169 const int offset_hint)
2170{
2171 struct flow_keys keys;
2172
2173 if (skb_transport_header_was_set(skb))
2174 return;
2175 else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
2176 skb_set_transport_header(skb, keys.control.thoff);
2177 else
2178 skb_set_transport_header(skb, offset_hint);
2179}
2180
2181static inline void skb_mac_header_rebuild(struct sk_buff *skb)
2182{
2183 if (skb_mac_header_was_set(skb)) {
2184 const unsigned char *old_mac = skb_mac_header(skb);
2185
2186 skb_set_mac_header(skb, -skb->mac_len);
2187 memmove(skb_mac_header(skb), old_mac, skb->mac_len);
2188 }
2189}
2190
2191static inline int skb_checksum_start_offset(const struct sk_buff *skb)
2192{
2193 return skb->csum_start - skb_headroom(skb);
2194}
2195
2196static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
2197{
2198 return skb->head + skb->csum_start;
2199}
2200
2201static inline int skb_transport_offset(const struct sk_buff *skb)
2202{
2203 return skb_transport_header(skb) - skb->data;
2204}
2205
2206static inline u32 skb_network_header_len(const struct sk_buff *skb)
2207{
2208 return skb->transport_header - skb->network_header;
2209}
2210
2211static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
2212{
2213 return skb->inner_transport_header - skb->inner_network_header;
2214}
2215
2216static inline int skb_network_offset(const struct sk_buff *skb)
2217{
2218 return skb_network_header(skb) - skb->data;
2219}
2220
2221static inline int skb_inner_network_offset(const struct sk_buff *skb)
2222{
2223 return skb_inner_network_header(skb) - skb->data;
2224}
2225
2226static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
2227{
2228 return pskb_may_pull(skb, skb_network_offset(skb) + len);
2229}
2230
2251#ifndef NET_IP_ALIGN
2252#define NET_IP_ALIGN 2
2253#endif
2254
2275#ifndef NET_SKB_PAD
2276#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
2277#endif
2278
2279int ___pskb_trim(struct sk_buff *skb, unsigned int len);
2280
2281static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
2282{
2283 if (unlikely(skb_is_nonlinear(skb))) {
2284 WARN_ON(1);
2285 return;
2286 }
2287 skb->len = len;
2288 skb_set_tail_pointer(skb, len);
2289}
2290
2291void skb_trim(struct sk_buff *skb, unsigned int len);
2292
2293static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
2294{
2295 if (skb->data_len)
2296 return ___pskb_trim(skb, len);
2297 __skb_trim(skb, len);
2298 return 0;
2299}
2300
2301static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
2302{
2303 return (len < skb->len) ? __pskb_trim(skb, len) : 0;
2304}
2305
2315static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
2316{
2317 int err = pskb_trim(skb, len);
2318 BUG_ON(err);
2319}
2320
2329static inline void skb_orphan(struct sk_buff *skb)
2330{
2331 if (skb->destructor) {
2332 skb->destructor(skb);
2333 skb->destructor = NULL;
2334 skb->sk = NULL;
2335 } else {
2336 BUG_ON(skb->sk);
2337 }
2338}
2339
2349static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
2350{
2351 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
2352 return 0;
2353 return skb_copy_ubufs(skb, gfp_mask);
2354}
2355
2364void skb_queue_purge(struct sk_buff_head *list);
2365static inline void __skb_queue_purge(struct sk_buff_head *list)
2366{
2367 struct sk_buff *skb;
2368 while ((skb = __skb_dequeue(list)) != NULL)
2369 kfree_skb(skb);
2370}
2371
2372void *netdev_alloc_frag(unsigned int fragsz);
2373
2374struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
2375 gfp_t gfp_mask);
2376
2390static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
2391 unsigned int length)
2392{
2393 return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
2394}
2395
2396
2397static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
2398 gfp_t gfp_mask)
2399{
2400 return __netdev_alloc_skb(NULL, length, gfp_mask);
2401}
2402
2403
2404static inline struct sk_buff *dev_alloc_skb(unsigned int length)
2405{
2406 return netdev_alloc_skb(NULL, length);
2407}
2408
2409
2410static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
2411 unsigned int length, gfp_t gfp)
2412{
2413 struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
2414
2415 if (NET_IP_ALIGN && skb)
2416 skb_reserve(skb, NET_IP_ALIGN);
2417 return skb;
2418}
2419
2420static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
2421 unsigned int length)
2422{
2423 return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
2424}
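/*
 * Example (illustrative sketch): RX buffer handling in a driver that copies
 * a received frame out of hardware; rx_buf/rx_len are assumed to come from
 * the device, and eth_type_trans()/netif_rx() are declared elsewhere
 * (etherdevice.h/netdevice.h):
 *
 *	skb = netdev_alloc_skb_ip_align(dev, rx_len);
 *	if (unlikely(!skb))
 *		return;				drop on allocation failure
 *	memcpy(skb_put(skb, rx_len), rx_buf, rx_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */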
2425
2426static inline void skb_free_frag(void *addr)
2427{
2428 __free_page_frag(addr);
2429}
2430
2431void *napi_alloc_frag(unsigned int fragsz);
2432struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
2433 unsigned int length, gfp_t gfp_mask);
2434static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
2435 unsigned int length)
2436{
2437 return __napi_alloc_skb(napi, length, GFP_ATOMIC);
2438}
2439void napi_consume_skb(struct sk_buff *skb, int budget);
2440
2441void __kfree_skb_flush(void);
2442void __kfree_skb_defer(struct sk_buff *skb);
2443
2453static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
2454 unsigned int order)
2455{
2456
2457
2458
2459
2460
2461
2462
2463
2464 gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC;
2465
2466 return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
2467}
2468
2469static inline struct page *dev_alloc_pages(unsigned int order)
2470{
2471 return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
2472}
2473
2482static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
2483{
2484 return __dev_alloc_pages(gfp_mask, 0);
2485}
2486
2487static inline struct page *dev_alloc_page(void)
2488{
2489 return dev_alloc_pages(0);
2490}
2491
2492
2493
2494
2495
2496
2497static inline void skb_propagate_pfmemalloc(struct page *page,
2498 struct sk_buff *skb)
2499{
2500 if (page_is_pfmemalloc(page))
2501 skb->pfmemalloc = true;
2502}
2503
2504
2505
2506
2507
2508
2509
2510static inline struct page *skb_frag_page(const skb_frag_t *frag)
2511{
2512 return frag->page.p;
2513}
2514
2515
2516
2517
2518
2519
2520
2521static inline void __skb_frag_ref(skb_frag_t *frag)
2522{
2523 get_page(skb_frag_page(frag));
2524}
2525
2526
2527
2528
2529
2530
2531
2532
2533static inline void skb_frag_ref(struct sk_buff *skb, int f)
2534{
2535 __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
2536}
2537
2538
2539
2540
2541
2542
2543
2544static inline void __skb_frag_unref(skb_frag_t *frag)
2545{
2546 put_page(skb_frag_page(frag));
2547}
2548
2549
2550
2551
2552
2553
2554
2555
2556static inline void skb_frag_unref(struct sk_buff *skb, int f)
2557{
2558 __skb_frag_unref(&skb_shinfo(skb)->frags[f]);
2559}
2560
2561
2562
2563
2564
2565
2566
2567
2568static inline void *skb_frag_address(const skb_frag_t *frag)
2569{
2570 return page_address(skb_frag_page(frag)) + frag->page_offset;
2571}
2572
2573
2574
2575
2576
2577
2578
2579
2580static inline void *skb_frag_address_safe(const skb_frag_t *frag)
2581{
2582 void *ptr = page_address(skb_frag_page(frag));
2583 if (unlikely(!ptr))
2584 return NULL;
2585
2586 return ptr + frag->page_offset;
2587}
2588
2589
2590
2591
2592
2593
2594
2595
2596static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
2597{
2598 frag->page.p = page;
2599}
2600
2601
2602
2603
2604
2605
2606
2607
2608
2609static inline void skb_frag_set_page(struct sk_buff *skb, int f,
2610 struct page *page)
2611{
2612 __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
2613}
2614
2615bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
2616
2628static inline dma_addr_t skb_frag_dma_map(struct device *dev,
2629 const skb_frag_t *frag,
2630 size_t offset, size_t size,
2631 enum dma_data_direction dir)
2632{
2633 return dma_map_page(dev, skb_frag_page(frag),
2634 frag->page_offset + offset, size, dir);
2635}
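/*
 * Example (illustrative sketch): mapping every paged fragment of an skb for
 * transmit DMA; "dev" is assumed to be the driver's struct device:
 *
 *	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 *		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 *		dma_addr_t dma = skb_frag_dma_map(dev, frag, 0,
 *						  skb_frag_size(frag),
 *						  DMA_TO_DEVICE);
 *		if (dma_mapping_error(dev, dma))
 *			goto unmap_previously_mapped_frags;
 *	}
 */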
2636
2637static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
2638 gfp_t gfp_mask)
2639{
2640 return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
2641}
2642
2643
2644static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
2645 gfp_t gfp_mask)
2646{
2647 return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
2648}
2649
2659static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
2660{
2661 return !skb_header_cloned(skb) &&
2662 skb_headroom(skb) + len <= skb->hdr_len;
2663}
2664
2665static inline int skb_try_make_writable(struct sk_buff *skb,
2666 unsigned int write_len)
2667{
2668 return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
2669 pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2670}
2671
2672static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
2673 int cloned)
2674{
2675 int delta = 0;
2676
2677 if (headroom > skb_headroom(skb))
2678 delta = headroom - skb_headroom(skb);
2679
2680 if (delta || cloned)
2681 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
2682 GFP_ATOMIC);
2683 return 0;
2684}
2685
2698static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
2699{
2700 return __skb_cow(skb, headroom, skb_cloned(skb));
2701}
2702
2713static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
2714{
2715 return __skb_cow(skb, headroom, skb_header_cloned(skb));
2716}
2717
2728static inline int skb_padto(struct sk_buff *skb, unsigned int len)
2729{
2730 unsigned int size = skb->len;
2731 if (likely(size >= len))
2732 return 0;
2733 return skb_pad(skb, len - size);
2734}
2735
2746static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
2747{
2748 unsigned int size = skb->len;
2749
2750 if (unlikely(size < len)) {
2751 len -= size;
2752 if (skb_pad(skb, len))
2753 return -ENOMEM;
2754 __skb_put(skb, len);
2755 }
2756 return 0;
2757}
2758
2759static inline int skb_add_data(struct sk_buff *skb,
2760 struct iov_iter *from, int copy)
2761{
2762 const int off = skb->len;
2763
2764 if (skb->ip_summed == CHECKSUM_NONE) {
2765 __wsum csum = 0;
2766 if (csum_and_copy_from_iter(skb_put(skb, copy), copy,
2767 &csum, from) == copy) {
2768 skb->csum = csum_block_add(skb->csum, csum, off);
2769 return 0;
2770 }
2771 } else if (copy_from_iter(skb_put(skb, copy), copy, from) == copy)
2772 return 0;
2773
2774 __skb_trim(skb, off);
2775 return -EFAULT;
2776}
2777
2778static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
2779 const struct page *page, int off)
2780{
2781 if (i) {
2782 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
2783
2784 return page == skb_frag_page(frag) &&
2785 off == frag->page_offset + skb_frag_size(frag);
2786 }
2787 return false;
2788}
2789
2790static inline int __skb_linearize(struct sk_buff *skb)
2791{
2792 return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
2793}
2794
2795
2796
2797
2798
2799
2800
2801
2802static inline int skb_linearize(struct sk_buff *skb)
2803{
2804 return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
2805}
2806
2807
2808
2809
2810
2811
2812
2813
2814static inline bool skb_has_shared_frag(const struct sk_buff *skb)
2815{
2816 return skb_is_nonlinear(skb) &&
2817 skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
2818}
2819
2820
2821
2822
2823
2824
2825
2826
2827static inline int skb_linearize_cow(struct sk_buff *skb)
2828{
2829 return skb_is_nonlinear(skb) || skb_cloned(skb) ?
2830 __skb_linearize(skb) : 0;
2831}
2832
2844static inline void skb_postpull_rcsum(struct sk_buff *skb,
2845 const void *start, unsigned int len)
2846{
2847 if (skb->ip_summed == CHECKSUM_COMPLETE)
2848 skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
2849 else if (skb->ip_summed == CHECKSUM_PARTIAL &&
2850 skb_checksum_start_offset(skb) < 0)
2851 skb->ip_summed = CHECKSUM_NONE;
2852}
2853
2854unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
2855
2856static inline void skb_postpush_rcsum(struct sk_buff *skb,
2857 const void *start, unsigned int len)
2858{
2869 if (skb->ip_summed == CHECKSUM_COMPLETE)
2870 skb->csum = csum_partial(start, len, skb->csum);
2871}
2872
2884static inline unsigned char *skb_push_rcsum(struct sk_buff *skb,
2885 unsigned int len)
2886{
2887 skb_push(skb, len);
2888 skb_postpush_rcsum(skb, skb->data, len);
2889 return skb->data;
2890}
2891
2901static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
2902{
2903 if (likely(len >= skb->len))
2904 return 0;
2905 if (skb->ip_summed == CHECKSUM_COMPLETE)
2906 skb->ip_summed = CHECKSUM_NONE;
2907 return __pskb_trim(skb, len);
2908}
2909
2910#define skb_queue_walk(queue, skb) \
2911 for (skb = (queue)->next; \
2912 skb != (struct sk_buff *)(queue); \
2913 skb = skb->next)
2914
2915#define skb_queue_walk_safe(queue, skb, tmp) \
2916 for (skb = (queue)->next, tmp = skb->next; \
2917 skb != (struct sk_buff *)(queue); \
2918 skb = tmp, tmp = skb->next)
2919
2920#define skb_queue_walk_from(queue, skb) \
2921 for (; skb != (struct sk_buff *)(queue); \
2922 skb = skb->next)
2923
2924#define skb_queue_walk_from_safe(queue, skb, tmp) \
2925 for (tmp = skb->next; \
2926 skb != (struct sk_buff *)(queue); \
2927 skb = tmp, tmp = skb->next)
2928
2929#define skb_queue_reverse_walk(queue, skb) \
2930 for (skb = (queue)->prev; \
2931 skb != (struct sk_buff *)(queue); \
2932 skb = skb->prev)
2933
2934#define skb_queue_reverse_walk_safe(queue, skb, tmp) \
2935 for (skb = (queue)->prev, tmp = skb->prev; \
2936 skb != (struct sk_buff *)(queue); \
2937 skb = tmp, tmp = skb->prev)
2938
2939#define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \
2940 for (tmp = skb->prev; \
2941 skb != (struct sk_buff *)(queue); \
2942 skb = tmp, tmp = skb->prev)
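/*
 * Example (illustrative sketch): summing the queued bytes of a private
 * queue while holding its lock:
 *
 *	unsigned int total = 0;
 *
 *	spin_lock_bh(&q->lock);
 *	skb_queue_walk(q, skb)
 *		total += skb->len;
 *	spin_unlock_bh(&q->lock);
 */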

static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

#define skb_walk_frags(skb, iter)	\
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)


int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
				const struct sk_buff *skb);
struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
					int *peeked, int *off, int *err,
					struct sk_buff **last);
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
				    int *peeked, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
				  int *err);
unsigned int datagram_poll(struct file *file, struct socket *sock,
			   struct poll_table_struct *wait);
int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
			   struct iov_iter *to, int size);
static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
					struct msghdr *msg, int size)
{
	return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
}
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
				   struct msghdr *msg);
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
				struct iov_iter *from, int len);
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
static inline void skb_free_datagram_locked(struct sock *sk,
					    struct sk_buff *skb)
{
	__skb_free_datagram_locked(sk, skb, 0);
}
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
			      int len, __wsum csum);
ssize_t skb_socket_splice(struct sock *sk,
			  struct pipe_inode_info *pipe,
			  struct splice_pipe_desc *spd);
int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int len,
		    unsigned int flags,
		    ssize_t (*splice_cb)(struct sock *,
					 struct pipe_inode_info *,
					 struct splice_pipe_desc *));
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
		 int len, int hlen);
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
void skb_scrub_packet(struct sk_buff *skb, bool xnet);
unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, int write_len);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
			     gfp_t gfp);

static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
{
	return copy_from_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
}

static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
{
	return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
}
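
/*
 * memcpy_from_msg()/memcpy_to_msg() are thin wrappers around the iov_iter
 * copy helpers that treat a short copy as an error.  A minimal sketch of a
 * sendmsg-style handler pulling a fixed-size request out of user data
 * (hypothetical protocol code, not part of this header; struct example_req
 * is an assumed on-the-wire layout):
 *
 *	struct example_req {
 *		u32 opcode;
 *		u32 arg;
 *	};
 *
 *	static int example_sendmsg(struct socket *sock, struct msghdr *msg,
 *				   size_t len)
 *	{
 *		struct example_req req;
 *
 *		if (len < sizeof(req))
 *			return -EINVAL;
 *		if (memcpy_from_msg(&req, msg, sizeof(req)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */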

struct skb_checksum_ops {
	__wsum (*update)(const void *mem, int len, __wsum wsum);
	__wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
};

__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
		      __wsum csum, const struct skb_checksum_ops *ops);
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
		    __wsum csum);

static inline void * __must_check
__skb_header_pointer(const struct sk_buff *skb, int offset,
		     int len, void *data, int hlen, void *buffer)
{
	if (hlen - offset >= len)
		return data + offset;

	if (!skb ||
	    skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}

static inline void * __must_check
skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
{
	return __skb_header_pointer(skb, offset, len, skb->data,
				    skb_headlen(skb), buffer);
}
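
/*
 * skb_header_pointer() returns a pointer directly into the linear data when
 * the requested bytes are already there, and otherwise copies them into the
 * caller-supplied buffer; either way the caller gets a contiguous view of a
 * header without linearizing the skb.  A minimal sketch (hypothetical
 * parser, not part of this header; assumes the transport header points at a
 * UDP header):
 *
 *	static int example_peek_udp_dport(const struct sk_buff *skb,
 *					  __be16 *dport)
 *	{
 *		struct udphdr _uh;
 *		const struct udphdr *uh;
 *
 *		uh = skb_header_pointer(skb, skb_transport_offset(skb),
 *					sizeof(_uh), &_uh);
 *		if (!uh)
 *			return -EINVAL;
 *		*dport = uh->dest;
 *		return 0;
 *	}
 *
 * Note that the result may point at the on-stack copy, so it must not be
 * used to modify the packet.
 */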

/**
 *	skb_needs_linearize - check if we need to linearize a given skb
 *			      depending on the given device features.
 *	@skb: socket buffer to check
 *	@features: net device features
 *
 *	Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG.
 */
static inline bool skb_needs_linearize(struct sk_buff *skb,
				       netdev_features_t features)
{
	return skb_is_nonlinear(skb) &&
	       ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
		(skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
}
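
/*
 * Drivers (or the core transmit path) typically use skb_needs_linearize()
 * to fall back to a flat copy when the device cannot handle the skb's
 * geometry.  A minimal sketch (hypothetical xmit helper, not part of this
 * header):
 *
 *	static int example_prepare_xmit(struct sk_buff *skb,
 *					netdev_features_t features)
 *	{
 *		if (skb_needs_linearize(skb, features) &&
 *		    __skb_linearize(skb))
 *			return -ENOMEM;
 *		return 0;
 *	}
 */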

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						     const int offset, void *to,
						     const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						   const int offset,
						   const void *from,
						   const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}

void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct timeval to store stamp in
 *
 *	Converts the skb's ktime_t timestamp into a struct timeval and
 *	stores it in @stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct timeval *stamp)
{
	*stamp = ktime_to_timeval(skb->tstamp);
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
					struct timespec *stamp)
{
	*stamp = ktime_to_timespec(skb->tstamp);
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return ktime_set(0, 0);
}

struct sk_buff *skb_clone_sk(struct sk_buff *skb);

#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING

void skb_clone_tx_timestamp(struct sk_buff *skb);
bool skb_defer_rx_timestamp(struct sk_buff *skb);

#else

static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	return false;
}

#endif

/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb back to the stack with a
 * timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps
 */
void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps);

void __skb_tstamp_tx(struct sk_buff *orig_skb,
		     struct skb_shared_hwtstamps *hwtstamps,
		     struct sock *sk, int tstype);

/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb:	the original outgoing packet
 * @hwtstamps:	hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, this function clones the skb
 * (thus sharing the actual data and optional structures), stores the
 * hardware time stamps (if non NULL) or generates a software time
 * stamp (otherwise), then queues the clone to the error queue of the
 * socket.  Errors are silently ignored.
 */
void skb_tstamp_tx(struct sk_buff *orig_skb,
		   struct skb_shared_hwtstamps *hwtstamps);

static inline void sw_tx_timestamp(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
	    !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		skb_tstamp_tx(skb, NULL);
}

/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * Specifically, one must make sure this function is called before TX
 * completion of this packet can trigger; otherwise the packet could
 * potentially already be freed.
 *
 * @skb: A socket buffer.
 */
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	skb_clone_tx_timestamp(skb);
	sw_tx_timestamp(skb);
}
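
/*
 * A minimal sketch of the expected call site in a driver's ndo_start_xmit()
 * implementation (hypothetical driver code, not part of this header;
 * example_fill_descriptors() and example_hw_kick() are assumed placeholders
 * for descriptor setup and the doorbell write):
 *
 *	static netdev_tx_t example_start_xmit(struct sk_buff *skb,
 *					      struct net_device *dev)
 *	{
 *		example_fill_descriptors(dev, skb);
 *
 *		skb_tx_timestamp(skb);
 *		example_hw_kick(dev);
 *		return NETDEV_TX_OK;
 *	}
 */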

/**
 * skb_complete_wifi_ack - deliver skb with wifi status
 *
 * @skb: the original outgoing packet
 * @acked: ack status
 */
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
__sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
		skb->csum_valid ||
		(skb->ip_summed == CHECKSUM_PARTIAL &&
		 skb_checksum_start_offset(skb) >= 0));
}

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify that checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is not valid.  If the checksum has already been validated
 *	(skb_csum_unnecessary()), no work is done.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}

static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level == 0)
			skb->ip_summed = CHECKSUM_NONE;
		else
			skb->csum_level--;
	}
}

static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
			skb->csum_level++;
	} else if (skb->ip_summed == CHECKSUM_NONE) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = 0;
	}
}

static inline void __skb_mark_checksum_bad(struct sk_buff *skb)
{
	/* Flag the packet as carrying a bad checksum.  Only packets whose
	 * checksum has not been validated yet (CHECKSUM_NONE) or whose
	 * validation is tracked per level (CHECKSUM_UNNECESSARY) are
	 * marked; CHECKSUM_COMPLETE and CHECKSUM_PARTIAL still describe
	 * usable checksum state and are left untouched.
	 */
	if (skb->ip_summed == CHECKSUM_NONE ||
	    skb->ip_summed == CHECKSUM_UNNECESSARY)
		skb->csum_bad = 1;
}

/* Check if we need to perform checksum complete validation.
 *
 * Returns true if checksum complete is needed, false otherwise
 * (either checksum is unnecessary or zero checksum is allowed).
 */
static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
						  bool zero_okay,
						  __sum16 check)
{
	if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
		skb->csum_valid = 1;
		__skb_decr_checksum_unnecessary(skb);
		return false;
	}

	return true;
}

/* For small packets <= CHECKSUM_BREAK, checksum complete is performed
 * directly during validation (see __skb_checksum_validate_complete())
 * rather than being deferred to the caller.
 */
#define CHECKSUM_BREAK 76

/* Unset checksum-complete
 *
 * Unsetting checksum complete can be done when a packet is being
 * modified (uncompressed for instance) and the checksum-complete value
 * is consequently invalidated.
 */
static inline void skb_checksum_complete_unset(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/* Validate (init) checksum based on checksum complete.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete().  In the
 *      latter case the pseudo checksum is stored in skb->csum for later use
 *      by __skb_checksum_complete().
 *   non-zero: value of the invalid checksum.
 */
static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
						       bool complete,
						       __wsum psum)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!csum_fold(csum_add(psum, skb->csum))) {
			skb->csum_valid = 1;
			return 0;
		}
	} else if (skb->csum_bad) {
		/* ip_summed == CHECKSUM_NONE in this case */
		return (__force __sum16)1;
	}

	skb->csum = psum;

	if (complete || skb->len <= CHECKSUM_BREAK) {
		__sum16 csum;

		csum = __skb_checksum_complete(skb);
		skb->csum_valid = !csum;
		return csum;
	}

	return 0;
}

static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
{
	return 0;
}

/* Perform checksum validate (init).  Note that this is a macro since we only
 * want to call the pseudo-header compute function if it is actually needed.
 * First we try to validate without any computation (checksum unnecessary),
 * then we fall back to checksum complete, calling the supplied function to
 * compute the pseudo header.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete
 *   non-zero: value of invalid checksum
 */
#define __skb_checksum_validate(skb, proto, complete,			\
				zero_okay, check, compute_pseudo)	\
({									\
	__sum16 __ret = 0;						\
	skb->csum_valid = 0;						\
	if (__skb_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_checksum_validate_complete(skb,		\
				complete, compute_pseudo(skb, proto));	\
	__ret;								\
})

#define skb_checksum_init(skb, proto, compute_pseudo)			\
	__skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)

#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \
	__skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)

#define skb_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)

#define skb_checksum_validate_zero_check(skb, proto, check,		\
					 compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)

#define skb_checksum_simple_validate(skb)				\
	__skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
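
/*
 * A protocol's receive path validates the L4 checksum by passing one of the
 * macros above a callback that computes the pseudo-header checksum.  A
 * minimal sketch for a TCP-like protocol over IPv4 (hypothetical caller,
 * not part of this header; the real pseudo-header helpers live in the
 * protocol code):
 *
 *	static __wsum example_compute_pseudo(struct sk_buff *skb, int proto)
 *	{
 *		return csum_tcpudp_nofold(ip_hdr(skb)->saddr,
 *					  ip_hdr(skb)->daddr,
 *					  skb->len, proto, 0);
 *	}
 *
 *	static int example_rcv_checksum(struct sk_buff *skb)
 *	{
 *		if (skb_checksum_init(skb, IPPROTO_TCP,
 *				      example_compute_pseudo))
 *			return -EINVAL;
 *		return 0;
 *	}
 *
 * skb_checksum_init() returns 0 when the checksum is valid (or when full
 * verification is deferred for packets longer than CHECKSUM_BREAK); a
 * non-zero value is the folded invalid checksum.
 */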

static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
{
	return (skb->ip_summed == CHECKSUM_NONE &&
		skb->csum_valid && !skb->csum_bad);
}

static inline void __skb_checksum_convert(struct sk_buff *skb,
					  __sum16 check, __wsum pseudo)
{
	skb->csum = ~pseudo;
	skb->ip_summed = CHECKSUM_COMPLETE;
}

#define skb_checksum_try_convert(skb, proto, check, compute_pseudo)	\
do {									\
	if (__skb_checksum_convert_check(skb))				\
		__skb_checksum_convert(skb, check,			\
				       compute_pseudo(skb, proto));	\
} while (0)

static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
					      u16 start, u16 offset)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
	skb->csum_offset = offset - start;
}

/* Update the skb and the packet payload to reflect a remote checksum
 * offload operation.  @ptr points at the start of the region that
 * skb->csum covers when ip_summed is CHECKSUM_COMPLETE; if checksum
 * complete has to be computed here, skb_postpull_rcsum() is used so that
 * skb->csum starts at @ptr.
 */
static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
				       int start, int offset, bool nopartial)
{
	__wsum delta;

	if (!nopartial) {
		skb_remcsum_adjust_partial(skb, ptr, start, offset);
		return;
	}

	if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
		__skb_checksum_complete(skb);
		skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
	}

	delta = remcsum_adjust(ptr, skb->csum, start, offset);

	/* Adjust skb->csum since we changed the packet */
	skb->csum = csum_add(skb->csum, delta);
}

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

static inline void nf_reset_trace(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
	skb->nf_trace = 0;
#endif
}

/* Note: __nf_copy() does not drop (put) any conntrack or bridge references
 * that @dst may already hold; nf_copy() below releases them first.
 */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
			     bool copy)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->nfct = src->nfct;
	nf_conntrack_get(src->nfct);
	if (copy)
		dst->nfctinfo = src->nfctinfo;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	dst->nf_bridge = src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
	if (copy)
		dst->nf_trace = src->nf_trace;
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(dst->nfct);
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src, true);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline bool skb_irq_freeable(const struct sk_buff *skb)
{
	return !skb->destructor &&
#if IS_ENABLED(CONFIG_XFRM)
	       !skb->sp &&
#endif
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	       !skb->nfct &&
#endif
	       !skb->_skb_refdst &&
	       !skb_has_frag_list(skb);
}

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}
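
/*
 * queue_mapping stores the RX queue biased by one so that zero can mean
 * "not recorded".  A minimal sketch of the usual pattern in a multiqueue
 * driver's receive path (hypothetical driver code, not part of this header;
 * struct example_ring with "index" and "napi" fields is an assumed
 * driver-private structure):
 *
 *	static void example_rx(struct example_ring *ring, struct sk_buff *skb)
 *	{
 *		skb_record_rx_queue(skb, ring->index);
 *		napi_gro_receive(&ring->napi, skb);
 *	}
 *
 * Later, skb_rx_queue_recorded()/skb_get_rx_queue() let the stack pick a
 * matching TX queue for forwarded or reflected traffic.
 */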

static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
	return skb->sp;
#else
	return NULL;
#endif
}

/* skb_gso_cb lives in the skb control block during segmentation.  It keeps
 * track of the mac header offset relative to skb->head (the outer mac
 * header for tunnel skbs), the level of encapsulation, and the partial
 * checksum state used by gso_make_checksum().
 */
struct skb_gso_cb {
	union {
		int	mac_offset;
		int	data_offset;
	};
	int	encap_level;
	__wsum	csum;
	__u16	csum_start;
};
#define SKB_SGO_CB_OFFSET	32
#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))

static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
{
	return (skb_mac_header(inner_skb) - inner_skb->head) -
	       SKB_GSO_CB(inner_skb)->mac_offset;
}

static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
{
	int new_headroom, headroom;
	int ret;

	headroom = skb_headroom(skb);
	ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
	if (ret)
		return ret;

	new_headroom = skb_headroom(skb);
	SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
	return 0;
}

static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
{
	/* Do not update partial checksums if remote checksum offload
	 * is in use.
	 */
	if (skb->remcsum_offload)
		return;

	SKB_GSO_CB(skb)->csum = res;
	SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
}

/* Compute the checksum for a gso segment.  First compute the checksum value
 * from the start of the transport header to SKB_GSO_CB(skb)->csum_start, and
 * then add in SKB_GSO_CB(skb)->csum (the checksum from csum_start to the end
 * of the packet).  The stored csum and csum_start are then updated to
 * describe the checksum state of the resulting segment starting at the
 * transport header.
 */
static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
{
	unsigned char *csum_start = skb_transport_header(skb);
	int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
	__wsum partial = SKB_GSO_CB(skb)->csum;

	SKB_GSO_CB(skb)->csum = res;
	SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;

	return csum_fold(csum_partial(csum_start, plen, partial));
}

static inline bool skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set.
	 */
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Carrying a CHECKSUM_COMPLETE value across forwarding is not
	 * supported, so downgrade it to CHECKSUM_NONE.
	 */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * Fresh skbs have their ip_summed set to CHECKSUM_NONE.  Instead of
 * forcing ip_summed to CHECKSUM_NONE, this helper can be used to
 * document the places where that assumption is made.
 */
static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
				     unsigned int transport_len,
				     __sum16(*skb_chkf)(struct sk_buff *skb));

/**
 * skb_head_is_locked - Determine if the skb->head is locked down
 * @skb: skb to check
 *
 * The head on skbs built around a head frag can be removed if they are
 * not cloned.  This function returns true if the skb head is locked down
 * due to either being allocated via kmalloc, or by being a clone with
 * multiple references to the head.
 */
static inline bool skb_head_is_locked(const struct sk_buff *skb)
{
	return !skb->head_frag || skb_cloned(skb);
}

/**
 * skb_gso_network_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_network_seglen is used to determine the real size of the
 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
 *
 * The MAC/L2 header is not accounted for.
 */
static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) -
			       skb_network_header(skb);
	return hdr_len + skb_gso_transport_seglen(skb);
}

/* Local Checksum Offload.
 * Compute the outer checksum based on the assumption that the
 * inner checksum will be offloaded later.
 * See Documentation/networking/checksum-offloads.txt for a full
 * explanation of how this works.
 * Fill in the outer checksum adjustment (e.g. with the sum of the
 * outer pseudo-header) before calling, and make sure the inner
 * checksum is in CHECKSUM_PARTIAL state.
 */
static inline __wsum lco_csum(struct sk_buff *skb)
{
	unsigned char *csum_start = skb_checksum_start(skb);
	unsigned char *l4_hdr = skb_transport_header(skb);
	__wsum partial;

	/* Start with complement of inner checksum adjustment */
	partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
						    skb->csum_offset));

	/* Add in checksum of our headers (incl. outer checksum
	 * adjustment filled in by caller) and return result.
	 */
	return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
}
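
/*
 * A minimal sketch of how an outer UDP checksum can be derived from an
 * inner CHECKSUM_PARTIAL packet using lco_csum() (hypothetical tunnel
 * transmit code, not part of this header; udp_hdr() and udp_v4_check()
 * come from the UDP headers, and the real call sites live in the tunnel
 * and UDP code):
 *
 *	static void example_set_outer_udp_check(struct sk_buff *skb,
 *						__be32 saddr, __be32 daddr,
 *						int len)
 *	{
 *		struct udphdr *uh = udp_hdr(skb);
 *
 *		uh->check = 0;
 *		uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
 *		if (uh->check == 0)
 *			uh->check = CSUM_MANGLED_0;
 *	}
 */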

#endif
#endif	/* _LINUX_SKBUFF_H */