/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 */
#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/bvec.h>
#include <linux/cache.h>
#include <linux/rbtree.h>
#include <linux/socket.h>
#include <linux/refcount.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <net/flow_dissector.h>
#include <linux/splice.h>
#include <linux/in6.h>
#include <linux/if_packet.h>
#include <net/flow.h>
#include <net/page_pool.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <linux/netfilter/nf_conntrack_common.h>
#endif

/*
 * DOC: skb checksums (condensed; the full block documents the checksum
 * offload contract between the stack and drivers)
 *
 * On receive, a driver sets skb->ip_summed to describe what its device
 * verified: CHECKSUM_NONE (nothing, the stack must check),
 * CHECKSUM_UNNECESSARY (the device validated up to skb->csum_level + 1
 * consecutive checksums), or CHECKSUM_COMPLETE (skb->csum holds the
 * one's-complement sum over the whole packet for the stack to fold).
 *
 * On transmit, the stack sets CHECKSUM_PARTIAL to ask the device to
 * compute a checksum starting at csum_start and store it at
 * csum_start + csum_offset; drivers must honour this for any packet
 * they advertise checksum offload for.
 */
#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

/* Maximum value in skb->csum_level */
#define SKB_MAX_CSUM_LEVEL	3

#define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
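
/*
 * Illustrative note (not part of the original header): SKB_TRUESIZE()
 * is what a socket is charged for one skb.  For example, for a
 * 1500-byte frame it accounts the 1500 payload bytes plus the
 * cache-line-aligned sizes of struct sk_buff and struct
 * skb_shared_info, i.e. the real memory cost rather than just the
 * payload length.
 */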

struct ahash_request;
struct net_device;
struct scatterlist;
struct pipe_inode_info;
struct iov_iter;
struct napi_struct;
struct bpf_prog;
union bpf_attr;
struct skb_ext;

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
	enum {
		BRNF_PROTO_UNCHANGED,
		BRNF_PROTO_8021Q,
		BRNF_PROTO_PPPOE
	} orig_proto:8;
	u8			pkt_otherhost:1;
	u8			in_prerouting:1;
	u8			bridged_dnat:1;
	__u16			frag_max_size;
	struct net_device	*physindev;

	/* always valid & non-NULL from FORWARD on, for physdev match */
	struct net_device	*physoutdev;
	union {
		/* prerouting: detect dnat in orig/reply direction */
		__be32          ipv4_daddr;
		struct in6_addr ipv6_daddr;

		/* after prerouting + nat detected: store original source
		 * mac since neigh resolution overwrites it, only used while
		 * skb is out in neigh layer.
		 */
		char neigh_header[8];
	};
};
#endif

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
/* Chain in tc_skb_ext will be used to share the tc chain with
 * ovs recirc_id. It will be set to the current chain by tc
 * and read by ovs to recirc_id.
 */
struct tc_skb_ext {
	__u32 chain;
	__u16 mru;
	bool post_ct;
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list we
 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
 * buffers which do not start on a page boundary.
 *
 * Since GRO uses frags we allocate at least 16 regardless of page
 * size.
 */
#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif
extern int sysctl_max_skb_frags;

/* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
 * segment using its current segmentation instead.
 */
#define GSO_BY_FRAGS	0xFFFF

typedef struct bio_vec skb_frag_t;

/**
 * skb_frag_size() - Returns the size of a skb fragment
 * @frag: skb fragment
 */
static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->bv_len;
}

/**
 * skb_frag_size_set() - Sets the size of a skb fragment
 * @frag: skb fragment
 * @size: size of fragment
 */
static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->bv_len = size;
}

/**
 * skb_frag_size_add() - Increments the size of a skb fragment by @delta
 * @frag: skb fragment
 * @delta: value to add
 */
static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->bv_len += delta;
}

/**
 * skb_frag_size_sub() - Decrements the size of a skb fragment by @delta
 * @frag: skb fragment
 * @delta: value to subtract
 */
static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->bv_len -= delta;
}

/**
 * skb_frag_must_loop - Test if %p is a high memory page
 * @p: fragment's page
 */
static inline bool skb_frag_must_loop(struct page *p)
{
#if defined(CONFIG_HIGHMEM)
	if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) || PageHighMem(p))
		return true;
#endif
	return false;
}

/**
 *	skb_frag_foreach_page - loop over pages in a fragment
 *
 *	@f:		skb frag to operate on
 *	@f_off:		offset from start of f->bv_page
 *	@f_len:		length from f_off to loop over
 *	@p:		(temp var) current page
 *	@p_off:		(temp var) offset from start of current page,
 *			non-zero only on first page.
 *	@p_len:		(temp var) length in current page,
 *			< PAGE_SIZE only on first and last page.
 *	@copied:	(temp var) length so far, excluding current p_len.
 *
 *	A fragment can hold a compound page, in which case per-page
 *	operations, notably kmap_atomic, must be called for each
 *	regular page.
 */
#define skb_frag_foreach_page(f, f_off, f_len, p, p_off, p_len, copied)	\
	for (p = skb_frag_page(f) + ((f_off) >> PAGE_SHIFT),		\
	     p_off = (f_off) & (PAGE_SIZE - 1),				\
	     p_len = skb_frag_must_loop(p) ?				\
	     min_t(u32, f_len, PAGE_SIZE - p_off) : f_len,		\
	     copied = 0;						\
	     copied < f_len;						\
	     copied += p_len, p++, p_off = 0,				\
	     p_len = min_t(u32, f_len - copied, PAGE_SIZE))
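
/*
 * Illustrative sketch (not part of the original header): copying one
 * fragment out with skb_frag_foreach_page(), mapping one regular page
 * at a time so compound and highmem pages are handled correctly.
 * 'dst' is a caller-provided buffer.
 *
 *	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
 *	u32 p_off, p_len, copied;
 *	struct page *p;
 *	u8 *vaddr;
 *
 *	skb_frag_foreach_page(frag, skb_frag_off(frag),
 *			      skb_frag_size(frag), p, p_off, p_len,
 *			      copied) {
 *		vaddr = kmap_local_page(p);
 *		memcpy(dst + copied, vaddr + p_off, p_len);
 *		kunmap_local(vaddr);
 *	}
 */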
#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since arbitrary point in time
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
};

/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp when queueing packet to NIC */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* generate wifi status information (where possible) */
	SKBTX_WIFI_STATUS = 1 << 4,

	/* generate software time stamp when entering packet scheduling */
	SKBTX_SCHED_TSTAMP = 1 << 6,
};

#define SKBTX_ANY_SW_TSTAMP	(SKBTX_SW_TSTAMP    | \
				 SKBTX_SCHED_TSTAMP)
#define SKBTX_ANY_TSTAMP	(SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)

/* Definitions for flags in struct skb_shared_info */
enum {
	/* use zcopy routines */
	SKBFL_ZEROCOPY_ENABLE = BIT(0),

	/* This indicates at least one fragment might be overwritten
	 * (as in vmsplice(), sendfile() ...)
	 * If we need to compute a TX checksum, we'll need to copy
	 * all frags to avoid possible bad checksum
	 */
	SKBFL_SHARED_FRAG = BIT(1),
};

#define SKBFL_ZEROCOPY_FRAG	(SKBFL_ZEROCOPY_ENABLE | SKBFL_SHARED_FRAG)

/*
 * The callback notifies userspace to release buffers when skb DMA is done in
 * lower device, the skb last reference should be 0 when calling this.
 * The zerocopy_success argument is true if zero copy transmit occurred,
 * false on data copy or out of memory error caused by data copy attempt.
 * The ctx field is used to track device context.
 * The desc field is used to track userspace buffer index.
 */
struct ubuf_info {
	void (*callback)(struct sk_buff *, struct ubuf_info *,
			 bool zerocopy_success);
	union {
		struct {
			unsigned long desc;
			void *ctx;
		};
		struct {
			u32 id;
			u16 len;
			u16 zerocopy:1;
			u32 bytelen;
		};
	};
	refcount_t refcnt;
	u8 flags;

	struct mmpin {
		struct user_struct *user;
		unsigned int num_pg;
	} mmp;
};

#define skb_uarg(SKB)	((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))

int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
void mm_unaccount_pinned_pages(struct mmpin *mmp);

struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size);
struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
				       struct ubuf_info *uarg);

void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);

void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
			   bool success);

int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len);
int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
			     struct msghdr *msg, int len,
			     struct ubuf_info *uarg);
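
/*
 * Illustrative sketch (not part of the original header): rough shape of
 * how a stream protocol such as TCP wires MSG_ZEROCOPY through these
 * helpers on the sendmsg path ('sk', 'skb', 'msg' and 'size' come from
 * that path; error handling is abbreviated).
 *
 *	struct ubuf_info *uarg;
 *	int copied;
 *
 *	uarg = msg_zerocopy_realloc(sk, size, skb_zcopy(skb));
 *	if (!uarg)
 *		return -ENOBUFS;
 *	copied = skb_zerocopy_iter_stream(sk, skb, msg, size, uarg);
 *	if (copied < 0)
 *		msg_zerocopy_put_abort(uarg, true);
 *
 * When transmission completes, msg_zerocopy_callback() queues a
 * completion notification on the socket error queue for userspace.
 */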

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	__u8		flags;
	__u8		meta_len;
	__u8		nr_frags;
	__u8		tx_flags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	unsigned int	gso_type;
	u32		tskey;

	/*
	 * Warning : all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void		*destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)

enum {
	SKB_FCLONE_UNAVAILABLE,	/* skb has no fclone (from head_cache) */
	SKB_FCLONE_ORIG,	/* orig skb (from fclone_cache) */
	SKB_FCLONE_CLONE,	/* companion fclone skb (from fclone_cache) */
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 1,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 2,

	SKB_GSO_TCP_FIXEDID = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,

	SKB_GSO_GRE = 1 << 6,

	SKB_GSO_GRE_CSUM = 1 << 7,

	SKB_GSO_IPXIP4 = 1 << 8,

	SKB_GSO_IPXIP6 = 1 << 9,

	SKB_GSO_UDP_TUNNEL = 1 << 10,

	SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,

	SKB_GSO_PARTIAL = 1 << 12,

	SKB_GSO_TUNNEL_REMCSUM = 1 << 13,

	SKB_GSO_SCTP = 1 << 14,

	SKB_GSO_ESP = 1 << 15,

	SKB_GSO_UDP = 1 << 16,

	SKB_GSO_UDP_L4 = 1 << 17,

	SKB_GSO_FRAGLIST = 1 << 18,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

/*
 * struct sk_buff kernel-doc (condensed): the original block here
 * documents every field of struct sk_buff below, e.g. @next/@prev list
 * linkage, @sk owning socket, @dev device, @tstamp arrival/departure
 * time, @cb per-layer control buffer, @_skb_refdst destination entry,
 * @len/@data_len buffer lengths, @ip_summed checksum state, @cloned
 * and @users reference state, the header offsets, and the
 * @head/@data/@tail/@end buffer pointers.
 */
struct sk_buff {
	union {
		struct {
			/* These two members must be first. */
			struct sk_buff		*next;
			struct sk_buff		*prev;

			union {
				struct net_device	*dev;
				/* Some protocols might use this space to store information,
				 * while device pointer would be NULL.
				 * UDP receive path is one user.
				 */
				unsigned long		dev_scratch;
			};
		};
		struct rb_node		rbnode; /* used in netem, ip4 defrag, and tcp stack */
		struct list_head	list;
	};

	union {
		struct sock		*sk;
		int			ip_defrag_offset;
	};

	union {
		ktime_t		tstamp;
		u64		skb_mstamp_ns; /* earliest departure time */
	};
	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	union {
		struct {
			unsigned long	_skb_refdst;
			void		(*destructor)(struct sk_buff *skb);
		};
		struct list_head	tcp_tsorted_anchor;
#ifdef CONFIG_NET_SOCK_MSG
		unsigned long		_sk_redir;
#endif
	};

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	unsigned long		 _nfct;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;

	/* Following fields are _not_ copied in __copy_skb_header()
	 * Note that queue_mapping is here mostly to fill a hole.
	 */
	__u16			queue_mapping;

/* if you move cloned around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define CLONED_MASK	(1 << 7)
#else
#define CLONED_MASK	1
#endif
#define CLONED_OFFSET()		offsetof(struct sk_buff, __cloned_offset)

	/* private: */
	__u8			__cloned_offset[0];
	/* public: */
	__u8			cloned:1,
				nohdr:1,
				fclone:2,
				peeked:1,
				head_frag:1,
				pfmemalloc:1,
				pp_recycle:1; /* page_pool recycle indicator */
#ifdef CONFIG_SKB_EXTENSIONS
	__u8			active_extensions;
#endif

	/* fields enclosed in headers_start/headers_end are copied
	 * using a single memcpy() in __copy_skb_header()
	 */
	/* private: */
	__u32			headers_start[0];
	/* public: */

/* if you move pkt_type around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_TYPE_MAX	(7 << 5)
#else
#define PKT_TYPE_MAX	7
#endif
#define PKT_TYPE_OFFSET()	offsetof(struct sk_buff, __pkt_type_offset)

	/* private: */
	__u8			__pkt_type_offset[0];
	/* public: */
	__u8			pkt_type:3;
	__u8			ignore_df:1;
	__u8			nf_trace:1;
	__u8			ip_summed:2;
	__u8			ooo_okay:1;

	__u8			l4_hash:1;
	__u8			sw_hash:1;
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;
	__u8			no_fcs:1;
	/* Indicates the inner headers are valid in the skbuff. */
	__u8			encapsulation:1;
	__u8			encap_hdr_csum:1;
	__u8			csum_valid:1;

#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_VLAN_PRESENT_BIT	7
#else
#define PKT_VLAN_PRESENT_BIT	0
#endif
#define PKT_VLAN_PRESENT_OFFSET()	offsetof(struct sk_buff, __pkt_vlan_present_offset)
	/* private: */
	__u8			__pkt_vlan_present_offset[0];
	/* public: */
	__u8			vlan_present:1;
	__u8			csum_complete_sw:1;
	__u8			csum_level:2;
	__u8			csum_not_inet:1;
	__u8			dst_pending_confirm:1;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif

	__u8			ipvs_property:1;
	__u8			inner_protocol_type:1;
	__u8			remcsum_offload:1;
#ifdef CONFIG_NET_SWITCHDEV
	__u8			offload_fwd_mark:1;
	__u8			offload_l3_fwd_mark:1;
#endif
#ifdef CONFIG_NET_CLS_ACT
	__u8			tc_skip_classify:1;
	__u8			tc_at_ingress:1;
#endif
#ifdef CONFIG_NET_REDIRECT
	__u8			redirected:1;
	__u8			from_ingress:1;
#endif
#ifdef CONFIG_TLS_DEVICE
	__u8			decrypted:1;
#endif

#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#endif

	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	int			skb_iif;
	__u32			hash;
	__be16			vlan_proto;
	__u16			vlan_tci;
#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
	union {
		unsigned int	napi_id;
		unsigned int	sender_cpu;
	};
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32		secmark;
#endif

	union {
		__u32		mark;
		__u32		reserved_tailroom;
	};

	union {
		__be16		inner_protocol;
		__u8		inner_ipproto;
	};

	__u16			inner_transport_header;
	__u16			inner_network_header;
	__u16			inner_mac_header;

	__be16			protocol;
	__u16			transport_header;
	__u16			network_header;
	__u16			mac_header;

#ifdef CONFIG_KCOV
	u64			kcov_handle;
#endif

	/* private: */
	__u32			headers_end[0];
	/* public: */

	/* These elements must be at the end, see alloc_skb() for details.  */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	refcount_t		users;

#ifdef CONFIG_SKB_EXTENSIONS
	/* only useable after checking ->active_extensions != 0 */
	struct skb_ext		*extensions;
#endif
};

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */

#define SKB_ALLOC_FCLONE	0x01
#define SKB_ALLOC_RX		0x02
#define SKB_ALLOC_NAPI		0x04

/**
 * skb_pfmemalloc - Test if the skb was allocated from PFMEMALLOC reserves
 * @skb: buffer
 */
static inline bool skb_pfmemalloc(const struct sk_buff *skb)
{
	return unlikely(skb->pfmemalloc);
}

/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if refcount was _not_ taken
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of reference taken or not.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check we still are in a
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}

/**
 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * If dst entry is cached, we do not take reference and dst_release
 * will be avoided by refdst_drop. If dst entry is not cached, we take
 * reference, so that last dst_release can destroy the dst immediately.
 */
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
}

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

/**
 * skb_rtable - Returns the skb &rtable
 * @skb: buffer
 */
static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

/* For mangling skb->pkt_type from user space side from applications
 * such as nft, tc, etc, it is only allowed to set it to valid values.
 */
static inline bool skb_pkt_type_ok(u32 ptype)
{
	return ptype <= PACKET_OTHERHOST;
}

/**
 * skb_napi_id - Returns the skb's NAPI id
 * @skb: buffer
 */
static inline unsigned int skb_napi_id(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	return skb->napi_id;
#else
	return 0;
#endif
}

/**
 * skb_unref - decrement the skb's reference count
 * @skb: buffer
 *
 * Returns true if we can free the skb.
 */
static inline bool skb_unref(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return false;
	if (likely(refcount_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!refcount_dec_and_test(&skb->users)))
		return false;

	return true;
}

void skb_release_head_state(struct sk_buff *skb);
void kfree_skb(struct sk_buff *skb);
void kfree_skb_list(struct sk_buff *segs);
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
void skb_tx_error(struct sk_buff *skb);

#ifdef CONFIG_TRACEPOINTS
void consume_skb(struct sk_buff *skb);
#else
static inline void consume_skb(struct sk_buff *skb)
{
	return kfree_skb(skb);
}
#endif

void __consume_stateless_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;

void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
		      bool *fragstolen, int *delta_truesize);

struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
			    int node);
struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size);

struct sk_buff *napi_build_skb(void *data, unsigned int frag_size);

/**
 * alloc_skb - allocate a network buffer
 * @size: size to allocate
 * @priority: allocation mask
 *
 * This function is a convenient wrapper around __alloc_skb().
 */
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}
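
/*
 * Illustrative sketch (not part of the original header): allocating and
 * filling a buffer for a locally generated packet.  'hlen', 'dlen' and
 * 'payload' are hypothetical caller-supplied values.
 *
 *	struct sk_buff *skb = alloc_skb(hlen + dlen, GFP_KERNEL);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);			// headroom for headers
 *	skb_put_data(skb, payload, dlen);	// append the payload
 */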

struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
				     unsigned long data_len,
				     int max_page_order,
				     int *errcode,
				     gfp_t gfp_mask);
struct sk_buff *alloc_skb_for_msg(struct sk_buff *first);

/* Layout of fast clones : [skb1][skb2][fclone_ref] */
struct sk_buff_fclones {
	struct sk_buff	skb1;

	struct sk_buff	skb2;

	refcount_t	fclone_ref;
};

/**
 *	skb_fclone_busy - check if fclone is busy
 *	@sk: socket
 *	@skb: buffer
 *
 * Returns true if skb is a fast clone, and its clone is not freed.
 * Some drivers call skb_orphan() in their ndo_start_xmit(),
 * so we also check that this didn't happen.
 */
static inline bool skb_fclone_busy(const struct sock *sk,
				   const struct sk_buff *skb)
{
	const struct sk_buff_fclones *fclones;

	fclones = container_of(skb, struct sk_buff_fclones, skb1);

	return skb->fclone == SKB_FCLONE_ORIG &&
	       refcount_read(&fclones->fclone_ref) > 1 &&
	       READ_ONCE(fclones->skb2.sk) == sk;
}

/**
 * alloc_skb_fclone - allocate a network buffer from fclone cache
 * @size: size to allocate
 * @priority: allocation mask
 *
 * This function is a convenient wrapper around __alloc_skb().
 */
static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
}

struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
void skb_headers_offset_update(struct sk_buff *skb, int off);
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
void skb_copy_header(struct sk_buff *new, const struct sk_buff *old);
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone);
static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
					  gfp_t gfp_mask)
{
	return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
}

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
				     unsigned int headroom);
struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
				int newtailroom, gfp_t priority);
int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
				     int offset, int len);
int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
			      int offset, int len);
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);

/**
 *	skb_pad			-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error.
 */
static inline int skb_pad(struct sk_buff *skb, int pad)
{
	return __skb_pad(skb, pad, true);
}
#define dev_kfree_skb(a)	consume_skb(a)

int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
			 int offset, size_t size);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
	__u32		frag_off;
};

void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st);
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st);
void skb_abort_seq_read(struct skb_seq_state *st);

unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config);
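
/*
 * Illustrative sketch (not part of the original header): walking skb
 * payload with the sequential reader, which presents linear data, page
 * frags and the frag_list behind a single cursor.
 *
 *	struct skb_seq_state st;
 *	unsigned int consumed = 0, avail;
 *	const u8 *data;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((avail = skb_seq_read(consumed, &data, &st)) != 0) {
 *		// process 'avail' bytes at 'data'
 *		consumed += avail;
 *	}
 *	skb_abort_seq_read(&st);
 */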

/*
 * Packet hash types specify the type of hash in skb_set_hash.
 *
 * Hash types refer to the protocol layer addresses which are used to
 * construct a packet's hash. The hashes are used to differentiate or
 * identify flows of the protocol layer for the hash.
 *
 * Properties of hashes:
 *
 * 1) Two packets in the same flow have identical hashes
 * 2) Two packets in different flows have different hashes
 *
 * A hash at a higher protocol layer is considered to be more specific;
 * a driver should set the most specific hash possible.  A driver cannot
 * indicate a more specific hash than the layer at which it was computed
 * (an L3 hash must not be reported as an L4 hash), but it may report a
 * less specific one if it cannot be sure which layer the hardware used.
 */
enum pkt_hash_types {
	PKT_HASH_TYPE_NONE,
	PKT_HASH_TYPE_L2,
	PKT_HASH_TYPE_L3,
	PKT_HASH_TYPE_L4,
};

static inline void skb_clear_hash(struct sk_buff *skb)
{
	skb->hash = 0;
	skb->sw_hash = 0;
	skb->l4_hash = 0;
}

static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
{
	if (!skb->l4_hash)
		skb_clear_hash(skb);
}

static inline void
__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
{
	skb->l4_hash = is_l4;
	skb->sw_hash = is_sw;
	skb->hash = hash;
}

static inline void
skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
{
	/* Used by drivers to set hash from HW */
	__skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
}

static inline void
__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
{
	__skb_set_hash(skb, hash, true, is_l4);
}

void __skb_get_hash(struct sk_buff *skb);
u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
u32 skb_get_poff(const struct sk_buff *skb);
u32 __skb_get_poff(const struct sk_buff *skb, const void *data,
		   const struct flow_keys_basic *keys, int hlen);
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
			    const void *data, int hlen_proto);

static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
					int thoff, u8 ip_proto)
{
	return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
}

void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count);

struct bpf_flow_dissector;
bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
		      __be16 proto, int nhoff, int hlen, unsigned int flags);

bool __skb_flow_dissect(const struct net *net,
			const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, const void *data,
			__be16 proto, int nhoff, int hlen, unsigned int flags);

static inline bool skb_flow_dissect(const struct sk_buff *skb,
				    struct flow_dissector *flow_dissector,
				    void *target_container, unsigned int flags)
{
	return __skb_flow_dissect(NULL, skb, flow_dissector,
				  target_container, NULL, 0, 0, 0, flags);
}

static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
					      struct flow_keys *flow,
					      unsigned int flags)
{
	memset(flow, 0, sizeof(*flow));
	return __skb_flow_dissect(NULL, skb, &flow_keys_dissector,
				  flow, NULL, 0, 0, 0, flags);
}

static inline bool
skb_flow_dissect_flow_keys_basic(const struct net *net,
				 const struct sk_buff *skb,
				 struct flow_keys_basic *flow,
				 const void *data, __be16 proto,
				 int nhoff, int hlen, unsigned int flags)
{
	memset(flow, 0, sizeof(*flow));
	return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow,
				  data, proto, nhoff, hlen, flags);
}

void skb_flow_dissect_meta(const struct sk_buff *skb,
			   struct flow_dissector *flow_dissector,
			   void *target_container);

/* Gets a skb connection tracking info, ctinfo map should be a
 * map of mapsize to translate enum ip_conntrack_info states
 * to user states.
 */
void
skb_flow_dissect_ct(const struct sk_buff *skb,
		    struct flow_dissector *flow_dissector,
		    void *target_container,
		    u16 *ctinfo_map, size_t mapsize,
		    bool post_ct);
void
skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
			     struct flow_dissector *flow_dissector,
			     void *target_container);

void skb_flow_dissect_hash(const struct sk_buff *skb,
			   struct flow_dissector *flow_dissector,
			   void *target_container);

static inline __u32 skb_get_hash(struct sk_buff *skb)
{
	if (!skb->l4_hash && !skb->sw_hash)
		__skb_get_hash(skb);

	return skb->hash;
}

static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
{
	if (!skb->l4_hash && !skb->sw_hash) {
		struct flow_keys keys;
		__u32 hash = __get_hash_from_flowi6(fl6, &keys);

		__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
	}

	return skb->hash;
}

__u32 skb_get_hash_perturb(const struct sk_buff *skb,
			   const siphash_key_t *perturb);

static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
	return skb->hash;
}

static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
{
	to->hash = from->hash;
	to->sw_hash = from->sw_hash;
	to->l4_hash = from->l4_hash;
}

static inline void skb_copy_decrypted(struct sk_buff *to,
				      const struct sk_buff *from)
{
#ifdef CONFIG_TLS_DEVICE
	to->decrypted = from->decrypted;
#endif
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end - skb->head;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
{
	bool is_zcopy = skb && skb_shinfo(skb)->flags & SKBFL_ZEROCOPY_ENABLE;

	return is_zcopy ? skb_uarg(skb) : NULL;
}

static inline void net_zcopy_get(struct ubuf_info *uarg)
{
	refcount_inc(&uarg->refcnt);
}

static inline void skb_zcopy_init(struct sk_buff *skb, struct ubuf_info *uarg)
{
	skb_shinfo(skb)->destructor_arg = uarg;
	skb_shinfo(skb)->flags |= uarg->flags;
}

static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
				 bool *have_ref)
{
	if (skb && uarg && !skb_zcopy(skb)) {
		if (unlikely(have_ref && *have_ref))
			*have_ref = false;
		else
			net_zcopy_get(uarg);
		skb_zcopy_init(skb, uarg);
	}
}

static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
{
	skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
	skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_FRAG;
}

static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
{
	return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL;
}

static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
{
	return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
}

static inline void net_zcopy_put(struct ubuf_info *uarg)
{
	if (uarg)
		uarg->callback(NULL, uarg, true);
}

static inline void net_zcopy_put_abort(struct ubuf_info *uarg, bool have_uref)
{
	if (uarg) {
		if (uarg->callback == msg_zerocopy_callback)
			msg_zerocopy_put_abort(uarg, have_uref);
		else if (have_uref)
			net_zcopy_put(uarg);
	}
}

/* Release a reference on a zerocopy structure */
static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success)
{
	struct ubuf_info *uarg = skb_zcopy(skb);

	if (uarg) {
		if (!skb_zcopy_is_nouarg(skb))
			uarg->callback(skb, uarg, zerocopy_success);

		skb_shinfo(skb)->flags &= ~SKBFL_ZEROCOPY_FRAG;
	}
}

static inline void skb_mark_not_on_list(struct sk_buff *skb)
{
	skb->next = NULL;
}

/* Iterate through singly-linked GSO fragments of an skb. */
#define skb_list_walk_safe(first, skb, next_skb)				\
	for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb);	\
	     (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL)
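
/*
 * Illustrative sketch (not part of the original header): consuming every
 * segment of a GSO list; the _safe form lets the body free 'skb'.
 *
 *	struct sk_buff *skb, *next;
 *
 *	skb_list_walk_safe(segs, skb, next) {
 *		skb_mark_not_on_list(skb);
 *		kfree_skb(skb);
 *	}
 */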

static inline void skb_list_del_init(struct sk_buff *skb)
{
	__list_del_entry(&skb->list);
	skb_mark_not_on_list(skb);
}

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (const struct sk_buff *) list;
}

/**
 *	skb_queue_empty_lockless - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 *	This variant can be used in lockless contexts.
 */
static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
{
	return READ_ONCE(list->next) == (const struct sk_buff *) list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (const struct sk_buff *) list;
}

/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (const struct sk_buff *) list;
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb.  It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb.  It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	refcount_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant atomic
 * changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_header_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

/**
 * __skb_header_release() - allow clones to use the headroom
 * @skb: buffer to operate on
 */
static inline void __skb_header_release(struct sk_buff *skb)
{
	skb->nohdr = 1;
	atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return refcount_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt status or with spinlocks held pri must
 *	be GFP_ATOMIC.
 *
 *	NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);

		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
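
/*
 * Illustrative sketch (not part of the original header): typical use on
 * a receive path that needs to modify the buffer; afterwards the caller
 * owns an unshared skb, or NULL on allocation failure.
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return NET_RX_DROP;
 *	// safe to modify the skb from here on
 */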

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);

		/* Free our shared copy */
		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->next;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	__skb_peek - peek at the head of a non-empty &sk_buff_head
 *	@list_: list to peek at
 *
 *	Like skb_peek(), but the caller knows that the list is not empty.
 */
static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_)
{
	return list_->next;
}

/**
 *	skb_peek_next - peek skb following the given one from a queue
 *	@skb: skb to start from
 *	@list_: list to peek at
 *
 *	Returns %NULL when the end of the list is met or a pointer to the
 *	next element. The reference count is not incremented and the
 *	reference is therefore volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
					    const struct sk_buff_head *list_)
{
	struct sk_buff *next = skb->next;

	if (next == (struct sk_buff *)list_)
		next = NULL;
	return next;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = READ_ONCE(list_->prev);

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	skb_queue_len_lockless	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 *	This variant can be used in lockless contexts.
 */
static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_)
{
	return READ_ONCE(list_->qlen);
}

/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object.  This allows to initialize the list
 *	aspects of an sk_buff_head without reinitializing things like
 *	the spinlock.  It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known to not be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers should need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
					     struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	/* See skb_queue_empty_lockless() and skb_peek_tail()
	 * for the opposite READ_ONCE()
	 */
	WRITE_ONCE(newsk->next, next);
	WRITE_ONCE(newsk->prev, prev);
	WRITE_ONCE(next->prev, newsk);
	WRITE_ONCE(prev->next, newsk);
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	WRITE_ONCE(first->prev, prev);
	WRITE_ONCE(prev->next, first);

	WRITE_ONCE(last->next, next);
	WRITE_ONCE(next->prev, last);
}

/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	__skb_queue_after - queue a buffer after a given one
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	WRITE_ONCE(list->qlen, list->qlen - 1);
	next = skb->next;
	prev = skb->prev;
	skb->next = skb->prev = NULL;
	WRITE_ONCE(next->prev, prev);
	WRITE_ONCE(prev->next, next);
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
struct sk_buff *skb_dequeue(struct sk_buff_head *list);

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
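
/*
 * Illustrative sketch (not part of the original header): a simple
 * producer/consumer on the locked queue API, e.g. feeding buffers from
 * an interrupt handler to a worker.  'process()' is hypothetical.
 *
 *	static struct sk_buff_head rxq;
 *
 *	skb_queue_head_init(&rxq);		// once, at setup time
 *
 *	skb_queue_tail(&rxq, skb);		// producer side
 *
 *	while ((skb = skb_dequeue(&rxq)))	// consumer side
 *		process(skb);
 */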

static inline bool skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline unsigned int __skb_pagelen(const struct sk_buff *skb)
{
	unsigned int i, len = 0;

	for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
	return len;
}

static inline unsigned int skb_pagelen(const struct sk_buff *skb)
{
	return skb_headlen(skb) + __skb_pagelen(skb);
}

/**
 * __skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data with @page
 * @size: the length of the data
 *
 * Initialises the @i'th fragment of @skb to point to &size bytes at
 * offset @off within @page.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	/*
	 * Propagate page pfmemalloc to the skb if we can. The problem is
	 * that not all callers have unique ownership of the page but rely
	 * on page_is_pfmemalloc doing the right thing(tm).
	 */
	frag->bv_page = page;
	frag->bv_offset = off;
	skb_frag_size_set(frag, size);

	page = compound_head(page);
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc	= true;
}

/**
 * skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data with @page
 * @size: the length of the data
 *
 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
 * @skb to point to @size bytes at offset @off within @page. In
 * addition updates @skb such that @i is the last fragment.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	__skb_fill_page_desc(skb, i, page, off, size);
	skb_shinfo(skb)->nr_frags = i + 1;
}
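
/*
 * Illustrative sketch (not part of the original header): appending a
 * page the caller owns as the next paged fragment, keeping the skb's
 * length accounting consistent ('offset' and 'size' are hypothetical).
 *
 *	int i = skb_shinfo(skb)->nr_frags;
 *
 *	skb_fill_page_desc(skb, i, page, offset, size);
 *	skb->len += size;
 *	skb->data_len += size;
 *	skb->truesize += PAGE_SIZE;	// or the actual buffer size
 */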

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize);

#define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
void *skb_put(struct sk_buff *skb, unsigned int len);
static inline void *__skb_put(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = __skb_put(skb, len);

	memset(tmp, 0, len);
	return tmp;
}

static inline void *__skb_put_data(struct sk_buff *skb, const void *data,
				   unsigned int len)
{
	void *tmp = __skb_put(skb, len);

	memcpy(tmp, data, len);
	return tmp;
}

static inline void __skb_put_u8(struct sk_buff *skb, u8 val)
{
	*(u8 *)__skb_put(skb, 1) = val;
}

static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memset(tmp, 0, len);

	return tmp;
}

static inline void *skb_put_data(struct sk_buff *skb, const void *data,
				 unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memcpy(tmp, data, len);

	return tmp;
}

static inline void skb_put_u8(struct sk_buff *skb, u8 val)
{
	*(u8 *)skb_put(skb, 1) = val;
}
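
/*
 * Illustrative sketch (not part of the original header): the usual
 * headroom dance when building a frame; 'hdr_len', 'data_len' and
 * 'payload' are hypothetical.
 *
 *	skb = alloc_skb(hdr_len + data_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hdr_len);		// headroom for headers
 *	skb_put_data(skb, payload, data_len);	// payload grows the tail
 *	skb_push(skb, hdr_len);			// headers grow the front
 */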

void *skb_push(struct sk_buff *skb, unsigned int len);
static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

void *skb_pull(struct sk_buff *skb, unsigned int len);
static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

void *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return true;
	if (unlikely(len > skb->len))
		return false;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
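
/*
 * Illustrative sketch (not part of the original header): ensuring a
 * header is in the linear area before reading it, as protocol input
 * handlers do.  ip_hdr() comes from <linux/ip.h>.
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;		// truncated packet, or pull failed
 *	iph = ip_hdr(skb);		// now safe to dereference
 */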

void skb_condense(struct sk_buff *skb);

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_availroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 *	allocated by sk_stream_alloc()
 */
static inline int skb_availroom(const struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb))
		return 0;

	return skb->end - skb->tail - skb->reserved_tailroom;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

/**
 *	skb_tailroom_reserve - adjust reserved_tailroom
 *	@skb: buffer to alter
 *	@mtu: maximum amount of headlen permitted
 *	@needed_tailroom: minimum amount of reserved_tailroom
 *
 *	Set reserved_tailroom so that headlen can be as large as possible but
 *	not larger than mtu and tailroom cannot be smaller than
 *	needed_tailroom.
 *	The required headroom should already have been reserved before using
 *	this function.
 */
static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
					unsigned int needed_tailroom)
{
	SKB_LINEAR_ASSERT(skb);
	if (mtu < skb_tailroom(skb) - needed_tailroom)
		/* use at most mtu */
		skb->reserved_tailroom = skb_tailroom(skb) - mtu;
	else
		/* use up to all available space */
		skb->reserved_tailroom = needed_tailroom;
}

#define ENCAP_TYPE_ETHER	0
#define ENCAP_TYPE_IPPROTO	1

static inline void skb_set_inner_protocol(struct sk_buff *skb,
					  __be16 protocol)
{
	skb->inner_protocol = protocol;
	skb->inner_protocol_type = ENCAP_TYPE_ETHER;
}

static inline void skb_set_inner_ipproto(struct sk_buff *skb,
					 __u8 ipproto)
{
	skb->inner_ipproto = ipproto;
	skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
}

static inline void skb_reset_inner_headers(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->mac_header;
	skb->inner_network_header = skb->network_header;
	skb->inner_transport_header = skb->transport_header;
}

static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}

static inline unsigned char *skb_inner_transport_header(const struct sk_buff
							*skb)
{
	return skb->head + skb->inner_transport_header;
}

static inline int skb_inner_transport_offset(const struct sk_buff *skb)
{
	return skb_inner_transport_header(skb) - skb->data;
}

static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
	skb->inner_transport_header = skb->data - skb->head;
}

static inline void skb_set_inner_transport_header(struct sk_buff *skb,
						  const int offset)
{
	skb_reset_inner_transport_header(skb);
	skb->inner_transport_header += offset;
}

static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_network_header;
}

static inline void skb_reset_inner_network_header(struct sk_buff *skb)
{
	skb->inner_network_header = skb->data - skb->head;
}

static inline void skb_set_inner_network_header(struct sk_buff *skb,
						const int offset)
{
	skb_reset_inner_network_header(skb);
	skb->inner_network_header += offset;
}

static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_mac_header;
}

static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->data - skb->head;
}

static inline void skb_set_inner_mac_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_inner_mac_header(skb);
	skb->inner_mac_header += offset;
}

static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
{
	return skb->transport_header != (typeof(skb->transport_header))~0U;
}

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_offset(const struct sk_buff *skb)
{
	return skb_mac_header(skb) - skb->data;
}

static inline u32 skb_mac_header_len(const struct sk_buff *skb)
{
	return skb->network_header - skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != (typeof(skb->mac_header))~0U;
}

static inline void skb_unset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = (typeof(skb->mac_header))~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

static inline void skb_pop_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->network_header;
}

static inline void skb_probe_transport_header(struct sk_buff *skb)
{
	struct flow_keys_basic keys;

	if (skb_transport_header_was_set(skb))
		return;

	if (skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
					     NULL, 0, 0, 0, 0))
		skb_set_transport_header(skb, keys.control.thoff);
}

static inline void skb_mac_header_rebuild(struct sk_buff *skb)
{
	if (skb_mac_header_was_set(skb)) {
		const unsigned char *old_mac = skb_mac_header(skb);

		skb_set_mac_header(skb, -skb->mac_len);
		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
	}
}

static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}

static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
{
	return skb->head + skb->csum_start;
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
{
	return skb->inner_transport_header - skb->inner_network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline int skb_inner_network_offset(const struct sk_buff *skb)
{
	return skb_inner_network_header(skb) - skb->data;
}

static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
	return pskb_may_pull(skb, skb_network_offset(skb) + len);
}

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix
 * it in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up
 * with the IP header at an unaligned offset. The IP header can be
 * aligned by shifting the start of the packet by 2 bytes. Drivers
 * should do this with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is
 * now unaligned. On some architectures the cost of an unaligned DMA is
 * high and this cost outweighs the gains made by aligning the IP
 * header.
 *
 * Since this trade off varies between architectures, we allow
 * NET_IP_ALIGN to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data
 * when the header has to grow. In the default case, if the header has
 * to grow 32 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the
 * resulting network packet, so architectures can override the default
 * to trade header-growth cost against DMA alignment.
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom, you should not reduce this.
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif

int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
{
	if (WARN_ON(skb_is_nonlinear(skb)))
		return;
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	__skb_set_length(skb, len);
}

void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
{
	unsigned int diff = len - skb->len;

	if (skb_tailroom(skb) < diff) {
		int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
					   GFP_ATOMIC);
		if (ret)
			return ret;
	}
	__skb_set_length(skb, len);
	return 0;
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor) {
		skb->destructor(skb);
		skb->destructor = NULL;
		skb->sk = NULL;
	} else {
		BUG_ON(skb->sk);
	}
}

/**
 *	skb_orphan_frags - orphan the frags contained in a buffer
 *	@skb: buffer to orphan frags from
 *	@gfp_mask: allocation mask for replacement pages
 *
 *	For each frag in the SKB which needs a destructor (i.e. has an
 *	owner) create a copy of that frag and release the original
 *	page by calling the destructor.
 */
static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (likely(!skb_zcopy(skb)))
		return 0;
	if (!skb_zcopy_is_nouarg(skb) &&
	    skb_uarg(skb)->callback == msg_zerocopy_callback)
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

/* Frags must be orphaned, even if refcounted, if skb might loop to rx path */
static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (likely(!skb_zcopy(skb)))
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
void skb_queue_purge(struct sk_buff_head *list);

unsigned int skb_rbtree_purge(struct rb_root *root);

void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);

/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a frag from a page for receive buffer.
 * Uses GFP_ATOMIC allocations.
 */
static inline void *netdev_alloc_frag(unsigned int fragsz)
{
	return __netdev_alloc_frag_align(fragsz, ~0u);
}

static inline void *netdev_alloc_frag_align(unsigned int fragsz,
					    unsigned int align)
{
	WARN_ON_ONCE(!is_power_of_2(align));
	return __netdev_alloc_frag_align(fragsz, -align);
}

struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
				   gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
					       unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

/* legacy helper around __netdev_alloc_skb() */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	return __netdev_alloc_skb(NULL, length, gfp_mask);
}

/* legacy helper around netdev_alloc_skb() */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return netdev_alloc_skb(NULL, length);
}


static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}
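
/*
 * Illustrative sketch (not part of the original header): refilling an
 * rx ring; the NET_IP_ALIGN headroom keeps the IP header aligned once
 * the 14-byte ethernet header is in place.
 *
 *	skb = netdev_alloc_skb_ip_align(dev, rx_buf_len);
 *	if (!skb)
 *		break;		// retry on the next refill pass
 *	// ... map skb->data for device DMA and post the descriptor ...
 */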

static inline void skb_free_frag(void *addr)
{
	page_frag_free(addr);
}

void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);

static inline void *napi_alloc_frag(unsigned int fragsz)
{
	return __napi_alloc_frag_align(fragsz, ~0u);
}

static inline void *napi_alloc_frag_align(unsigned int fragsz,
					  unsigned int align)
{
	WARN_ON_ONCE(!is_power_of_2(align));
	return __napi_alloc_frag_align(fragsz, -align);
}

struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
				 unsigned int length, gfp_t gfp_mask);
static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
					     unsigned int length)
{
	return __napi_alloc_skb(napi, length, GFP_ATOMIC);
}
void napi_consume_skb(struct sk_buff *skb, int budget);

void napi_skb_free_stolen_head(struct sk_buff *skb);
void __kfree_skb_defer(struct sk_buff *skb);

/**
 * __dev_alloc_pages - allocate page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 * @order: size of the allocation
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
					     unsigned int order)
{
	/* This piece of code contains several assumptions.
	 * 1.  This is for device Rx, therefore a cold page is preferred.
	 * 2.  The expectation is the user wants a compound page.
	 * 3.  If requesting a order 0 page it will not be compound
	 *     due to the check to see if order has a value in prep_new_page
	 * 4.  __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
	 *     code in gfp_to_alloc_flags that should be enforcing this.
	 */
	gfp_mask |= __GFP_COMP | __GFP_MEMALLOC;

	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
}

static inline struct page *dev_alloc_pages(unsigned int order)
{
	return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
}

/**
 * __dev_alloc_page - allocate a page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
{
	return __dev_alloc_pages(gfp_mask, 0);
}

static inline struct page *dev_alloc_page(void)
{
	return dev_alloc_pages(0);
}

/**
 * dev_page_is_reusable - check whether a page can be reused for network Rx
 * @page: the page to test
 *
 * A page shouldn't be considered for reusing/recycling if it was allocated
 * under memory pressure or at a distant memory node.
 *
 * Returns false if this page should be returned to page allocator, true
 * otherwise.
 */
static inline bool dev_page_is_reusable(const struct page *page)
{
	return likely(page_to_nid(page) == numa_mem_id() &&
		      !page_is_pfmemalloc(page));
}

/**
 *	skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
 *	@page: The page that was allocated from skb_alloc_page
 *	@skb: The skb that may need pfmemalloc set
 */
static inline void skb_propagate_pfmemalloc(const struct page *page,
					    struct sk_buff *skb)
{
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc = true;
}
3011
3012
3013
3014
3015
3016static inline unsigned int skb_frag_off(const skb_frag_t *frag)
3017{
3018 return frag->bv_offset;
3019}
3020
3021
3022
3023
3024
3025
3026static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
3027{
3028 frag->bv_offset += delta;
3029}
3030
3031
3032
3033
3034
3035
3036static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset)
3037{
3038 frag->bv_offset = offset;
3039}
3040
3041
3042
3043
3044
3045
3046static inline void skb_frag_off_copy(skb_frag_t *fragto,
3047 const skb_frag_t *fragfrom)
3048{
3049 fragto->bv_offset = fragfrom->bv_offset;
3050}
3051
3052
3053
3054
3055
3056
3057
3058static inline struct page *skb_frag_page(const skb_frag_t *frag)
3059{
3060 return frag->bv_page;
3061}
3062
3063
3064
3065
3066
3067
3068
3069static inline void __skb_frag_ref(skb_frag_t *frag)
3070{
3071 get_page(skb_frag_page(frag));
3072}
3073
3074
3075
3076
3077
3078
3079
3080
3081static inline void skb_frag_ref(struct sk_buff *skb, int f)
3082{
3083 __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
3084}
3085
3086
3087
3088
3089
3090
3091
3092
3093
3094static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle)
3095{
3096 struct page *page = skb_frag_page(frag);
3097
3098#ifdef CONFIG_PAGE_POOL
3099 if (recycle && page_pool_return_skb_page(page))
3100 return;
3101#endif
3102 put_page(page);
3103}
3104
3105
3106
3107
3108
3109
3110
3111
3112static inline void skb_frag_unref(struct sk_buff *skb, int f)
3113{
3114 __skb_frag_unref(&skb_shinfo(skb)->frags[f], skb->pp_recycle);
3115}

/**
 * skb_frag_address - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. The page must already
 * be mapped.
 */
static inline void *skb_frag_address(const skb_frag_t *frag)
{
	return page_address(skb_frag_page(frag)) + skb_frag_off(frag);
}

/**
 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. Checks that the page
 * is mapped and returns %NULL otherwise.
 */
static inline void *skb_frag_address_safe(const skb_frag_t *frag)
{
	void *ptr = page_address(skb_frag_page(frag));
	if (unlikely(!ptr))
		return NULL;

	return ptr + skb_frag_off(frag);
}

/**
 * skb_frag_page_copy() - sets the page in a fragment from another fragment
 * @fragto: skb fragment where page is set
 * @fragfrom: skb fragment page is copied from
 */
static inline void skb_frag_page_copy(skb_frag_t *fragto,
				      const skb_frag_t *fragfrom)
{
	fragto->bv_page = fragfrom->bv_page;
}

/**
 * __skb_frag_set_page - sets the page contained in a paged fragment
 * @frag: the paged fragment
 * @page: the page to set
 *
 * Sets the fragment @frag to contain @page.
 */
static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
	frag->bv_page = page;
}

/**
 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
 * @skb: the buffer
 * @f: the fragment offset
 * @page: the page to set
 *
 * Sets the @f'th fragment of @skb to contain @page.
 */
static inline void skb_frag_set_page(struct sk_buff *skb, int f,
				     struct page *page)
{
	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
}

bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);

/**
 * skb_frag_dma_map - maps a paged fragment via the DMA API
 * @dev: the device to map the fragment to
 * @frag: the paged fragment to map
 * @offset: the offset within the fragment (starting at the
 *          fragment's own offset)
 * @size: the number of bytes to map
 * @dir: the direction of the mapping
 *
 * Maps the page associated with @frag to @dev.
 */
static inline dma_addr_t skb_frag_dma_map(struct device *dev,
					  const skb_frag_t *frag,
					  size_t offset, size_t size,
					  enum dma_data_direction dir)
{
	return dma_map_page(dev, skb_frag_page(frag),
			    skb_frag_off(frag) + offset, size, dir);
}
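
/*
 * Illustrative sketch (not kernel API): mapping every paged fragment of an
 * skb for transmit DMA, roughly what a driver's xmit path does. Error
 * unwinding of already-mapped fragments is omitted for brevity; real code
 * must not omit it.
 */
static inline int skbuff_example_map_frags(struct device *dev,
					   struct sk_buff *skb,
					   dma_addr_t *addrs)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		addrs[i] = skb_frag_dma_map(dev, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, addrs[i]))
			return -ENOMEM;	/* real drivers unmap 0..i-1 here */
	}
	return 0;
}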

static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
					gfp_t gfp_mask)
{
	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
}


static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
						  gfp_t gfp_mask)
{
	return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
}

/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not require the data to be copied.
 */
static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int skb_try_make_writable(struct sk_buff *skb,
					unsigned int write_len)
{
	return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
	       pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and original skb is not changed.
 *
 *	The result is skb with writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 *	skb_cow_head - skb_cow but only making the head writable
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	This function is identical to skb_cow except that we replace the
 *	skb_cloned check by skb_header_cloned.  It should be used when
 *	you only need to push on some header and do not need to modify
 *	the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}
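
/*
 * Illustrative sketch (not kernel API): making room for and writing an
 * encapsulation header on transmit. skb_push() and memcpy() are available
 * at this point in the header; error handling beyond this is the caller's
 * problem.
 */
static inline int skbuff_example_push_header(struct sk_buff *skb,
					     const void *hdr, unsigned int hlen)
{
	int err = skb_cow_head(skb, hlen);

	if (err)
		return err;	/* reallocation failed; skb is unchanged */

	memcpy(skb_push(skb, hlen), hdr, hlen);
	return 0;
}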

/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

/**
 *	__skb_put_padto - increase size and pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *	@free_on_error: free buffer on error
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error if @free_on_error is true.
 */
static inline int __must_check __skb_put_padto(struct sk_buff *skb,
					       unsigned int len,
					       bool free_on_error)
{
	unsigned int size = skb->len;

	if (unlikely(size < len)) {
		len -= size;
		if (__skb_pad(skb, len, free_on_error))
			return -ENOMEM;
		__skb_put(skb, len);
	}
	return 0;
}

/**
 *	skb_put_padto - increase size and pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len)
{
	return __skb_put_padto(skb, len, true);
}
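
/*
 * Illustrative sketch (not kernel API): enforcing the 60-byte minimum frame
 * size before handing a packet to an Ethernet MAC. 60 is ETH_ZLEN, spelled
 * out as a constant here because if_ether.h may not be included at this
 * point.
 */
static inline int skbuff_example_pad_eth_min(struct sk_buff *skb)
{
	/* on failure the skb has already been freed, so only report it */
	return skb_put_padto(skb, 60);
}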

static inline int skb_add_data(struct sk_buff *skb,
			       struct iov_iter *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		__wsum csum = 0;
		if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
						 &csum, from)) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
				    const struct page *page, int off)
{
	if (skb_zcopy(skb))
		return false;
	if (i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == skb_frag_page(frag) &&
		       off == skb_frag_off(frag) + skb_frag_size(frag);
	}
	return false;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 * skb_has_shared_frag - can any frag be overwritten
 * @skb: buffer to test
 *
 * Return true if the skb has at least one frag that might be modified
 * by an external entity (as in vmsplice()/sendfile())
 */
static inline bool skb_has_shared_frag(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) &&
	       skb_shinfo(skb)->flags & SKBFL_SHARED_FRAG;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}
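
/*
 * Illustrative sketch (not kernel API): guaranteeing the whole packet sits
 * in the linear area before flat-buffer parsing. After skb_linearize()
 * succeeds, skb->data .. skb->data + skb->len is contiguous.
 */
static inline void *skbuff_example_flat_data(struct sk_buff *skb)
{
	if (skb_linearize(skb))
		return NULL;	/* -ENOMEM: caller should drop the packet */
	return skb->data;
}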

static __always_inline void
__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
		     unsigned int off)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_block_sub(skb->csum,
					   csum_partial(start, len, 0), off);
	else if (skb->ip_summed == CHECKSUM_PARTIAL &&
		 skb_checksum_start_offset(skb) < 0)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */
static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	__skb_postpull_rcsum(skb, start, len, 0);
}

static __always_inline void
__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
		     unsigned int off)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_block_add(skb->csum,
					   csum_partial(start, len, 0), off);
}

/**
 *	skb_postpush_rcsum - update checksum for received skb after push
 *	@skb: buffer to update
 *	@start: start of data after push
 *	@len: length of data pushed
 *
 *	After doing a push on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum.
 */
static inline void skb_postpush_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	__skb_postpush_rcsum(skb, start, len, 0);
}

void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *	skb_push_rcsum - push skb and update receive checksum
 *	@skb: buffer to update
 *	@len: length of data pulled
 *
 *	This function performs an skb_push on the packet and updates
 *	the CHECKSUM_COMPLETE checksum.  It should be used on
 *	receive path processing instead of skb_push unless you know
 *	that the checksum difference is zero (e.g., a valid IP header)
 *	or you are setting ip_summed to CHECKSUM_NONE.
 */
static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
{
	skb_push(skb, len);
	skb_postpush_rcsum(skb, skb->data, len);
	return skb->data;
}
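
/*
 * Illustrative sketch (not kernel API): peeking past an outer header on the
 * receive path without invalidating a CHECKSUM_COMPLETE value, roughly what
 * a tunnel demultiplexer does. pskb_may_pull() is declared earlier in this
 * header.
 */
static inline int skbuff_example_peek_inner(struct sk_buff *skb,
					    unsigned int hlen)
{
	if (!pskb_may_pull(skb, hlen))
		return -EINVAL;		/* packet shorter than the header */

	skb_pull_rcsum(skb, hlen);	/* checksum-aware pull */
	/* ... inspect the inner packet here ... */
	skb_push_rcsum(skb, hlen);	/* restore; checksum stays coherent */
	return 0;
}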

int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets is still valid after the operation.
 *	It can change skb pointers.
 */
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	return pskb_trim_rcsum_slow(skb, len);
}

static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	__skb_trim(skb, len);
	return 0;
}

static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __skb_grow(skb, len);
}

#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
#define skb_rb_first(root) rb_to_skb(rb_first(root))
#define skb_rb_last(root)  rb_to_skb(rb_last(root))
#define skb_rb_next(skb)   rb_to_skb(rb_next(&(skb)->rbnode))
#define skb_rb_prev(skb)   rb_to_skb(rb_prev(&(skb)->rbnode))

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;					\
		     skb != (struct sk_buff *)(queue);				\
		     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)					\
		for (skb = (queue)->next, tmp = skb->next;			\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

#define skb_queue_walk_from(queue, skb)						\
		for (; skb != (struct sk_buff *)(queue);			\
		     skb = skb->next)

#define skb_rbtree_walk(skb, root)						\
		for (skb = skb_rb_first(root); skb != NULL;			\
		     skb = skb_rb_next(skb))

#define skb_rbtree_walk_from(skb)						\
		for (; skb != NULL;						\
		     skb = skb_rb_next(skb))

#define skb_rbtree_walk_from_safe(skb, tmp)					\
		for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL);	\
		     skb = tmp)

#define skb_queue_walk_from_safe(queue, skb, tmp)				\
		for (tmp = skb->next;						\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;					\
		     skb != (struct sk_buff *)(queue);				\
		     skb = skb->prev)

#define skb_queue_reverse_walk_safe(queue, skb, tmp)				\
		for (skb = (queue)->prev, tmp = skb->prev;			\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->prev)

#define skb_queue_reverse_walk_from_safe(queue, skb, tmp)			\
		for (tmp = skb->prev;						\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->prev)
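
/*
 * Illustrative sketch (not kernel API): summing the bytes parked on an
 * sk_buff_head with the walk macro. The caller must serialize against
 * concurrent queue changes (e.g. by holding queue->lock); the macro itself
 * takes no locks.
 */
static inline unsigned int skbuff_example_queue_bytes(struct sk_buff_head *queue)
{
	struct sk_buff *skb;
	unsigned int bytes = 0;

	skb_queue_walk(queue, skb)
		bytes += skb->len;
	return bytes;
}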

static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

#define skb_walk_frags(skb, iter)	\
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)


int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
				int *err, long *timeo_p,
				const struct sk_buff *skb);
struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
					  struct sk_buff_head *queue,
					  unsigned int flags,
					  int *off, int *err,
					  struct sk_buff **last);
struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
					struct sk_buff_head *queue,
					unsigned int flags, int *off, int *err,
					struct sk_buff **last);
struct sk_buff *__skb_recv_datagram(struct sock *sk,
				    struct sk_buff_head *sk_queue,
				    unsigned int flags, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
				  int *err);
__poll_t datagram_poll(struct file *file, struct socket *sock,
		       struct poll_table_struct *wait);
int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
			   struct iov_iter *to, int size);
static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
					struct msghdr *msg, int size)
{
	return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
}
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
				   struct msghdr *msg);
int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
				    struct iov_iter *to, int len,
				    struct ahash_request *hash);
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
				struct iov_iter *from, int len);
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
static inline void skb_free_datagram_locked(struct sock *sk,
					    struct sk_buff *skb)
{
	__skb_free_datagram_locked(sk, skb, 0);
}
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
			      int len);
int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int len,
		    unsigned int flags);
int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
			 int len);
int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
		 int len, int hlen);
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
void skb_scrub_packet(struct sk_buff *skb, bool xnet);
bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features,
				 unsigned int offset);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, int write_len);
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
int skb_eth_pop(struct sk_buff *skb);
int skb_eth_push(struct sk_buff *skb, const unsigned char *dst,
		 const unsigned char *src);
int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
		  int mac_len, bool ethernet);
int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
		 bool ethernet);
int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
int skb_mpls_dec_ttl(struct sk_buff *skb);
struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
			     gfp_t gfp);

static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
{
	return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT;
}

static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
{
	return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
}

struct skb_checksum_ops {
	__wsum (*update)(const void *mem, int len, __wsum wsum);
	__wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
};

extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly;

__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
		      __wsum csum, const struct skb_checksum_ops *ops);
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
		    __wsum csum);

static inline void * __must_check
__skb_header_pointer(const struct sk_buff *skb, int offset, int len,
		     const void *data, int hlen, void *buffer)
{
	if (likely(hlen - offset >= len))
		return (void *)data + offset;

	if (!skb || unlikely(skb_copy_bits(skb, offset, buffer, len) < 0))
		return NULL;

	return buffer;
}

static inline void * __must_check
skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
{
	return __skb_header_pointer(skb, offset, len, skb->data,
				    skb_headlen(skb), buffer);
}
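
/*
 * Illustrative sketch (not kernel API): safely reading a fixed-size header
 * that may straddle paged fragments. "struct skbuff_example_hdr" is invented
 * for the example; real callers use their protocol's header type.
 */
struct skbuff_example_hdr {
	__be16 type;
	__be16 len;
};

static inline int skbuff_example_parse(const struct sk_buff *skb, int offset,
				       struct skbuff_example_hdr *out)
{
	const struct skbuff_example_hdr *hdr;

	/* Copies into *out only when the bytes are not directly reachable */
	hdr = skb_header_pointer(skb, offset, sizeof(*out), out);
	if (!hdr)
		return -EINVAL;	/* packet too short */
	if (hdr != out)
		*out = *hdr;	/* normalize: always hand back a copy */
	return 0;
}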

/**
 *	skb_needs_linearize - check if we need to linearize a given skb
 *			      depending on the given device features.
 *	@skb: socket buffer to check
 *	@features: net device features
 *
 *	Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG.
 */
static inline bool skb_needs_linearize(struct sk_buff *skb,
				       netdev_features_t features)
{
	return skb_is_nonlinear(skb) &&
	       ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
		(skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
}

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}

void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct __kernel_old_timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and stores
 *	it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct __kernel_old_timeval *stamp)
{
	*stamp = ns_to_kernel_old_timeval(skb->tstamp);
}

static inline void skb_get_new_timestamp(const struct sk_buff *skb,
					 struct __kernel_sock_timeval *stamp)
{
	struct timespec64 ts = ktime_to_timespec64(skb->tstamp);

	stamp->tv_sec = ts.tv_sec;
	stamp->tv_usec = ts.tv_nsec / 1000;
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
				       struct __kernel_old_timespec *stamp)
{
	struct timespec64 ts = ktime_to_timespec64(skb->tstamp);

	stamp->tv_sec = ts.tv_sec;
	stamp->tv_nsec = ts.tv_nsec;
}

static inline void skb_get_new_timestampns(const struct sk_buff *skb,
					   struct __kernel_timespec *stamp)
{
	struct timespec64 ts = ktime_to_timespec64(skb->tstamp);

	stamp->tv_sec = ts.tv_sec;
	stamp->tv_nsec = ts.tv_nsec;
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return 0;
}

static inline u8 skb_metadata_len(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->meta_len;
}

static inline void *skb_metadata_end(const struct sk_buff *skb)
{
	return skb_mac_header(skb);
}

static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
					  const struct sk_buff *skb_b,
					  u8 meta_len)
{
	const void *a = skb_metadata_end(skb_a);
	const void *b = skb_metadata_end(skb_b);

	/* Using a more efficient variant than a plain call to memcmp(). */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	u64 diffs = 0;

	switch (meta_len) {
#define __it(x, op) (x -= sizeof(u##op))
#define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
	case 32: diffs |= __it_diff(a, b, 64);
		fallthrough;
	case 24: diffs |= __it_diff(a, b, 64);
		fallthrough;
	case 16: diffs |= __it_diff(a, b, 64);
		fallthrough;
	case  8: diffs |= __it_diff(a, b, 64);
		break;
	case 28: diffs |= __it_diff(a, b, 64);
		fallthrough;
	case 20: diffs |= __it_diff(a, b, 64);
		fallthrough;
	case 12: diffs |= __it_diff(a, b, 64);
		fallthrough;
	case  4: diffs |= __it_diff(a, b, 32);
		break;
	}
	return diffs;
#else
	return memcmp(a - meta_len, b - meta_len, meta_len);
#endif
}

static inline bool skb_metadata_differs(const struct sk_buff *skb_a,
					const struct sk_buff *skb_b)
{
	u8 len_a = skb_metadata_len(skb_a);
	u8 len_b = skb_metadata_len(skb_b);

	if (!(len_a | len_b))
		return false;

	return len_a != len_b ?
	       true : __skb_metadata_differs(skb_a, skb_b, len_a);
}

static inline void skb_metadata_set(struct sk_buff *skb, u8 meta_len)
{
	skb_shinfo(skb)->meta_len = meta_len;
}

static inline void skb_metadata_clear(struct sk_buff *skb)
{
	skb_metadata_set(skb, 0);
}
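
/*
 * Illustrative sketch (not kernel API): a GRO-style merge check. Packets
 * carrying different XDP metadata must not be coalesced, which is exactly
 * what skb_metadata_differs() decides.
 */
static inline bool skbuff_example_can_coalesce_meta(const struct sk_buff *a,
						    const struct sk_buff *b)
{
	return !skb_metadata_differs(a, b);
}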

struct sk_buff *skb_clone_sk(struct sk_buff *skb);

#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING

void skb_clone_tx_timestamp(struct sk_buff *skb);
bool skb_defer_rx_timestamp(struct sk_buff *skb);

#else

static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	return false;
}

#endif

/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb back to the stack with a
 * timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps
 *
 */
void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps);

void __skb_tstamp_tx(struct sk_buff *orig_skb, const struct sk_buff *ack_skb,
		     struct skb_shared_hwtstamps *hwtstamps,
		     struct sock *sk, int tstype);

/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb:	the original outgoing packet
 * @hwtstamps:	hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket.  Errors are silently ignored.
 */
void skb_tstamp_tx(struct sk_buff *orig_skb,
		   struct skb_shared_hwtstamps *hwtstamps);

/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC Drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * Specifically, one should make absolutely sure that this function is
 * called before TX completion of this packet can trigger.  Otherwise
 * the packet could potentially already be freed.
 *
 * @skb: A socket buffer.
 */
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	skb_clone_tx_timestamp(skb);
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
		skb_tstamp_tx(skb, NULL);
}

/**
 * skb_complete_wifi_ack - deliver skb with wifi status
 *
 * @skb: the original outgoing packet
 * @acked: ack status
 *
 */
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
__sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
		skb->csum_valid ||
		(skb->ip_summed == CHECKSUM_PARTIAL &&
		 skb_checksum_start_offset(skb) >= 0));
}

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP,
 *	this function can be used to verify that checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is okay.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}

static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level == 0)
			skb->ip_summed = CHECKSUM_NONE;
		else
			skb->csum_level--;
	}
}

static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
			skb->csum_level++;
	} else if (skb->ip_summed == CHECKSUM_NONE) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = 0;
	}
}

static inline void __skb_reset_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		skb->ip_summed = CHECKSUM_NONE;
		skb->csum_level = 0;
	}
}

/* Check if we need to perform checksum complete validation.
 *
 * Returns true if checksum complete is needed, false otherwise
 * (either checksum is unnecessary or zero checksum is allowed).
 */
static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
						  bool zero_okay,
						  __sum16 check)
{
	if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
		skb->csum_valid = 1;
		__skb_decr_checksum_unnecessary(skb);
		return false;
	}

	return true;
}

/* For small packets <= CHECKSUM_BREAK perform checksum complete directly
 * in checksum_init.
 */
#define CHECKSUM_BREAK 76

/* Unset checksum-complete
 *
 * Unset checksum complete can be done when packet is being modified
 * (uncompressed for instance) and checksum-complete value is
 * invalidated.
 */
static inline void skb_checksum_complete_unset(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/* Validate (init) checksum based on checksum complete.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete. In the latter
 *	case the ip_summed will not be CHECKSUM_UNNECESSARY and the pseudo
 *	checksum is stored in skb->csum for use in __skb_checksum_complete
 *   non-zero: value of invalid checksum
 */
static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
						       bool complete,
						       __wsum psum)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!csum_fold(csum_add(psum, skb->csum))) {
			skb->csum_valid = 1;
			return 0;
		}
	}

	skb->csum = psum;

	if (complete || skb->len <= CHECKSUM_BREAK) {
		__sum16 csum;

		csum = __skb_checksum_complete(skb);
		skb->csum_valid = !csum;
		return csum;
	}

	return 0;
}

static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
{
	return 0;
}

/* Perform checksum validate (init). Note that this is a macro so that the
 * compute_pseudo callback, which supplies the pseudo header checksum for
 * the protocol being validated, can be passed by name.
 */
#define __skb_checksum_validate(skb, proto, complete,			\
				zero_okay, check, compute_pseudo)	\
({									\
	__sum16 __ret = 0;						\
	skb->csum_valid = 0;						\
	if (__skb_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_checksum_validate_complete(skb,		\
				complete, compute_pseudo(skb, proto));	\
	__ret;								\
})

#define skb_checksum_init(skb, proto, compute_pseudo)			\
	__skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)

#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo)	\
	__skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)

#define skb_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)

#define skb_checksum_validate_zero_check(skb, proto, check,		\
					 compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)

#define skb_checksum_simple_validate(skb)				\
	__skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
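
/*
 * Illustrative sketch (not kernel API): how a protocol without a pseudo
 * header (ICMP, for instance) validates a received packet's checksum.
 * A zero return means the checksum is valid or was already verified.
 */
static inline __sum16 skbuff_example_validate_icmp_like(struct sk_buff *skb)
{
	return skb_checksum_simple_validate(skb);
}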

static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
{
	return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid);
}

static inline void __skb_checksum_convert(struct sk_buff *skb, __wsum pseudo)
{
	skb->csum = ~pseudo;
	skb->ip_summed = CHECKSUM_COMPLETE;
}

#define skb_checksum_try_convert(skb, proto, compute_pseudo)		\
do {									\
	if (__skb_checksum_convert_check(skb))				\
		__skb_checksum_convert(skb, compute_pseudo(skb, proto)); \
} while (0)

static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
					      u16 start, u16 offset)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
	skb->csum_offset = offset - start;
}

/* Update skb and packet to reflect the remote checksum offload operation.
 * When called, ptr indicates the starting point for skb->csum when
 * ip_summed is CHECKSUM_COMPLETE. If we need to create checksum complete
 * here, skb_postpull_rcsum is done so skb->csum starts at ptr.
 */
static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
				       int start, int offset, bool nopartial)
{
	__wsum delta;

	if (!nopartial) {
		skb_remcsum_adjust_partial(skb, ptr, start, offset);
		return;
	}

	if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
		__skb_checksum_complete(skb);
		skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
	}

	delta = remcsum_adjust(ptr, skb->csum, start, offset);

	/* Adjust skb->csum since we changed the packet */
	skb->csum = csum_add(skb->csum, delta);
}

static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	return (void *)(skb->_nfct & NFCT_PTRMASK);
#else
	return NULL;
#endif
}

static inline unsigned long skb_get_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	return skb->_nfct;
#else
	return 0UL;
#endif
}

static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	skb->_nfct = nfct;
#endif
}

#ifdef CONFIG_SKB_EXTENSIONS
enum skb_ext_id {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	SKB_EXT_BRIDGE_NF,
#endif
#ifdef CONFIG_XFRM
	SKB_EXT_SEC_PATH,
#endif
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	TC_SKB_EXT,
#endif
#if IS_ENABLED(CONFIG_MPTCP)
	SKB_EXT_MPTCP,
#endif
	SKB_EXT_NUM, /* must be last */
};

/**
 * struct skb_ext - sk_buff extensions
 * @refcnt: 1 on allocation, deallocated on 0
 * @offset: offset to add to @data to obtain extension address
 * @chunks: size currently allocated, stored in 8-byte units
 * @data: start of extension data, variable sized
 *
 * Note: offsets/lengths are stored in chunks of 8 bytes, this allows
 * to use 'u8' types while allowing up to 2kb worth of extension data.
 */
struct skb_ext {
	refcount_t refcnt;
	u8 offset[SKB_EXT_NUM]; /* in chunks of 8 bytes */
	u8 chunks;		/* same */
	char data[] __aligned(8);
};

struct skb_ext *__skb_ext_alloc(gfp_t flags);
void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
		    struct skb_ext *ext);
void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
void __skb_ext_put(struct skb_ext *ext);

static inline void skb_ext_put(struct sk_buff *skb)
{
	if (skb->active_extensions)
		__skb_ext_put(skb->extensions);
}

static inline void __skb_ext_copy(struct sk_buff *dst,
				  const struct sk_buff *src)
{
	dst->active_extensions = src->active_extensions;

	if (src->active_extensions) {
		struct skb_ext *ext = src->extensions;

		refcount_inc(&ext->refcnt);
		dst->extensions = ext;
	}
}

static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src)
{
	skb_ext_put(dst);
	__skb_ext_copy(dst, src);
}

static inline bool __skb_ext_exist(const struct skb_ext *ext, enum skb_ext_id i)
{
	return !!ext->offset[i];
}

static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id)
{
	return skb->active_extensions & (1 << id);
}

static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
{
	if (skb_ext_exist(skb, id))
		__skb_ext_del(skb, id);
}

static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id)
{
	if (skb_ext_exist(skb, id)) {
		struct skb_ext *ext = skb->extensions;

		return (void *)ext + (ext->offset[id] << 3);
	}

	return NULL;
}
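
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
/*
 * Illustrative sketch (not kernel API): attaching a TC extension and
 * recording a chain index in it. Real users check skb_ext_add() for
 * failure exactly as done here.
 */
static inline int skbuff_example_mark_chain(struct sk_buff *skb, __u32 chain)
{
	struct tc_skb_ext *ext = skb_ext_add(skb, TC_SKB_EXT);

	if (!ext)
		return -ENOMEM;	/* extension allocation failed */
	ext->chain = chain;
	return 0;
}
#endif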

static inline void skb_ext_reset(struct sk_buff *skb)
{
	if (unlikely(skb->active_extensions)) {
		__skb_ext_put(skb->extensions);
		skb->active_extensions = 0;
	}
}

static inline bool skb_has_extensions(struct sk_buff *skb)
{
	return unlikely(skb->active_extensions);
}
#else
static inline void skb_ext_put(struct sk_buff *skb) {}
static inline void skb_ext_reset(struct sk_buff *skb) {}
static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
static inline bool skb_has_extensions(struct sk_buff *skb) { return false; }
#endif /* CONFIG_SKB_EXTENSIONS */

static inline void nf_reset_ct(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb_nfct(skb));
	skb->_nfct = 0;
#endif
}

static inline void nf_reset_trace(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
	skb->nf_trace = 0;
#endif
}

static inline void ipvs_reset(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IP_VS)
	skb->ipvs_property = 0;
#endif
}

/* Note: This doesn't put any conntrack info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
			     bool copy)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->_nfct = src->_nfct;
	nf_conntrack_get(skb_nfct(src));
#endif
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
	if (copy)
		dst->nf_trace = src->nf_trace;
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb_nfct(dst));
#endif
	__nf_copy(dst, src, true);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline int secpath_exists(const struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
	return skb_ext_exist(skb, SKB_EXT_SEC_PATH);
#else
	return 0;
#endif
}

static inline bool skb_irq_freeable(const struct sk_buff *skb)
{
	return !skb->destructor &&
	       !secpath_exists(skb) &&
	       !skb_nfct(skb) &&
	       !skb->_skb_refdst &&
	       !skb_has_frag_list(skb);
}

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}

static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val)
{
	skb->dst_pending_confirm = val;
}

static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
{
	return skb->dst_pending_confirm != 0;
}

static inline struct sec_path *skb_sec_path(const struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
	return skb_ext_find(skb, SKB_EXT_SEC_PATH);
#else
	return NULL;
#endif
}

/* Keeps track of mac header offset relative to skb->head.
 * It is useful for TSO of Tunneling protocol. E.g. GRE.
 * For non-tunnel skb it points to skb_mac_header() and for
 * tunnel skb it points to outer mac header.
 * Keeps track also of the inner mac header offset.
 */
struct skb_gso_cb {
	union {
		int	mac_offset;
		int	data_offset;
	};
	int	encap_level;
	__wsum	csum;
	__u16	csum_start;
};
#define SKB_GSO_CB_OFFSET	32
#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET))

static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
{
	return (skb_mac_header(inner_skb) - inner_skb->head) -
	       SKB_GSO_CB(inner_skb)->mac_offset;
}

static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
{
	int new_headroom, headroom;
	int ret;

	headroom = skb_headroom(skb);
	ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
	if (ret)
		return ret;

	new_headroom = skb_headroom(skb);
	SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
	return 0;
}

static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
{
	/* Do not update partial checksums if remote checksum is enabled. */
	if (skb->remcsum_offload)
		return;

	SKB_GSO_CB(skb)->csum = res;
	SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
}

/* Compute the checksum for a gso segment. First compute the checksum value
 * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
 * then add in skb->csum (checksum from csum_start to end of packet).
 * skb->csum and csum_start are then updated to reflect the checksum of the
 * resultant packet starting from the transport header -- the resultant
 * checksum is in the res argument (i.e. RAW checksum starting from transport
 * header as that should have been handled by the caller).
 */
static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
{
	unsigned char *csum_start = skb_transport_header(skb);
	int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
	__wsum partial = SKB_GSO_CB(skb)->csum;

	SKB_GSO_CB(skb)->csum = res;
	SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;

	return csum_fold(csum_partial(csum_start, plen, partial));
}

static inline bool skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
}

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
}

static inline void skb_gso_reset(struct sk_buff *skb)
{
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_segs = 0;
	skb_shinfo(skb)->gso_type = 0;
}
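
/*
 * Illustrative sketch (not kernel API): the classic "does this packet need
 * software segmentation" test a transmit path might apply before calling
 * skb_segment(). NETIF_F_GSO_MASK comes from linux/netdev_features.h.
 */
static inline bool skbuff_example_needs_sw_gso(const struct sk_buff *skb,
					       netdev_features_t features)
{
	return skb_is_gso(skb) && !(features & NETIF_F_GSO_MASK);
}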

static inline void skb_increase_gso_size(struct skb_shared_info *shinfo,
					 u16 increment)
{
	if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
		return;
	shinfo->gso_size += increment;
}

static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo,
					 u16 decrement)
{
	if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
		return;
	shinfo->gso_size -= decrement;
}

void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set.
	 */
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one.  Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper, to document places where we make this assertion.
 */
static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
				     unsigned int transport_len,
				     __sum16(*skb_chkf)(struct sk_buff *skb));

/**
 * skb_head_is_locked - Determine if the skb->head is locked down
 * @skb: skb to check
 *
 * The head on skbs built around a head frag can be removed if they are
 * not cloned. This function returns true if the skb head is locked down
 * due to either being allocated via kmalloc, or by being a clone with
 * multiple references to the head.
 */
static inline bool skb_head_is_locked(const struct sk_buff *skb)
{
	return !skb->head_frag || skb_cloned(skb);
}

/* Local Checksum Offload.
 * Compute outer checksum based on the assumption that the
 * inner checksum will be offloaded later.
 * See Documentation/networking/checksum-offloads.rst for
 * explanation of how this works.
 * Fill in outer checksum adjustment (e.g. with sum of outer
 * pseudo-header) before calling.
 * Also ensure that inner checksum is in linear data area.
 */
static inline __wsum lco_csum(struct sk_buff *skb)
{
	unsigned char *csum_start = skb_checksum_start(skb);
	unsigned char *l4_hdr = skb_transport_header(skb);
	__wsum partial;

	/* Start with complement of inner checksum adjustment */
	partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
						    skb->csum_offset));

	/* Add in checksum of our headers (incl. outer checksum
	 * adjustment filled in by caller) and return result.
	 */
	return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
}

static inline bool skb_is_redirected(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_REDIRECT
	return skb->redirected;
#else
	return false;
#endif
}

static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
{
#ifdef CONFIG_NET_REDIRECT
	skb->redirected = 1;
	skb->from_ingress = from_ingress;
	if (skb->from_ingress)
		skb->tstamp = 0;
#endif
}

static inline void skb_reset_redirect(struct sk_buff *skb)
{
#ifdef CONFIG_NET_REDIRECT
	skb->redirected = 0;
#endif
}

static inline bool skb_csum_is_sctp(struct sk_buff *skb)
{
	return skb->csum_not_inet;
}

static inline void skb_set_kcov_handle(struct sk_buff *skb,
				       const u64 kcov_handle)
{
#ifdef CONFIG_KCOV
	skb->kcov_handle = kcov_handle;
#endif
}

static inline u64 skb_get_kcov_handle(struct sk_buff *skb)
{
#ifdef CONFIG_KCOV
	return skb->kcov_handle;
#else
	return 0;
#endif
}

#ifdef CONFIG_PAGE_POOL
static inline void skb_mark_for_recycle(struct sk_buff *skb, struct page *page,
					struct page_pool *pp)
{
	skb->pp_recycle = 1;
	page_pool_store_mem_info(page, pp);
}
#endif

static inline bool skb_pp_recycle(struct sk_buff *skb, void *data)
{
	if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
		return false;
	return page_pool_return_skb_page(virt_to_page(data));
}

#endif
#endif /* _LINUX_SKBUFF_H */