/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 */
#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/bvec.h>
#include <linux/cache.h>
#include <linux/rbtree.h>
#include <linux/socket.h>
#include <linux/refcount.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <net/flow_dissector.h>
#include <linux/splice.h>
#include <linux/in6.h>
#include <linux/if_packet.h>
#include <net/flow.h>
#include <net/page_pool.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <linux/netfilter/nf_conntrack_common.h>
#endif

/*
 * Checksumming of received and transmitted packets (condensed summary):
 *
 * CHECKSUM_NONE	- the device did not checksum this packet; the
 *			  stack must verify any checksums in software.
 * CHECKSUM_UNNECESSARY	- the hardware already verified the checksum(s);
 *			  skb->csum_level says how many consecutive
 *			  checksums were validated, up to
 *			  SKB_MAX_CSUM_LEVEL.
 * CHECKSUM_COMPLETE	- the hardware computed the checksum of the
 *			  complete packet and stored it in skb->csum;
 *			  the stack completes the verification.
 * CHECKSUM_PARTIAL	- used on transmit: the checksum over the data
 *			  starting at skb->csum_start is still to be
 *			  computed and stored at offset skb->csum_offset
 *			  from there, typically by the device.
 */
#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

/* Maximum value in skb->csum_level */
#define SKB_MAX_CSUM_LEVEL	3

#define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
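
/*
 * Illustrative sketch (not from the original header): what SKB_TRUESIZE
 * accounts for when charging a 1500 byte packet to a socket.  The byte
 * counts below assume 64 byte cache lines and are only an example:
 *
 *	SKB_TRUESIZE(1500)
 *	  = 1500
 *	  + SKB_DATA_ALIGN(sizeof(struct sk_buff))	    (~256 bytes)
 *	  + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) (~320 bytes)
 *
 * i.e. the payload plus the aligned metadata every skb drags along, which
 * is what sk_rmem_alloc/sk_wmem_alloc get charged with.
 */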

struct ahash_request;
struct net_device;
struct scatterlist;
struct pipe_inode_info;
struct iov_iter;
struct napi_struct;
struct bpf_prog;
union bpf_attr;
struct skb_ext;
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
	enum {
		BRNF_PROTO_UNCHANGED,
		BRNF_PROTO_8021Q,
		BRNF_PROTO_PPPOE
	} orig_proto:8;
	u8			pkt_otherhost:1;
	u8			in_prerouting:1;
	u8			bridged_dnat:1;
	__u16			frag_max_size;
	struct net_device	*physindev;

	/* always valid & non-NULL from FORWARD on, for physdev match */
	struct net_device	*physoutdev;
	union {
		/* prerouting: detect dnat in orig/reply direction */
		__be32          ipv4_daddr;
		struct in6_addr ipv6_daddr;

		/* after prerouting + nat detected: store original source
		 * mac since neigh resolution overwrites it, only used while
		 * skb is out in neigh layer.
		 */
		char neigh_header[8];
	};
};
#endif

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
/* Chain in tc_skb_ext will be used to share the tc chain with
 * ovs recirc_id. It will be set to the current chain by tc
 * and read by ovs to recirc_id.
 */
struct tc_skb_ext {
	__u32 chain;
	__u16 mru;
	bool post_ct;
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list we
 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
 * unaligned data.
 *
 * Since GRO uses frags we allocate at least 16 regardless of page
 * size.
 */
#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif
extern int sysctl_max_skb_frags;

/* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
 * segment using its current segmentation instead.
 */
#define GSO_BY_FRAGS	0xFFFF

typedef struct bio_vec skb_frag_t;

/**
 * skb_frag_size() - Returns the size of a skb fragment
 * @frag: skb fragment
 */
static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->bv_len;
}

/**
 * skb_frag_size_set() - Sets the size of a skb fragment
 * @frag: skb fragment
 * @size: size of fragment
 */
static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->bv_len = size;
}

/**
 * skb_frag_size_add() - Increments the size of a skb fragment by @delta
 * @frag: skb fragment
 * @delta: value to add
 */
static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->bv_len += delta;
}

/**
 * skb_frag_size_sub() - Decrements the size of a skb fragment by @delta
 * @frag: skb fragment
 * @delta: value to subtract
 */
static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->bv_len -= delta;
}

/**
 * skb_frag_must_loop - Test if %p is a high memory page
 * @p: fragment's page
 */
static inline bool skb_frag_must_loop(struct page *p)
{
#if defined(CONFIG_HIGHMEM)
	if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) || PageHighMem(p))
		return true;
#endif
	return false;
}

/**
 *	skb_frag_foreach_page - loop over pages in a fragment
 *
 *	@f:		skb frag to operate on
 *	@f_off:		offset from start of f->bv_page
 *	@f_len:		length from f_off to loop over
 *	@p:		(temp var) current page
 *	@p_off:		(temp var) offset from start of current page,
 *			non-zero only on first page.
 *	@p_len:		(temp var) length in current page,
 *			< PAGE_SIZE only on first and last page.
 *	@copied:	(temp var) length so far, excluding current p_len.
 *
 *	A fragment can hold a compound page, in which case per-page
 *	operations, notably kmap_atomic, must be called for each
 *	regular page.
 */
#define skb_frag_foreach_page(f, f_off, f_len, p, p_off, p_len, copied)	\
	for (p = skb_frag_page(f) + ((f_off) >> PAGE_SHIFT),		\
	     p_off = (f_off) & (PAGE_SIZE - 1),				\
	     p_len = skb_frag_must_loop(p) ?				\
	     min_t(u32, f_len, PAGE_SIZE - p_off) : f_len,		\
	     copied = 0;						\
	     copied < f_len;						\
	     copied += p_len, p++, p_off = 0,				\
	     p_len = min_t(u32, f_len - copied, PAGE_SIZE))		\

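/*
 * Illustrative sketch (not from the original header): copying a fragment
 * out page by page with skb_frag_foreach_page(), which is required when
 * the fragment may sit in a compound highmem page.  'frag' and 'dst' are
 * hypothetical locals:
 *
 *	struct page *p;
 *	u32 p_off, p_len, copied;
 *
 *	skb_frag_foreach_page(frag, skb_frag_off(frag), skb_frag_size(frag),
 *			      p, p_off, p_len, copied) {
 *		void *va = kmap_atomic(p);
 *
 *		memcpy(dst + copied, va + p_off, p_len);
 *		kunmap_atomic(va);
 *	}
 */
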
#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since arbitrary point in time
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
};

/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp when queueing packet to NIC */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* generate wifi status information (where possible) */
	SKBTX_WIFI_STATUS = 1 << 4,

	/* generate software time stamp when entering packet scheduling */
	SKBTX_SCHED_TSTAMP = 1 << 6,
};

#define SKBTX_ANY_SW_TSTAMP	(SKBTX_SW_TSTAMP    | \
				 SKBTX_SCHED_TSTAMP)
#define SKBTX_ANY_TSTAMP	(SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)

/* Definitions for flags in struct skb_shared_info */
enum {
	/* use zcopy routines */
	SKBFL_ZEROCOPY_ENABLE = BIT(0),

	/* This indicates at least one fragment might be overwritten
	 * (as in vmsplice(), sendfile() ...)
	 * If we need to compute a TX checksum, we'll need to copy
	 * all frags to avoid possible bad checksum
	 */
	SKBFL_SHARED_FRAG = BIT(1),
};

#define SKBFL_ZEROCOPY_FRAG	(SKBFL_ZEROCOPY_ENABLE | SKBFL_SHARED_FRAG)

/*
 * The callback notifies userspace to release buffers when skb DMA is done in
 * lower device, the skb last reference should be 0 when calling this.
 * The zerocopy_success argument is true if zero copy transmit occurred,
 * false on data copy or out of memory error caused by data copy attempt.
 * The ctx field is used to track device context.
 * The desc field is used to track userspace buffer index.
 */
struct ubuf_info {
	void (*callback)(struct sk_buff *, struct ubuf_info *,
			 bool zerocopy_success);
	union {
		struct {
			unsigned long desc;
			void *ctx;
		};
		struct {
			u32 id;
			u16 len;
			u16 zerocopy:1;
			u32 bytelen;
		};
	};
	refcount_t refcnt;
	u8 flags;

	struct mmpin {
		struct user_struct *user;
		unsigned int num_pg;
	} mmp;
};

#define skb_uarg(SKB)	((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))

int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
void mm_unaccount_pinned_pages(struct mmpin *mmp);

struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size);
struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
				       struct ubuf_info *uarg);

void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);

void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
			   bool success);

int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len);
int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
			     struct msghdr *msg, int len,
			     struct ubuf_info *uarg);

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	__u8		flags;
	__u8		meta_len;
	__u8		nr_frags;
	__u8		tx_flags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	unsigned int	gso_type;
	u32		tskey;

	/*
	 * Warning : all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void		*destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)


enum {
	SKB_FCLONE_UNAVAILABLE,	/* skb has no fclone (from head_cache) */
	SKB_FCLONE_ORIG,	/* orig skb (from fclone_cache) */
	SKB_FCLONE_CLONE,	/* companion fclone skb (from fclone_cache) */
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 1,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 2,

	SKB_GSO_TCP_FIXEDID = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,

	SKB_GSO_GRE = 1 << 6,

	SKB_GSO_GRE_CSUM = 1 << 7,

	SKB_GSO_IPXIP4 = 1 << 8,

	SKB_GSO_IPXIP6 = 1 << 9,

	SKB_GSO_UDP_TUNNEL = 1 << 10,

	SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,

	SKB_GSO_PARTIAL = 1 << 12,

	SKB_GSO_TUNNEL_REMCSUM = 1 << 13,

	SKB_GSO_SCTP = 1 << 14,

	SKB_GSO_ESP = 1 << 15,

	SKB_GSO_UDP = 1 << 16,

	SKB_GSO_UDP_L4 = 1 << 17,

	SKB_GSO_FRAGLIST = 1 << 18,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

/*
 * struct sk_buff - socket buffer, the core packet metadata structure of
 * the network stack.  The packet payload lives in the buffer delimited
 * by head/data/tail/end below; everything else is per-packet state such
 * as the owning socket, timestamps, checksum status, protocol header
 * offsets and queueing hooks.
 */
struct sk_buff {
	union {
		struct {
			/* These two members must be first. */
			struct sk_buff		*next;
			struct sk_buff		*prev;

			union {
				struct net_device	*dev;
				/* Some protocols might use this space to store information,
				 * while device pointer would be NULL.
				 * UDP receive path is one user.
				 */
				unsigned long		dev_scratch;
			};
		};
		struct rb_node		rbnode; /* used in netem, ip4 defrag, and tcp stack */
		struct list_head	list;
	};

	union {
		struct sock		*sk;
		int			ip_defrag_offset;
	};

	union {
		ktime_t		tstamp;
		u64		skb_mstamp_ns; /* earliest departure time */
	};
	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	union {
		struct {
			unsigned long	_skb_refdst;
			void		(*destructor)(struct sk_buff *skb);
		};
		struct list_head	tcp_tsorted_anchor;
#ifdef CONFIG_NET_SOCK_MSG
		unsigned long		_sk_redir;
#endif
	};

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	unsigned long		 _nfct;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;

	/* Following fields are _not_ copied in __copy_skb_header()
	 * Note that queue_mapping is here mostly to fill a hole.
	 */
	__u16			queue_mapping;

/* if you move cloned around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define CLONED_MASK	(1 << 7)
#else
#define CLONED_MASK	1
#endif
#define CLONED_OFFSET()		offsetof(struct sk_buff, __cloned_offset)

	/* private: */
	__u8			__cloned_offset[0];
	/* public: */
	__u8			cloned:1,
				nohdr:1,
				fclone:2,
				peeked:1,
				head_frag:1,
				pfmemalloc:1,
				pp_recycle:1; /* page_pool recycle indicator */
#ifdef CONFIG_SKB_EXTENSIONS
	__u8			active_extensions;
#endif

	/* fields enclosed in headers_start/headers_end are copied
	 * using a single memcpy() in __copy_skb_header()
	 */
	/* private: */
	__u32			headers_start[0];
	/* public: */

/* if you move pkt_type around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_TYPE_MAX	(7 << 5)
#else
#define PKT_TYPE_MAX	7
#endif
#define PKT_TYPE_OFFSET()	offsetof(struct sk_buff, __pkt_type_offset)

	/* private: */
	__u8			__pkt_type_offset[0];
	/* public: */
	__u8			pkt_type:3;
	__u8			ignore_df:1;
	__u8			nf_trace:1;
	__u8			ip_summed:2;
	__u8			ooo_okay:1;

	__u8			l4_hash:1;
	__u8			sw_hash:1;
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;
	__u8			no_fcs:1;
	/* Indicates the inner headers are valid in the skbuff. */
	__u8			encapsulation:1;
	__u8			encap_hdr_csum:1;
	__u8			csum_valid:1;

#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_VLAN_PRESENT_BIT	7
#else
#define PKT_VLAN_PRESENT_BIT	0
#endif
#define PKT_VLAN_PRESENT_OFFSET()	offsetof(struct sk_buff, __pkt_vlan_present_offset)
	/* private: */
	__u8			__pkt_vlan_present_offset[0];
	/* public: */
	__u8			vlan_present:1;
	__u8			csum_complete_sw:1;
	__u8			csum_level:2;
	__u8			csum_not_inet:1;
	__u8			dst_pending_confirm:1;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif

	__u8			ipvs_property:1;
	__u8			inner_protocol_type:1;
	__u8			remcsum_offload:1;
#ifdef CONFIG_NET_SWITCHDEV
	__u8			offload_fwd_mark:1;
	__u8			offload_l3_fwd_mark:1;
#endif
#ifdef CONFIG_NET_CLS_ACT
	__u8			tc_skip_classify:1;
	__u8			tc_at_ingress:1;
#endif
	__u8			redirected:1;
#ifdef CONFIG_NET_REDIRECT
	__u8			from_ingress:1;
#endif
#ifdef CONFIG_TLS_DEVICE
	__u8			decrypted:1;
#endif
	__u8			slow_gro:1;

#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#endif

	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	int			skb_iif;
	__u32			hash;
	__be16			vlan_proto;
	__u16			vlan_tci;
#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
	union {
		unsigned int	napi_id;
		unsigned int	sender_cpu;
	};
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32		secmark;
#endif

	union {
		__u32		mark;
		__u32		reserved_tailroom;
	};

	union {
		__be16		inner_protocol;
		__u8		inner_ipproto;
	};

	__u16			inner_transport_header;
	__u16			inner_network_header;
	__u16			inner_mac_header;

	__be16			protocol;
	__u16			transport_header;
	__u16			network_header;
	__u16			mac_header;

#ifdef CONFIG_KCOV
	u64			kcov_handle;
#endif

	/* private: */
	__u32			headers_end[0];
	/* public: */

	/* These elements must be at the end, see alloc_skb() for details.  */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	refcount_t		users;

#ifdef CONFIG_SKB_EXTENSIONS
	/* only useable after checking ->active_extensions != 0 */
	struct skb_ext		*extensions;
#endif
};
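
/*
 * Illustrative sketch (not from the original header): how the four buffer
 * pointers relate for a linear skb.  head/end bound the allocation,
 * data/tail bound the bytes currently in use:
 *
 *	head .. headroom .. data .. skb_headlen() .. tail .. tailroom .. end
 *
 * skb_reserve() moves data and tail forward together (headroom grows),
 * skb_put() advances tail, skb_push() moves data back toward head, and
 * skb_pull() moves it forward.  struct skb_shared_info lives at 'end'.
 */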

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */

#define SKB_ALLOC_FCLONE	0x01
#define SKB_ALLOC_RX		0x02
#define SKB_ALLOC_NAPI		0x04

/**
 * skb_pfmemalloc - Test if the skb was allocated from PFMEMALLOC reserves
 * @skb: buffer
 */
static inline bool skb_pfmemalloc(const struct sk_buff *skb)
{
	return unlikely(skb->pfmemalloc);
}

/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if refcount was _not_ taken
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of reference taken or not.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check we still are in a
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->slow_gro |= !!dst;
	skb->_skb_refdst = (unsigned long)dst;
}

/**
 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * If dst entry is cached, we do not take reference and dst_release
 * will be avoided by refdst_drop. If dst entry is not cached, we take
 * reference, so that last dst_release can destroy the dst immediately.
 */
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	skb->slow_gro |= !!dst;
	skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
}

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

/**
 * skb_rtable - Returns the skb &rtable
 * @skb: buffer
 */
static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

/* When mangling skb->pkt_type from user space (e.g. nft, tc), only these
 * values are considered valid.
 */
static inline bool skb_pkt_type_ok(u32 ptype)
{
	return ptype <= PACKET_OTHERHOST;
}

/**
 * skb_napi_id - Returns the skb's NAPI id
 * @skb: buffer
 */
static inline unsigned int skb_napi_id(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	return skb->napi_id;
#else
	return 0;
#endif
}

/**
 * skb_unref - decrement the skb's reference count
 * @skb: buffer
 *
 * Returns true if we can free the skb.
 */
static inline bool skb_unref(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return false;
	if (likely(refcount_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!refcount_dec_and_test(&skb->users)))
		return false;

	return true;
}

void skb_release_head_state(struct sk_buff *skb);
void kfree_skb(struct sk_buff *skb);
void kfree_skb_list(struct sk_buff *segs);
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
void skb_tx_error(struct sk_buff *skb);

#ifdef CONFIG_TRACEPOINTS
void consume_skb(struct sk_buff *skb);
#else
static inline void consume_skb(struct sk_buff *skb)
{
	return kfree_skb(skb);
}
#endif

void __consume_stateless_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;

void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
		      bool *fragstolen, int *delta_truesize);

struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
			    int node);
struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size);

struct sk_buff *napi_build_skb(void *data, unsigned int frag_size);

/**
 * alloc_skb - allocate a network buffer
 * @size: size to allocate
 * @priority: allocation mask
 *
 * This function is a convenient wrapper around __alloc_skb().
 */
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}
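
/*
 * Illustrative sketch (not from the original header): the usual allocate,
 * reserve, fill pattern.  'payload' and 'len' are hypothetical locals:
 *
 *	struct sk_buff *skb = alloc_skb(NET_IP_ALIGN + len, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, NET_IP_ALIGN);	  - align the IP header
 *	skb_put_data(skb, payload, len);  - append the payload
 */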

struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
				     unsigned long data_len,
				     int max_page_order,
				     int *errcode,
				     gfp_t gfp_mask);
struct sk_buff *alloc_skb_for_msg(struct sk_buff *first);

/* Layout of fast clones : [skb1][skb2][fclone_ref] */
struct sk_buff_fclones {
	struct sk_buff	skb1;

	struct sk_buff	skb2;

	refcount_t	fclone_ref;
};

/**
 *	skb_fclone_busy - check if fclone is busy
 *	@sk: socket
 *	@skb: buffer
 *
 * Returns true if skb is a fast clone, and its clone is not freed.
 * Some drivers call skb_orphan() in their ndo_start_xmit(),
 * so we also check that this didnt happen.
 */
static inline bool skb_fclone_busy(const struct sock *sk,
				   const struct sk_buff *skb)
{
	const struct sk_buff_fclones *fclones;

	fclones = container_of(skb, struct sk_buff_fclones, skb1);

	return skb->fclone == SKB_FCLONE_ORIG &&
	       refcount_read(&fclones->fclone_ref) > 1 &&
	       READ_ONCE(fclones->skb2.sk) == sk;
}

/**
 * alloc_skb_fclone - allocate a network buffer from fclone cache
 * @size: size to allocate
 * @priority: allocation mask
 *
 * This function is a convenient wrapper around __alloc_skb().
 */
static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
}

struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
void skb_headers_offset_update(struct sk_buff *skb, int off);
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
void skb_copy_header(struct sk_buff *new, const struct sk_buff *old);
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone);
static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
					  gfp_t gfp_mask)
{
	return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
}

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
				     unsigned int headroom);
struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom);
struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
				int newtailroom, gfp_t priority);
int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
				     int offset, int len);
int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
			      int offset, int len);
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);

/**
 *	skb_pad			-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error.
 */
static inline int skb_pad(struct sk_buff *skb, int pad)
{
	return __skb_pad(skb, pad, true);
}
#define dev_kfree_skb(a)	consume_skb(a)

int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
			 int offset, size_t size);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
	__u32		frag_off;
};

void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st);
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st);
void skb_abort_seq_read(struct skb_seq_state *st);

unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config);

/*
 * Packet hash types specify the type of hash in skb_set_hash.
 *
 * Hash types refer to the protocol layer addresses which are used to
 * construct a packet's hash.  The hashes are used to differentiate or
 * identify flows of the protocol layer for the hash: two packets in the
 * same flow must get identical hashes, and a hash can serve as a flow
 * identifier (e.g. for RSS steering to receive queues).
 */
enum pkt_hash_types {
	PKT_HASH_TYPE_NONE,	/* Undefined type */
	PKT_HASH_TYPE_L2,	/* Input: src_MAC, dest_MAC */
	PKT_HASH_TYPE_L3,	/* Input: src_IP, dst_IP */
	PKT_HASH_TYPE_L4,	/* Input: src_IP, dst_IP, src_port, dst_port */
};

static inline void skb_clear_hash(struct sk_buff *skb)
{
	skb->hash = 0;
	skb->sw_hash = 0;
	skb->l4_hash = 0;
}

static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
{
	if (!skb->l4_hash)
		skb_clear_hash(skb);
}

static inline void
__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
{
	skb->l4_hash = is_l4;
	skb->sw_hash = is_sw;
	skb->hash = hash;
}

static inline void
skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
{
	/* Used by drivers to set hash from HW */
	__skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
}

static inline void
__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
{
	__skb_set_hash(skb, hash, true, is_l4);
}
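
/*
 * Illustrative sketch (not from the original header): a driver receive
 * path recording an RSS hash delivered by the NIC.  'rx_desc' is a
 * hypothetical descriptor:
 *
 *	skb_set_hash(skb, le32_to_cpu(rx_desc->rss_hash), PKT_HASH_TYPE_L4);
 *
 * Software-computed hashes go through __skb_set_sw_hash() instead, so
 * that skb_clear_hash_if_not_l4() and friends can tell the two apart.
 */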

void __skb_get_hash(struct sk_buff *skb);
u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
u32 skb_get_poff(const struct sk_buff *skb);
u32 __skb_get_poff(const struct sk_buff *skb, const void *data,
		   const struct flow_keys_basic *keys, int hlen);
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
			    const void *data, int hlen_proto);

static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
					int thoff, u8 ip_proto)
{
	return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
}

void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count);

struct bpf_flow_dissector;
bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
		      __be16 proto, int nhoff, int hlen, unsigned int flags);

bool __skb_flow_dissect(const struct net *net,
			const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container, const void *data,
			__be16 proto, int nhoff, int hlen, unsigned int flags);

static inline bool skb_flow_dissect(const struct sk_buff *skb,
				    struct flow_dissector *flow_dissector,
				    void *target_container, unsigned int flags)
{
	return __skb_flow_dissect(NULL, skb, flow_dissector,
				  target_container, NULL, 0, 0, 0, flags);
}

static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
					      struct flow_keys *flow,
					      unsigned int flags)
{
	memset(flow, 0, sizeof(*flow));
	return __skb_flow_dissect(NULL, skb, &flow_keys_dissector,
				  flow, NULL, 0, 0, 0, flags);
}

static inline bool
skb_flow_dissect_flow_keys_basic(const struct net *net,
				 const struct sk_buff *skb,
				 struct flow_keys_basic *flow,
				 const void *data, __be16 proto,
				 int nhoff, int hlen, unsigned int flags)
{
	memset(flow, 0, sizeof(*flow));
	return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow,
				  data, proto, nhoff, hlen, flags);
}

void skb_flow_dissect_meta(const struct sk_buff *skb,
			   struct flow_dissector *flow_dissector,
			   void *target_container);

/* Gets a skb connection tracking info, ctinfo map should be a
 * map of mapsize to translate enum ip_conntrack_info states
 * to user states.
 */
void
skb_flow_dissect_ct(const struct sk_buff *skb,
		    struct flow_dissector *flow_dissector,
		    void *target_container,
		    u16 *ctinfo_map, size_t mapsize,
		    bool post_ct);
void
skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
			     struct flow_dissector *flow_dissector,
			     void *target_container);

void skb_flow_dissect_hash(const struct sk_buff *skb,
			   struct flow_dissector *flow_dissector,
			   void *target_container);

static inline __u32 skb_get_hash(struct sk_buff *skb)
{
	if (!skb->l4_hash && !skb->sw_hash)
		__skb_get_hash(skb);

	return skb->hash;
}

static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
{
	if (!skb->l4_hash && !skb->sw_hash) {
		struct flow_keys keys;
		__u32 hash = __get_hash_from_flowi6(fl6, &keys);

		__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
	}

	return skb->hash;
}

__u32 skb_get_hash_perturb(const struct sk_buff *skb,
			   const siphash_key_t *perturb);

static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
	return skb->hash;
}

static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
{
	to->hash = from->hash;
	to->sw_hash = from->sw_hash;
	to->l4_hash = from->l4_hash;
}

static inline void skb_copy_decrypted(struct sk_buff *to,
				      const struct sk_buff *from)
{
#ifdef CONFIG_TLS_DEVICE
	to->decrypted = from->decrypted;
#endif
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end - skb->head;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
{
	bool is_zcopy = skb && skb_shinfo(skb)->flags & SKBFL_ZEROCOPY_ENABLE;

	return is_zcopy ? skb_uarg(skb) : NULL;
}

static inline void net_zcopy_get(struct ubuf_info *uarg)
{
	refcount_inc(&uarg->refcnt);
}

static inline void skb_zcopy_init(struct sk_buff *skb, struct ubuf_info *uarg)
{
	skb_shinfo(skb)->destructor_arg = uarg;
	skb_shinfo(skb)->flags |= uarg->flags;
}

static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
				 bool *have_ref)
{
	if (skb && uarg && !skb_zcopy(skb)) {
		if (unlikely(have_ref && *have_ref))
			*have_ref = false;
		else
			net_zcopy_get(uarg);
		skb_zcopy_init(skb, uarg);
	}
}

static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
{
	skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
	skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_FRAG;
}

static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
{
	return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL;
}

static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
{
	return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
}

static inline void net_zcopy_put(struct ubuf_info *uarg)
{
	if (uarg)
		uarg->callback(NULL, uarg, true);
}

static inline void net_zcopy_put_abort(struct ubuf_info *uarg, bool have_uref)
{
	if (uarg) {
		if (uarg->callback == msg_zerocopy_callback)
			msg_zerocopy_put_abort(uarg, have_uref);
		else if (have_uref)
			net_zcopy_put(uarg);
	}
}

/* Release a reference on a zerocopy structure */
static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success)
{
	struct ubuf_info *uarg = skb_zcopy(skb);

	if (uarg) {
		if (!skb_zcopy_is_nouarg(skb))
			uarg->callback(skb, uarg, zerocopy_success);

		skb_shinfo(skb)->flags &= ~SKBFL_ZEROCOPY_FRAG;
	}
}

static inline void skb_mark_not_on_list(struct sk_buff *skb)
{
	skb->next = NULL;
}

/* Iterate through singly-linked GSO fragments of an skb. */
#define skb_list_walk_safe(first, skb, next_skb)				\
	for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb);	\
	     (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL)
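
/*
 * Illustrative sketch (not from the original header): walking a segment
 * list produced by skb_segment(), freeing as we go.  The _safe variant is
 * needed because each skb is unlinked inside the loop body:
 *
 *	struct sk_buff *seg, *next;
 *
 *	skb_list_walk_safe(segs, seg, next) {
 *		skb_mark_not_on_list(seg);
 *		kfree_skb(seg);
 *	}
 */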

static inline void skb_list_del_init(struct sk_buff *skb)
{
	__list_del_entry(&skb->list);
	skb_mark_not_on_list(skb);
}

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (const struct sk_buff *) list;
}

/**
 *	skb_queue_empty_lockless - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 *	This variant can be used in lockless contexts.
 */
static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
{
	return READ_ONCE(list->next) == (const struct sk_buff *) list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (const struct sk_buff *) list;
}

/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (const struct sk_buff *) list;
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb.  It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb.  It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	refcount_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant atomic
 * changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}
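
/*
 * Illustrative sketch (not from the original header): the copy-on-write
 * dance before writing to packet data that may be shared with a clone:
 *
 *	if (skb_unclone(skb, GFP_ATOMIC))
 *		return -ENOMEM;	  - a private copy could not be made
 *	... now safe to modify skb->data ...
 */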

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_header_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

/**
 * __skb_header_release() - allow clones to use the headroom
 * @skb: buffer to operate on
 */
static inline void __skb_header_release(struct sk_buff *skb)
{
	skb->nohdr = 1;
	atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return refcount_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt status or with spinlocks held pri must
 *	be GFP_ATOMIC.
 *
 *	NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);

		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
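
/*
 * Illustrative sketch (not from the original header): a protocol receive
 * handler taking ownership of a possibly-shared skb before mangling it:
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return NET_RX_DROP;  - original was freed on clone failure
 *	... skb is now exclusively ours (users == 1) ...
 */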

/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet thats being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);

		/* Free our shared copy */
		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->next;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	__skb_peek - peek at the head of a non-empty &sk_buff_head
 *	@list_: list to peek at
 *
 *	Like skb_peek(), but the caller knows that the list is not empty.
 */
static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_)
{
	return list_->next;
}

/**
 *	skb_peek_next - peek skb following the given one from a queue
 *	@skb: skb to start from
 *	@list_: list to peek at
 *
 *	Returns %NULL when the end of the list is met or a pointer to the
 *	next element. The reference count is not incremented and the
 *	reference is therefore volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
		const struct sk_buff_head *list_)
{
	struct sk_buff *next = skb->next;

	if (next == (struct sk_buff *)list_)
		next = NULL;
	return next;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = READ_ONCE(list_->prev);

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	skb_queue_len_lockless	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 *	This variant can be used in lockless contexts.
 */
static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_)
{
	return READ_ONCE(list_->qlen);
}

/**
 *	__skb_queue_head_init - initialize non-spinlocked queue
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object.  This allows to initialize the list
 *	aspects of an sk_buff_head without reinitializing things like
 *	the spinlock.  It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known to not be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers should need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
		struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	/* See skb_queue_empty_lockless() and skb_peek_tail()
	 * for the opposite READ_ONCE()
	 */
	WRITE_ONCE(newsk->next, next);
	WRITE_ONCE(newsk->prev, prev);
	WRITE_ONCE(next->prev, newsk);
	WRITE_ONCE(prev->next, newsk);
	WRITE_ONCE(list->qlen, list->qlen + 1);
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	WRITE_ONCE(first->prev, prev);
	WRITE_ONCE(prev->next, first);

	WRITE_ONCE(last->next, next);
	WRITE_ONCE(next->prev, last);
}

/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	__skb_queue_after - queue a buffer at the list head
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	WRITE_ONCE(list->qlen, list->qlen - 1);
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	WRITE_ONCE(next->prev, prev);
	WRITE_ONCE(prev->next, next);
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
struct sk_buff *skb_dequeue(struct sk_buff_head *list);

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
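
/*
 * Illustrative sketch (not from the original header): a minimal FIFO
 * built from the primitives above.  The locked variants take list->lock,
 * so this is safe between producer and consumer contexts; deliver() is a
 * hypothetical consumer:
 *
 *	static struct sk_buff_head rxq;
 *
 *	skb_queue_head_init(&rxq);	  - setup
 *	skb_queue_tail(&rxq, skb);	  - producer
 *
 *	while ((skb = skb_dequeue(&rxq)) != NULL)
 *		deliver(skb);		  - consumer
 */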

static inline bool skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline unsigned int __skb_pagelen(const struct sk_buff *skb)
{
	unsigned int i, len = 0;

	for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
	return len;
}

static inline unsigned int skb_pagelen(const struct sk_buff *skb)
{
	return skb_headlen(skb) + __skb_pagelen(skb);
}

/**
 * __skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data with @page
 * @size: the length of the data
 *
 * Initialises the @i'th fragment of @skb to point to &size bytes at
 * offset @off within @page.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	/*
	 * Propagate page pfmemalloc to the skb if we can. The problem is
	 * that not all callers have unique ownership of the page but rely
	 * on page_is_pfmemalloc doing the right thing(tm).
	 */
	frag->bv_page	= page;
	frag->bv_offset	= off;
	skb_frag_size_set(frag, size);

	page = compound_head(page);
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc	= true;
}

/**
 * skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data with @page
 * @size: the length of the data
 *
 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
 * @skb to point to @size bytes at offset @off within @page. In
 * addition updates @skb such that @i is the last fragment.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	__skb_fill_page_desc(skb, i, page, off, size);
	skb_shinfo(skb)->nr_frags = i + 1;
}

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize);
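
/*
 * Illustrative sketch (not from the original header): attaching a page to
 * a fresh skb while keeping the bookkeeping consistent.  skb_add_rx_frag()
 * above does the len/data_len/truesize updates for you; by hand it looks
 * like:
 *
 *	skb_fill_page_desc(skb, 0, page, offset, size);
 *	skb->len      += size;
 *	skb->data_len += size;
 *	skb->truesize += PAGE_SIZE;
 */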

#define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
void *skb_put(struct sk_buff *skb, unsigned int len);
static inline void *__skb_put(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = __skb_put(skb, len);

	memset(tmp, 0, len);
	return tmp;
}

static inline void *__skb_put_data(struct sk_buff *skb, const void *data,
				   unsigned int len)
{
	void *tmp = __skb_put(skb, len);

	memcpy(tmp, data, len);
	return tmp;
}

static inline void __skb_put_u8(struct sk_buff *skb, u8 val)
{
	*(u8 *)__skb_put(skb, 1) = val;
}

static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memset(tmp, 0, len);

	return tmp;
}

static inline void *skb_put_data(struct sk_buff *skb, const void *data,
				 unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memcpy(tmp, data, len);

	return tmp;
}

static inline void skb_put_u8(struct sk_buff *skb, u8 val)
{
	*(u8 *)skb_put(skb, 1) = val;
}

void *skb_push(struct sk_buff *skb, unsigned int len);
static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

void *skb_pull(struct sk_buff *skb, unsigned int len);
static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

void *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return true;
	if (unlikely(len > skb->len))
		return false;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}

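/*
 * Illustrative sketch (not from the original header): the canonical header
 * parsing pattern.  pskb_may_pull() only guarantees linear data; pointers
 * into skb->data must be (re)loaded after it, since it may reallocate:
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;
 *	iph = ip_hdr(skb);  - safe: first sizeof(*iph) bytes are linear
 */
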
void skb_condense(struct sk_buff *skb);

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_availroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 *	allocated by sk_stream_alloc()
 */
static inline int skb_availroom(const struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb))
		return 0;

	return skb->end - skb->tail - skb->reserved_tailroom;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

/**
 *	skb_tailroom_reserve - adjust reserved_tailroom
 *	@skb: buffer to alter
 *	@mtu: maximum amount of headlen permitted
 *	@needed_tailroom: minimum amount of reserved_tailroom
 *
 *	Set reserved_tailroom so that headlen can be as large as possible but
 *	not larger than mtu and tailroom cannot be smaller than
 *	needed_tailroom.
 *	The required headroom should already have been reserved before using
 *	this function.
 */
static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
					unsigned int needed_tailroom)
{
	SKB_LINEAR_ASSERT(skb);
	if (mtu < skb_tailroom(skb) - needed_tailroom)
		/* use at most mtu */
		skb->reserved_tailroom = skb_tailroom(skb) - mtu;
	else
		/* use up to all available space */
		skb->reserved_tailroom = needed_tailroom;
}

#define ENCAP_TYPE_ETHER	0
#define ENCAP_TYPE_IPPROTO	1

static inline void skb_set_inner_protocol(struct sk_buff *skb,
					  __be16 protocol)
{
	skb->inner_protocol = protocol;
	skb->inner_protocol_type = ENCAP_TYPE_ETHER;
}

static inline void skb_set_inner_ipproto(struct sk_buff *skb,
					 __u8 ipproto)
{
	skb->inner_ipproto = ipproto;
	skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
}

static inline void skb_reset_inner_headers(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->mac_header;
	skb->inner_network_header = skb->network_header;
	skb->inner_transport_header = skb->transport_header;
}

static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}

static inline unsigned char *skb_inner_transport_header(const struct sk_buff
							*skb)
{
	return skb->head + skb->inner_transport_header;
}

static inline int skb_inner_transport_offset(const struct sk_buff *skb)
{
	return skb_inner_transport_header(skb) - skb->data;
}

static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
	skb->inner_transport_header = skb->data - skb->head;
}

static inline void skb_set_inner_transport_header(struct sk_buff *skb,
						  const int offset)
{
	skb_reset_inner_transport_header(skb);
	skb->inner_transport_header += offset;
}

static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_network_header;
}

static inline void skb_reset_inner_network_header(struct sk_buff *skb)
{
	skb->inner_network_header = skb->data - skb->head;
}

static inline void skb_set_inner_network_header(struct sk_buff *skb,
						const int offset)
{
	skb_reset_inner_network_header(skb);
	skb->inner_network_header += offset;
}

static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_mac_header;
}

static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->data - skb->head;
}

static inline void skb_set_inner_mac_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_inner_mac_header(skb);
	skb->inner_mac_header += offset;
}

static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
{
	return skb->transport_header != (typeof(skb->transport_header))~0U;
}

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_offset(const struct sk_buff *skb)
{
	return skb_mac_header(skb) - skb->data;
}

static inline u32 skb_mac_header_len(const struct sk_buff *skb)
{
	return skb->network_header - skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != (typeof(skb->mac_header))~0U;
}

static inline void skb_unset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = (typeof(skb->mac_header))~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

static inline void skb_pop_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->network_header;
}

static inline void skb_probe_transport_header(struct sk_buff *skb)
{
	struct flow_keys_basic keys;

	if (skb_transport_header_was_set(skb))
		return;

	if (skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
					     NULL, 0, 0, 0, 0))
		skb_set_transport_header(skb, keys.control.thoff);
}

static inline void skb_mac_header_rebuild(struct sk_buff *skb)
{
	if (skb_mac_header_was_set(skb)) {
		const unsigned char *old_mac = skb_mac_header(skb);

		skb_set_mac_header(skb, -skb->mac_len);
		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
	}
}

static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}

static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
{
	return skb->head + skb->csum_start;
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
{
	return skb->inner_transport_header - skb->inner_network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline int skb_inner_network_offset(const struct sk_buff *skb)
{
	return skb_inner_network_header(skb) - skb->data;
}

static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
	return pskb_may_pull(skb, skb_network_offset(skb) + len);
}
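
/*
 * Illustrative sketch (not from the original header): stamping the header
 * offsets while building an outgoing frame, so that later layers can use
 * the ip_hdr()/tcp_hdr() style accessors:
 *
 *	skb_reset_mac_header(skb);	- data points at the ethernet header
 *	skb_set_network_header(skb, ETH_HLEN);
 *	skb_set_transport_header(skb, ETH_HLEN + sizeof(struct iphdr));
 */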

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 32 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, an architecture can override this
 * value. Various parts of the networking layer expect at least 32 bytes of
 * headroom, you should not reduce this.
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif

int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
{
	if (WARN_ON(skb_is_nonlinear(skb)))
		return;
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	__skb_set_length(skb, len);
}

void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
{
	unsigned int diff = len - skb->len;

	if (skb_tailroom(skb) < diff) {
		int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
					   GFP_ATOMIC);
		if (ret)
			return ret;
	}
	__skb_set_length(skb, len);
	return 0;
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor) {
		skb->destructor(skb);
		skb->destructor = NULL;
		skb->sk		= NULL;
	} else {
		BUG_ON(skb->sk);
	}
}

/**
 *	skb_orphan_frags - orphan the frags contained in a buffer
 *	@skb: buffer to orphan frags from
 *	@gfp_mask: allocation mask for replacement pages
 *
 *	For each frag in the SKB which needs a destructor (i.e. has an
 *	owner) create a copy of that frag and release the original
 *	page by calling the destructor.
 */
static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (likely(!skb_zcopy(skb)))
		return 0;
	if (!skb_zcopy_is_nouarg(skb) &&
	    skb_uarg(skb)->callback == msg_zerocopy_callback)
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

/* Frags must be orphaned, even if refcounted, if skb might loop to rx path */
static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (likely(!skb_zcopy(skb)))
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
void skb_queue_purge(struct sk_buff_head *list);

unsigned int skb_rbtree_purge(struct rb_root *root);

void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);

/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a frag from a page for receive buffer.
 * Uses GFP_ATOMIC allocations.
 */
static inline void *netdev_alloc_frag(unsigned int fragsz)
{
	return __netdev_alloc_frag_align(fragsz, ~0u);
}

static inline void *netdev_alloc_frag_align(unsigned int fragsz,
					    unsigned int align)
{
	WARN_ON_ONCE(!is_power_of_2(align));
	return __netdev_alloc_frag_align(fragsz, -align);
}

struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
				   gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
					       unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

/* legacy helper around __netdev_alloc_skb() */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	return __netdev_alloc_skb(NULL, length, gfp_mask);
}

/* legacy helper around netdev_alloc_skb() */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return netdev_alloc_skb(NULL, length);
}


static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}

static inline void skb_free_frag(void *addr)
{
	page_frag_free(addr);
}

void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);

static inline void *napi_alloc_frag(unsigned int fragsz)
{
	return __napi_alloc_frag_align(fragsz, ~0u);
}

static inline void *napi_alloc_frag_align(unsigned int fragsz,
					  unsigned int align)
{
	WARN_ON_ONCE(!is_power_of_2(align));
	return __napi_alloc_frag_align(fragsz, -align);
}

struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
				 unsigned int length, gfp_t gfp_mask);
static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
					     unsigned int length)
{
	return __napi_alloc_skb(napi, length, GFP_ATOMIC);
}
void napi_consume_skb(struct sk_buff *skb, int budget);

void napi_skb_free_stolen_head(struct sk_buff *skb);
void __kfree_skb_defer(struct sk_buff *skb);

/**
 * __dev_alloc_pages - allocate page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 * @order: size of the allocation
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
					     unsigned int order)
{
	/* This piece of code contains several assumptions.
	 * 1.  This is for device Rx, therefor a cold page is preferred.
	 * 2.  The expectation is the user wants a compound page.
	 * 3.  If requesting a order 0 page it will not be compound
	 *     due to the check to see if order has a value in prep_new_page
	 * 4.  __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
	 *     code in gfp_to_alloc_flags that should be enforcing this.
	 */
	gfp_mask |= __GFP_COMP | __GFP_MEMALLOC;

	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
}
2965
2966static inline struct page *dev_alloc_pages(unsigned int order)
2967{
2968 return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
2969}
2970
2971
2972
2973
2974
2975
2976
2977
2978
2979static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
2980{
2981 return __dev_alloc_pages(gfp_mask, 0);
2982}
2983
2984static inline struct page *dev_alloc_page(void)
2985{
2986 return dev_alloc_pages(0);
2987}
2988
2989
2990
2991
2992
2993
2994
2995
2996
2997
2998
2999static inline bool dev_page_is_reusable(const struct page *page)
3000{
3001 return likely(page_to_nid(page) == numa_mem_id() &&
3002 !page_is_pfmemalloc(page));
3003}
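/*
 * Example (illustrative sketch): recycling an Rx page only when it is
 * safe to do so. The "rx_buf" ring entry is hypothetical.
 *
 *	if (dev_page_is_reusable(rx_buf->page)) {
 *		get_page(rx_buf->page);		// keep it in the ring
 *	} else {
 *		rx_buf->page = dev_alloc_page();
 *		if (unlikely(!rx_buf->page))
 *			return -ENOMEM;
 *	}
 */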

/**
 *	skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
 *	@page: The page that was allocated from skb_alloc_page
 *	@skb: The skb that may need pfmemalloc set
 */
static inline void skb_propagate_pfmemalloc(const struct page *page,
					    struct sk_buff *skb)
{
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc = true;
}

/**
 * skb_frag_off() - Returns the offset of a skb fragment
 * @frag: the paged fragment
 */
static inline unsigned int skb_frag_off(const skb_frag_t *frag)
{
	return frag->bv_offset;
}

/**
 * skb_frag_off_add() - Increments the offset of a skb fragment by @delta
 * @frag: skb fragment
 * @delta: value to add
 */
static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
{
	frag->bv_offset += delta;
}

/**
 * skb_frag_off_set() - Sets the offset of a skb fragment
 * @frag: skb fragment
 * @offset: offset of fragment
 */
static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset)
{
	frag->bv_offset = offset;
}

/**
 * skb_frag_off_copy() - Sets the offset of a skb fragment from another fragment
 * @fragto: skb fragment where offset is set
 * @fragfrom: skb fragment offset is copied from
 */
static inline void skb_frag_off_copy(skb_frag_t *fragto,
				     const skb_frag_t *fragfrom)
{
	fragto->bv_offset = fragfrom->bv_offset;
}

/**
 * skb_frag_page - retrieve the page referred to by a paged fragment
 * @frag: the paged fragment
 *
 * Returns the &struct page associated with @frag.
 */
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
	return frag->bv_page;
}

/**
 * __skb_frag_ref - take an addition reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Takes an additional reference on the paged fragment @frag.
 */
static inline void __skb_frag_ref(skb_frag_t *frag)
{
	get_page(skb_frag_page(frag));
}

/**
 * skb_frag_ref - take an addition reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset.
 *
 * Takes an additional reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_ref(struct sk_buff *skb, int f)
{
	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}

/**
 * __skb_frag_unref - release a reference on a paged fragment.
 * @frag: the paged fragment
 * @recycle: recycle the page if allocated via page_pool
 *
 * Releases a reference on the paged fragment @frag
 * or recycles the page via the page_pool API.
 */
static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle)
{
	struct page *page = skb_frag_page(frag);

#ifdef CONFIG_PAGE_POOL
	if (recycle && page_pool_return_skb_page(page))
		return;
#endif
	put_page(page);
}

/**
 * skb_frag_unref - release a reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset
 *
 * Releases a reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_unref(struct sk_buff *skb, int f)
{
	__skb_frag_unref(&skb_shinfo(skb)->frags[f], skb->pp_recycle);
}

/**
 * skb_frag_address - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. The page must already
 * be mapped.
 */
static inline void *skb_frag_address(const skb_frag_t *frag)
{
	return page_address(skb_frag_page(frag)) + skb_frag_off(frag);
}

/**
 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. Checks that the page
 * is mapped and returns %NULL otherwise.
 */
static inline void *skb_frag_address_safe(const skb_frag_t *frag)
{
	void *ptr = page_address(skb_frag_page(frag));
	if (unlikely(!ptr))
		return NULL;

	return ptr + skb_frag_off(frag);
}

/**
 * skb_frag_page_copy() - sets the page in a fragment from another fragment
 * @fragto: skb fragment where page is set
 * @fragfrom: skb fragment page is copied from
 */
static inline void skb_frag_page_copy(skb_frag_t *fragto,
				      const skb_frag_t *fragfrom)
{
	fragto->bv_page = fragfrom->bv_page;
}

/**
 * __skb_frag_set_page - sets the page contained in a paged fragment
 * @frag: the paged fragment
 * @page: the page to set
 *
 * Sets the fragment @frag to contain @page.
 */
static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
	frag->bv_page = page;
}

/**
 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
 * @skb: the buffer
 * @f: the fragment offset
 * @page: the page to set
 *
 * Sets the @f'th fragment of @skb to contain @page.
 */
static inline void skb_frag_set_page(struct sk_buff *skb, int f,
				     struct page *page)
{
	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
}

bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);

/**
 * skb_frag_dma_map - maps a paged fragment via the DMA API
 * @dev: the device to map the fragment to
 * @frag: the paged fragment to map
 * @offset: the offset within the fragment (starting at the
 *          fragment's own offset)
 * @size: the number of bytes to map
 * @dir: the direction of the mapping
 *
 * Maps the page associated with @frag to @dev.
 */
static inline dma_addr_t skb_frag_dma_map(struct device *dev,
					  const skb_frag_t *frag,
					  size_t offset, size_t size,
					  enum dma_data_direction dir)
{
	return dma_map_page(dev, skb_frag_page(frag),
			    skb_frag_off(frag) + offset, size, dir);
}
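/*
 * Example (illustrative sketch): DMA-mapping every paged fragment of an
 * skb on a TX path. Error unwinding and the descriptor ring are omitted;
 * "dev" is the DMA-capable &struct device of a hypothetical NIC.
 *
 *	int i;
 *
 *	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 *		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 *		dma_addr_t dma;
 *
 *		dma = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
 *				       DMA_TO_DEVICE);
 *		if (dma_mapping_error(dev, dma))
 *			goto unwind;
 *	}
 */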

static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
					gfp_t gfp_mask)
{
	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
}


static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
						  gfp_t gfp_mask)
{
	return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
}

/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not require the data to be copied.
 */
static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int skb_try_make_writable(struct sk_buff *skb,
					unsigned int write_len)
{
	return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
	       pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and original skb is not changed.
 *
 *	The result is skb with writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 *	skb_cow_head - skb_cow but only making the head writable
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	This function is identical to skb_cow except that we replace the
 *	skb_cloned check by skb_header_cloned.  It should be used when
 *	you only need to push on some header and do not need to modify
 *	the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}

/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

/**
 *	__skb_put_padto - increase size and pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *	@free_on_error: free buffer on error
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error if @free_on_error is true.
 */
static inline int __must_check __skb_put_padto(struct sk_buff *skb,
					       unsigned int len,
					       bool free_on_error)
{
	unsigned int size = skb->len;

	if (unlikely(size < len)) {
		len -= size;
		if (__skb_pad(skb, len, free_on_error))
			return -ENOMEM;
		__skb_put(skb, len);
	}
	return 0;
}

/**
 *	skb_put_padto - increase size and pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len)
{
	return __skb_put_padto(skb, len, true);
}
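/*
 * Example (illustrative sketch): an Ethernet driver padding a runt frame
 * to the 60-byte minimum before handing it to hardware. Note the skb has
 * already been freed when this fails, so the caller must not touch it
 * again.
 *
 *	if (skb_put_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;	// skb was consumed on error
 */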

static inline int skb_add_data(struct sk_buff *skb,
			       struct iov_iter *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		__wsum csum = 0;
		if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
						 &csum, from)) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
				    const struct page *page, int off)
{
	if (skb_zcopy(skb))
		return false;
	if (i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == skb_frag_page(frag) &&
		       off == skb_frag_off(frag) + skb_frag_size(frag);
	}
	return false;
}
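/*
 * Example (illustrative sketch): when appending page data to an skb,
 * grow the last fragment if the new chunk is contiguous with it,
 * otherwise start a new fragment. This mirrors the pattern used by
 * sendpage-style senders.
 *
 *	int i = skb_shinfo(skb)->nr_frags;
 *
 *	if (skb_can_coalesce(skb, i, page, offset)) {
 *		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
 *	} else {
 *		get_page(page);
 *		skb_fill_page_desc(skb, i, page, offset, copy);
 *	}
 */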

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}
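/*
 * Example (illustrative sketch): flattening an skb before code that
 * assumes all packet bytes live in the linear head area. The header
 * type is hypothetical.
 *
 *	if (skb_linearize(skb))
 *		goto drop;		// -ENOMEM: free the skb and bail
 *	hdr = (struct myproto_hdr *)skb->data;
 */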

/**
 * skb_has_shared_frag - can any frag be overwritten
 * @skb: buffer to test
 *
 * Return true if the skb has at least one frag that might be modified
 * by an external entity (as in vmsplice()/sendfile())
 */
static inline bool skb_has_shared_frag(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) &&
	       skb_shinfo(skb)->flags & SKBFL_SHARED_FRAG;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

static __always_inline void
__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
		     unsigned int off)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_block_sub(skb->csum,
					   csum_partial(start, len, 0), off);
	else if (skb->ip_summed == CHECKSUM_PARTIAL &&
		 skb_checksum_start_offset(skb) < 0)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */
static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	__skb_postpull_rcsum(skb, start, len, 0);
}
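/*
 * Example (illustrative sketch): stripping an outer header on receive
 * while keeping a CHECKSUM_COMPLETE value consistent. The header length
 * "hlen" is hypothetical; skb_pull_rcsum() wraps this same pattern.
 *
 *	const void *start = skb->data;
 *
 *	__skb_pull(skb, hlen);
 *	skb_postpull_rcsum(skb, start, hlen);
 */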

static __always_inline void
__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
		     unsigned int off)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_block_add(skb->csum,
					   csum_partial(start, len, 0), off);
}

/**
 *	skb_postpush_rcsum - update checksum for received skb after push
 *	@skb: buffer to update
 *	@start: start of data after push
 *	@len: length of data pushed
 *
 *	After doing a push on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum.
 */
static inline void skb_postpush_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	__skb_postpush_rcsum(skb, start, len, 0);
}

void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *	skb_push_rcsum - push skb and update receive checksum
 *	@skb: buffer to update
 *	@len: length of data pulled
 *
 *	This function performs an skb_push on the packet and updates
 *	the CHECKSUM_COMPLETE checksum.  It should be used on
 *	receive path processing instead of skb_push unless you know
 *	that the checksum difference is zero (e.g., a valid IP header)
 *	or you are setting ip_summed to CHECKSUM_NONE.
 */
static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
{
	skb_push(skb, len);
	skb_postpush_rcsum(skb, skb->data, len);
	return skb->data;
}

int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets is still valid after the operation.
 *	It can change skb pointers.
 */
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	return pskb_trim_rcsum_slow(skb, len);
}

static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	__skb_trim(skb, len);
	return 0;
}

static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __skb_grow(skb, len);
}

#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
#define skb_rb_first(root) rb_to_skb(rb_first(root))
#define skb_rb_last(root)  rb_to_skb(rb_last(root))
#define skb_rb_next(skb)   rb_to_skb(rb_next(&(skb)->rbnode))
#define skb_rb_prev(skb)   rb_to_skb(rb_prev(&(skb)->rbnode))

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;				\
		     skb != (struct sk_buff *)(queue);			\
		     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)				\
		for (skb = (queue)->next, tmp = skb->next;		\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->next)

#define skb_queue_walk_from(queue, skb)					\
		for (; skb != (struct sk_buff *)(queue);		\
		     skb = skb->next)

#define skb_rbtree_walk(skb, root)					\
		for (skb = skb_rb_first(root); skb != NULL;		\
		     skb = skb_rb_next(skb))

#define skb_rbtree_walk_from(skb)					\
		for (; skb != NULL;					\
		     skb = skb_rb_next(skb))

#define skb_rbtree_walk_from_safe(skb, tmp)				\
		for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \
		     skb = tmp)

#define skb_queue_walk_from_safe(queue, skb, tmp)			\
		for (tmp = skb->next;					\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;				\
		     skb != (struct sk_buff *)(queue);			\
		     skb = skb->prev)

#define skb_queue_reverse_walk_safe(queue, skb, tmp)			\
		for (skb = (queue)->prev, tmp = skb->prev;		\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->prev)

#define skb_queue_reverse_walk_from_safe(queue, skb, tmp)		\
		for (tmp = skb->prev;					\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->prev)

static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

#define skb_walk_frags(skb, iter)	\
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
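/*
 * Example (illustrative sketch): walking the frag_list of a GRO'd or
 * fragmented skb to sum the lengths of its chained buffers.
 *
 *	struct sk_buff *iter;
 *	unsigned int chained = 0;
 *
 *	skb_walk_frags(skb, iter)
 *		chained += iter->len;
 */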

int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
				int *err, long *timeo_p,
				const struct sk_buff *skb);
struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
					  struct sk_buff_head *queue,
					  unsigned int flags,
					  int *off, int *err,
					  struct sk_buff **last);
struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
					struct sk_buff_head *queue,
					unsigned int flags, int *off, int *err,
					struct sk_buff **last);
struct sk_buff *__skb_recv_datagram(struct sock *sk,
				    struct sk_buff_head *sk_queue,
				    unsigned int flags, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
				  int *err);
__poll_t datagram_poll(struct file *file, struct socket *sock,
		       struct poll_table_struct *wait);
int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
			   struct iov_iter *to, int size);
static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
					struct msghdr *msg, int size)
{
	return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
}
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
				   struct msghdr *msg);
int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
				    struct iov_iter *to, int len,
				    struct ahash_request *hash);
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
				struct iov_iter *from, int len);
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
static inline void skb_free_datagram_locked(struct sock *sk,
					    struct sk_buff *skb)
{
	__skb_free_datagram_locked(sk, skb, 0);
}
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
			      int len);
int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int len,
		    unsigned int flags);
int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
			 int len);
int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
		 int len, int hlen);
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
void skb_scrub_packet(struct sk_buff *skb, bool xnet);
bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features,
				 unsigned int offset);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, int write_len);
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
int skb_eth_pop(struct sk_buff *skb);
int skb_eth_push(struct sk_buff *skb, const unsigned char *dst,
		 const unsigned char *src);
int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
		  int mac_len, bool ethernet);
int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
		 bool ethernet);
int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
int skb_mpls_dec_ttl(struct sk_buff *skb);
struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
			     gfp_t gfp);

static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
{
	return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT;
}

static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
{
	return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
}

struct skb_checksum_ops {
	__wsum (*update)(const void *mem, int len, __wsum wsum);
	__wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
};

extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly;

__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
		      __wsum csum, const struct skb_checksum_ops *ops);
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
		    __wsum csum);

static inline void * __must_check
__skb_header_pointer(const struct sk_buff *skb, int offset, int len,
		     const void *data, int hlen, void *buffer)
{
	if (likely(hlen - offset >= len))
		return (void *)data + offset;

	if (!skb || unlikely(skb_copy_bits(skb, offset, buffer, len) < 0))
		return NULL;

	return buffer;
}

static inline void * __must_check
skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
{
	return __skb_header_pointer(skb, offset, len, skb->data,
				    skb_headlen(skb), buffer);
}
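/*
 * Example (illustrative sketch): safely reading a TCP header that may be
 * split across fragments. The copy lands in the on-stack buffer only
 * when the header is not already linear.
 *
 *	struct tcphdr _tcph;
 *	const struct tcphdr *th;
 *
 *	th = skb_header_pointer(skb, skb_transport_offset(skb),
 *				sizeof(_tcph), &_tcph);
 *	if (!th)
 *		goto drop;	// truncated packet
 */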

/**
 *	skb_needs_linearize - check if we need to linearize a given skb
 *			      depending on the given device features.
 *	@skb: socket buffer to check
 *	@features: net device features
 *
 *	Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG.
 */
static inline bool skb_needs_linearize(struct sk_buff *skb,
				       netdev_features_t features)
{
	return skb_is_nonlinear(skb) &&
	       ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
		(skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
}

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}

void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct __kernel_old_timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and stores
 *	it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct __kernel_old_timeval *stamp)
{
	*stamp = ns_to_kernel_old_timeval(skb->tstamp);
}

static inline void skb_get_new_timestamp(const struct sk_buff *skb,
					 struct __kernel_sock_timeval *stamp)
{
	struct timespec64 ts = ktime_to_timespec64(skb->tstamp);

	stamp->tv_sec = ts.tv_sec;
	stamp->tv_usec = ts.tv_nsec / 1000;
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
				       struct __kernel_old_timespec *stamp)
{
	struct timespec64 ts = ktime_to_timespec64(skb->tstamp);

	stamp->tv_sec = ts.tv_sec;
	stamp->tv_nsec = ts.tv_nsec;
}

static inline void skb_get_new_timestampns(const struct sk_buff *skb,
					   struct __kernel_timespec *stamp)
{
	struct timespec64 ts = ktime_to_timespec64(skb->tstamp);

	stamp->tv_sec = ts.tv_sec;
	stamp->tv_nsec = ts.tv_nsec;
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return 0;
}

static inline u8 skb_metadata_len(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->meta_len;
}

static inline void *skb_metadata_end(const struct sk_buff *skb)
{
	return skb_mac_header(skb);
}

static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
					  const struct sk_buff *skb_b,
					  u8 meta_len)
{
	const void *a = skb_metadata_end(skb_a);
	const void *b = skb_metadata_end(skb_b);

	/* Using more efficient variant than plain call to memcmp(). */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	u64 diffs = 0;

	switch (meta_len) {
#define __it(x, op) (x -= sizeof(u##op))
#define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
	case 32: diffs |= __it_diff(a, b, 64);
		fallthrough;
	case 24: diffs |= __it_diff(a, b, 64);
		fallthrough;
	case 16: diffs |= __it_diff(a, b, 64);
		fallthrough;
	case  8: diffs |= __it_diff(a, b, 64);
		break;
	case 28: diffs |= __it_diff(a, b, 64);
		fallthrough;
	case 20: diffs |= __it_diff(a, b, 64);
		fallthrough;
	case 12: diffs |= __it_diff(a, b, 64);
		fallthrough;
	case  4: diffs |= __it_diff(a, b, 32);
		break;
	}
	return diffs;
#else
	return memcmp(a - meta_len, b - meta_len, meta_len);
#endif
}

static inline bool skb_metadata_differs(const struct sk_buff *skb_a,
					const struct sk_buff *skb_b)
{
	u8 len_a = skb_metadata_len(skb_a);
	u8 len_b = skb_metadata_len(skb_b);

	if (!(len_a | len_b))
		return false;

	return len_a != len_b ?
	       true : __skb_metadata_differs(skb_a, skb_b, len_a);
}

static inline void skb_metadata_set(struct sk_buff *skb, u8 meta_len)
{
	skb_shinfo(skb)->meta_len = meta_len;
}

static inline void skb_metadata_clear(struct sk_buff *skb)
{
	skb_metadata_set(skb, 0);
}

struct sk_buff *skb_clone_sk(struct sk_buff *skb);

#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING

void skb_clone_tx_timestamp(struct sk_buff *skb);
bool skb_defer_rx_timestamp(struct sk_buff *skb);

#else

static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	return false;
}

#endif

/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb back to the stack with a
 * timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps
 *
 */
void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps);

void __skb_tstamp_tx(struct sk_buff *orig_skb, const struct sk_buff *ack_skb,
		     struct skb_shared_hwtstamps *hwtstamps,
		     struct sock *sk, int tstype);

/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb:	the original outgoing packet
 * @hwtstamps:	hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket.  Errors are silently ignored.
 */
void skb_tstamp_tx(struct sk_buff *orig_skb,
		   struct skb_shared_hwtstamps *hwtstamps);

/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC Drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * Specifically, one should make absolutely sure that this function is
 * called before TX completion of this packet can trigger.  Otherwise
 * the packet could potentially already be freed.
 *
 * @skb: A socket buffer.
 */
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	skb_clone_tx_timestamp(skb);
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
		skb_tstamp_tx(skb, NULL);
}

/**
 * skb_complete_wifi_ack - deliver skb with wifi status
 *
 * @skb: the original outgoing packet
 * @acked: ack status
 *
 */
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
__sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
		skb->csum_valid ||
		(skb->ip_summed == CHECKSUM_PARTIAL &&
		 skb_checksum_start_offset(skb) >= 0));
}

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP,
 *	this function can be used to verify that checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is okay.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}
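/*
 * Example (illustrative sketch): verifying the checksum of a received
 * ICMP message, which covers the whole packet with no pseudo header,
 * following the pattern used by protocol receive handlers.
 *
 *	skb->csum = 0;			// no pseudo-header contribution
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;	// non-zero folded sum => corrupt
 */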

static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level == 0)
			skb->ip_summed = CHECKSUM_NONE;
		else
			skb->csum_level--;
	}
}

static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
			skb->csum_level++;
	} else if (skb->ip_summed == CHECKSUM_NONE) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = 0;
	}
}

static inline void __skb_reset_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		skb->ip_summed = CHECKSUM_NONE;
		skb->csum_level = 0;
	}
}

/* Check if we need to perform checksum complete validation.
 *
 * Returns true if checksum complete is needed, false otherwise
 * (either checksum is unnecessary or zero checksum is allowed).
 */
static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
						  bool zero_okay,
						  __sum16 check)
{
	if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
		skb->csum_valid = 1;
		__skb_decr_checksum_unnecessary(skb);
		return false;
	}

	return true;
}

/* For small packets <= CHECKSUM_BREAK perform checksum complete directly
 * in checksum_init.
 */
#define CHECKSUM_BREAK 76

/* Unset checksum-complete
 *
 * Unset checksum complete can be done when packet is being modified
 * (uncompressed for instance) and checksum-complete value is
 * invalidated.
 */
static inline void skb_checksum_complete_unset(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/* Validate (init) checksum based on checksum complete.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete. In the latter
 *	case the ip_summed will not be CHECKSUM_UNNECESSARY and the pseudo
 *	checksum is stored in skb->csum for use in __skb_checksum_complete
 *   non-zero: value of invalid checksum
 */
static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
						       bool complete,
						       __wsum psum)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!csum_fold(csum_add(psum, skb->csum))) {
			skb->csum_valid = 1;
			return 0;
		}
	}

	skb->csum = psum;

	if (complete || skb->len <= CHECKSUM_BREAK) {
		__sum16 csum;

		csum = __skb_checksum_complete(skb);
		skb->csum_valid = !csum;
		return csum;
	}

	return 0;
}

static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
{
	return 0;
}

/* Perform checksum validate (init). Note that this is a macro since we only
 * want to calculate the pseudo header which is an input function if necessary.
 * First we try to validate without any computation (checksum unnecessary) and
 * then calculate based on checksum complete calling the function to compute
 * pseudo header.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete
 *   non-zero: value of invalid checksum
 */
#define __skb_checksum_validate(skb, proto, complete,			\
				zero_okay, check, compute_pseudo)	\
({									\
	__sum16 __ret = 0;						\
	skb->csum_valid = 0;						\
	if (__skb_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_checksum_validate_complete(skb,		\
				complete, compute_pseudo(skb, proto));	\
	__ret;								\
})

#define skb_checksum_init(skb, proto, compute_pseudo)			\
	__skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)

#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \
	__skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)

#define skb_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)

#define skb_checksum_validate_zero_check(skb, proto, check,		\
					 compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)

#define skb_checksum_simple_validate(skb)				\
	__skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)

static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
{
	return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid);
}

static inline void __skb_checksum_convert(struct sk_buff *skb, __wsum pseudo)
{
	skb->csum = ~pseudo;
	skb->ip_summed = CHECKSUM_COMPLETE;
}

#define skb_checksum_try_convert(skb, proto, compute_pseudo)		\
do {									\
	if (__skb_checksum_convert_check(skb))				\
		__skb_checksum_convert(skb, compute_pseudo(skb, proto)); \
} while (0)

static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
					      u16 start, u16 offset)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
	skb->csum_offset = offset - start;
}

/* Update skbuf and packet to reflect the remote checksum offload operation.
 * When called, ptr indicates the starting point for skb->csum when
 * ip_summed is CHECKSUM_COMPLETE. If we need create checksum complete
 * here, skb_postpull_rcsum is done so skb->csum start is ptr.
 */
static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
				       int start, int offset, bool nopartial)
{
	__wsum delta;

	if (!nopartial) {
		skb_remcsum_adjust_partial(skb, ptr, start, offset);
		return;
	}

	if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
		__skb_checksum_complete(skb);
		skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
	}

	delta = remcsum_adjust(ptr, skb->csum, start, offset);

	/* Adjust skb->csum since we changed the packet */
	skb->csum = csum_add(skb->csum, delta);
}

static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	return (void *)(skb->_nfct & NFCT_PTRMASK);
#else
	return NULL;
#endif
}

static inline unsigned long skb_get_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	return skb->_nfct;
#else
	return 0UL;
#endif
}

static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	skb->slow_gro |= !!nfct;
	skb->_nfct = nfct;
#endif
}

#ifdef CONFIG_SKB_EXTENSIONS
enum skb_ext_id {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	SKB_EXT_BRIDGE_NF,
#endif
#ifdef CONFIG_XFRM
	SKB_EXT_SEC_PATH,
#endif
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	TC_SKB_EXT,
#endif
#if IS_ENABLED(CONFIG_MPTCP)
	SKB_EXT_MPTCP,
#endif
	SKB_EXT_NUM, /* must be last */
};

/**
 *	struct skb_ext - sk_buff extensions
 *	@refcnt: 1 on allocation, deallocated on 0
 *	@offset: offset to add to @data to obtain extension address
 *	@chunks: size currently allocated, stored in SKB_EXT_ALIGN_VALUE units
 *	@data: start of extension data, variable sized
 *
 *	Note: offsets/lengths are stored in chunks of 8 bytes, this allows
 *	to use 'u8' types while allowing up to 2x larger extension size.
 */
struct skb_ext {
	refcount_t refcnt;
	u8 offset[SKB_EXT_NUM]; /* in chunks of 8 bytes */
	u8 chunks;		/* same */
	char data[] __aligned(8);
};

struct skb_ext *__skb_ext_alloc(gfp_t flags);
void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
		    struct skb_ext *ext);
void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
void __skb_ext_put(struct skb_ext *ext);

static inline void skb_ext_put(struct sk_buff *skb)
{
	if (skb->active_extensions)
		__skb_ext_put(skb->extensions);
}

static inline void __skb_ext_copy(struct sk_buff *dst,
				  const struct sk_buff *src)
{
	dst->active_extensions = src->active_extensions;

	if (src->active_extensions) {
		struct skb_ext *ext = src->extensions;

		refcount_inc(&ext->refcnt);
		dst->extensions = ext;
	}
}

static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src)
{
	skb_ext_put(dst);
	__skb_ext_copy(dst, src);
}

static inline bool __skb_ext_exist(const struct skb_ext *ext, enum skb_ext_id i)
{
	return !!ext->offset[i];
}

static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id)
{
	return skb->active_extensions & (1 << id);
}

static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
{
	if (skb_ext_exist(skb, id))
		__skb_ext_del(skb, id);
}

static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id)
{
	if (skb_ext_exist(skb, id)) {
		struct skb_ext *ext = skb->extensions;

		return (void *)ext + (ext->offset[id] << 3);
	}

	return NULL;
}
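/*
 * Example (illustrative sketch, assumes CONFIG_SKB_EXTENSIONS and
 * CONFIG_XFRM): attaching and later looking up an extension.
 * skb_ext_add() returns a pointer to the extension area (existing or
 * newly added), or NULL on allocation failure.
 *
 *	struct sec_path *sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);
 *
 *	if (!sp)
 *		return -ENOMEM;
 *	...
 *	sp = skb_ext_find(skb, SKB_EXT_SEC_PATH);	// NULL if absent
 */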

static inline void skb_ext_reset(struct sk_buff *skb)
{
	if (unlikely(skb->active_extensions)) {
		__skb_ext_put(skb->extensions);
		skb->active_extensions = 0;
	}
}

static inline bool skb_has_extensions(struct sk_buff *skb)
{
	return unlikely(skb->active_extensions);
}
#else
static inline void skb_ext_put(struct sk_buff *skb) {}
static inline void skb_ext_reset(struct sk_buff *skb) {}
static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
static inline bool skb_has_extensions(struct sk_buff *skb) { return false; }
#endif /* CONFIG_SKB_EXTENSIONS */

static inline void nf_reset_ct(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb_nfct(skb));
	skb->_nfct = 0;
#endif
}

static inline void nf_reset_trace(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
	skb->nf_trace = 0;
#endif
}

static inline void ipvs_reset(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IP_VS)
	skb->ipvs_property = 0;
#endif
}

/* Note: This doesn't put any conntrack info. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
			     bool copy)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->_nfct = src->_nfct;
	nf_conntrack_get(skb_nfct(src));
#endif
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
	if (copy)
		dst->nf_trace = src->nf_trace;
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb_nfct(dst));
#endif
	dst->slow_gro = src->slow_gro;
	__nf_copy(dst, src, true);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline int secpath_exists(const struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
	return skb_ext_exist(skb, SKB_EXT_SEC_PATH);
#else
	return 0;
#endif
}

static inline bool skb_irq_freeable(const struct sk_buff *skb)
{
	return !skb->destructor &&
	       !secpath_exists(skb) &&
	       !skb_nfct(skb) &&
	       !skb->_skb_refdst &&
	       !skb_has_frag_list(skb);
}

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}

static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val)
{
	skb->dst_pending_confirm = val;
}

static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
{
	return skb->dst_pending_confirm != 0;
}

static inline struct sec_path *skb_sec_path(const struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
	return skb_ext_find(skb, SKB_EXT_SEC_PATH);
#else
	return NULL;
#endif
}

/* Keeps track of mac header offset relative to skb->head.
 * It is useful for TSO of Tunneling protocol. e.g. GRE.
 * For non-tunnel skb it points to skb_mac_header() and for
 * tunnel skb it points to outer mac header.
 */
struct skb_gso_cb {
	union {
		int	mac_offset;
		int	data_offset;
	};
	int	encap_level;
	__wsum	csum;
	__u16	csum_start;
};
#define SKB_GSO_CB_OFFSET	32
#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET))

static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
{
	return (skb_mac_header(inner_skb) - inner_skb->head) -
	       SKB_GSO_CB(inner_skb)->mac_offset;
}

static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
{
	int new_headroom, headroom;
	int ret;

	headroom = skb_headroom(skb);
	ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
	if (ret)
		return ret;

	new_headroom = skb_headroom(skb);
	SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
	return 0;
}

static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
{
	/* Do not update partial checksums if remote checksum is enabled. */
	if (skb->remcsum_offload)
		return;

	SKB_GSO_CB(skb)->csum = res;
	SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
}

/* Compute the checksum for a gso segment. First compute the checksum value
 * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
 * then add in skb->csum (checksum from csum_start to end of packet).
 * skb->csum and csum_start are then updated to reflect the checksum of the
 * resulting packet starting from the transport header-- the resulting checksum
 * is in the res argument (i.e. RFC 1624).
 */
static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
{
	unsigned char *csum_start = skb_transport_header(skb);
	int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
	__wsum partial = SKB_GSO_CB(skb)->csum;

	SKB_GSO_CB(skb)->csum = res;
	SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;

	return csum_fold(csum_partial(csum_start, plen, partial));
}

static inline bool skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}
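/*
 * Example (illustrative sketch): software-segmenting a GSO skb when the
 * device cannot handle it. skb_segment() returns an ERR_PTR on failure;
 * on success callers typically free the original skb and transmit the
 * returned list.
 *
 *	if (skb_is_gso(skb)) {
 *		struct sk_buff *segs;
 *
 *		segs = skb_segment(skb, features);
 *		if (IS_ERR(segs))
 *			goto drop;
 *		consume_skb(skb);
 *		// ...transmit each skb on the segs list...
 *	}
 */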

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
}

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
}

static inline void skb_gso_reset(struct sk_buff *skb)
{
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_segs = 0;
	skb_shinfo(skb)->gso_type = 0;
}

static inline void skb_increase_gso_size(struct skb_shared_info *shinfo,
					 u16 increment)
{
	if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
		return;
	shinfo->gso_size += increment;
}

static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo,
					 u16 decrement)
{
	if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
		return;
	shinfo->gso_size -= decrement;
}

void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set. */
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one.  Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper, to document places where we make this assertion.
 */
static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
				     unsigned int transport_len,
				     __sum16(*skb_chkf)(struct sk_buff *skb));

/**
 * skb_head_is_locked - Determine if the skb->head is locked down
 * @skb: skb to check
 *
 * The head on skbs build around a head frag can be removed if they are
 * not cloned.  This function returns true if the skb head is locked down
 * due to either being allocated via kmalloc, or by being a clone with
 * multiple references to the head.
 */
static inline bool skb_head_is_locked(const struct sk_buff *skb)
{
	return !skb->head_frag || skb_cloned(skb);
}

/* Local Checksum Offload.
 * Compute outer checksum based on the assumption that the
 * inner checksum will be offloaded later.
 * See Documentation/networking/checksum-offloads.rst for
 * explanation of how this works.
 * Fill in outer checksum adjustment (e.g. with sum of outer
 * pseudo-header) before calling.
 * Also ensure that inner checksum is in linear data area.
 */
static inline __wsum lco_csum(struct sk_buff *skb)
{
	unsigned char *csum_start = skb_checksum_start(skb);
	unsigned char *l4_hdr = skb_transport_header(skb);
	__wsum partial;

	/* Start with complement of inner checksum adjustment */
	partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
						    skb->csum_offset));

	/* Add in checksum of our headers (incl. outer checksum
	 * adjustment filled in by caller) and return result.
	 */
	return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
}

static inline bool skb_is_redirected(const struct sk_buff *skb)
{
	return skb->redirected;
}

static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
{
	skb->redirected = 1;
#ifdef CONFIG_NET_REDIRECT
	skb->from_ingress = from_ingress;
	if (skb->from_ingress)
		skb->tstamp = 0;
#endif
}

static inline void skb_reset_redirect(struct sk_buff *skb)
{
	skb->redirected = 0;
}

static inline bool skb_csum_is_sctp(struct sk_buff *skb)
{
	return skb->csum_not_inet;
}

static inline void skb_set_kcov_handle(struct sk_buff *skb,
				       const u64 kcov_handle)
{
#ifdef CONFIG_KCOV
	skb->kcov_handle = kcov_handle;
#endif
}

static inline u64 skb_get_kcov_handle(struct sk_buff *skb)
{
#ifdef CONFIG_KCOV
	return skb->kcov_handle;
#else
	return 0;
#endif
}

#ifdef CONFIG_PAGE_POOL
static inline void skb_mark_for_recycle(struct sk_buff *skb)
{
	skb->pp_recycle = 1;
}
#endif
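/*
 * Example (illustrative sketch): a page_pool-backed driver opting its
 * skbs into page recycling before passing them up the stack, so that
 * skb_frag_unref()/skb_pp_recycle() can return pages to the pool.
 * "va" and "truesize" come from a page_pool-allocated buffer.
 *
 *	skb = build_skb(va, truesize);
 *	if (!skb)
 *		return NULL;
 *	skb_mark_for_recycle(skb);
 */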

static inline bool skb_pp_recycle(struct sk_buff *skb, void *data)
{
	if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
		return false;
	return page_pool_return_skb_page(virt_to_page(data));
}

#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */