#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/bvec.h>
#include <linux/cache.h>
#include <linux/rbtree.h>
#include <linux/socket.h>
#include <linux/refcount.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <net/flow_dissector.h>
#include <linux/splice.h>
#include <linux/in6.h>
#include <linux/if_packet.h>
#include <linux/llist.h>
#include <net/flow.h>
#include <net/page_pool.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <linux/netfilter/nf_conntrack_common.h>
#endif
#include <net/net_debug.h>

#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

#define SKB_MAX_CSUM_LEVEL	3

#define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
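
/*
 * Sizing sketch (illustration only, not part of this header): how the
 * helpers above are typically combined.  The function name below is made
 * up for the example.
 *
 *	static unsigned int example_max_head_data(void)
 *	{
 *		// Usable payload left in one page-sized allocation once the
 *		// trailing struct skb_shared_info has been carved off.
 *		return SKB_WITH_OVERHEAD(PAGE_SIZE);
 *	}
 *
 * SKB_TRUESIZE(len) goes the other way and estimates the memory charged to
 * a socket for one skb carrying len bytes of data: the data itself plus the
 * cache-line aligned sk_buff and skb_shared_info overhead.
 */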

struct ahash_request;
struct net_device;
struct scatterlist;
struct pipe_inode_info;
struct iov_iter;
struct napi_struct;
struct bpf_prog;
union bpf_attr;
struct skb_ext;

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
	enum {
		BRNF_PROTO_UNCHANGED,
		BRNF_PROTO_8021Q,
		BRNF_PROTO_PPPOE
	} orig_proto:8;
	u8			pkt_otherhost:1;
	u8			in_prerouting:1;
	u8			bridged_dnat:1;
	__u16			frag_max_size;
	struct net_device	*physindev;

	struct net_device	*physoutdev;
	union {
		__be32		ipv4_daddr;
		struct in6_addr	ipv6_daddr;

		char		neigh_header[8];
	};
};
#endif

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
struct tc_skb_ext {
	__u32 chain;
	__u16 mru;
	__u16 zone;
	u8 post_ct:1;
	u8 post_ct_snat:1;
	u8 post_ct_dnat:1;
};
#endif

struct sk_buff_head {
	struct_group_tagged(sk_buff_list, list,
		struct sk_buff	*next;
		struct sk_buff	*prev;
	);

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

enum skb_drop_reason {
	SKB_NOT_DROPPED_YET = 0,
	SKB_DROP_REASON_NOT_SPECIFIED,
	SKB_DROP_REASON_NO_SOCKET,
	SKB_DROP_REASON_PKT_TOO_SMALL,
	SKB_DROP_REASON_TCP_CSUM,
	SKB_DROP_REASON_SOCKET_FILTER,
	SKB_DROP_REASON_UDP_CSUM,
	SKB_DROP_REASON_NETFILTER_DROP,
	SKB_DROP_REASON_OTHERHOST,
	SKB_DROP_REASON_IP_CSUM,
	SKB_DROP_REASON_IP_INHDR,
	SKB_DROP_REASON_IP_RPFILTER,
	SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST,
	SKB_DROP_REASON_XFRM_POLICY,
	SKB_DROP_REASON_IP_NOPROTO,
	SKB_DROP_REASON_SOCKET_RCVBUFF,
	SKB_DROP_REASON_PROTO_MEM,
	SKB_DROP_REASON_TCP_MD5NOTFOUND,
	SKB_DROP_REASON_TCP_MD5UNEXPECTED,
	SKB_DROP_REASON_TCP_MD5FAILURE,
	SKB_DROP_REASON_SOCKET_BACKLOG,
	SKB_DROP_REASON_TCP_FLAGS,
	SKB_DROP_REASON_TCP_ZEROWINDOW,
	SKB_DROP_REASON_TCP_OLD_DATA,
	SKB_DROP_REASON_TCP_OVERWINDOW,
	SKB_DROP_REASON_TCP_OFOMERGE,
	SKB_DROP_REASON_TCP_RFC7323_PAWS,
	SKB_DROP_REASON_TCP_INVALID_SEQUENCE,
	SKB_DROP_REASON_TCP_RESET,
	SKB_DROP_REASON_TCP_INVALID_SYN,
	SKB_DROP_REASON_TCP_CLOSE,
	SKB_DROP_REASON_TCP_FASTOPEN,
	SKB_DROP_REASON_TCP_OLD_ACK,
	SKB_DROP_REASON_TCP_TOO_OLD_ACK,
	SKB_DROP_REASON_TCP_ACK_UNSENT_DATA,
	SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE,
	SKB_DROP_REASON_TCP_OFO_DROP,
	SKB_DROP_REASON_IP_OUTNOROUTES,
	SKB_DROP_REASON_BPF_CGROUP_EGRESS,
	SKB_DROP_REASON_IPV6DISABLED,
	SKB_DROP_REASON_NEIGH_CREATEFAIL,
	SKB_DROP_REASON_NEIGH_FAILED,
	SKB_DROP_REASON_NEIGH_QUEUEFULL,
	SKB_DROP_REASON_NEIGH_DEAD,
	SKB_DROP_REASON_TC_EGRESS,
	SKB_DROP_REASON_QDISC_DROP,
	SKB_DROP_REASON_CPU_BACKLOG,
	SKB_DROP_REASON_XDP,
	SKB_DROP_REASON_TC_INGRESS,
	SKB_DROP_REASON_UNHANDLED_PROTO,
	SKB_DROP_REASON_SKB_CSUM,
	SKB_DROP_REASON_SKB_GSO_SEG,
	SKB_DROP_REASON_SKB_UCOPY_FAULT,
	SKB_DROP_REASON_DEV_HDR,
	SKB_DROP_REASON_DEV_READY,
	SKB_DROP_REASON_FULL_RING,
	SKB_DROP_REASON_NOMEM,
	SKB_DROP_REASON_HDR_TRUNC,
	SKB_DROP_REASON_TAP_FILTER,
	SKB_DROP_REASON_TAP_TXFILTER,
	SKB_DROP_REASON_ICMP_CSUM,
	SKB_DROP_REASON_INVALID_PROTO,
	SKB_DROP_REASON_IP_INADDRERRORS,
	SKB_DROP_REASON_IP_INNOROUTES,
	SKB_DROP_REASON_PKT_TOO_BIG,
	SKB_DROP_REASON_MAX,
};

#define SKB_DR_INIT(name, reason)				\
	enum skb_drop_reason name = SKB_DROP_REASON_##reason
#define SKB_DR(name)						\
	SKB_DR_INIT(name, NOT_SPECIFIED)
#define SKB_DR_SET(name, reason)				\
	(name = SKB_DROP_REASON_##reason)
#define SKB_DR_OR(name, reason)					\
	do {							\
		if (name == SKB_DROP_REASON_NOT_SPECIFIED ||	\
		    name == SKB_NOT_DROPPED_YET)		\
			SKB_DR_SET(name, reason);		\
	} while (0)
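
/*
 * Usage sketch (illustration only): how the SKB_DR*() helpers above are
 * typically used in a receive path that frees with kfree_skb_reason(),
 * declared later in this header.  The function below is hypothetical.
 *
 *	static void example_rx_drop(struct sk_buff *skb, bool bad_csum,
 *				    bool no_socket)
 *	{
 *		SKB_DR(reason);			// starts as NOT_SPECIFIED
 *
 *		if (bad_csum)
 *			SKB_DR_SET(reason, IP_CSUM);
 *		else if (no_socket)
 *			SKB_DR_SET(reason, NO_SOCKET);
 *		kfree_skb_reason(skb, reason);
 *	}
 */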

#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif
extern int sysctl_max_skb_frags;

#define GSO_BY_FRAGS	0xFFFF

typedef struct bio_vec skb_frag_t;

static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->bv_len;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->bv_len = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->bv_len += delta;
}

static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->bv_len -= delta;
}

static inline bool skb_frag_must_loop(struct page *p)
{
#if defined(CONFIG_HIGHMEM)
	if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) || PageHighMem(p))
		return true;
#endif
	return false;
}

#define skb_frag_foreach_page(f, f_off, f_len, p, p_off, p_len, copied)	\
	for (p = skb_frag_page(f) + ((f_off) >> PAGE_SHIFT),		\
	     p_off = (f_off) & (PAGE_SIZE - 1),				\
	     p_len = skb_frag_must_loop(p) ?				\
	     min_t(u32, f_len, PAGE_SIZE - p_off) : f_len,		\
	     copied = 0;						\
	     copied < f_len;						\
	     copied += p_len, p++, p_off = 0,				\
	     p_len = min_t(u32, f_len - copied, PAGE_SIZE))
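
/*
 * Usage sketch (illustration only): walking every page spanned by one frag
 * with skb_frag_foreach_page().  skb_frag_page() and skb_frag_off() are
 * provided by this header; the function itself is hypothetical.
 *
 *	static void example_touch_frag(const skb_frag_t *frag)
 *	{
 *		struct page *p;
 *		u32 p_off, p_len, copied;
 *
 *		skb_frag_foreach_page(frag, skb_frag_off(frag),
 *				      skb_frag_size(frag),
 *				      p, p_off, p_len, copied) {
 *			void *va = kmap_local_page(p);
 *			// operate on va + p_off for p_len bytes
 *			kunmap_local(va);
 *		}
 *	}
 */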

#define HAVE_HW_TIME_STAMP

struct skb_shared_hwtstamps {
	union {
		ktime_t	hwtstamp;
		void *netdev_data;
	};
};

enum {
	SKBTX_HW_TSTAMP = 1 << 0,
	SKBTX_SW_TSTAMP = 1 << 1,
	SKBTX_IN_PROGRESS = 1 << 2,
	SKBTX_HW_TSTAMP_USE_CYCLES = 1 << 3,
	SKBTX_WIFI_STATUS = 1 << 4,
	SKBTX_HW_TSTAMP_NETDEV = 1 << 5,
	SKBTX_SCHED_TSTAMP = 1 << 6,
};

#define SKBTX_ANY_SW_TSTAMP	(SKBTX_SW_TSTAMP | \
				 SKBTX_SCHED_TSTAMP)
#define SKBTX_ANY_TSTAMP	(SKBTX_HW_TSTAMP | \
				 SKBTX_HW_TSTAMP_USE_CYCLES | \
				 SKBTX_ANY_SW_TSTAMP)

enum {
	SKBFL_ZEROCOPY_ENABLE = BIT(0),
	SKBFL_SHARED_FRAG = BIT(1),
	SKBFL_PURE_ZEROCOPY = BIT(2),
};

#define SKBFL_ZEROCOPY_FRAG	(SKBFL_ZEROCOPY_ENABLE | SKBFL_SHARED_FRAG)
#define SKBFL_ALL_ZEROCOPY	(SKBFL_ZEROCOPY_FRAG | SKBFL_PURE_ZEROCOPY)

struct ubuf_info {
	void (*callback)(struct sk_buff *, struct ubuf_info *,
			 bool zerocopy_success);
	union {
		struct {
			unsigned long desc;
			void *ctx;
		};
		struct {
			u32 id;
			u16 len;
			u16 zerocopy:1;
			u32 bytelen;
		};
	};
	refcount_t refcnt;
	u8 flags;

	struct mmpin {
		struct user_struct *user;
		unsigned int num_pg;
	} mmp;
};

#define skb_uarg(SKB)	((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))

int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
void mm_unaccount_pinned_pages(struct mmpin *mmp);

struct skb_shared_info {
	__u8		flags;
	__u8		meta_len;
	__u8		nr_frags;
	__u8		tx_flags;
	unsigned short	gso_size;
	unsigned short	gso_segs;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	unsigned int	gso_type;
	u32		tskey;

	atomic_t	dataref;
	unsigned int	xdp_frags_size;

	void *		destructor_arg;

	skb_frag_t	frags[MAX_SKB_FRAGS];
};
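
/*
 * Layout sketch (illustration only): struct skb_shared_info lives at the
 * end of the skb head buffer and is reached through skb_shinfo(), defined
 * later in this header.  A typical walk over the paged part of an skb looks
 * like the hypothetical helper below.
 *
 *	static unsigned int example_frags_len(const struct sk_buff *skb)
 *	{
 *		const struct skb_shared_info *shinfo = skb_shinfo(skb);
 *		unsigned int i, len = 0;
 *
 *		for (i = 0; i < shinfo->nr_frags; i++)
 *			len += skb_frag_size(&shinfo->frags[i]);
 *		return len;	// equals skb->data_len for a well-formed skb
 *	}
 */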

#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)

enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_DODGY = 1 << 1,
	SKB_GSO_TCP_ECN = 1 << 2,
	SKB_GSO_TCP_FIXEDID = 1 << 3,
	SKB_GSO_TCPV6 = 1 << 4,
	SKB_GSO_FCOE = 1 << 5,
	SKB_GSO_GRE = 1 << 6,
	SKB_GSO_GRE_CSUM = 1 << 7,
	SKB_GSO_IPXIP4 = 1 << 8,
	SKB_GSO_IPXIP6 = 1 << 9,
	SKB_GSO_UDP_TUNNEL = 1 << 10,
	SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
	SKB_GSO_PARTIAL = 1 << 12,
	SKB_GSO_TUNNEL_REMCSUM = 1 << 13,
	SKB_GSO_SCTP = 1 << 14,
	SKB_GSO_ESP = 1 << 15,
	SKB_GSO_UDP = 1 << 16,
	SKB_GSO_UDP_L4 = 1 << 17,
	SKB_GSO_FRAGLIST = 1 << 18,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

struct sk_buff {
	union {
		struct {
			struct sk_buff		*next;
			struct sk_buff		*prev;

			union {
				struct net_device	*dev;
				unsigned long		dev_scratch;
			};
		};
		struct rb_node		rbnode;
		struct list_head	list;
		struct llist_node	ll_node;
	};

	union {
		struct sock		*sk;
		int			ip_defrag_offset;
	};

	union {
		ktime_t		tstamp;
		u64		skb_mstamp_ns;
	};

	char			cb[48] __aligned(8);

	union {
		struct {
			unsigned long	_skb_refdst;
			void		(*destructor)(struct sk_buff *skb);
		};
		struct list_head	tcp_tsorted_anchor;
#ifdef CONFIG_NET_SOCK_MSG
		unsigned long		_sk_redir;
#endif
	};

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	unsigned long		_nfct;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;

	__u16			queue_mapping;

#ifdef __BIG_ENDIAN_BITFIELD
#define CLONED_MASK	(1 << 7)
#else
#define CLONED_MASK	1
#endif
#define CLONED_OFFSET		offsetof(struct sk_buff, __cloned_offset)

	__u8			__cloned_offset[0];
	__u8			cloned:1,
				nohdr:1,
				fclone:2,
				peeked:1,
				head_frag:1,
				pfmemalloc:1,
				pp_recycle:1;
#ifdef CONFIG_SKB_EXTENSIONS
	__u8			active_extensions;
#endif

	struct_group(headers,

	__u8			__pkt_type_offset[0];
	__u8			pkt_type:3;
	__u8			ignore_df:1;
	__u8			nf_trace:1;
	__u8			ip_summed:2;
	__u8			ooo_okay:1;

	__u8			l4_hash:1;
	__u8			sw_hash:1;
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;
	__u8			no_fcs:1;

	__u8			encapsulation:1;
	__u8			encap_hdr_csum:1;
	__u8			csum_valid:1;

	__u8			__pkt_vlan_present_offset[0];
	__u8			vlan_present:1;
	__u8			csum_complete_sw:1;
	__u8			csum_level:2;
	__u8			dst_pending_confirm:1;
	__u8			mono_delivery_time:1;
#ifdef CONFIG_NET_CLS_ACT
	__u8			tc_skip_classify:1;
	__u8			tc_at_ingress:1;
#endif
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif

	__u8			ipvs_property:1;
	__u8			inner_protocol_type:1;
	__u8			remcsum_offload:1;
#ifdef CONFIG_NET_SWITCHDEV
	__u8			offload_fwd_mark:1;
	__u8			offload_l3_fwd_mark:1;
#endif
	__u8			redirected:1;
#ifdef CONFIG_NET_REDIRECT
	__u8			from_ingress:1;
#endif
#ifdef CONFIG_NETFILTER_SKIP_EGRESS
	__u8			nf_skip_egress:1;
#endif
#ifdef CONFIG_TLS_DEVICE
	__u8			decrypted:1;
#endif
	__u8			slow_gro:1;
	__u8			csum_not_inet:1;

#ifdef CONFIG_NET_SCHED
	__u16			tc_index;
#endif

	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	int			skb_iif;
	__u32			hash;
	__be16			vlan_proto;
	__u16			vlan_tci;
#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
	union {
		unsigned int	napi_id;
		unsigned int	sender_cpu;
	};
#endif
	u16			alloc_cpu;
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif

	union {
		__u32		mark;
		__u32		reserved_tailroom;
	};

	union {
		__be16		inner_protocol;
		__u8		inner_ipproto;
	};

	__u16			inner_transport_header;
	__u16			inner_network_header;
	__u16			inner_mac_header;

	__be16			protocol;
	__u16			transport_header;
	__u16			network_header;
	__u16			mac_header;

#ifdef CONFIG_KCOV
	u64			kcov_handle;
#endif

	);

	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	refcount_t		users;

#ifdef CONFIG_SKB_EXTENSIONS
	struct skb_ext		*extensions;
#endif
};

#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_TYPE_MAX	(7 << 5)
#else
#define PKT_TYPE_MAX	7
#endif
#define PKT_TYPE_OFFSET		offsetof(struct sk_buff, __pkt_type_offset)

#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_VLAN_PRESENT_BIT		7
#define TC_AT_INGRESS_MASK		(1 << 0)
#define SKB_MONO_DELIVERY_TIME_MASK	(1 << 2)
#else
#define PKT_VLAN_PRESENT_BIT		0
#define TC_AT_INGRESS_MASK		(1 << 7)
#define SKB_MONO_DELIVERY_TIME_MASK	(1 << 5)
#endif
#define PKT_VLAN_PRESENT_OFFSET	offsetof(struct sk_buff, __pkt_vlan_present_offset)
1235
1236#ifdef __KERNEL__
1237
1238
1239
1240
1241#define SKB_ALLOC_FCLONE 0x01
1242#define SKB_ALLOC_RX 0x02
1243#define SKB_ALLOC_NAPI 0x04
1244
1245
1246
1247
1248
1249static inline bool skb_pfmemalloc(const struct sk_buff *skb)
1250{
1251 return unlikely(skb->pfmemalloc);
1252}
1253
1254
1255
1256
1257
1258#define SKB_DST_NOREF 1UL
1259#define SKB_DST_PTRMASK ~(SKB_DST_NOREF)
1260
1261
1262
1263
1264
1265
1266
1267static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
1268{
1269
1270
1271
1272 WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
1273 !rcu_read_lock_held() &&
1274 !rcu_read_lock_bh_held());
1275 return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
1276}
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
1287{
1288 skb->slow_gro |= !!dst;
1289 skb->_skb_refdst = (unsigned long)dst;
1290}
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
1303{
1304 WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
1305 skb->slow_gro |= !!dst;
1306 skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
1307}
1308
1309
1310
1311
1312
1313static inline bool skb_dst_is_noref(const struct sk_buff *skb)
1314{
1315 return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
1316}
1317
1318
1319
1320
1321
1322static inline struct rtable *skb_rtable(const struct sk_buff *skb)
1323{
1324 return (struct rtable *)skb_dst(skb);
1325}
1326
1327
1328
1329
1330
1331static inline bool skb_pkt_type_ok(u32 ptype)
1332{
1333 return ptype <= PACKET_OTHERHOST;
1334}
1335
1336
1337
1338
1339
1340static inline unsigned int skb_napi_id(const struct sk_buff *skb)
1341{
1342#ifdef CONFIG_NET_RX_BUSY_POLL
1343 return skb->napi_id;
1344#else
1345 return 0;
1346#endif
1347}
1348
1349
1350
1351
1352
1353
1354
1355static inline bool skb_unref(struct sk_buff *skb)
1356{
1357 if (unlikely(!skb))
1358 return false;
1359 if (likely(refcount_read(&skb->users) == 1))
1360 smp_rmb();
1361 else if (likely(!refcount_dec_and_test(&skb->users)))
1362 return false;
1363
1364 return true;
1365}
1366
1367void kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason);
1368
1369
1370
1371
1372
1373static inline void kfree_skb(struct sk_buff *skb)
1374{
1375 kfree_skb_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
1376}
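
/*
 * Usage sketch (illustration only): kfree_skb_reason() should carry the most
 * specific enum skb_drop_reason available, while consume_skb() (declared
 * below) is for skbs that were delivered successfully and should not be
 * counted as drops.  The helper is hypothetical.
 *
 *	static void example_free(struct sk_buff *skb, bool delivered)
 *	{
 *		if (delivered)
 *			consume_skb(skb);	// normal end of life, not a drop
 *		else
 *			kfree_skb_reason(skb, SKB_DROP_REASON_NOMEM);
 *	}
 */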
1377
1378void skb_release_head_state(struct sk_buff *skb);
1379void kfree_skb_list_reason(struct sk_buff *segs,
1380 enum skb_drop_reason reason);
1381void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
1382void skb_tx_error(struct sk_buff *skb);
1383
1384static inline void kfree_skb_list(struct sk_buff *segs)
1385{
1386 kfree_skb_list_reason(segs, SKB_DROP_REASON_NOT_SPECIFIED);
1387}
1388
1389#ifdef CONFIG_TRACEPOINTS
1390void consume_skb(struct sk_buff *skb);
1391#else
1392static inline void consume_skb(struct sk_buff *skb)
1393{
1394 return kfree_skb(skb);
1395}
1396#endif
1397
1398void __consume_stateless_skb(struct sk_buff *skb);
1399void __kfree_skb(struct sk_buff *skb);
1400extern struct kmem_cache *skbuff_head_cache;
1401
1402void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
1403bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
1404 bool *fragstolen, int *delta_truesize);
1405
1406struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
1407 int node);
1408struct sk_buff *__build_skb(void *data, unsigned int frag_size);
1409struct sk_buff *build_skb(void *data, unsigned int frag_size);
1410struct sk_buff *build_skb_around(struct sk_buff *skb,
1411 void *data, unsigned int frag_size);
1412void skb_attempt_defer_free(struct sk_buff *skb);
1413
1414struct sk_buff *napi_build_skb(void *data, unsigned int frag_size);
1415
1416
1417
1418
1419
1420
1421
1422
1423static inline struct sk_buff *alloc_skb(unsigned int size,
1424 gfp_t priority)
1425{
1426 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
1427}
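
/*
 * Usage sketch (illustration only): the usual allocate/reserve/fill pattern
 * built on alloc_skb().  skb_reserve(), skb_put_data() and NET_SKB_PAD are
 * defined later in this header; the headroom and length are arbitrary and
 * the function is hypothetical.
 *
 *	static struct sk_buff *example_build(const void *payload,
 *					     unsigned int len)
 *	{
 *		struct sk_buff *skb;
 *
 *		skb = alloc_skb(NET_SKB_PAD + len, GFP_ATOMIC);
 *		if (!skb)
 *			return NULL;
 *		skb_reserve(skb, NET_SKB_PAD);		// leave headroom
 *		skb_put_data(skb, payload, len);	// append the payload
 *		return skb;
 *	}
 */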
1428
1429struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
1430 unsigned long data_len,
1431 int max_page_order,
1432 int *errcode,
1433 gfp_t gfp_mask);
1434struct sk_buff *alloc_skb_for_msg(struct sk_buff *first);
1435
1436
1437struct sk_buff_fclones {
1438 struct sk_buff skb1;
1439
1440 struct sk_buff skb2;
1441
1442 refcount_t fclone_ref;
1443};
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454static inline bool skb_fclone_busy(const struct sock *sk,
1455 const struct sk_buff *skb)
1456{
1457 const struct sk_buff_fclones *fclones;
1458
1459 fclones = container_of(skb, struct sk_buff_fclones, skb1);
1460
1461 return skb->fclone == SKB_FCLONE_ORIG &&
1462 refcount_read(&fclones->fclone_ref) > 1 &&
1463 READ_ONCE(fclones->skb2.sk) == sk;
1464}
1465
1466
1467
1468
1469
1470
1471
1472
1473static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
1474 gfp_t priority)
1475{
1476 return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
1477}
1478
1479struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
1480void skb_headers_offset_update(struct sk_buff *skb, int off);
1481int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
1482struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
1483void skb_copy_header(struct sk_buff *new, const struct sk_buff *old);
1484struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
1485struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
1486 gfp_t gfp_mask, bool fclone);
1487static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
1488 gfp_t gfp_mask)
1489{
1490 return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
1491}
1492
1493int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
1494struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
1495 unsigned int headroom);
1496struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom);
1497struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
1498 int newtailroom, gfp_t priority);
1499int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
1500 int offset, int len);
1501int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
1502 int offset, int len);
1503int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
1504int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517static inline int skb_pad(struct sk_buff *skb, int pad)
1518{
1519 return __skb_pad(skb, pad, true);
1520}
1521#define dev_kfree_skb(a) consume_skb(a)
1522
1523int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
1524 int offset, size_t size);
1525
1526struct skb_seq_state {
1527 __u32 lower_offset;
1528 __u32 upper_offset;
1529 __u32 frag_idx;
1530 __u32 stepped_offset;
1531 struct sk_buff *root_skb;
1532 struct sk_buff *cur_skb;
1533 __u8 *frag_data;
1534 __u32 frag_off;
1535};
1536
1537void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
1538 unsigned int to, struct skb_seq_state *st);
1539unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
1540 struct skb_seq_state *st);
1541void skb_abort_seq_read(struct skb_seq_state *st);
1542
1543unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
1544 unsigned int to, struct ts_config *config);
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572enum pkt_hash_types {
1573 PKT_HASH_TYPE_NONE,
1574 PKT_HASH_TYPE_L2,
1575 PKT_HASH_TYPE_L3,
1576 PKT_HASH_TYPE_L4,
1577};
1578
1579static inline void skb_clear_hash(struct sk_buff *skb)
1580{
1581 skb->hash = 0;
1582 skb->sw_hash = 0;
1583 skb->l4_hash = 0;
1584}
1585
1586static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
1587{
1588 if (!skb->l4_hash)
1589 skb_clear_hash(skb);
1590}
1591
1592static inline void
1593__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
1594{
1595 skb->l4_hash = is_l4;
1596 skb->sw_hash = is_sw;
1597 skb->hash = hash;
1598}
1599
1600static inline void
1601skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
1602{
1603
1604 __skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
1605}
1606
1607static inline void
1608__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
1609{
1610 __skb_set_hash(skb, hash, true, is_l4);
1611}
1612
1613void __skb_get_hash(struct sk_buff *skb);
1614u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
1615u32 skb_get_poff(const struct sk_buff *skb);
1616u32 __skb_get_poff(const struct sk_buff *skb, const void *data,
1617 const struct flow_keys_basic *keys, int hlen);
1618__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
1619 const void *data, int hlen_proto);
1620
1621static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
1622 int thoff, u8 ip_proto)
1623{
1624 return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
1625}
1626
1627void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
1628 const struct flow_dissector_key *key,
1629 unsigned int key_count);
1630
1631struct bpf_flow_dissector;
1632bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
1633 __be16 proto, int nhoff, int hlen, unsigned int flags);
1634
1635bool __skb_flow_dissect(const struct net *net,
1636 const struct sk_buff *skb,
1637 struct flow_dissector *flow_dissector,
1638 void *target_container, const void *data,
1639 __be16 proto, int nhoff, int hlen, unsigned int flags);
1640
1641static inline bool skb_flow_dissect(const struct sk_buff *skb,
1642 struct flow_dissector *flow_dissector,
1643 void *target_container, unsigned int flags)
1644{
1645 return __skb_flow_dissect(NULL, skb, flow_dissector,
1646 target_container, NULL, 0, 0, 0, flags);
1647}
1648
1649static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
1650 struct flow_keys *flow,
1651 unsigned int flags)
1652{
1653 memset(flow, 0, sizeof(*flow));
1654 return __skb_flow_dissect(NULL, skb, &flow_keys_dissector,
1655 flow, NULL, 0, 0, 0, flags);
1656}
1657
1658static inline bool
1659skb_flow_dissect_flow_keys_basic(const struct net *net,
1660 const struct sk_buff *skb,
1661 struct flow_keys_basic *flow,
1662 const void *data, __be16 proto,
1663 int nhoff, int hlen, unsigned int flags)
1664{
1665 memset(flow, 0, sizeof(*flow));
1666 return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow,
1667 data, proto, nhoff, hlen, flags);
1668}
1669
1670void skb_flow_dissect_meta(const struct sk_buff *skb,
1671 struct flow_dissector *flow_dissector,
1672 void *target_container);
1673
1674
1675
1676
1677
1678void
1679skb_flow_dissect_ct(const struct sk_buff *skb,
1680 struct flow_dissector *flow_dissector,
1681 void *target_container,
1682 u16 *ctinfo_map, size_t mapsize,
1683 bool post_ct, u16 zone);
1684void
1685skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
1686 struct flow_dissector *flow_dissector,
1687 void *target_container);
1688
1689void skb_flow_dissect_hash(const struct sk_buff *skb,
1690 struct flow_dissector *flow_dissector,
1691 void *target_container);
1692
1693static inline __u32 skb_get_hash(struct sk_buff *skb)
1694{
1695 if (!skb->l4_hash && !skb->sw_hash)
1696 __skb_get_hash(skb);
1697
1698 return skb->hash;
1699}
1700
1701static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
1702{
1703 if (!skb->l4_hash && !skb->sw_hash) {
1704 struct flow_keys keys;
1705 __u32 hash = __get_hash_from_flowi6(fl6, &keys);
1706
1707 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
1708 }
1709
1710 return skb->hash;
1711}
1712
1713__u32 skb_get_hash_perturb(const struct sk_buff *skb,
1714 const siphash_key_t *perturb);
1715
1716static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
1717{
1718 return skb->hash;
1719}
1720
1721static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
1722{
1723 to->hash = from->hash;
1724 to->sw_hash = from->sw_hash;
1725 to->l4_hash = from->l4_hash;
1726};
1727
1728static inline void skb_copy_decrypted(struct sk_buff *to,
1729 const struct sk_buff *from)
1730{
1731#ifdef CONFIG_TLS_DEVICE
1732 to->decrypted = from->decrypted;
1733#endif
1734}
1735
1736#ifdef NET_SKBUFF_DATA_USES_OFFSET
1737static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
1738{
1739 return skb->head + skb->end;
1740}
1741
1742static inline unsigned int skb_end_offset(const struct sk_buff *skb)
1743{
1744 return skb->end;
1745}
1746
1747static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset)
1748{
1749 skb->end = offset;
1750}
1751#else
1752static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
1753{
1754 return skb->end;
1755}
1756
1757static inline unsigned int skb_end_offset(const struct sk_buff *skb)
1758{
1759 return skb->end - skb->head;
1760}
1761
1762static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset)
1763{
1764 skb->end = skb->head + offset;
1765}
1766#endif
1767
1768struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
1769 struct ubuf_info *uarg);
1770
1771void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
1772
1773void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
1774 bool success);
1775
1776int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
1777 struct iov_iter *from, size_t length);
1778
1779static inline int skb_zerocopy_iter_dgram(struct sk_buff *skb,
1780 struct msghdr *msg, int len)
1781{
1782 return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len);
1783}
1784
1785int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
1786 struct msghdr *msg, int len,
1787 struct ubuf_info *uarg);
1788
1789
1790#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
1791
1792static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
1793{
1794 return &skb_shinfo(skb)->hwtstamps;
1795}
1796
1797static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
1798{
1799 bool is_zcopy = skb && skb_shinfo(skb)->flags & SKBFL_ZEROCOPY_ENABLE;
1800
1801 return is_zcopy ? skb_uarg(skb) : NULL;
1802}
1803
1804static inline bool skb_zcopy_pure(const struct sk_buff *skb)
1805{
1806 return skb_shinfo(skb)->flags & SKBFL_PURE_ZEROCOPY;
1807}
1808
1809static inline bool skb_pure_zcopy_same(const struct sk_buff *skb1,
1810 const struct sk_buff *skb2)
1811{
1812 return skb_zcopy_pure(skb1) == skb_zcopy_pure(skb2);
1813}
1814
1815static inline void net_zcopy_get(struct ubuf_info *uarg)
1816{
1817 refcount_inc(&uarg->refcnt);
1818}
1819
1820static inline void skb_zcopy_init(struct sk_buff *skb, struct ubuf_info *uarg)
1821{
1822 skb_shinfo(skb)->destructor_arg = uarg;
1823 skb_shinfo(skb)->flags |= uarg->flags;
1824}
1825
1826static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
1827 bool *have_ref)
1828{
1829 if (skb && uarg && !skb_zcopy(skb)) {
1830 if (unlikely(have_ref && *have_ref))
1831 *have_ref = false;
1832 else
1833 net_zcopy_get(uarg);
1834 skb_zcopy_init(skb, uarg);
1835 }
1836}
1837
1838static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
1839{
1840 skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
1841 skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_FRAG;
1842}
1843
1844static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
1845{
1846 return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL;
1847}
1848
1849static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
1850{
1851 return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
1852}
1853
1854static inline void net_zcopy_put(struct ubuf_info *uarg)
1855{
1856 if (uarg)
1857 uarg->callback(NULL, uarg, true);
1858}
1859
1860static inline void net_zcopy_put_abort(struct ubuf_info *uarg, bool have_uref)
1861{
1862 if (uarg) {
1863 if (uarg->callback == msg_zerocopy_callback)
1864 msg_zerocopy_put_abort(uarg, have_uref);
1865 else if (have_uref)
1866 net_zcopy_put(uarg);
1867 }
1868}
1869
1870
1871static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success)
1872{
1873 struct ubuf_info *uarg = skb_zcopy(skb);
1874
1875 if (uarg) {
1876 if (!skb_zcopy_is_nouarg(skb))
1877 uarg->callback(skb, uarg, zerocopy_success);
1878
1879 skb_shinfo(skb)->flags &= ~SKBFL_ALL_ZEROCOPY;
1880 }
1881}
1882
1883static inline void skb_mark_not_on_list(struct sk_buff *skb)
1884{
1885 skb->next = NULL;
1886}
1887
1888
1889#define skb_list_walk_safe(first, skb, next_skb) \
1890 for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb); \
1891 (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL)
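
/*
 * Usage sketch (illustration only): skb_list_walk_safe() iterates an
 * skb->next chain (for example GSO segments) and tolerates the current
 * entry being freed or unlinked inside the loop body.  The function below
 * is hypothetical.
 *
 *	static void example_free_segs(struct sk_buff *segs)
 *	{
 *		struct sk_buff *skb, *next;
 *
 *		skb_list_walk_safe(segs, skb, next) {
 *			skb_mark_not_on_list(skb);
 *			kfree_skb(skb);
 *		}
 *	}
 */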
1892
1893static inline void skb_list_del_init(struct sk_buff *skb)
1894{
1895 __list_del_entry(&skb->list);
1896 skb_mark_not_on_list(skb);
1897}
1898
1899
1900
1901
1902
1903
1904
1905static inline int skb_queue_empty(const struct sk_buff_head *list)
1906{
1907 return list->next == (const struct sk_buff *) list;
1908}
1909
1910
1911
1912
1913
1914
1915
1916
1917static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
1918{
1919 return READ_ONCE(list->next) == (const struct sk_buff *) list;
1920}
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930static inline bool skb_queue_is_last(const struct sk_buff_head *list,
1931 const struct sk_buff *skb)
1932{
1933 return skb->next == (const struct sk_buff *) list;
1934}
1935
1936
1937
1938
1939
1940
1941
1942
1943static inline bool skb_queue_is_first(const struct sk_buff_head *list,
1944 const struct sk_buff *skb)
1945{
1946 return skb->prev == (const struct sk_buff *) list;
1947}
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
1958 const struct sk_buff *skb)
1959{
1960
1961
1962
1963 BUG_ON(skb_queue_is_last(list, skb));
1964 return skb->next;
1965}
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
1976 const struct sk_buff *skb)
1977{
1978
1979
1980
1981 BUG_ON(skb_queue_is_first(list, skb));
1982 return skb->prev;
1983}
1984
1985
1986
1987
1988
1989
1990
1991
1992static inline struct sk_buff *skb_get(struct sk_buff *skb)
1993{
1994 refcount_inc(&skb->users);
1995 return skb;
1996}
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010static inline int skb_cloned(const struct sk_buff *skb)
2011{
2012 return skb->cloned &&
2013 (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
2014}
2015
2016static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
2017{
2018 might_sleep_if(gfpflags_allow_blocking(pri));
2019
2020 if (skb_cloned(skb))
2021 return pskb_expand_head(skb, 0, 0, pri);
2022
2023 return 0;
2024}
2025
2026
2027
2028
2029
2030
2031
2032int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri);
2033static inline int skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
2034{
2035 might_sleep_if(gfpflags_allow_blocking(pri));
2036
2037 if (skb_cloned(skb))
2038 return __skb_unclone_keeptruesize(skb, pri);
2039 return 0;
2040}
2041
2042
2043
2044
2045
2046
2047
2048
2049static inline int skb_header_cloned(const struct sk_buff *skb)
2050{
2051 int dataref;
2052
2053 if (!skb->cloned)
2054 return 0;
2055
2056 dataref = atomic_read(&skb_shinfo(skb)->dataref);
2057 dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
2058 return dataref != 1;
2059}
2060
2061static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
2062{
2063 might_sleep_if(gfpflags_allow_blocking(pri));
2064
2065 if (skb_header_cloned(skb))
2066 return pskb_expand_head(skb, 0, 0, pri);
2067
2068 return 0;
2069}
2070
2071
2072
2073
2074
2075
2076
2077static inline void __skb_header_release(struct sk_buff *skb)
2078{
2079 skb->nohdr = 1;
2080 atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
2081}
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091static inline int skb_shared(const struct sk_buff *skb)
2092{
2093 return refcount_read(&skb->users) != 1;
2094}
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
2110{
2111 might_sleep_if(gfpflags_allow_blocking(pri));
2112 if (skb_shared(skb)) {
2113 struct sk_buff *nskb = skb_clone(skb, pri);
2114
2115 if (likely(nskb))
2116 consume_skb(skb);
2117 else
2118 kfree_skb(skb);
2119 skb = nskb;
2120 }
2121 return skb;
2122}
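
/*
 * Usage sketch (illustration only): a receive handler that may modify the
 * skb first makes sure it holds the only reference, since packet taps can
 * have taken one with skb_get().  The handler below is hypothetical.
 *
 *	static int example_rcv(struct sk_buff *skb)
 *	{
 *		skb = skb_share_check(skb, GFP_ATOMIC);
 *		if (!skb)
 *			return -ENOMEM;	// original skb was already freed
 *		// skb is now exclusively ours; header edits are safe
 *		return 0;
 *	}
 */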
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
2145 gfp_t pri)
2146{
2147 might_sleep_if(gfpflags_allow_blocking(pri));
2148 if (skb_cloned(skb)) {
2149 struct sk_buff *nskb = skb_copy(skb, pri);
2150
2151
2152 if (likely(nskb))
2153 consume_skb(skb);
2154 else
2155 kfree_skb(skb);
2156 skb = nskb;
2157 }
2158 return skb;
2159}
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
2175{
2176 struct sk_buff *skb = list_->next;
2177
2178 if (skb == (struct sk_buff *)list_)
2179 skb = NULL;
2180 return skb;
2181}
2182
2183
2184
2185
2186
2187
2188
2189static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_)
2190{
2191 return list_->next;
2192}
2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
2204 const struct sk_buff_head *list_)
2205{
2206 struct sk_buff *next = skb->next;
2207
2208 if (next == (struct sk_buff *)list_)
2209 next = NULL;
2210 return next;
2211}
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
2223
2224
2225
2226static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
2227{
2228 struct sk_buff *skb = READ_ONCE(list_->prev);
2229
2230 if (skb == (struct sk_buff *)list_)
2231 skb = NULL;
2232 return skb;
2233
2234}
2235
2236
2237
2238
2239
2240
2241
2242static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
2243{
2244 return list_->qlen;
2245}
2246
2247
2248
2249
2250
2251
2252
2253
2254static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_)
2255{
2256 return READ_ONCE(list_->qlen);
2257}
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269static inline void __skb_queue_head_init(struct sk_buff_head *list)
2270{
2271 list->prev = list->next = (struct sk_buff *)list;
2272 list->qlen = 0;
2273}
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283static inline void skb_queue_head_init(struct sk_buff_head *list)
2284{
2285 spin_lock_init(&list->lock);
2286 __skb_queue_head_init(list);
2287}
2288
2289static inline void skb_queue_head_init_class(struct sk_buff_head *list,
2290 struct lock_class_key *class)
2291{
2292 skb_queue_head_init(list);
2293 lockdep_set_class(&list->lock, class);
2294}
2295
2296
2297
2298
2299
2300
2301
2302static inline void __skb_insert(struct sk_buff *newsk,
2303 struct sk_buff *prev, struct sk_buff *next,
2304 struct sk_buff_head *list)
2305{
2306
2307
2308
2309 WRITE_ONCE(newsk->next, next);
2310 WRITE_ONCE(newsk->prev, prev);
2311 WRITE_ONCE(((struct sk_buff_list *)next)->prev, newsk);
2312 WRITE_ONCE(((struct sk_buff_list *)prev)->next, newsk);
2313 WRITE_ONCE(list->qlen, list->qlen + 1);
2314}
2315
2316static inline void __skb_queue_splice(const struct sk_buff_head *list,
2317 struct sk_buff *prev,
2318 struct sk_buff *next)
2319{
2320 struct sk_buff *first = list->next;
2321 struct sk_buff *last = list->prev;
2322
2323 WRITE_ONCE(first->prev, prev);
2324 WRITE_ONCE(prev->next, first);
2325
2326 WRITE_ONCE(last->next, next);
2327 WRITE_ONCE(next->prev, last);
2328}
2329
2330
2331
2332
2333
2334
2335static inline void skb_queue_splice(const struct sk_buff_head *list,
2336 struct sk_buff_head *head)
2337{
2338 if (!skb_queue_empty(list)) {
2339 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
2340 head->qlen += list->qlen;
2341 }
2342}
2343
2344
2345
2346
2347
2348
2349
2350
2351static inline void skb_queue_splice_init(struct sk_buff_head *list,
2352 struct sk_buff_head *head)
2353{
2354 if (!skb_queue_empty(list)) {
2355 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
2356 head->qlen += list->qlen;
2357 __skb_queue_head_init(list);
2358 }
2359}
2360
2361
2362
2363
2364
2365
2366static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
2367 struct sk_buff_head *head)
2368{
2369 if (!skb_queue_empty(list)) {
2370 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
2371 head->qlen += list->qlen;
2372 }
2373}
2374
2375
2376
2377
2378
2379
2380
2381
2382
2383static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
2384 struct sk_buff_head *head)
2385{
2386 if (!skb_queue_empty(list)) {
2387 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
2388 head->qlen += list->qlen;
2389 __skb_queue_head_init(list);
2390 }
2391}
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402
2403
2404static inline void __skb_queue_after(struct sk_buff_head *list,
2405 struct sk_buff *prev,
2406 struct sk_buff *newsk)
2407{
2408 __skb_insert(newsk, prev, ((struct sk_buff_list *)prev)->next, list);
2409}
2410
2411void skb_append(struct sk_buff *old, struct sk_buff *newsk,
2412 struct sk_buff_head *list);
2413
2414static inline void __skb_queue_before(struct sk_buff_head *list,
2415 struct sk_buff *next,
2416 struct sk_buff *newsk)
2417{
2418 __skb_insert(newsk, ((struct sk_buff_list *)next)->prev, next, list);
2419}
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431static inline void __skb_queue_head(struct sk_buff_head *list,
2432 struct sk_buff *newsk)
2433{
2434 __skb_queue_after(list, (struct sk_buff *)list, newsk);
2435}
2436void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448static inline void __skb_queue_tail(struct sk_buff_head *list,
2449 struct sk_buff *newsk)
2450{
2451 __skb_queue_before(list, (struct sk_buff *)list, newsk);
2452}
2453void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
2454
2455
2456
2457
2458
2459void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
2460static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
2461{
2462 struct sk_buff *next, *prev;
2463
2464 WRITE_ONCE(list->qlen, list->qlen - 1);
2465 next = skb->next;
2466 prev = skb->prev;
2467 skb->next = skb->prev = NULL;
2468 WRITE_ONCE(next->prev, prev);
2469 WRITE_ONCE(prev->next, next);
2470}
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
2481{
2482 struct sk_buff *skb = skb_peek(list);
2483 if (skb)
2484 __skb_unlink(skb, list);
2485 return skb;
2486}
2487struct sk_buff *skb_dequeue(struct sk_buff_head *list);
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
2498{
2499 struct sk_buff *skb = skb_peek_tail(list);
2500 if (skb)
2501 __skb_unlink(skb, list);
2502 return skb;
2503}
2504struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
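
/*
 * Usage sketch (illustration only): a minimal producer/consumer built on
 * struct sk_buff_head.  skb_queue_tail() and skb_dequeue() take the queue
 * lock themselves; the __-prefixed variants above assume the caller already
 * holds it.  All names below are hypothetical.
 *
 *	static struct sk_buff_head example_q;
 *
 *	static void example_init(void)
 *	{
 *		skb_queue_head_init(&example_q);
 *	}
 *
 *	static void example_produce(struct sk_buff *skb)
 *	{
 *		skb_queue_tail(&example_q, skb);
 *	}
 *
 *	static struct sk_buff *example_consume(void)
 *	{
 *		return skb_dequeue(&example_q);	// NULL when the queue is empty
 *	}
 */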
2505
2506
2507static inline bool skb_is_nonlinear(const struct sk_buff *skb)
2508{
2509 return skb->data_len;
2510}
2511
2512static inline unsigned int skb_headlen(const struct sk_buff *skb)
2513{
2514 return skb->len - skb->data_len;
2515}
2516
2517static inline unsigned int __skb_pagelen(const struct sk_buff *skb)
2518{
2519 unsigned int i, len = 0;
2520
2521 for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
2522 len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
2523 return len;
2524}
2525
2526static inline unsigned int skb_pagelen(const struct sk_buff *skb)
2527{
2528 return skb_headlen(skb) + __skb_pagelen(skb);
2529}
2530
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543
2544static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
2545 struct page *page, int off, int size)
2546{
2547 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2548
2549
2550
2551
2552
2553
2554 frag->bv_page = page;
2555 frag->bv_offset = off;
2556 skb_frag_size_set(frag, size);
2557
2558 page = compound_head(page);
2559 if (page_is_pfmemalloc(page))
2560 skb->pfmemalloc = true;
2561}
2562
2563
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
2578 struct page *page, int off, int size)
2579{
2580 __skb_fill_page_desc(skb, i, page, off, size);
2581 skb_shinfo(skb)->nr_frags = i + 1;
2582}
2583
2584void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
2585 int size, unsigned int truesize);
2586
2587void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
2588 unsigned int truesize);
2589
2590#define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
2591
2592#ifdef NET_SKBUFF_DATA_USES_OFFSET
2593static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
2594{
2595 return skb->head + skb->tail;
2596}
2597
2598static inline void skb_reset_tail_pointer(struct sk_buff *skb)
2599{
2600 skb->tail = skb->data - skb->head;
2601}
2602
2603static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
2604{
2605 skb_reset_tail_pointer(skb);
2606 skb->tail += offset;
2607}
2608
2609#else
2610static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
2611{
2612 return skb->tail;
2613}
2614
2615static inline void skb_reset_tail_pointer(struct sk_buff *skb)
2616{
2617 skb->tail = skb->data;
2618}
2619
2620static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
2621{
2622 skb->tail = skb->data + offset;
2623}
2624
2625#endif
2626
2627
2628
2629
2630void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
2631void *skb_put(struct sk_buff *skb, unsigned int len);
2632static inline void *__skb_put(struct sk_buff *skb, unsigned int len)
2633{
2634 void *tmp = skb_tail_pointer(skb);
2635 SKB_LINEAR_ASSERT(skb);
2636 skb->tail += len;
2637 skb->len += len;
2638 return tmp;
2639}
2640
2641static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len)
2642{
2643 void *tmp = __skb_put(skb, len);
2644
2645 memset(tmp, 0, len);
2646 return tmp;
2647}
2648
2649static inline void *__skb_put_data(struct sk_buff *skb, const void *data,
2650 unsigned int len)
2651{
2652 void *tmp = __skb_put(skb, len);
2653
2654 memcpy(tmp, data, len);
2655 return tmp;
2656}
2657
2658static inline void __skb_put_u8(struct sk_buff *skb, u8 val)
2659{
2660 *(u8 *)__skb_put(skb, 1) = val;
2661}
2662
2663static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
2664{
2665 void *tmp = skb_put(skb, len);
2666
2667 memset(tmp, 0, len);
2668
2669 return tmp;
2670}
2671
2672static inline void *skb_put_data(struct sk_buff *skb, const void *data,
2673 unsigned int len)
2674{
2675 void *tmp = skb_put(skb, len);
2676
2677 memcpy(tmp, data, len);
2678
2679 return tmp;
2680}
2681
2682static inline void skb_put_u8(struct sk_buff *skb, u8 val)
2683{
2684 *(u8 *)skb_put(skb, 1) = val;
2685}
2686
2687void *skb_push(struct sk_buff *skb, unsigned int len);
2688static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
2689{
2690 skb->data -= len;
2691 skb->len += len;
2692 return skb->data;
2693}
2694
2695void *skb_pull(struct sk_buff *skb, unsigned int len);
2696static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
2697{
2698 skb->len -= len;
2699 if (unlikely(skb->len < skb->data_len)) {
2700#if defined(CONFIG_DEBUG_NET)
2701 skb->len += len;
2702 pr_err("__skb_pull(len=%u)\n", len);
2703 skb_dump(KERN_ERR, skb, false);
2704#endif
2705 BUG();
2706 }
2707 return skb->data += len;
2708}
2709
2710static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len)
2711{
2712 return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
2713}
2714
2715void *skb_pull_data(struct sk_buff *skb, size_t len);
2716
2717void *__pskb_pull_tail(struct sk_buff *skb, int delta);
2718
2719static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len)
2720{
2721 if (len > skb_headlen(skb) &&
2722 !__pskb_pull_tail(skb, len - skb_headlen(skb)))
2723 return NULL;
2724 skb->len -= len;
2725 return skb->data += len;
2726}
2727
2728static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
2729{
2730 return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
2731}
2732
2733static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
2734{
2735 if (likely(len <= skb_headlen(skb)))
2736 return true;
2737 if (unlikely(len > skb->len))
2738 return false;
2739 return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
2740}
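
/*
 * Usage sketch (illustration only): before touching bytes that may sit in
 * the paged part of the skb, make sure they are linear with
 * pskb_may_pull().  The 8-byte header struct below is made up for the
 * example.
 *
 *	struct example_hdr { __be16 type; __be16 flags; __be32 id; };
 *
 *	static int example_parse(struct sk_buff *skb)
 *	{
 *		const struct example_hdr *hdr;
 *
 *		if (!pskb_may_pull(skb, sizeof(*hdr)))
 *			return -EINVAL;	// packet too short or pull failed
 *		hdr = (const struct example_hdr *)skb->data;
 *		return be16_to_cpu(hdr->type);
 *	}
 */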
2741
2742void skb_condense(struct sk_buff *skb);
2743
2744
2745
2746
2747
2748
2749
2750static inline unsigned int skb_headroom(const struct sk_buff *skb)
2751{
2752 return skb->data - skb->head;
2753}
2754
2755
2756
2757
2758
2759
2760
2761static inline int skb_tailroom(const struct sk_buff *skb)
2762{
2763 return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
2764}
2765
2766
2767
2768
2769
2770
2771
2772
2773static inline int skb_availroom(const struct sk_buff *skb)
2774{
2775 if (skb_is_nonlinear(skb))
2776 return 0;
2777
2778 return skb->end - skb->tail - skb->reserved_tailroom;
2779}
2780
2781
2782
2783
2784
2785
2786
2787
2788
2789static inline void skb_reserve(struct sk_buff *skb, int len)
2790{
2791 skb->data += len;
2792 skb->tail += len;
2793}
2794
2795
2796
2797
2798
2799
2800
2801
2802
2803
2804
2805
2806
2807static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
2808 unsigned int needed_tailroom)
2809{
2810 SKB_LINEAR_ASSERT(skb);
2811 if (mtu < skb_tailroom(skb) - needed_tailroom)
2812
2813 skb->reserved_tailroom = skb_tailroom(skb) - mtu;
2814 else
2815
2816 skb->reserved_tailroom = needed_tailroom;
2817}
2818
2819#define ENCAP_TYPE_ETHER 0
2820#define ENCAP_TYPE_IPPROTO 1
2821
2822static inline void skb_set_inner_protocol(struct sk_buff *skb,
2823 __be16 protocol)
2824{
2825 skb->inner_protocol = protocol;
2826 skb->inner_protocol_type = ENCAP_TYPE_ETHER;
2827}
2828
2829static inline void skb_set_inner_ipproto(struct sk_buff *skb,
2830 __u8 ipproto)
2831{
2832 skb->inner_ipproto = ipproto;
2833 skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
2834}
2835
2836static inline void skb_reset_inner_headers(struct sk_buff *skb)
2837{
2838 skb->inner_mac_header = skb->mac_header;
2839 skb->inner_network_header = skb->network_header;
2840 skb->inner_transport_header = skb->transport_header;
2841}
2842
2843static inline void skb_reset_mac_len(struct sk_buff *skb)
2844{
2845 skb->mac_len = skb->network_header - skb->mac_header;
2846}
2847
2848static inline unsigned char *skb_inner_transport_header(const struct sk_buff
2849 *skb)
2850{
2851 return skb->head + skb->inner_transport_header;
2852}
2853
2854static inline int skb_inner_transport_offset(const struct sk_buff *skb)
2855{
2856 return skb_inner_transport_header(skb) - skb->data;
2857}
2858
2859static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
2860{
2861 skb->inner_transport_header = skb->data - skb->head;
2862}
2863
2864static inline void skb_set_inner_transport_header(struct sk_buff *skb,
2865 const int offset)
2866{
2867 skb_reset_inner_transport_header(skb);
2868 skb->inner_transport_header += offset;
2869}
2870
2871static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
2872{
2873 return skb->head + skb->inner_network_header;
2874}
2875
2876static inline void skb_reset_inner_network_header(struct sk_buff *skb)
2877{
2878 skb->inner_network_header = skb->data - skb->head;
2879}
2880
2881static inline void skb_set_inner_network_header(struct sk_buff *skb,
2882 const int offset)
2883{
2884 skb_reset_inner_network_header(skb);
2885 skb->inner_network_header += offset;
2886}
2887
2888static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
2889{
2890 return skb->head + skb->inner_mac_header;
2891}
2892
2893static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
2894{
2895 skb->inner_mac_header = skb->data - skb->head;
2896}
2897
2898static inline void skb_set_inner_mac_header(struct sk_buff *skb,
2899 const int offset)
2900{
2901 skb_reset_inner_mac_header(skb);
2902 skb->inner_mac_header += offset;
2903}
2904static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
2905{
2906 return skb->transport_header != (typeof(skb->transport_header))~0U;
2907}
2908
2909static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
2910{
2911 DEBUG_NET_WARN_ON_ONCE(!skb_transport_header_was_set(skb));
2912 return skb->head + skb->transport_header;
2913}
2914
2915static inline void skb_reset_transport_header(struct sk_buff *skb)
2916{
2917 skb->transport_header = skb->data - skb->head;
2918}
2919
2920static inline void skb_set_transport_header(struct sk_buff *skb,
2921 const int offset)
2922{
2923 skb_reset_transport_header(skb);
2924 skb->transport_header += offset;
2925}
2926
2927static inline unsigned char *skb_network_header(const struct sk_buff *skb)
2928{
2929 return skb->head + skb->network_header;
2930}
2931
2932static inline void skb_reset_network_header(struct sk_buff *skb)
2933{
2934 skb->network_header = skb->data - skb->head;
2935}
2936
2937static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
2938{
2939 skb_reset_network_header(skb);
2940 skb->network_header += offset;
2941}
2942
2943static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
2944{
2945 return skb->head + skb->mac_header;
2946}
2947
2948static inline int skb_mac_offset(const struct sk_buff *skb)
2949{
2950 return skb_mac_header(skb) - skb->data;
2951}
2952
2953static inline u32 skb_mac_header_len(const struct sk_buff *skb)
2954{
2955 return skb->network_header - skb->mac_header;
2956}
2957
2958static inline int skb_mac_header_was_set(const struct sk_buff *skb)
2959{
2960 return skb->mac_header != (typeof(skb->mac_header))~0U;
2961}
2962
2963static inline void skb_unset_mac_header(struct sk_buff *skb)
2964{
2965 skb->mac_header = (typeof(skb->mac_header))~0U;
2966}
2967
2968static inline void skb_reset_mac_header(struct sk_buff *skb)
2969{
2970 skb->mac_header = skb->data - skb->head;
2971}
2972
2973static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
2974{
2975 skb_reset_mac_header(skb);
2976 skb->mac_header += offset;
2977}
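
/*
 * Usage sketch (illustration only): when constructing a packet, the header
 * offset helpers above are typically called as each layer is pushed.
 * ETH_HLEN comes from <linux/if_ether.h>; the function is hypothetical.
 *
 *	static void example_mark_headers(struct sk_buff *skb)
 *	{
 *		skb_reset_network_header(skb);	// skb->data points at L3
 *		skb_push(skb, ETH_HLEN);	// prepend the Ethernet header
 *		skb_reset_mac_header(skb);	// skb->data now points at L2
 *	}
 */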
2978
2979static inline void skb_pop_mac_header(struct sk_buff *skb)
2980{
2981 skb->mac_header = skb->network_header;
2982}
2983
2984static inline void skb_probe_transport_header(struct sk_buff *skb)
2985{
2986 struct flow_keys_basic keys;
2987
2988 if (skb_transport_header_was_set(skb))
2989 return;
2990
2991 if (skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
2992 NULL, 0, 0, 0, 0))
2993 skb_set_transport_header(skb, keys.control.thoff);
2994}
2995
2996static inline void skb_mac_header_rebuild(struct sk_buff *skb)
2997{
2998 if (skb_mac_header_was_set(skb)) {
2999 const unsigned char *old_mac = skb_mac_header(skb);
3000
3001 skb_set_mac_header(skb, -skb->mac_len);
3002 memmove(skb_mac_header(skb), old_mac, skb->mac_len);
3003 }
3004}
3005
3006static inline int skb_checksum_start_offset(const struct sk_buff *skb)
3007{
3008 return skb->csum_start - skb_headroom(skb);
3009}
3010
3011static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
3012{
3013 return skb->head + skb->csum_start;
3014}
3015
3016static inline int skb_transport_offset(const struct sk_buff *skb)
3017{
3018 return skb_transport_header(skb) - skb->data;
3019}
3020
3021static inline u32 skb_network_header_len(const struct sk_buff *skb)
3022{
3023 return skb->transport_header - skb->network_header;
3024}
3025
3026static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
3027{
3028 return skb->inner_transport_header - skb->inner_network_header;
3029}
3030
3031static inline int skb_network_offset(const struct sk_buff *skb)
3032{
3033 return skb_network_header(skb) - skb->data;
3034}
3035
3036static inline int skb_inner_network_offset(const struct sk_buff *skb)
3037{
3038 return skb_inner_network_header(skb) - skb->data;
3039}
3040
3041static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
3042{
3043 return pskb_may_pull(skb, skb_network_offset(skb) + len);
3044}
3045
3046
3047
3048
3049
3050
3051
3052
3053
3054
3055
3056
3057
3058
3059
3060
3061
3062
3063
3064
3065
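/*
 * CPUs can take a measurable penalty on unaligned loads, and the 14-byte
 * Ethernet header leaves the IP header misaligned when a frame is placed
 * at the very start of the receive buffer.  Reserving NET_IP_ALIGN bytes
 * of headroom shifts the packet so the IP header lands on a 4-byte
 * boundary; receive paths typically do this right after allocation:
 *
 *	skb_reserve(skb, NET_IP_ALIGN);
 *
 * Some architectures care more about aligned DMA than an aligned IP
 * header and define NET_IP_ALIGN as 0, hence the #ifndef guard.
 */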
3066#ifndef NET_IP_ALIGN
3067#define NET_IP_ALIGN 2
3068#endif
3069
3070
3071
3072
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
3089
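/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb/__netdev_alloc_skb) so that headers can later be pushed
 * without reallocating the buffer; with the default value a header growth
 * of up to 32 bytes (or one cache line) is absorbed by this padding.
 * Like NET_IP_ALIGN this changes the DMA alignment of the packet, so an
 * architecture may override the value.
 */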
3090#ifndef NET_SKB_PAD
3091#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
3092#endif
3093
3094int ___pskb_trim(struct sk_buff *skb, unsigned int len);
3095
3096static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
3097{
3098 if (WARN_ON(skb_is_nonlinear(skb)))
3099 return;
3100 skb->len = len;
3101 skb_set_tail_pointer(skb, len);
3102}
3103
3104static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
3105{
3106 __skb_set_length(skb, len);
3107}
3108
3109void skb_trim(struct sk_buff *skb, unsigned int len);
3110
3111static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
3112{
3113 if (skb->data_len)
3114 return ___pskb_trim(skb, len);
3115 __skb_trim(skb, len);
3116 return 0;
3117}
3118
3119static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
3120{
3121 return (len < skb->len) ? __pskb_trim(skb, len) : 0;
3122}
3123
3124
3125
3126
3127
3128
3129
3130
3131
3132
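/**
 * pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 * @skb: buffer to alter
 * @len: new length
 *
 * This is identical to pskb_trim except that the caller knows that
 * the skb is not cloned, so the trim should never fail due to
 * out-of-memory (hence the BUG_ON() on failure).
 */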
3133static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
3134{
3135 int err = pskb_trim(skb, len);
3136 BUG_ON(err);
3137}
3138
3139static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
3140{
3141 unsigned int diff = len - skb->len;
3142
3143 if (skb_tailroom(skb) < diff) {
3144 int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
3145 GFP_ATOMIC);
3146 if (ret)
3147 return ret;
3148 }
3149 __skb_set_length(skb, len);
3150 return 0;
3151}
3152
3153
3154
3155
3156
3157
3158
3159
3160
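/**
 * skb_orphan - orphan a buffer
 * @skb: buffer to orphan
 *
 * If a buffer currently has an owner then we call the owner's
 * destructor function and make the @skb unowned. The buffer continues
 * to exist but is no longer charged to its former owner.
 */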
3161static inline void skb_orphan(struct sk_buff *skb)
3162{
3163 if (skb->destructor) {
3164 skb->destructor(skb);
3165 skb->destructor = NULL;
3166 skb->sk = NULL;
3167 } else {
3168 BUG_ON(skb->sk);
3169 }
3170}
3171
3172
3173
3174
3175
3176
3177
3178
3179
3180
3181static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
3182{
3183 if (likely(!skb_zcopy(skb)))
3184 return 0;
3185 if (!skb_zcopy_is_nouarg(skb) &&
3186 skb_uarg(skb)->callback == msg_zerocopy_callback)
3187 return 0;
3188 return skb_copy_ubufs(skb, gfp_mask);
3189}
3190
3191
3192static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
3193{
3194 if (likely(!skb_zcopy(skb)))
3195 return 0;
3196 return skb_copy_ubufs(skb, gfp_mask);
3197}
3198
3199
3200
3201
3202
3203
3204
3205
3206
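/**
 * __skb_queue_purge - empty a list
 * @list: list to empty
 *
 * Delete all buffers on an &sk_buff list. Each buffer is removed from
 * the list and one reference dropped. This function does not take the
 * list lock and the caller must hold the relevant locks to use it.
 */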
3207static inline void __skb_queue_purge(struct sk_buff_head *list)
3208{
3209 struct sk_buff *skb;
3210 while ((skb = __skb_dequeue(list)) != NULL)
3211 kfree_skb(skb);
3212}
3213void skb_queue_purge(struct sk_buff_head *list);
3214
3215unsigned int skb_rbtree_purge(struct rb_root *root);
3216
3217void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
3218
3219
3220
3221
3222
3223
3224
3225
3226static inline void *netdev_alloc_frag(unsigned int fragsz)
3227{
3228 return __netdev_alloc_frag_align(fragsz, ~0u);
3229}
3230
3231static inline void *netdev_alloc_frag_align(unsigned int fragsz,
3232 unsigned int align)
3233{
3234 WARN_ON_ONCE(!is_power_of_2(align));
3235 return __netdev_alloc_frag_align(fragsz, -align);
3236}
3237
3238struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
3239 gfp_t gfp_mask);
3240
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252
3253
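/**
 * netdev_alloc_skb - allocate an skbuff for rx on a specific device
 * @dev: network device to receive on
 * @length: length to allocate
 *
 * Allocate a new &sk_buff and assign it a usage count of one. The
 * buffer has unspecified headroom built in. Users should allocate
 * the headroom they think they need without accounting for the
 * built in space. The built in space is used for optimisations.
 *
 * %NULL is returned if there is no free memory. Although this function
 * allocates memory it can be called from an interrupt.
 */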
3254static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
3255 unsigned int length)
3256{
3257 return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
3258}
3259
3260
3261static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
3262 gfp_t gfp_mask)
3263{
3264 return __netdev_alloc_skb(NULL, length, gfp_mask);
3265}
3266
3267
3268static inline struct sk_buff *dev_alloc_skb(unsigned int length)
3269{
3270 return netdev_alloc_skb(NULL, length);
3271}
3272
3273
3274static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
3275 unsigned int length, gfp_t gfp)
3276{
3277 struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
3278
3279 if (NET_IP_ALIGN && skb)
3280 skb_reserve(skb, NET_IP_ALIGN);
3281 return skb;
3282}
3283
3284static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
3285 unsigned int length)
3286{
3287 return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
3288}
3289
3290static inline void skb_free_frag(void *addr)
3291{
3292 page_frag_free(addr);
3293}
3294
3295void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
3296
3297static inline void *napi_alloc_frag(unsigned int fragsz)
3298{
3299 return __napi_alloc_frag_align(fragsz, ~0u);
3300}
3301
3302static inline void *napi_alloc_frag_align(unsigned int fragsz,
3303 unsigned int align)
3304{
3305 WARN_ON_ONCE(!is_power_of_2(align));
3306 return __napi_alloc_frag_align(fragsz, -align);
3307}
3308
3309struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
3310 unsigned int length, gfp_t gfp_mask);
3311static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
3312 unsigned int length)
3313{
3314 return __napi_alloc_skb(napi, length, GFP_ATOMIC);
3315}
3316void napi_consume_skb(struct sk_buff *skb, int budget);
3317
3318void napi_skb_free_stolen_head(struct sk_buff *skb);
3319void __kfree_skb_defer(struct sk_buff *skb);
3320
3321
3322
3323
3324
3325
3326
3327
3328
3329
3330static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
3331 unsigned int order)
3332{
3333
3334
3335
3336
3337
3338
3339
3340
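	/* Device Rx pages: request a compound page (an order-0 page is never
	 * made compound) and allow dipping into memory reserves under
	 * pressure; __GFP_MEMALLOC is ignored if the caller also passed
	 * __GFP_NOMEMALLOC.
	 */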
3341 gfp_mask |= __GFP_COMP | __GFP_MEMALLOC;
3342
3343 return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
3344}
3345
3346static inline struct page *dev_alloc_pages(unsigned int order)
3347{
3348 return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
3349}
3350
3351
3352
3353
3354
3355
3356
3357
3358
3359static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
3360{
3361 return __dev_alloc_pages(gfp_mask, 0);
3362}
3363
3364static inline struct page *dev_alloc_page(void)
3365{
3366 return dev_alloc_pages(0);
3367}
3368
3369
3370
3371
3372
3373
3374
3375
3376
3377
3378
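/**
 * dev_page_is_reusable - consider page reusability
 * @page: the page
 *
 * A page shouldn't be considered for reusing/recycling if it was allocated
 * under memory pressure or at a distant memory node.
 *
 * Returns false if this page should be returned to the page allocator, true
 * otherwise.
 */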
3379static inline bool dev_page_is_reusable(const struct page *page)
3380{
3381 return likely(page_to_nid(page) == numa_mem_id() &&
3382 !page_is_pfmemalloc(page));
3383}
3384
3385
3386
3387
3388
3389
3390static inline void skb_propagate_pfmemalloc(const struct page *page,
3391 struct sk_buff *skb)
3392{
3393 if (page_is_pfmemalloc(page))
3394 skb->pfmemalloc = true;
3395}
3396
3397
3398
3399
3400
3401static inline unsigned int skb_frag_off(const skb_frag_t *frag)
3402{
3403 return frag->bv_offset;
3404}
3405
3406
3407
3408
3409
3410
3411static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
3412{
3413 frag->bv_offset += delta;
3414}
3415
3416
3417
3418
3419
3420
3421static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset)
3422{
3423 frag->bv_offset = offset;
3424}
3425
3426
3427
3428
3429
3430
3431static inline void skb_frag_off_copy(skb_frag_t *fragto,
3432 const skb_frag_t *fragfrom)
3433{
3434 fragto->bv_offset = fragfrom->bv_offset;
3435}
3436
3437
3438
3439
3440
3441
3442
3443static inline struct page *skb_frag_page(const skb_frag_t *frag)
3444{
3445 return frag->bv_page;
3446}
3447
3448
3449
3450
3451
3452
3453
3454static inline void __skb_frag_ref(skb_frag_t *frag)
3455{
3456 get_page(skb_frag_page(frag));
3457}
3458
3459
3460
3461
3462
3463
3464
3465
3466static inline void skb_frag_ref(struct sk_buff *skb, int f)
3467{
3468 __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
3469}
3470
3471
3472
3473
3474
3475
3476
3477
3478
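/**
 * __skb_frag_unref - release a reference on a paged fragment.
 * @frag: the paged fragment
 * @recycle: allow returning the page to its page_pool
 *
 * Releases a reference on the paged fragment @frag. When @recycle is set
 * and the page still belongs to a page_pool, the page is handed back to
 * that pool instead of having its reference count dropped.
 */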
3479static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle)
3480{
3481 struct page *page = skb_frag_page(frag);
3482
3483#ifdef CONFIG_PAGE_POOL
3484 if (recycle && page_pool_return_skb_page(page))
3485 return;
3486#endif
3487 put_page(page);
3488}
3489
3490
3491
3492
3493
3494
3495
3496
3497static inline void skb_frag_unref(struct sk_buff *skb, int f)
3498{
3499 __skb_frag_unref(&skb_shinfo(skb)->frags[f], skb->pp_recycle);
3500}
3501
3502
3503
3504
3505
3506
3507
3508
3509static inline void *skb_frag_address(const skb_frag_t *frag)
3510{
3511 return page_address(skb_frag_page(frag)) + skb_frag_off(frag);
3512}
3513
3514
3515
3516
3517
3518
3519
3520
3521static inline void *skb_frag_address_safe(const skb_frag_t *frag)
3522{
3523 void *ptr = page_address(skb_frag_page(frag));
3524 if (unlikely(!ptr))
3525 return NULL;
3526
3527 return ptr + skb_frag_off(frag);
3528}
3529
3530
3531
3532
3533
3534
3535static inline void skb_frag_page_copy(skb_frag_t *fragto,
3536 const skb_frag_t *fragfrom)
3537{
3538 fragto->bv_page = fragfrom->bv_page;
3539}
3540
3541
3542
3543
3544
3545
3546
3547
3548static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
3549{
3550 frag->bv_page = page;
3551}
3552
3553
3554
3555
3556
3557
3558
3559
3560
3561static inline void skb_frag_set_page(struct sk_buff *skb, int f,
3562 struct page *page)
3563{
3564 __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
3565}
3566
3567bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
3568
3569
3570
3571
3572
3573
3574
3575
3576
3577
3578
3579
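/**
 * skb_frag_dma_map - maps a paged fragment via the DMA API
 * @dev: the device to map the fragment to
 * @frag: the paged fragment to map
 * @offset: the offset within the fragment (starting at the
 *          fragment's own offset)
 * @size: the number of bytes to map
 * @dir: the direction of the mapping
 *
 * Maps the page associated with @frag to @dev. An illustrative transmit
 * path call (the ring/device names are made up) might look like:
 *
 *	dma_addr_t addr = skb_frag_dma_map(ring->dev, frag, 0,
 *					   skb_frag_size(frag),
 *					   DMA_TO_DEVICE);
 *	if (dma_mapping_error(ring->dev, addr))
 *		goto drop;
 */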
3580static inline dma_addr_t skb_frag_dma_map(struct device *dev,
3581 const skb_frag_t *frag,
3582 size_t offset, size_t size,
3583 enum dma_data_direction dir)
3584{
3585 return dma_map_page(dev, skb_frag_page(frag),
3586 skb_frag_off(frag) + offset, size, dir);
3587}
3588
3589static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
3590 gfp_t gfp_mask)
3591{
3592 return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
3593}
3594
3595
3596static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
3597 gfp_t gfp_mask)
3598{
3599 return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
3600}
3601
3602
3603
3604
3605
3606
3607
3608
3609
3610
3611static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
3612{
3613 return !skb_header_cloned(skb) &&
3614 skb_headroom(skb) + len <= skb->hdr_len;
3615}
3616
3617static inline int skb_try_make_writable(struct sk_buff *skb,
3618 unsigned int write_len)
3619{
3620 return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
3621 pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3622}
3623
3624static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
3625 int cloned)
3626{
3627 int delta = 0;
3628
3629 if (headroom > skb_headroom(skb))
3630 delta = headroom - skb_headroom(skb);
3631
3632 if (delta || cloned)
3633 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
3634 GFP_ATOMIC);
3635 return 0;
3636}
3637
3638
3639
3640
3641
3642
3643
3644
3645
3646
3647
3648
3649
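/**
 * skb_cow - copy header of skb when it is required
 * @skb: buffer to cow
 * @headroom: needed headroom
 *
 * If the skb passed lacks sufficient headroom or its data part
 * is shared, data is reallocated. If reallocation fails, an error
 * is returned and the original skb is not changed.
 *
 * The result is an skb with a writable area skb->head...skb->tail
 * and at least @headroom of space at head.
 *
 * A typical (purely illustrative) caller that wants to prepend a header:
 *
 *	if (skb_cow(skb, sizeof(struct my_hdr)))
 *		goto drop;
 *	hdr = skb_push(skb, sizeof(struct my_hdr));
 */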
3650static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
3651{
3652 return __skb_cow(skb, headroom, skb_cloned(skb));
3653}
3654
3655
3656
3657
3658
3659
3660
3661
3662
3663
3664
3665static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
3666{
3667 return __skb_cow(skb, headroom, skb_header_cloned(skb));
3668}
3669
3670
3671
3672
3673
3674
3675
3676
3677
3678
3679
3680static inline int skb_padto(struct sk_buff *skb, unsigned int len)
3681{
3682 unsigned int size = skb->len;
3683 if (likely(size >= len))
3684 return 0;
3685 return skb_pad(skb, len - size);
3686}
3687
3688
3689
3690
3691
3692
3693
3694
3695
3696
3697
3698
3699static inline int __must_check __skb_put_padto(struct sk_buff *skb,
3700 unsigned int len,
3701 bool free_on_error)
3702{
3703 unsigned int size = skb->len;
3704
3705 if (unlikely(size < len)) {
3706 len -= size;
3707 if (__skb_pad(skb, len, free_on_error))
3708 return -ENOMEM;
3709 __skb_put(skb, len);
3710 }
3711 return 0;
3712}
3713
3714
3715
3716
3717
3718
3719
3720
3721
3722
3723
3724static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len)
3725{
3726 return __skb_put_padto(skb, len, true);
3727}
3728
3729static inline int skb_add_data(struct sk_buff *skb,
3730 struct iov_iter *from, int copy)
3731{
3732 const int off = skb->len;
3733
3734 if (skb->ip_summed == CHECKSUM_NONE) {
3735 __wsum csum = 0;
3736 if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
3737 &csum, from)) {
3738 skb->csum = csum_block_add(skb->csum, csum, off);
3739 return 0;
3740 }
3741 } else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
3742 return 0;
3743
3744 __skb_trim(skb, off);
3745 return -EFAULT;
3746}
3747
3748static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
3749 const struct page *page, int off)
3750{
3751 if (skb_zcopy(skb))
3752 return false;
3753 if (i) {
3754 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
3755
3756 return page == skb_frag_page(frag) &&
3757 off == skb_frag_off(frag) + skb_frag_size(frag);
3758 }
3759 return false;
3760}
3761
3762static inline int __skb_linearize(struct sk_buff *skb)
3763{
3764 return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
3765}
3766
3767
3768
3769
3770
3771
3772
3773
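/**
 * skb_linearize - convert paged skb to linear one
 * @skb: buffer to linearize
 *
 * If there is no free memory -ENOMEM is returned, otherwise zero
 * is returned and the old skb data released.
 */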
3774static inline int skb_linearize(struct sk_buff *skb)
3775{
3776 return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
3777}
3778
3779
3780
3781
3782
3783
3784
3785
3786static inline bool skb_has_shared_frag(const struct sk_buff *skb)
3787{
3788 return skb_is_nonlinear(skb) &&
3789 skb_shinfo(skb)->flags & SKBFL_SHARED_FRAG;
3790}
3791
3792
3793
3794
3795
3796
3797
3798
3799static inline int skb_linearize_cow(struct sk_buff *skb)
3800{
3801 return skb_is_nonlinear(skb) || skb_cloned(skb) ?
3802 __skb_linearize(skb) : 0;
3803}
3804
3805static __always_inline void
3806__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
3807 unsigned int off)
3808{
3809 if (skb->ip_summed == CHECKSUM_COMPLETE)
3810 skb->csum = csum_block_sub(skb->csum,
3811 csum_partial(start, len, 0), off);
3812 else if (skb->ip_summed == CHECKSUM_PARTIAL &&
3813 skb_checksum_start_offset(skb) < 0)
3814 skb->ip_summed = CHECKSUM_NONE;
3815}
3816
3817
3818
3819
3820
3821
3822
3823
3824
3825
3826
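/**
 * skb_postpull_rcsum - update checksum for received skb after pull
 * @skb: buffer to update
 * @start: start of data before pull
 * @len: length of data pulled
 *
 * After doing a pull on a received packet, you need to call this to
 * update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 * CHECKSUM_NONE so that it can be recomputed from scratch.
 * skb_pull_rcsum() below combines the pull with this fixup.
 */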
3827static inline void skb_postpull_rcsum(struct sk_buff *skb,
3828 const void *start, unsigned int len)
3829{
3830 if (skb->ip_summed == CHECKSUM_COMPLETE)
3831 skb->csum = wsum_negate(csum_partial(start, len,
3832 wsum_negate(skb->csum)));
3833 else if (skb->ip_summed == CHECKSUM_PARTIAL &&
3834 skb_checksum_start_offset(skb) < 0)
3835 skb->ip_summed = CHECKSUM_NONE;
3836}
3837
3838static __always_inline void
3839__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
3840 unsigned int off)
3841{
3842 if (skb->ip_summed == CHECKSUM_COMPLETE)
3843 skb->csum = csum_block_add(skb->csum,
3844 csum_partial(start, len, 0), off);
3845}
3846
3847
3848
3849
3850
3851
3852
3853
3854
3855
3856static inline void skb_postpush_rcsum(struct sk_buff *skb,
3857 const void *start, unsigned int len)
3858{
3859 __skb_postpush_rcsum(skb, start, len, 0);
3860}
3861
3862void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
3863
3864
3865
3866
3867
3868
3869
3870
3871
3872
3873
3874
3875static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
3876{
3877 skb_push(skb, len);
3878 skb_postpush_rcsum(skb, skb->data, len);
3879 return skb->data;
3880}
3881
3882int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
3883
3884
3885
3886
3887
3888
3889
3890
3891
3892
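/**
 * pskb_trim_rcsum - trim received skb and update checksum
 * @skb: buffer to trim
 * @len: new length
 *
 * This is exactly the same as pskb_trim except that it ensures the
 * checksum of received packets is still valid after the operation.
 * It can change skb pointers.
 */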
3893static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
3894{
3895 if (likely(len >= skb->len))
3896 return 0;
3897 return pskb_trim_rcsum_slow(skb, len);
3898}
3899
3900static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
3901{
3902 if (skb->ip_summed == CHECKSUM_COMPLETE)
3903 skb->ip_summed = CHECKSUM_NONE;
3904 __skb_trim(skb, len);
3905 return 0;
3906}
3907
3908static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
3909{
3910 if (skb->ip_summed == CHECKSUM_COMPLETE)
3911 skb->ip_summed = CHECKSUM_NONE;
3912 return __skb_grow(skb, len);
3913}
3914
3915#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
3916#define skb_rb_first(root) rb_to_skb(rb_first(root))
3917#define skb_rb_last(root) rb_to_skb(rb_last(root))
3918#define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode))
3919#define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode))
3920
3921#define skb_queue_walk(queue, skb) \
3922 for (skb = (queue)->next; \
3923 skb != (struct sk_buff *)(queue); \
3924 skb = skb->next)
3925
3926#define skb_queue_walk_safe(queue, skb, tmp) \
3927 for (skb = (queue)->next, tmp = skb->next; \
3928 skb != (struct sk_buff *)(queue); \
3929 skb = tmp, tmp = skb->next)
3930
3931#define skb_queue_walk_from(queue, skb) \
3932 for (; skb != (struct sk_buff *)(queue); \
3933 skb = skb->next)
3934
3935#define skb_rbtree_walk(skb, root) \
3936 for (skb = skb_rb_first(root); skb != NULL; \
3937 skb = skb_rb_next(skb))
3938
3939#define skb_rbtree_walk_from(skb) \
3940 for (; skb != NULL; \
3941 skb = skb_rb_next(skb))
3942
3943#define skb_rbtree_walk_from_safe(skb, tmp) \
3944 for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \
3945 skb = tmp)
3946
3947#define skb_queue_walk_from_safe(queue, skb, tmp) \
3948 for (tmp = skb->next; \
3949 skb != (struct sk_buff *)(queue); \
3950 skb = tmp, tmp = skb->next)
3951
3952#define skb_queue_reverse_walk(queue, skb) \
3953 for (skb = (queue)->prev; \
3954 skb != (struct sk_buff *)(queue); \
3955 skb = skb->prev)
3956
3957#define skb_queue_reverse_walk_safe(queue, skb, tmp) \
3958 for (skb = (queue)->prev, tmp = skb->prev; \
3959 skb != (struct sk_buff *)(queue); \
3960 skb = tmp, tmp = skb->prev)
3961
3962#define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \
3963 for (tmp = skb->prev; \
3964 skb != (struct sk_buff *)(queue); \
3965 skb = tmp, tmp = skb->prev)
3966
3967static inline bool skb_has_frag_list(const struct sk_buff *skb)
3968{
3969 return skb_shinfo(skb)->frag_list != NULL;
3970}
3971
3972static inline void skb_frag_list_init(struct sk_buff *skb)
3973{
3974 skb_shinfo(skb)->frag_list = NULL;
3975}
3976
3977#define skb_walk_frags(skb, iter) \
3978 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
3979
3980
3981int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
3982 int *err, long *timeo_p,
3983 const struct sk_buff *skb);
3984struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
3985 struct sk_buff_head *queue,
3986 unsigned int flags,
3987 int *off, int *err,
3988 struct sk_buff **last);
3989struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
3990 struct sk_buff_head *queue,
3991 unsigned int flags, int *off, int *err,
3992 struct sk_buff **last);
3993struct sk_buff *__skb_recv_datagram(struct sock *sk,
3994 struct sk_buff_head *sk_queue,
3995 unsigned int flags, int *off, int *err);
3996struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags, int *err);
3997__poll_t datagram_poll(struct file *file, struct socket *sock,
3998 struct poll_table_struct *wait);
3999int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
4000 struct iov_iter *to, int size);
4001static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
4002 struct msghdr *msg, int size)
4003{
4004 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
4005}
4006int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
4007 struct msghdr *msg);
4008int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
4009 struct iov_iter *to, int len,
4010 struct ahash_request *hash);
4011int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
4012 struct iov_iter *from, int len);
4013int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
4014void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
4015void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
4016static inline void skb_free_datagram_locked(struct sock *sk,
4017 struct sk_buff *skb)
4018{
4019 __skb_free_datagram_locked(sk, skb, 0);
4020}
4021int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
4022int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
4023int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
4024__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
4025 int len);
4026int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
4027 struct pipe_inode_info *pipe, unsigned int len,
4028 unsigned int flags);
4029int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
4030 int len);
4031int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len);
4032void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
4033unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
4034int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
4035 int len, int hlen);
4036void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
4037int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
4038void skb_scrub_packet(struct sk_buff *skb, bool xnet);
4039bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
4040bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
4041struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
4042struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features,
4043 unsigned int offset);
4044struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
4045int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len);
4046int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
4047int skb_vlan_pop(struct sk_buff *skb);
4048int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
4049int skb_eth_pop(struct sk_buff *skb);
4050int skb_eth_push(struct sk_buff *skb, const unsigned char *dst,
4051 const unsigned char *src);
4052int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
4053 int mac_len, bool ethernet);
4054int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
4055 bool ethernet);
4056int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
4057int skb_mpls_dec_ttl(struct sk_buff *skb);
4058struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
4059 gfp_t gfp);
4060
4061static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
4062{
4063 return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT;
4064}
4065
4066static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
4067{
4068 return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
4069}
4070
4071struct skb_checksum_ops {
4072 __wsum (*update)(const void *mem, int len, __wsum wsum);
4073 __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
4074};
4075
4076extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly;
4077
4078__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
4079 __wsum csum, const struct skb_checksum_ops *ops);
4080__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
4081 __wsum csum);
4082
4083static inline void * __must_check
4084__skb_header_pointer(const struct sk_buff *skb, int offset, int len,
4085 const void *data, int hlen, void *buffer)
4086{
4087 if (likely(hlen - offset >= len))
4088 return (void *)data + offset;
4089
4090 if (!skb || unlikely(skb_copy_bits(skb, offset, buffer, len) < 0))
4091 return NULL;
4092
4093 return buffer;
4094}
4095
4096static inline void * __must_check
4097skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
4098{
4099 return __skb_header_pointer(skb, offset, len, skb->data,
4100 skb_headlen(skb), buffer);
4101}
4102
4103
4104
4105
4106
4107
4108
4109
4110
4111
4112
4113static inline bool skb_needs_linearize(struct sk_buff *skb,
4114 netdev_features_t features)
4115{
4116 return skb_is_nonlinear(skb) &&
4117 ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
4118 (skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
4119}
4120
4121static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
4122 void *to,
4123 const unsigned int len)
4124{
4125 memcpy(to, skb->data, len);
4126}
4127
4128static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
4129 const int offset, void *to,
4130 const unsigned int len)
4131{
4132 memcpy(to, skb->data + offset, len);
4133}
4134
4135static inline void skb_copy_to_linear_data(struct sk_buff *skb,
4136 const void *from,
4137 const unsigned int len)
4138{
4139 memcpy(skb->data, from, len);
4140}
4141
4142static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
4143 const int offset,
4144 const void *from,
4145 const unsigned int len)
4146{
4147 memcpy(skb->data + offset, from, len);
4148}
4149
4150void skb_init(void);
4151
4152static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
4153{
4154 return skb->tstamp;
4155}
4156
4157
4158
4159
4160
4161
4162
4163
4164
4165
4166static inline void skb_get_timestamp(const struct sk_buff *skb,
4167 struct __kernel_old_timeval *stamp)
4168{
4169 *stamp = ns_to_kernel_old_timeval(skb->tstamp);
4170}
4171
4172static inline void skb_get_new_timestamp(const struct sk_buff *skb,
4173 struct __kernel_sock_timeval *stamp)
4174{
4175 struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
4176
4177 stamp->tv_sec = ts.tv_sec;
4178 stamp->tv_usec = ts.tv_nsec / 1000;
4179}
4180
4181static inline void skb_get_timestampns(const struct sk_buff *skb,
4182 struct __kernel_old_timespec *stamp)
4183{
4184 struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
4185
4186 stamp->tv_sec = ts.tv_sec;
4187 stamp->tv_nsec = ts.tv_nsec;
4188}
4189
4190static inline void skb_get_new_timestampns(const struct sk_buff *skb,
4191 struct __kernel_timespec *stamp)
4192{
4193 struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
4194
4195 stamp->tv_sec = ts.tv_sec;
4196 stamp->tv_nsec = ts.tv_nsec;
4197}
4198
4199static inline void __net_timestamp(struct sk_buff *skb)
4200{
4201 skb->tstamp = ktime_get_real();
4202 skb->mono_delivery_time = 0;
4203}
4204
4205static inline ktime_t net_timedelta(ktime_t t)
4206{
4207 return ktime_sub(ktime_get_real(), t);
4208}
4209
4210static inline void skb_set_delivery_time(struct sk_buff *skb, ktime_t kt,
4211 bool mono)
4212{
4213 skb->tstamp = kt;
4214 skb->mono_delivery_time = kt && mono;
4215}
4216
4217DECLARE_STATIC_KEY_FALSE(netstamp_needed_key);
4218
4219
4220
4221
4222static inline void skb_clear_delivery_time(struct sk_buff *skb)
4223{
4224 if (skb->mono_delivery_time) {
4225 skb->mono_delivery_time = 0;
4226 if (static_branch_unlikely(&netstamp_needed_key))
4227 skb->tstamp = ktime_get_real();
4228 else
4229 skb->tstamp = 0;
4230 }
4231}
4232
4233static inline void skb_clear_tstamp(struct sk_buff *skb)
4234{
4235 if (skb->mono_delivery_time)
4236 return;
4237
4238 skb->tstamp = 0;
4239}
4240
4241static inline ktime_t skb_tstamp(const struct sk_buff *skb)
4242{
4243 if (skb->mono_delivery_time)
4244 return 0;
4245
4246 return skb->tstamp;
4247}
4248
4249static inline ktime_t skb_tstamp_cond(const struct sk_buff *skb, bool cond)
4250{
4251 if (!skb->mono_delivery_time && skb->tstamp)
4252 return skb->tstamp;
4253
4254 if (static_branch_unlikely(&netstamp_needed_key) || cond)
4255 return ktime_get_real();
4256
4257 return 0;
4258}
4259
4260static inline u8 skb_metadata_len(const struct sk_buff *skb)
4261{
4262 return skb_shinfo(skb)->meta_len;
4263}
4264
4265static inline void *skb_metadata_end(const struct sk_buff *skb)
4266{
4267 return skb_mac_header(skb);
4268}
4269
4270static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
4271 const struct sk_buff *skb_b,
4272 u8 meta_len)
4273{
4274 const void *a = skb_metadata_end(skb_a);
4275 const void *b = skb_metadata_end(skb_b);
4276
4277#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
4278 u64 diffs = 0;
4279
4280 switch (meta_len) {
4281#define __it(x, op) (x -= sizeof(u##op))
4282#define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
4283 case 32: diffs |= __it_diff(a, b, 64);
4284 fallthrough;
4285 case 24: diffs |= __it_diff(a, b, 64);
4286 fallthrough;
4287 case 16: diffs |= __it_diff(a, b, 64);
4288 fallthrough;
4289 case 8: diffs |= __it_diff(a, b, 64);
4290 break;
4291 case 28: diffs |= __it_diff(a, b, 64);
4292 fallthrough;
4293 case 20: diffs |= __it_diff(a, b, 64);
4294 fallthrough;
4295 case 12: diffs |= __it_diff(a, b, 64);
4296 fallthrough;
4297 case 4: diffs |= __it_diff(a, b, 32);
4298 break;
4299 }
4300 return diffs;
4301#else
4302 return memcmp(a - meta_len, b - meta_len, meta_len);
4303#endif
4304}
4305
4306static inline bool skb_metadata_differs(const struct sk_buff *skb_a,
4307 const struct sk_buff *skb_b)
4308{
4309 u8 len_a = skb_metadata_len(skb_a);
4310 u8 len_b = skb_metadata_len(skb_b);
4311
4312 if (!(len_a | len_b))
4313 return false;
4314
4315 return len_a != len_b ?
4316 true : __skb_metadata_differs(skb_a, skb_b, len_a);
4317}
4318
4319static inline void skb_metadata_set(struct sk_buff *skb, u8 meta_len)
4320{
4321 skb_shinfo(skb)->meta_len = meta_len;
4322}
4323
4324static inline void skb_metadata_clear(struct sk_buff *skb)
4325{
4326 skb_metadata_set(skb, 0);
4327}
4328
4329struct sk_buff *skb_clone_sk(struct sk_buff *skb);
4330
4331#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
4332
4333void skb_clone_tx_timestamp(struct sk_buff *skb);
4334bool skb_defer_rx_timestamp(struct sk_buff *skb);
4335
4336#else
4337
4338static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
4339{
4340}
4341
4342static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
4343{
4344 return false;
4345}
4346
4347#endif
4348
4349
4350
4351
4352
4353
4354
4355
4356
4357
4358
4359
4360
4361void skb_complete_tx_timestamp(struct sk_buff *skb,
4362 struct skb_shared_hwtstamps *hwtstamps);
4363
4364void __skb_tstamp_tx(struct sk_buff *orig_skb, const struct sk_buff *ack_skb,
4365 struct skb_shared_hwtstamps *hwtstamps,
4366 struct sock *sk, int tstype);
4367
4368
4369
4370
4371
4372
4373
4374
4375
4376
4377
4378
4379void skb_tstamp_tx(struct sk_buff *orig_skb,
4380 struct skb_shared_hwtstamps *hwtstamps);
4381
4382
4383
4384
4385
4386
4387
4388
4389
4390
4391
4392
4393
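/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC Drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware,
 * and in any case before TX completion of this packet can trigger;
 * otherwise the packet could already have been freed.
 *
 * @skb: A socket buffer.
 */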
4394static inline void skb_tx_timestamp(struct sk_buff *skb)
4395{
4396 skb_clone_tx_timestamp(skb);
4397 if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
4398 skb_tstamp_tx(skb, NULL);
4399}
4400
4401
4402
4403
4404
4405
4406
4407
4408void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
4409
4410__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
4411__sum16 __skb_checksum_complete(struct sk_buff *skb);
4412
4413static inline int skb_csum_unnecessary(const struct sk_buff *skb)
4414{
4415 return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
4416 skb->csum_valid ||
4417 (skb->ip_summed == CHECKSUM_PARTIAL &&
4418 skb_checksum_start_offset(skb) >= 0));
4419}
4420
4421
4422
4423
4424
4425
4426
4427
4428
4429
4430
4431
4432
4433
4434
4435
4436
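/**
 * skb_checksum_complete - Calculate checksum of an entire packet
 * @skb: packet to process
 *
 * This function calculates the checksum over the entire packet plus
 * the value of skb->csum. The latter can be used to supply the
 * checksum of a pseudo header as used by TCP/UDP.
 *
 * For protocols that contain complete checksums such as ICMP/TCP/UDP,
 * this can be used to verify the checksum on received packets: the
 * function returns zero if the checksum is correct, which includes the
 * case where skb_csum_unnecessary() already succeeded.
 */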
4437static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
4438{
4439 return skb_csum_unnecessary(skb) ?
4440 0 : __skb_checksum_complete(skb);
4441}
4442
4443static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
4444{
4445 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
4446 if (skb->csum_level == 0)
4447 skb->ip_summed = CHECKSUM_NONE;
4448 else
4449 skb->csum_level--;
4450 }
4451}
4452
4453static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
4454{
4455 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
4456 if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
4457 skb->csum_level++;
4458 } else if (skb->ip_summed == CHECKSUM_NONE) {
4459 skb->ip_summed = CHECKSUM_UNNECESSARY;
4460 skb->csum_level = 0;
4461 }
4462}
4463
4464static inline void __skb_reset_checksum_unnecessary(struct sk_buff *skb)
4465{
4466 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
4467 skb->ip_summed = CHECKSUM_NONE;
4468 skb->csum_level = 0;
4469 }
4470}
4471
4472
4473
4474
4475
4476
4477static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
4478 bool zero_okay,
4479 __sum16 check)
4480{
4481 if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
4482 skb->csum_valid = 1;
4483 __skb_decr_checksum_unnecessary(skb);
4484 return false;
4485 }
4486
4487 return true;
4488}
4489
4490
4491
4492
4493#define CHECKSUM_BREAK 76
4494
4495
4496
4497
4498
4499
4500
4501static inline void skb_checksum_complete_unset(struct sk_buff *skb)
4502{
4503 if (skb->ip_summed == CHECKSUM_COMPLETE)
4504 skb->ip_summed = CHECKSUM_NONE;
4505}
4506
4507
4508
4509
4510
4511
4512
4513
4514
4515
4516static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
4517 bool complete,
4518 __wsum psum)
4519{
4520 if (skb->ip_summed == CHECKSUM_COMPLETE) {
4521 if (!csum_fold(csum_add(psum, skb->csum))) {
4522 skb->csum_valid = 1;
4523 return 0;
4524 }
4525 }
4526
4527 skb->csum = psum;
4528
4529 if (complete || skb->len <= CHECKSUM_BREAK) {
4530 __sum16 csum;
4531
4532 csum = __skb_checksum_complete(skb);
4533 skb->csum_valid = !csum;
4534 return csum;
4535 }
4536
4537 return 0;
4538}
4539
4540static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
4541{
4542 return 0;
4543}
4544
4545
4546
4547
4548
4549
4550
4551
4552
4553
4554
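/* Perform checksum validation, computing the pseudo-header checksum only
 * when it is actually needed (hence a macro, so compute_pseudo() is
 * evaluated lazily): cheap checks first (checksum unnecessary, zero
 * checksum allowed), then CHECKSUM_COMPLETE verification, and only as a
 * last resort a full __skb_checksum_complete().
 *
 * Returns 0 when the checksum is validated (or its validation is deferred
 * to a later skb_checksum_complete()), otherwise the non-zero value of
 * the invalid checksum.
 *
 * Illustrative call in a protocol receive handler, where
 * prot_compute_pseudo() stands in for the protocol's pseudo-header helper:
 *
 *	if (skb_checksum_init(skb, IPPROTO_TCP, prot_compute_pseudo))
 *		goto csum_error;
 */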
4555#define __skb_checksum_validate(skb, proto, complete, \
4556 zero_okay, check, compute_pseudo) \
4557({ \
4558 __sum16 __ret = 0; \
4559 skb->csum_valid = 0; \
4560 if (__skb_checksum_validate_needed(skb, zero_okay, check)) \
4561 __ret = __skb_checksum_validate_complete(skb, \
4562 complete, compute_pseudo(skb, proto)); \
4563 __ret; \
4564})
4565
4566#define skb_checksum_init(skb, proto, compute_pseudo) \
4567 __skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)
4568
4569#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \
4570 __skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)
4571
4572#define skb_checksum_validate(skb, proto, compute_pseudo) \
4573 __skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)
4574
4575#define skb_checksum_validate_zero_check(skb, proto, check, \
4576 compute_pseudo) \
4577 __skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)
4578
4579#define skb_checksum_simple_validate(skb) \
4580 __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
4581
4582static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
4583{
4584 return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid);
4585}
4586
4587static inline void __skb_checksum_convert(struct sk_buff *skb, __wsum pseudo)
4588{
4589 skb->csum = ~pseudo;
4590 skb->ip_summed = CHECKSUM_COMPLETE;
4591}
4592
4593#define skb_checksum_try_convert(skb, proto, compute_pseudo) \
4594do { \
4595 if (__skb_checksum_convert_check(skb)) \
4596 __skb_checksum_convert(skb, compute_pseudo(skb, proto)); \
4597} while (0)
4598
4599static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
4600 u16 start, u16 offset)
4601{
4602 skb->ip_summed = CHECKSUM_PARTIAL;
4603 skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
4604 skb->csum_offset = offset - start;
4605}
4606
4607
4608
4609
4610
4611
4612static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
4613 int start, int offset, bool nopartial)
4614{
4615 __wsum delta;
4616
4617 if (!nopartial) {
4618 skb_remcsum_adjust_partial(skb, ptr, start, offset);
4619 return;
4620 }
4621
4622 if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
4623 __skb_checksum_complete(skb);
4624 skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
4625 }
4626
4627 delta = remcsum_adjust(ptr, skb->csum, start, offset);
4628
4629
4630 skb->csum = csum_add(skb->csum, delta);
4631}
4632
4633static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
4634{
4635#if IS_ENABLED(CONFIG_NF_CONNTRACK)
4636 return (void *)(skb->_nfct & NFCT_PTRMASK);
4637#else
4638 return NULL;
4639#endif
4640}
4641
4642static inline unsigned long skb_get_nfct(const struct sk_buff *skb)
4643{
4644#if IS_ENABLED(CONFIG_NF_CONNTRACK)
4645 return skb->_nfct;
4646#else
4647 return 0UL;
4648#endif
4649}
4650
4651static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct)
4652{
4653#if IS_ENABLED(CONFIG_NF_CONNTRACK)
4654 skb->slow_gro |= !!nfct;
4655 skb->_nfct = nfct;
4656#endif
4657}
4658
4659#ifdef CONFIG_SKB_EXTENSIONS
4660enum skb_ext_id {
4661#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
4662 SKB_EXT_BRIDGE_NF,
4663#endif
4664#ifdef CONFIG_XFRM
4665 SKB_EXT_SEC_PATH,
4666#endif
4667#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
4668 TC_SKB_EXT,
4669#endif
4670#if IS_ENABLED(CONFIG_MPTCP)
4671 SKB_EXT_MPTCP,
4672#endif
4673#if IS_ENABLED(CONFIG_MCTP_FLOWS)
4674 SKB_EXT_MCTP,
4675#endif
4676 SKB_EXT_NUM,
4677};
4678
4679
4680
4681
4682
4683
4684
4685
4686
4687
4688
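/**
 * struct skb_ext - sk_buff extensions
 * @refcnt: 1 on allocation, deallocated on 0
 * @offset: offset to add to @data to obtain extension address
 * @chunks: size currently allocated, stored in 8-byte units
 * @data: start of extension data, variable sized
 *
 * Note: offsets/lengths are stored in chunks of 8 bytes, which allows
 * the use of 'u8' types while still covering up to 2kb of extension data.
 */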
4689struct skb_ext {
4690 refcount_t refcnt;
4691 u8 offset[SKB_EXT_NUM];
4692 u8 chunks;
4693 char data[] __aligned(8);
4694};
4695
4696struct skb_ext *__skb_ext_alloc(gfp_t flags);
4697void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
4698 struct skb_ext *ext);
4699void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
4700void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
4701void __skb_ext_put(struct skb_ext *ext);
4702
4703static inline void skb_ext_put(struct sk_buff *skb)
4704{
4705 if (skb->active_extensions)
4706 __skb_ext_put(skb->extensions);
4707}
4708
4709static inline void __skb_ext_copy(struct sk_buff *dst,
4710 const struct sk_buff *src)
4711{
4712 dst->active_extensions = src->active_extensions;
4713
4714 if (src->active_extensions) {
4715 struct skb_ext *ext = src->extensions;
4716
4717 refcount_inc(&ext->refcnt);
4718 dst->extensions = ext;
4719 }
4720}
4721
4722static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src)
4723{
4724 skb_ext_put(dst);
4725 __skb_ext_copy(dst, src);
4726}
4727
4728static inline bool __skb_ext_exist(const struct skb_ext *ext, enum skb_ext_id i)
4729{
4730 return !!ext->offset[i];
4731}
4732
4733static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id)
4734{
4735 return skb->active_extensions & (1 << id);
4736}
4737
4738static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
4739{
4740 if (skb_ext_exist(skb, id))
4741 __skb_ext_del(skb, id);
4742}
4743
4744static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id)
4745{
4746 if (skb_ext_exist(skb, id)) {
4747 struct skb_ext *ext = skb->extensions;
4748
4749 return (void *)ext + (ext->offset[id] << 3);
4750 }
4751
4752 return NULL;
4753}
4754
4755static inline void skb_ext_reset(struct sk_buff *skb)
4756{
4757 if (unlikely(skb->active_extensions)) {
4758 __skb_ext_put(skb->extensions);
4759 skb->active_extensions = 0;
4760 }
4761}
4762
4763static inline bool skb_has_extensions(struct sk_buff *skb)
4764{
4765 return unlikely(skb->active_extensions);
4766}
4767#else
4768static inline void skb_ext_put(struct sk_buff *skb) {}
4769static inline void skb_ext_reset(struct sk_buff *skb) {}
4770static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
4771static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
4772static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
4773static inline bool skb_has_extensions(struct sk_buff *skb) { return false; }
4774#endif
4775
4776static inline void nf_reset_ct(struct sk_buff *skb)
4777{
4778#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
4779 nf_conntrack_put(skb_nfct(skb));
4780 skb->_nfct = 0;
4781#endif
4782}
4783
4784static inline void nf_reset_trace(struct sk_buff *skb)
4785{
4786#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
4787 skb->nf_trace = 0;
4788#endif
4789}
4790
4791static inline void ipvs_reset(struct sk_buff *skb)
4792{
4793#if IS_ENABLED(CONFIG_IP_VS)
4794 skb->ipvs_property = 0;
4795#endif
4796}
4797
4798
4799static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
4800 bool copy)
4801{
4802#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
4803 dst->_nfct = src->_nfct;
4804 nf_conntrack_get(skb_nfct(src));
4805#endif
4806#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
4807 if (copy)
4808 dst->nf_trace = src->nf_trace;
4809#endif
4810}
4811
4812static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
4813{
4814#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
4815 nf_conntrack_put(skb_nfct(dst));
4816#endif
4817 dst->slow_gro = src->slow_gro;
4818 __nf_copy(dst, src, true);
4819}
4820
4821#ifdef CONFIG_NETWORK_SECMARK
4822static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
4823{
4824 to->secmark = from->secmark;
4825}
4826
4827static inline void skb_init_secmark(struct sk_buff *skb)
4828{
4829 skb->secmark = 0;
4830}
4831#else
4832static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
4833{ }
4834
4835static inline void skb_init_secmark(struct sk_buff *skb)
4836{ }
4837#endif
4838
4839static inline int secpath_exists(const struct sk_buff *skb)
4840{
4841#ifdef CONFIG_XFRM
4842 return skb_ext_exist(skb, SKB_EXT_SEC_PATH);
4843#else
4844 return 0;
4845#endif
4846}
4847
4848static inline bool skb_irq_freeable(const struct sk_buff *skb)
4849{
4850 return !skb->destructor &&
4851 !secpath_exists(skb) &&
4852 !skb_nfct(skb) &&
4853 !skb->_skb_refdst &&
4854 !skb_has_frag_list(skb);
4855}
4856
4857static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
4858{
4859 skb->queue_mapping = queue_mapping;
4860}
4861
4862static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
4863{
4864 return skb->queue_mapping;
4865}
4866
4867static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
4868{
4869 to->queue_mapping = from->queue_mapping;
4870}
4871
4872static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
4873{
4874 skb->queue_mapping = rx_queue + 1;
4875}
4876
4877static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
4878{
4879 return skb->queue_mapping - 1;
4880}
4881
4882static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
4883{
4884 return skb->queue_mapping != 0;
4885}
4886
4887static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val)
4888{
4889 skb->dst_pending_confirm = val;
4890}
4891
4892static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
4893{
4894 return skb->dst_pending_confirm != 0;
4895}
4896
4897static inline struct sec_path *skb_sec_path(const struct sk_buff *skb)
4898{
4899#ifdef CONFIG_XFRM
4900 return skb_ext_find(skb, SKB_EXT_SEC_PATH);
4901#else
4902 return NULL;
4903#endif
4904}
4905
4906
4907
4908
4909
4910
4911
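/* Keeps track of the mac header offset relative to skb->head: useful for
 * TSO of tunneling protocols such as GRE. For a non-tunnel skb it points
 * to skb_mac_header(), for a tunnel skb to the outer mac header. Also
 * tracks the level of encapsulation of network headers.
 */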
4912struct skb_gso_cb {
4913 union {
4914 int mac_offset;
4915 int data_offset;
4916 };
4917 int encap_level;
4918 __wsum csum;
4919 __u16 csum_start;
4920};
4921#define SKB_GSO_CB_OFFSET 32
4922#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET))
4923
4924static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
4925{
4926 return (skb_mac_header(inner_skb) - inner_skb->head) -
4927 SKB_GSO_CB(inner_skb)->mac_offset;
4928}
4929
4930static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
4931{
4932 int new_headroom, headroom;
4933 int ret;
4934
4935 headroom = skb_headroom(skb);
4936 ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
4937 if (ret)
4938 return ret;
4939
4940 new_headroom = skb_headroom(skb);
4941 SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
4942 return 0;
4943}
4944
4945static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
4946{
4947
4948 if (skb->remcsum_offload)
4949 return;
4950
4951 SKB_GSO_CB(skb)->csum = res;
4952 SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
4953}
4954
4955
4956
4957
4958
4959
4960
4961
4962
4963static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
4964{
4965 unsigned char *csum_start = skb_transport_header(skb);
4966 int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
4967 __wsum partial = SKB_GSO_CB(skb)->csum;
4968
4969 SKB_GSO_CB(skb)->csum = res;
4970 SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;
4971
4972 return csum_fold(csum_partial(csum_start, plen, partial));
4973}
4974
4975static inline bool skb_is_gso(const struct sk_buff *skb)
4976{
4977 return skb_shinfo(skb)->gso_size;
4978}
4979
4980
4981static inline bool skb_is_gso_v6(const struct sk_buff *skb)
4982{
4983 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
4984}
4985
4986
4987static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
4988{
4989 return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
4990}
4991
4992
4993static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
4994{
4995 return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
4996}
4997
4998static inline void skb_gso_reset(struct sk_buff *skb)
4999{
5000 skb_shinfo(skb)->gso_size = 0;
5001 skb_shinfo(skb)->gso_segs = 0;
5002 skb_shinfo(skb)->gso_type = 0;
5003}
5004
5005static inline void skb_increase_gso_size(struct skb_shared_info *shinfo,
5006 u16 increment)
5007{
5008 if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
5009 return;
5010 shinfo->gso_size += increment;
5011}
5012
5013static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo,
5014 u16 decrement)
5015{
5016 if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
5017 return;
5018 shinfo->gso_size -= decrement;
5019}
5020
5021void __skb_warn_lro_forwarding(const struct sk_buff *skb);
5022
5023static inline bool skb_warn_if_lro(const struct sk_buff *skb)
5024{
5025
5026
5027 const struct skb_shared_info *shinfo = skb_shinfo(skb);
5028
5029 if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
5030 unlikely(shinfo->gso_type == 0)) {
5031 __skb_warn_lro_forwarding(skb);
5032 return true;
5033 }
5034 return false;
5035}
5036
5037static inline void skb_forward_csum(struct sk_buff *skb)
5038{
5039
5040 if (skb->ip_summed == CHECKSUM_COMPLETE)
5041 skb->ip_summed = CHECKSUM_NONE;
5042}
5043
5044
5045
5046
5047
5048
5049
5050
5051
5052static inline void skb_checksum_none_assert(const struct sk_buff *skb)
5053{
5054 DEBUG_NET_WARN_ON_ONCE(skb->ip_summed != CHECKSUM_NONE);
5055}
5056
5057bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
5058
5059int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
5060struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
5061 unsigned int transport_len,
5062 __sum16(*skb_chkf)(struct sk_buff *skb));
5063
5064
5065
5066
5067
5068
5069
5070
5071
5072
5073static inline bool skb_head_is_locked(const struct sk_buff *skb)
5074{
5075 return !skb->head_frag || skb_cloned(skb);
5076}
5077
5078
5079
5080
5081
5082
5083
5084
5085
5086
5087static inline __wsum lco_csum(struct sk_buff *skb)
5088{
5089 unsigned char *csum_start = skb_checksum_start(skb);
5090 unsigned char *l4_hdr = skb_transport_header(skb);
5091 __wsum partial;
5092
5093
5094 partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
5095 skb->csum_offset));
5096
5097
5098
5099
5100 return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
5101}
5102
5103static inline bool skb_is_redirected(const struct sk_buff *skb)
5104{
5105 return skb->redirected;
5106}
5107
5108static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
5109{
5110 skb->redirected = 1;
5111#ifdef CONFIG_NET_REDIRECT
5112 skb->from_ingress = from_ingress;
5113 if (skb->from_ingress)
5114 skb_clear_tstamp(skb);
5115#endif
5116}
5117
5118static inline void skb_reset_redirect(struct sk_buff *skb)
5119{
5120 skb->redirected = 0;
5121}
5122
5123static inline bool skb_csum_is_sctp(struct sk_buff *skb)
5124{
5125 return skb->csum_not_inet;
5126}
5127
5128static inline void skb_set_kcov_handle(struct sk_buff *skb,
5129 const u64 kcov_handle)
5130{
5131#ifdef CONFIG_KCOV
5132 skb->kcov_handle = kcov_handle;
5133#endif
5134}
5135
5136static inline u64 skb_get_kcov_handle(struct sk_buff *skb)
5137{
5138#ifdef CONFIG_KCOV
5139 return skb->kcov_handle;
5140#else
5141 return 0;
5142#endif
5143}
5144
5145#ifdef CONFIG_PAGE_POOL
5146static inline void skb_mark_for_recycle(struct sk_buff *skb)
5147{
5148 skb->pp_recycle = 1;
5149}
5150#endif
5151
5152static inline bool skb_pp_recycle(struct sk_buff *skb, void *data)
5153{
5154 if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
5155 return false;
5156 return page_pool_return_skb_page(virt_to_page(data));
5157}
5158
5159#endif
5160#endif /* _LINUX_SKBUFF_H */
5161