/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 */
#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/bvec.h>
#include <linux/cache.h>
#include <linux/rbtree.h>
#include <linux/socket.h>
#include <linux/refcount.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <net/flow_dissector.h>
#include <linux/splice.h>
#include <linux/in6.h>
#include <linux/if_packet.h>
#include <net/flow.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <linux/netfilter/nf_conntrack_common.h>
#endif
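/* Checksum state of a packet, kept in skb->ip_summed:
 *
 * CHECKSUM_NONE:	 the device supplied no checksum information; the
 *			 stack must verify any checksums in software.
 * CHECKSUM_UNNECESSARY: the device already verified the checksum(s);
 *			 skb->csum_level records how many consecutive
 *			 (e.g. encapsulated) checksums were verified.
 * CHECKSUM_COMPLETE:	 the device computed a checksum over the entire
 *			 packet and stored it in skb->csum.
 * CHECKSUM_PARTIAL:	 a checksum must still be computed, over the range
 *			 starting at skb->csum_start and stored at offset
 *			 skb->csum_offset, typically by the transmitting
 *			 device.
 */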
#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

/* Maximum value in skb->csum_level */
#define SKB_MAX_CSUM_LEVEL	3

#define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

struct ahash_request;
struct net_device;
struct scatterlist;
struct pipe_inode_info;
struct iov_iter;
struct napi_struct;
struct bpf_prog;
union bpf_attr;
struct skb_ext;

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
	enum {
		BRNF_PROTO_UNCHANGED,
		BRNF_PROTO_8021Q,
		BRNF_PROTO_PPPOE
	} orig_proto:8;
	u8			pkt_otherhost:1;
	u8			in_prerouting:1;
	u8			bridged_dnat:1;
	__u16			frag_max_size;
	struct net_device	*physindev;

	/* always valid & non-NULL from FORWARD on, for physdev match */
	struct net_device	*physoutdev;
	union {
		/* prerouting: detect dnat in orig/reply direction */
		__be32          ipv4_daddr;
		struct in6_addr ipv6_daddr;

		/* after prerouting + nat detected: store original source
		 * mac since neigh resolution overwrites it, only used while
		 * skb is out in neigh layer.
		 */
		char neigh_header[8];
	};
};
#endif

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
/* Chain in tc_skb_ext will be used to share the tc chain with
 * ovs recirc_id. It will be set to the current chain by tc
 * and read by ovs to recirculate to the correct chain.
 */
struct tc_skb_ext {
	__u32 chain;
	__u16 mru;
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list we
 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
 * buffers which do not start on a page boundary.
 *
 * Since GRO uses frags we allocate at least 16 regardless of page
 * size.
 */
#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif
extern int sysctl_max_skb_frags;

/* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
 * segment using its current segmentation instead.
 */
#define GSO_BY_FRAGS	0xFFFF

typedef struct bio_vec skb_frag_t;

/**
 * skb_frag_size() - Returns the size of a skb fragment
 * @frag: skb fragment
 */
static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->bv_len;
}

/**
 * skb_frag_size_set() - Sets the size of a skb fragment
 * @frag: skb fragment
 * @size: size of fragment
 */
static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->bv_len = size;
}

/**
 * skb_frag_size_add() - Increments the size of a skb fragment by @delta
 * @frag: skb fragment
 * @delta: value to add
 */
static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->bv_len += delta;
}

/**
 * skb_frag_size_sub() - Decrements the size of a skb fragment by @delta
 * @frag: skb fragment
 * @delta: value to subtract
 */
static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->bv_len -= delta;
}

/**
 * skb_frag_must_loop - Test if %p is a high memory page
 * @p: fragment's page
 */
static inline bool skb_frag_must_loop(struct page *p)
{
#if defined(CONFIG_HIGHMEM)
	if (PageHighMem(p))
		return true;
#endif
	return false;
}

/**
 *	skb_frag_foreach_page - loop over pages in a fragment
 *
 *	@f:		skb frag to operate on
 *	@f_off:		offset from start of f->bv_page
 *	@f_len:		length from f_off to loop over
 *	@p:		(temp var) current page
 *	@p_off:		(temp var) offset from start of current page,
 *			non-zero only on first page.
 *	@p_len:		(temp var) length in current page,
 *			< PAGE_SIZE only on first and last page.
 *	@copied:	(temp var) length so far, excluding current p_len.
 *
 *	A fragment can hold a compound page, in which case per-page
 *	operations, notably kmap_atomic, must be called for each
 *	regular page.
 */
#define skb_frag_foreach_page(f, f_off, f_len, p, p_off, p_len, copied)	\
	for (p = skb_frag_page(f) + ((f_off) >> PAGE_SHIFT),		\
	     p_off = (f_off) & (PAGE_SIZE - 1),				\
	     p_len = skb_frag_must_loop(p) ?				\
	     min_t(u32, f_len, PAGE_SIZE - p_off) : f_len,		\
	     copied = 0;						\
	     copied < f_len;						\
	     copied += p_len, p++, p_off = 0,				\
	     p_len = min_t(u32, f_len - copied, PAGE_SIZE))		\
#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since arbitrary point in time
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
};

/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp when queueing packet to NIC */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* device driver supports TX zero-copy buffers */
	SKBTX_DEV_ZEROCOPY = 1 << 3,

	/* generate wifi status information (where possible) */
	SKBTX_WIFI_STATUS = 1 << 4,

	/* This indicates at least one fragment might be overwritten
	 * (as in vmsplice(), sendfile() ...)
	 * If we need to compute a TX checksum, we'll need to copy
	 * all frags to avoid possible bad checksum
	 */
	SKBTX_SHARED_FRAG = 1 << 5,

	/* generate software time stamp when entering packet scheduling */
	SKBTX_SCHED_TSTAMP = 1 << 6,
};

#define SKBTX_ZEROCOPY_FRAG	(SKBTX_DEV_ZEROCOPY | SKBTX_SHARED_FRAG)
#define SKBTX_ANY_SW_TSTAMP	(SKBTX_SW_TSTAMP    | \
				 SKBTX_SCHED_TSTAMP)
#define SKBTX_ANY_TSTAMP	(SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)

/*
 * The callback notifies userspace to release buffers when skb DMA is done in
 * lower device, the skb last reference should be 0 when calling this.
 * The zerocopy_success argument is true if zero copy transmit occurred,
 * false on data copy or out of memory error caused by data copy attempt.
 * The ctx field is used to track device context.
 * The desc field is used to track userspace buffer index.
 */
struct ubuf_info {
	void (*callback)(struct ubuf_info *, bool zerocopy_success);
	union {
		struct {
			unsigned long desc;
			void *ctx;
		};
		struct {
			u32 id;
			u16 len;
			u16 zerocopy:1;
			u32 bytelen;
		};
	};
	refcount_t refcnt;

	struct mmpin {
		struct user_struct *user;
		unsigned int num_pg;
	} mmp;
};
#define skb_uarg(SKB)	((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))

int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
void mm_unaccount_pinned_pages(struct mmpin *mmp);

struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size);
struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
					struct ubuf_info *uarg);

static inline void sock_zerocopy_get(struct ubuf_info *uarg)
{
	refcount_inc(&uarg->refcnt);
}

void sock_zerocopy_put(struct ubuf_info *uarg);
void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);

void sock_zerocopy_callback(struct ubuf_info *uarg, bool success);

int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len);
int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
			     struct msghdr *msg, int len,
			     struct ubuf_info *uarg);

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	__u8		__unused;
	__u8		meta_len;
	__u8		nr_frags;
	__u8		tx_flags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	unsigned int	gso_type;
	u32		tskey;

	/*
	 * Warning : all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void		*destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)

enum {
	SKB_FCLONE_UNAVAILABLE,	/* skb has no fclone (from head_cache) */
	SKB_FCLONE_ORIG,	/* orig skb (from fclone_cache) */
	SKB_FCLONE_CLONE,	/* companion fclone skb (from fclone_cache) */
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 1,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 2,

	SKB_GSO_TCP_FIXEDID = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,

	SKB_GSO_GRE = 1 << 6,

	SKB_GSO_GRE_CSUM = 1 << 7,

	SKB_GSO_IPXIP4 = 1 << 8,

	SKB_GSO_IPXIP6 = 1 << 9,

	SKB_GSO_UDP_TUNNEL = 1 << 10,

	SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,

	SKB_GSO_PARTIAL = 1 << 12,

	SKB_GSO_TUNNEL_REMCSUM = 1 << 13,

	SKB_GSO_SCTP = 1 << 14,

	SKB_GSO_ESP = 1 << 15,

	SKB_GSO_UDP = 1 << 16,

	SKB_GSO_UDP_L4 = 1 << 17,

	SKB_GSO_FRAGLIST = 1 << 18,
};
#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif
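/*
 * struct sk_buff - socket buffer, the core packet metadata object of the
 * network stack.  The payload lives in a separate buffer: @head and @end
 * delimit the allocated buffer, while @data and @tail delimit the
 * currently valid bytes within it.  Fields between headers_start and
 * headers_end are copied as one block by __copy_skb_header().
 */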
struct sk_buff {
	union {
		struct {
			/* These two members must be first. */
			struct sk_buff		*next;
			struct sk_buff		*prev;

			union {
				struct net_device	*dev;
				/* Some protocols might use this space to store
				 * information, while device pointer would be
				 * NULL. UDP receive path is one user.
				 */
				unsigned long		dev_scratch;
			};
		};
		struct rb_node		rbnode; /* used in netem, ip4 defrag, and tcp stack */
		struct list_head	list;
	};

	union {
		struct sock		*sk;
		int			ip_defrag_offset;
	};

	union {
		ktime_t		tstamp;
		u64		skb_mstamp_ns; /* earliest departure time */
	};
	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	union {
		struct {
			unsigned long	_skb_refdst;
			void		(*destructor)(struct sk_buff *skb);
		};
		struct list_head	tcp_tsorted_anchor;
	};

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	unsigned long		 _nfct;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;

	/* Following fields are _not_ copied in __copy_skb_header()
	 * Note that queue_mapping is here mostly to fill a hole.
	 */
	__u16			queue_mapping;

/* if you move cloned around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define CLONED_MASK	(1 << 7)
#else
#define CLONED_MASK	1
#endif
#define CLONED_OFFSET()		offsetof(struct sk_buff, __cloned_offset)

	/* private: */
	__u8			__cloned_offset[0];
	/* public: */
	__u8			cloned:1,
				nohdr:1,
				fclone:2,
				peeked:1,
				head_frag:1,
				pfmemalloc:1;
#ifdef CONFIG_SKB_EXTENSIONS
	__u8			active_extensions;
#endif
	/* fields enclosed in headers_start/headers_end are copied
	 * using a single memcpy() in __copy_skb_header()
	 */
	/* private: */
	__u32			headers_start[0];
	/* public: */

/* if you move pkt_type around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_TYPE_MAX	(7 << 5)
#else
#define PKT_TYPE_MAX	7
#endif
#define PKT_TYPE_OFFSET()	offsetof(struct sk_buff, __pkt_type_offset)

	/* private: */
	__u8			__pkt_type_offset[0];
	/* public: */
	__u8			pkt_type:3;
	__u8			ignore_df:1;
	__u8			nf_trace:1;
	__u8			ip_summed:2;
	__u8			ooo_okay:1;

	__u8			l4_hash:1;
	__u8			sw_hash:1;
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;
	__u8			no_fcs:1;
	/* Indicates the inner headers are valid in the skbuff. */
	__u8			encapsulation:1;
	__u8			encap_hdr_csum:1;
	__u8			csum_valid:1;

#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_VLAN_PRESENT_BIT	7
#else
#define PKT_VLAN_PRESENT_BIT	0
#endif
#define PKT_VLAN_PRESENT_OFFSET()	offsetof(struct sk_buff, __pkt_vlan_present_offset)
	/* private: */
	__u8			__pkt_vlan_present_offset[0];
	/* public: */
	__u8			vlan_present:1;
	__u8			csum_complete_sw:1;
	__u8			csum_level:2;
	__u8			csum_not_inet:1;
	__u8			dst_pending_confirm:1;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif

	__u8			ipvs_property:1;
	__u8			inner_protocol_type:1;
	__u8			remcsum_offload:1;
#ifdef CONFIG_NET_SWITCHDEV
	__u8			offload_fwd_mark:1;
	__u8			offload_l3_fwd_mark:1;
#endif
#ifdef CONFIG_NET_CLS_ACT
	__u8			tc_skip_classify:1;
	__u8			tc_at_ingress:1;
#endif
#ifdef CONFIG_NET_REDIRECT
	__u8			redirected:1;
	__u8			from_ingress:1;
#endif
#ifdef CONFIG_TLS_DEVICE
	__u8			decrypted:1;
#endif

#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#endif

	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	int			skb_iif;
	__u32			hash;
	__be16			vlan_proto;
	__u16			vlan_tci;
#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
	union {
		unsigned int	napi_id;
		unsigned int	sender_cpu;
	};
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32		secmark;
#endif

	union {
		__u32		mark;
		__u32		reserved_tailroom;
	};

	union {
		__be16		inner_protocol;
		__u8		inner_ipproto;
	};

	__u16			inner_transport_header;
	__u16			inner_network_header;
	__u16			inner_mac_header;

	__be16			protocol;
	__u16			transport_header;
	__u16			network_header;
	__u16			mac_header;

	/* private: */
	__u32			headers_end[0];
	/* public: */

	/* These elements must be at the end, see alloc_skb() for details.  */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	refcount_t		users;

#ifdef CONFIG_SKB_EXTENSIONS
	/* only useable after checking ->active_extensions != 0 */
	struct skb_ext		*extensions;
#endif
};
924
925#ifdef __KERNEL__
926
927
928
929
930#define SKB_ALLOC_FCLONE 0x01
931#define SKB_ALLOC_RX 0x02
932#define SKB_ALLOC_NAPI 0x04
933
934
935
936
937
938static inline bool skb_pfmemalloc(const struct sk_buff *skb)
939{
940 return unlikely(skb->pfmemalloc);
941}
942
943
944
945
946
947#define SKB_DST_NOREF 1UL
948#define SKB_DST_PTRMASK ~(SKB_DST_NOREF)

/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of reference taken or not.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check we still are in a
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}

/**
 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * If dst entry is cached, we do not take reference and dst_release
 * will be avoided by refdst_drop. If dst entry is not cached, we take
 * reference, so that last dst_release can destroy the dst immediately.
 */
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
}

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

/**
 * skb_rtable - Returns the skb &rtable
 * @skb: buffer
 */
static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

/* For mangling skb->pkt_type from user space side from applications
 * such as nft, tc, etc, it is only valid to set a type within the
 * range of PACKET_HOST to PACKET_OTHERHOST.
 */
static inline bool skb_pkt_type_ok(u32 ptype)
{
	return ptype <= PACKET_OTHERHOST;
}

/**
 * skb_napi_id - Returns the skb's NAPI id
 * @skb: buffer
 */
static inline unsigned int skb_napi_id(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	return skb->napi_id;
#else
	return 0;
#endif
}

/**
 * skb_unref - decrement the skb's reference count
 * @skb: buffer
 *
 * Returns true if we can free the skb.
 */
static inline bool skb_unref(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return false;
	if (likely(refcount_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!refcount_dec_and_test(&skb->users)))
		return false;

	return true;
}

void skb_release_head_state(struct sk_buff *skb);
void kfree_skb(struct sk_buff *skb);
void kfree_skb_list(struct sk_buff *segs);
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
void skb_tx_error(struct sk_buff *skb);

#ifdef CONFIG_TRACEPOINTS
void consume_skb(struct sk_buff *skb);
#else
static inline void consume_skb(struct sk_buff *skb)
{
	return kfree_skb(skb);
}
#endif

void __consume_stateless_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;

void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
		      bool *fragstolen, int *delta_truesize);

struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
			    int node);
struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size);

/**
 * alloc_skb - allocate a network buffer
 * @size: size to allocate
 * @priority: allocation mask
 *
 * This function is a convenient wrapper around __alloc_skb().
 */
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}

struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
				     unsigned long data_len,
				     int max_page_order,
				     int *errcode,
				     gfp_t gfp_mask);
struct sk_buff *alloc_skb_for_msg(struct sk_buff *first);

/* Layout of fast clones : [skb1][skb2][fclone_ref] */
struct sk_buff_fclones {
	struct sk_buff	skb1;

	struct sk_buff	skb2;

	refcount_t	fclone_ref;
};

/**
 *	skb_fclone_busy - check if fclone is busy
 *	@sk: socket
 *	@skb: buffer
 *
 * Returns true if skb is a fast clone, and its clone is not freed.
 * Some drivers call skb_orphan() in their ndo_start_xmit(),
 * so we also check that this didnt happen.
 */
static inline bool skb_fclone_busy(const struct sock *sk,
				   const struct sk_buff *skb)
{
	const struct sk_buff_fclones *fclones;

	fclones = container_of(skb, struct sk_buff_fclones, skb1);

	return skb->fclone == SKB_FCLONE_ORIG &&
	       refcount_read(&fclones->fclone_ref) > 1 &&
	       fclones->skb2.sk == sk;
}

/**
 * alloc_skb_fclone - allocate a network buffer from fclone cache
 * @size: size to allocate
 * @priority: allocation mask
 *
 * This function is a convenient wrapper around __alloc_skb().
 */
static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
}

struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
void skb_headers_offset_update(struct sk_buff *skb, int off);
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
void skb_copy_header(struct sk_buff *new, const struct sk_buff *old);
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone);
static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
					  gfp_t gfp_mask)
{
	return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
}

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
				     unsigned int headroom);
struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
				int newtailroom, gfp_t priority);
int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
				     int offset, int len);
int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
			      int offset, int len);
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);

/**
 *	skb_pad			-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error.
 */
static inline int skb_pad(struct sk_buff *skb, int pad)
{
	return __skb_pad(skb, pad, true);
}
#define dev_kfree_skb(a)	consume_skb(a)

int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
			 int offset, size_t size);
struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st);
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st);
void skb_abort_seq_read(struct skb_seq_state *st);

unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config);

/*
 * Packet hash types specify the type of hash in skb_set_hash.
 *
 * Hash types refer to the protocol layer addresses which are used to
 * construct a packet's hash. The hashes are used to differentiate or identify
 * flows of the protocol layer for the hash.
 *
 * Properties of hashes:
 *
 * 1) Two packets in different flows have different hash values
 * 2) Two packets in the same flow should have the same hash value
 *
 * A hash at a higher protocol layer is considered to be more specific. A
 * driver should set the most specific hash type possible.
 */
enum pkt_hash_types {
	PKT_HASH_TYPE_NONE,	/* Undefined type */
	PKT_HASH_TYPE_L2,	/* Input: src_MAC, dest_MAC */
	PKT_HASH_TYPE_L3,	/* Input: src_IP, dst_IP */
	PKT_HASH_TYPE_L4,	/* Input: src_IP, dst_IP, src_port, dst_port */
};

static inline void skb_clear_hash(struct sk_buff *skb)
{
	skb->hash = 0;
	skb->sw_hash = 0;
	skb->l4_hash = 0;
}

static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
{
	if (!skb->l4_hash)
		skb_clear_hash(skb);
}

static inline void
__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
{
	skb->l4_hash = is_l4;
	skb->sw_hash = is_sw;
	skb->hash = hash;
}

static inline void
skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
{
	/* Used by drivers to set hash from HW */
	__skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
}

static inline void
__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
{
	__skb_set_hash(skb, hash, true, is_l4);
}

void __skb_get_hash(struct sk_buff *skb);
u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
u32 skb_get_poff(const struct sk_buff *skb);
u32 __skb_get_poff(const struct sk_buff *skb, void *data,
		   const struct flow_keys_basic *keys, int hlen);
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
			    void *data, int hlen_proto);

static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
					int thoff, u8 ip_proto)
{
	return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
}

void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count);

struct bpf_flow_dissector;
bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
		      __be16 proto, int nhoff, int hlen, unsigned int flags);

bool __skb_flow_dissect(const struct net *net,
			const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container,
			void *data, __be16 proto, int nhoff, int hlen,
			unsigned int flags);

static inline bool skb_flow_dissect(const struct sk_buff *skb,
				    struct flow_dissector *flow_dissector,
				    void *target_container, unsigned int flags)
{
	return __skb_flow_dissect(NULL, skb, flow_dissector,
				  target_container, NULL, 0, 0, 0, flags);
}

static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
					      struct flow_keys *flow,
					      unsigned int flags)
{
	memset(flow, 0, sizeof(*flow));
	return __skb_flow_dissect(NULL, skb, &flow_keys_dissector,
				  flow, NULL, 0, 0, 0, flags);
}

static inline bool
skb_flow_dissect_flow_keys_basic(const struct net *net,
				 const struct sk_buff *skb,
				 struct flow_keys_basic *flow, void *data,
				 __be16 proto, int nhoff, int hlen,
				 unsigned int flags)
{
	memset(flow, 0, sizeof(*flow));
	return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow,
				  data, proto, nhoff, hlen, flags);
}

void skb_flow_dissect_meta(const struct sk_buff *skb,
			   struct flow_dissector *flow_dissector,
			   void *target_container);

/* Gets a skb connection tracking info, ctinfo map should be a
 * map of mapsize to translate enum ip_conntrack_info states
 * to user states.
 */
void
skb_flow_dissect_ct(const struct sk_buff *skb,
		    struct flow_dissector *flow_dissector,
		    void *target_container,
		    u16 *ctinfo_map,
		    size_t mapsize);
void
skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
			     struct flow_dissector *flow_dissector,
			     void *target_container);

void skb_flow_dissect_hash(const struct sk_buff *skb,
			   struct flow_dissector *flow_dissector,
			   void *target_container);

static inline __u32 skb_get_hash(struct sk_buff *skb)
{
	if (!skb->l4_hash && !skb->sw_hash)
		__skb_get_hash(skb);

	return skb->hash;
}

static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
{
	if (!skb->l4_hash && !skb->sw_hash) {
		struct flow_keys keys;
		__u32 hash = __get_hash_from_flowi6(fl6, &keys);

		__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
	}

	return skb->hash;
}

__u32 skb_get_hash_perturb(const struct sk_buff *skb,
			   const siphash_key_t *perturb);

static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
	return skb->hash;
}

static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
{
	to->hash = from->hash;
	to->sw_hash = from->sw_hash;
	to->l4_hash = from->l4_hash;
}

static inline void skb_copy_decrypted(struct sk_buff *to,
				      const struct sk_buff *from)
{
#ifdef CONFIG_TLS_DEVICE
	to->decrypted = from->decrypted;
#endif
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end - skb->head;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
{
	bool is_zcopy = skb && skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY;

	return is_zcopy ? skb_uarg(skb) : NULL;
}

static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
				 bool *have_ref)
{
	if (skb && uarg && !skb_zcopy(skb)) {
		if (unlikely(have_ref && *have_ref))
			*have_ref = false;
		else
			sock_zerocopy_get(uarg);
		skb_shinfo(skb)->destructor_arg = uarg;
		skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
	}
}

static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
{
	skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
	skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
}

static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
{
	return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL;
}

static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
{
	return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
}

/* Release a reference on a zerocopy structure */
static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
{
	struct ubuf_info *uarg = skb_zcopy(skb);

	if (uarg) {
		if (skb_zcopy_is_nouarg(skb)) {
			/* no notification callback */
		} else if (uarg->callback == sock_zerocopy_callback) {
			uarg->zerocopy = uarg->zerocopy && zerocopy;
			sock_zerocopy_put(uarg);
		} else {
			uarg->callback(uarg, zerocopy);
		}

		skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
	}
}

/* Abort a zerocopy operation and revert zckey on error in send syscall */
static inline void skb_zcopy_abort(struct sk_buff *skb)
{
	struct ubuf_info *uarg = skb_zcopy(skb);

	if (uarg) {
		sock_zerocopy_put_abort(uarg, false);
		skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
	}
}

static inline void skb_mark_not_on_list(struct sk_buff *skb)
{
	skb->next = NULL;
}

/* Iterate through singly-linked GSO fragments of an skb. */
#define skb_list_walk_safe(first, skb, next_skb)                               \
	for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb); \
	     (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL)
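
/* Example (illustrative sketch, not part of this header's API): freeing
 * each segment of a GSO list safely while unlinking.  Assumes "segs" is
 * the head skb returned by a segmentation routine.
 *
 *	struct sk_buff *seg, *next;
 *
 *	skb_list_walk_safe(segs, seg, next) {
 *		skb_mark_not_on_list(seg);
 *		consume_skb(seg);
 *	}
 */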

static inline void skb_list_del_init(struct sk_buff *skb)
{
	__list_del_entry(&skb->list);
	skb_mark_not_on_list(skb);
}

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (const struct sk_buff *) list;
}

/**
 *	skb_queue_empty_lockless - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 *	This variant can be used in lockless contexts.
 */
static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
{
	return READ_ONCE(list->next) == (const struct sk_buff *) list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (const struct sk_buff *) list;
}

/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (const struct sk_buff *) list;
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb.  It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb.  It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	refcount_inc(&skb->users);
	return skb;
}

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_header_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

/**
 * __skb_header_release - release reference to header
 * @skb: buffer to operate on
 */
static inline void __skb_header_release(struct sk_buff *skb)
{
	skb->nohdr = 1;
	atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return refcount_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt status or with spinlocks held pri must
 *	be GFP_ATOMIC.
 *
 *	NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);

		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet thats being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);

		/* Free our shared copy */
		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->next;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	__skb_peek - peek at the head of a non-empty &sk_buff_head
 *	@list_: list to peek at
 *
 *	Like skb_peek(), but the caller knows that the list is not empty.
 */
static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_)
{
	return list_->next;
}

/**
 *	skb_peek_next - peek skb following the given one from a queue
 *	@skb: skb to start from
 *	@list_: list to peek at
 *
 *	Returns %NULL when the end of the list is met or a pointer to the
 *	next element. The reference count is not incremented and the
 *	reference is therefore volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
					    const struct sk_buff_head *list_)
{
	struct sk_buff *next = skb->next;

	if (next == (struct sk_buff *)list_)
		next = NULL;
	return next;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = READ_ONCE(list_->prev);

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	skb_queue_len_lockless	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 *	This variant can be used in lockless contexts.
 */
static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_)
{
	return READ_ONCE(list_->qlen);
}

/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object.  This allows to initialize the list
 *	aspects of an sk_buff_head without reinitializing things like
 *	the spinlock.  It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known to not be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers should need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
					     struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}
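
/* Example (illustrative sketch, not part of this header's API): a
 * minimal producer/consumer pattern built on sk_buff_head.  Assumes the
 * queue is shared between contexts, so the locked enqueue/dequeue
 * helpers are used rather than the __skb_* variants.
 *
 *	static struct sk_buff_head rx_queue;
 *
 *	skb_queue_head_init(&rx_queue);		// once, at setup time
 *	skb_queue_tail(&rx_queue, skb);		// producer context
 *	skb = skb_dequeue(&rx_queue);		// consumer; NULL if empty
 */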

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	/* See skb_queue_empty_lockless() and skb_peek_tail()
	 * for the opposite READ_ONCE()
	 */
	WRITE_ONCE(newsk->next, next);
	WRITE_ONCE(newsk->prev, prev);
	WRITE_ONCE(next->prev, newsk);
	WRITE_ONCE(prev->next, newsk);
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	WRITE_ONCE(first->prev, prev);
	WRITE_ONCE(prev->next, first);

	WRITE_ONCE(last->next, next);
	WRITE_ONCE(next->prev, last);
}

/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	__skb_queue_after - queue a buffer at the list head
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	WRITE_ONCE(list->qlen, list->qlen - 1);
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	WRITE_ONCE(next->prev, prev);
	WRITE_ONCE(prev->next, next);
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
struct sk_buff *skb_dequeue(struct sk_buff_head *list);

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);

static inline bool skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline unsigned int __skb_pagelen(const struct sk_buff *skb)
{
	unsigned int i, len = 0;

	for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
	return len;
}

static inline unsigned int skb_pagelen(const struct sk_buff *skb)
{
	return skb_headlen(skb) + __skb_pagelen(skb);
}

/**
 * __skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data with @page
 * @size: the length of the data
 *
 * Initialises the @i'th fragment of @skb to point to &size bytes at
 * offset @off within @page.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	/*
	 * Propagate page pfmemalloc to the skb if we can. The problem is
	 * that not all callers have unique ownership of the page but rely
	 * on page_is_pfmemalloc doing the right thing(tm).
	 */
	frag->bv_page		  = page;
	frag->bv_offset		  = off;
	skb_frag_size_set(frag, size);

	page = compound_head(page);
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc	= true;
}

/**
 * skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data with @page
 * @size: the length of the data
 *
 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
 * @skb to point to @size bytes at offset @off within @page. In
 * addition updates @skb such that @i is the last fragment.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	__skb_fill_page_desc(skb, i, page, off, size);
	skb_shinfo(skb)->nr_frags = i + 1;
}
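
/* Example (illustrative sketch, not part of this header's API):
 * attaching one whole page of payload as fragment 0.  The caller is
 * responsible for updating the skb length accounting; skb_add_rx_frag()
 * below performs both steps in one call.
 *
 *	skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
 *	skb->len      += PAGE_SIZE;
 *	skb->data_len += PAGE_SIZE;
 *	skb->truesize += PAGE_SIZE;
 */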

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize);

#define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
void *skb_put(struct sk_buff *skb, unsigned int len);
static inline void *__skb_put(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = __skb_put(skb, len);

	memset(tmp, 0, len);
	return tmp;
}

static inline void *__skb_put_data(struct sk_buff *skb, const void *data,
				   unsigned int len)
{
	void *tmp = __skb_put(skb, len);

	memcpy(tmp, data, len);
	return tmp;
}

static inline void __skb_put_u8(struct sk_buff *skb, u8 val)
{
	*(u8 *)__skb_put(skb, 1) = val;
}

static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memset(tmp, 0, len);

	return tmp;
}

static inline void *skb_put_data(struct sk_buff *skb, const void *data,
				 unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memcpy(tmp, data, len);

	return tmp;
}

static inline void skb_put_u8(struct sk_buff *skb, u8 val)
{
	*(u8 *)skb_put(skb, 1) = val;
}

void *skb_push(struct sk_buff *skb, unsigned int len);
static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

void *skb_pull(struct sk_buff *skb, unsigned int len);
static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

void *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return true;
	if (unlikely(len > skb->len))
		return false;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
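
/* Example (illustrative sketch, not part of this header's API): making
 * sure a protocol header is in the linear area before reading it.
 * Assumes the IPv4 header type and the ip_hdr() accessor from
 * <linux/ip.h>.
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;	// truncated packet
 *	iph = ip_hdr(skb);	// header bytes are now linear
 */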

void skb_condense(struct sk_buff *skb);

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_availroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 *	allocated by sk_stream_alloc()
 */
static inline int skb_availroom(const struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb))
		return 0;

	return skb->end - skb->tail - skb->reserved_tailroom;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}
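
/* Example (illustrative sketch, not part of this header's API): the
 * canonical allocate/reserve/fill sequence for building a packet.
 * Assumes a caller-provided "payload" buffer of "len" bytes.
 *
 *	struct sk_buff *skb;
 *
 *	skb = alloc_skb(NET_SKB_PAD + len, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, NET_SKB_PAD);		// headroom for headers
 *	skb_put_data(skb, payload, len);	// append payload bytes
 */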

/**
 *	skb_tailroom_reserve - adjust reserved_tailroom
 *	@skb: buffer to alter
 *	@mtu: maximum amount of headlen permitted
 *	@needed_tailroom: minimum amount of reserved_tailroom
 *
 *	Set reserved_tailroom so that headlen can be as large as possible but
 *	not larger than mtu and tailroom cannot be smaller than
 *	needed_tailroom.
 *	The required headroom should already have been reserved before using
 *	this function.
 */
static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
					unsigned int needed_tailroom)
{
	SKB_LINEAR_ASSERT(skb);
	if (mtu < skb_tailroom(skb) - needed_tailroom)
		/* use at most mtu */
		skb->reserved_tailroom = skb_tailroom(skb) - mtu;
	else
		/* use up to all available space */
		skb->reserved_tailroom = needed_tailroom;
}
#define ENCAP_TYPE_ETHER	0
#define ENCAP_TYPE_IPPROTO	1

static inline void skb_set_inner_protocol(struct sk_buff *skb,
					  __be16 protocol)
{
	skb->inner_protocol = protocol;
	skb->inner_protocol_type = ENCAP_TYPE_ETHER;
}

static inline void skb_set_inner_ipproto(struct sk_buff *skb,
					 __u8 ipproto)
{
	skb->inner_ipproto = ipproto;
	skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
}

static inline void skb_reset_inner_headers(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->mac_header;
	skb->inner_network_header = skb->network_header;
	skb->inner_transport_header = skb->transport_header;
}

static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}

static inline unsigned char *skb_inner_transport_header(const struct sk_buff
							*skb)
{
	return skb->head + skb->inner_transport_header;
}

static inline int skb_inner_transport_offset(const struct sk_buff *skb)
{
	return skb_inner_transport_header(skb) - skb->data;
}

static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
	skb->inner_transport_header = skb->data - skb->head;
}

static inline void skb_set_inner_transport_header(struct sk_buff *skb,
						  const int offset)
{
	skb_reset_inner_transport_header(skb);
	skb->inner_transport_header += offset;
}

static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_network_header;
}

static inline void skb_reset_inner_network_header(struct sk_buff *skb)
{
	skb->inner_network_header = skb->data - skb->head;
}

static inline void skb_set_inner_network_header(struct sk_buff *skb,
						const int offset)
{
	skb_reset_inner_network_header(skb);
	skb->inner_network_header += offset;
}

static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_mac_header;
}

static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->data - skb->head;
}

static inline void skb_set_inner_mac_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_inner_mac_header(skb);
	skb->inner_mac_header += offset;
}

static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
{
	return skb->transport_header != (typeof(skb->transport_header))~0U;
}

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_offset(const struct sk_buff *skb)
{
	return skb_mac_header(skb) - skb->data;
}

static inline u32 skb_mac_header_len(const struct sk_buff *skb)
{
	return skb->network_header - skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != (typeof(skb->mac_header))~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

static inline void skb_pop_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->network_header;
}

static inline void skb_probe_transport_header(struct sk_buff *skb)
{
	struct flow_keys_basic keys;

	if (skb_transport_header_was_set(skb))
		return;

	if (skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
					     NULL, 0, 0, 0, 0))
		skb_set_transport_header(skb, keys.control.thoff);
}

static inline void skb_mac_header_rebuild(struct sk_buff *skb)
{
	if (skb_mac_header_was_set(skb)) {
		const unsigned char *old_mac = skb_mac_header(skb);

		skb_set_mac_header(skb, -skb->mac_len);
		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
	}
}

static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}

static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
{
	return skb->head + skb->csum_start;
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
{
	return skb->inner_transport_header - skb->inner_network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline int skb_inner_network_offset(const struct sk_buff *skb)
{
	return skb_inner_network_header(skb) - skb->data;
}

static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
	return pskb_may_pull(skb, skb_network_offset(skb) + len);
}

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix
 * it in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 32 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom, you should not reduce this.
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif

int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
{
	if (WARN_ON(skb_is_nonlinear(skb)))
		return;
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	__skb_set_length(skb, len);
}

void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
{
	unsigned int diff = len - skb->len;

	if (skb_tailroom(skb) < diff) {
		int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
					   GFP_ATOMIC);
		if (ret)
			return ret;
	}
	__skb_set_length(skb, len);
	return 0;
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor) {
		skb->destructor(skb);
		skb->destructor = NULL;
		skb->sk		= NULL;
	} else {
		BUG_ON(skb->sk);
	}
}

/**
 *	skb_orphan_frags - orphan the frags contained in a buffer
 *	@skb: buffer to orphan frags from
 *	@gfp_mask: allocation mask for replacement pages
 *
 *	For each frag in the SKB which needs a destructor (i.e. has an
 *	owner) create a copy of that frag and release the original
 *	page by calling the destructor.
 */
static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (likely(!skb_zcopy(skb)))
		return 0;
	if (!skb_zcopy_is_nouarg(skb) &&
	    skb_uarg(skb)->callback == sock_zerocopy_callback)
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

/* Frags must be orphaned, even if refcounted, if skb might loop to rx path */
static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (likely(!skb_zcopy(skb)))
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
void skb_queue_purge(struct sk_buff_head *list);

unsigned int skb_rbtree_purge(struct rb_root *root);

void *netdev_alloc_frag(unsigned int fragsz);

struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
				   gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
					       unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

/* legacy helper around __netdev_alloc_skb() */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	return __netdev_alloc_skb(NULL, length, gfp_mask);
}

/* legacy helper around netdev_alloc_skb() */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return netdev_alloc_skb(NULL, length);
}

static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}

static inline void skb_free_frag(void *addr)
{
	page_frag_free(addr);
}

void *napi_alloc_frag(unsigned int fragsz);
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
				 unsigned int length, gfp_t gfp_mask);
static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
					     unsigned int length)
{
	return __napi_alloc_skb(napi, length, GFP_ATOMIC);
}
void napi_consume_skb(struct sk_buff *skb, int budget);

void __kfree_skb_flush(void);
void __kfree_skb_defer(struct sk_buff *skb);

/**
 * __dev_alloc_pages - allocate page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 * @order: size of the allocation
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
					     unsigned int order)
{
	/* This piece of code contains several assumptions.
	 * 1.  This is for device Rx, therefor a cold page is preferred.
	 * 2.  The expectation is the user wants a compound page.
	 * 3.  If requesting a order 0 page it will not be compound
	 *     due to the check to see if order has a value in prep_new_page
	 * 4.  __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
	 *     code in gfp_to_alloc_flags that should be enforcing this.
	 */
	gfp_mask |= __GFP_COMP | __GFP_MEMALLOC;

	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
}

static inline struct page *dev_alloc_pages(unsigned int order)
{
	return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
}

/**
 * __dev_alloc_page - allocate a page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
{
	return __dev_alloc_pages(gfp_mask, 0);
}

static inline struct page *dev_alloc_page(void)
{
	return dev_alloc_pages(0);
}

/**
 *	skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
 *	@page: The page that was allocated from skb_alloc_page
 *	@skb: The skb that may need pfmemalloc set
 */
static inline void skb_propagate_pfmemalloc(struct page *page,
					    struct sk_buff *skb)
{
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc = true;
}

/**
 * skb_frag_off() - Returns the offset of a skb fragment
 * @frag: the paged fragment
 */
static inline unsigned int skb_frag_off(const skb_frag_t *frag)
{
	return frag->bv_offset;
}

/**
 * skb_frag_off_add() - Increments the offset of a skb fragment by @delta
 * @frag: skb fragment
 * @delta: value to add
 */
static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
{
	frag->bv_offset += delta;
}

/**
 * skb_frag_off_set() - Sets the offset of a skb fragment
 * @frag: skb fragment
 * @offset: offset of fragment
 */
static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset)
{
	frag->bv_offset = offset;
}

/**
 * skb_frag_off_copy() - Sets the offset of a skb fragment from another fragment
 * @fragto: skb fragment where offset is set
 * @fragfrom: skb fragment offset is copied from
 */
static inline void skb_frag_off_copy(skb_frag_t *fragto,
				     const skb_frag_t *fragfrom)
{
	fragto->bv_offset = fragfrom->bv_offset;
}

/**
 * skb_frag_page - retrieve the page referred to by a paged fragment
 * @frag: the paged fragment
 *
 * Returns the &struct page associated with @frag.
 */
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
	return frag->bv_page;
}

/**
 * __skb_frag_ref - take an addition reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Takes an additional reference on the paged fragment @frag.
 */
static inline void __skb_frag_ref(skb_frag_t *frag)
{
	get_page(skb_frag_page(frag));
}

/**
 * skb_frag_ref - take an addition reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset.
 *
 * Takes an additional reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_ref(struct sk_buff *skb, int f)
{
	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}

/**
 * __skb_frag_unref - release a reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Releases a reference on the paged fragment @frag.
 */
static inline void __skb_frag_unref(skb_frag_t *frag)
{
	put_page(skb_frag_page(frag));
}

/**
 * skb_frag_unref - release a reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset
 *
 * Releases a reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_unref(struct sk_buff *skb, int f)
{
	__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
}
3028
3029
3030
3031
3032
3033
3034
3035
3036static inline void *skb_frag_address(const skb_frag_t *frag)
3037{
3038 return page_address(skb_frag_page(frag)) + skb_frag_off(frag);
3039}

/**
 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. Checks that the page
 * is mapped and returns %NULL otherwise.
 */
static inline void *skb_frag_address_safe(const skb_frag_t *frag)
{
	void *ptr = page_address(skb_frag_page(frag));
	if (unlikely(!ptr))
		return NULL;

	return ptr + skb_frag_off(frag);
}

/**
 * skb_frag_page_copy() - sets the page in a fragment from another fragment
 * @fragto: skb fragment where page is set
 * @fragfrom: skb fragment page is copied from
 */
static inline void skb_frag_page_copy(skb_frag_t *fragto,
				      const skb_frag_t *fragfrom)
{
	fragto->bv_page = fragfrom->bv_page;
}

/**
 * __skb_frag_set_page - sets the page contained in a paged fragment
 * @frag: the paged fragment
 * @page: the page to set
 *
 * Sets the fragment @frag to contain @page.
 */
static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
	frag->bv_page = page;
}

/**
 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
 * @skb: the buffer
 * @f: the fragment offset
 * @page: the page to set
 *
 * Sets the @f'th fragment of @skb to contain @page.
 */
static inline void skb_frag_set_page(struct sk_buff *skb, int f,
				     struct page *page)
{
	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
}

bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);

/**
 * skb_frag_dma_map - maps a paged fragment via the DMA API
 * @dev: the device to map the fragment to
 * @frag: the paged fragment to map
 * @offset: the offset within the fragment (starting at the
 *          fragment's own offset)
 * @size: the number of bytes to map
 * @dir: the direction of the mapping (``DMA_TO_DEVICE`` etc.)
 *
 * Maps the page associated with @frag to @dev.
 */
static inline dma_addr_t skb_frag_dma_map(struct device *dev,
					  const skb_frag_t *frag,
					  size_t offset, size_t size,
					  enum dma_data_direction dir)
{
	return dma_map_page(dev, skb_frag_page(frag),
			    skb_frag_off(frag) + offset, size, dir);
}
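
/* Usage sketch (not part of this header): a hypothetical TX path maps
 * every paged fragment of a nonlinear skb for device DMA; unmapping of
 * already-mapped fragments on error is elided.
 *
 *	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 *		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 *		dma_addr_t dma;
 *
 *		dma = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
 *				       DMA_TO_DEVICE);
 *		if (dma_mapping_error(dev, dma))
 *			goto unwind;
 *	}
 */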

static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
					gfp_t gfp_mask)
{
	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
}

static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
						  gfp_t gfp_mask)
{
	return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
}

/**
 * skb_clone_writable - is the header of a clone writable
 * @skb: buffer to check
 * @len: length up to which to write
 *
 * Returns true if modifying the header part of the cloned buffer
 * does not require the data to be copied.
 */
static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int skb_try_make_writable(struct sk_buff *skb,
					unsigned int write_len)
{
	return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
	       pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/**
 * skb_cow - copy header of skb when it is required
 * @skb: buffer to cow
 * @headroom: needed headroom
 *
 * If the skb passed lacks sufficient headroom or its data part
 * is shared, data is reallocated. If reallocation fails, an error
 * is returned and the original skb is not changed.
 *
 * The result is skb with writable area skb->head...skb->tail
 * and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 * skb_cow_head - skb_cow but only making the head writable
 * @skb: buffer to cow
 * @headroom: needed headroom
 *
 * This function is identical to skb_cow except that we replace the
 * skb_cloned check by skb_header_cloned.  It should be used when
 * you only need to push on some header and do not need to modify
 * the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}
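
/* Usage sketch (not part of this header): before pushing an
 * encapsulation header, make sure the (possibly cloned) head is
 * private and has room; hdr and hdr_len are hypothetical.
 *
 *	if (skb_cow_head(skb, hdr_len))
 *		goto drop;
 *	hdr = skb_push(skb, hdr_len);
 */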

/**
 * skb_padto - pad an skbuff up to a minimal size
 * @skb: buffer to pad
 * @len: minimal length
 *
 * Pads up a buffer to ensure the trailing bytes exist and are
 * blanked. If the buffer already contains sufficient data it
 * is untouched. Otherwise it is extended. Returns zero on
 * success. The skb is freed on error.
 */
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

/**
 * __skb_put_padto - increase size and pad an skbuff up to a minimal size
 * @skb: buffer to pad
 * @len: minimal length
 * @free_on_error: free buffer on error
 *
 * Pads up a buffer to ensure the trailing bytes exist and are
 * blanked. If the buffer already contains sufficient data it
 * is untouched. Otherwise it is extended. Returns zero on
 * success. The skb is freed on error if @free_on_error is true.
 */
static inline int __must_check __skb_put_padto(struct sk_buff *skb,
					       unsigned int len,
					       bool free_on_error)
{
	unsigned int size = skb->len;

	if (unlikely(size < len)) {
		len -= size;
		if (__skb_pad(skb, len, free_on_error))
			return -ENOMEM;
		__skb_put(skb, len);
	}
	return 0;
}

/**
 * skb_put_padto - increase size and pad an skbuff up to a minimal size
 * @skb: buffer to pad
 * @len: minimal length
 *
 * Pads up a buffer to ensure the trailing bytes exist and are
 * blanked. If the buffer already contains sufficient data it
 * is untouched. Otherwise it is extended. Returns zero on
 * success. The skb is freed on error.
 */
static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len)
{
	return __skb_put_padto(skb, len, true);
}
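
/* Usage sketch (not part of this header): the classic use is padding
 * runt frames to the 60 byte ethernet minimum in ndo_start_xmit. On
 * failure the skb has already been freed, so it must not be touched
 * again.
 *
 *	if (skb_put_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;
 */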

static inline int skb_add_data(struct sk_buff *skb,
			       struct iov_iter *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		__wsum csum = 0;
		if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
						 &csum, from)) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
				    const struct page *page, int off)
{
	if (skb_zcopy(skb))
		return false;
	if (i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == skb_frag_page(frag) &&
		       off == skb_frag_off(frag) + skb_frag_size(frag);
	}
	return false;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 * skb_linearize - convert paged skb to linear one
 * @skb: buffer to linearize
 *
 * If there is no free memory -ENOMEM is returned, otherwise zero
 * is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}
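
/* Usage sketch (not part of this header): flatten a nonlinear skb when
 * a consumer needs the whole packet contiguous at skb->data;
 * consume_packet() is hypothetical.
 *
 *	if (skb_linearize(skb))
 *		goto drop;
 *	consume_packet(skb->data, skb->len);
 */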

/**
 * skb_has_shared_frag - can any frag be overwritten
 * @skb: buffer to test
 *
 * Return true if the skb has at least one frag that might be modified
 * by an external entity (as in vmsplice()/sendfile())
 */
static inline bool skb_has_shared_frag(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) &&
	       skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
}

/**
 * skb_linearize_cow - make sure skb is linear and writable
 * @skb: buffer to process
 *
 * If there is no free memory -ENOMEM is returned, otherwise zero
 * is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

static __always_inline void
__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
		     unsigned int off)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_block_sub(skb->csum,
					   csum_partial(start, len, 0), off);
	else if (skb->ip_summed == CHECKSUM_PARTIAL &&
		 skb_checksum_start_offset(skb) < 0)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_postpull_rcsum - update checksum for received skb after pull
 * @skb: buffer to update
 * @start: start of data before pull
 * @len: length of data pulled
 *
 * After doing a pull on a received packet, you need to call this to
 * update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 * CHECKSUM_NONE so that it can be recomputed from scratch.
 */
static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	__skb_postpull_rcsum(skb, start, len, 0);
}

static __always_inline void
__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
		     unsigned int off)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_block_add(skb->csum,
					   csum_partial(start, len, 0), off);
}

/**
 * skb_postpush_rcsum - update checksum for received skb after push
 * @skb: buffer to update
 * @start: start of data after push
 * @len: length of data pushed
 *
 * After doing a push on a received packet, you need to call this to
 * update the CHECKSUM_COMPLETE checksum.
 */
static inline void skb_postpush_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	__skb_postpush_rcsum(skb, start, len, 0);
}

void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 * skb_push_rcsum - push skb and update receive checksum
 * @skb: buffer to update
 * @len: length of data pushed
 *
 * This function performs an skb_push on the packet and updates
 * the CHECKSUM_COMPLETE checksum.  It should be used on
 * receive path processing instead of skb_push unless you know
 * that the checksum difference is zero (e.g., a valid IP header)
 * or you are setting ip_summed to CHECKSUM_NONE.
 */
static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
{
	skb_push(skb, len);
	skb_postpush_rcsum(skb, skb->data, len);
	return skb->data;
}

int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);

/**
 * pskb_trim_rcsum - trim received skb and update checksum
 * @skb: buffer to trim
 * @len: new length
 *
 * This is exactly the same as pskb_trim except that it ensures the
 * checksum of received packets remains valid after the operation.
 * It can change skb pointers.
 */
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	return pskb_trim_rcsum_slow(skb, len);
}
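
/* Usage sketch (not part of this header): trim trailing link-layer
 * padding without invalidating a CHECKSUM_COMPLETE value; payload_len
 * is hypothetical.
 *
 *	if (pskb_trim_rcsum(skb, payload_len))
 *		goto drop;
 */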

static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	__skb_trim(skb, len);
	return 0;
}

static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __skb_grow(skb, len);
}

#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
#define skb_rb_first(root) rb_to_skb(rb_first(root))
#define skb_rb_last(root)  rb_to_skb(rb_last(root))
#define skb_rb_next(skb)   rb_to_skb(rb_next(&(skb)->rbnode))
#define skb_rb_prev(skb)   rb_to_skb(rb_prev(&(skb)->rbnode))

#define skb_queue_walk(queue, skb)					\
		for (skb = (queue)->next;				\
		     skb != (struct sk_buff *)(queue);			\
		     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)				\
		for (skb = (queue)->next, tmp = skb->next;		\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->next)

#define skb_queue_walk_from(queue, skb)					\
		for (; skb != (struct sk_buff *)(queue);		\
		     skb = skb->next)

#define skb_rbtree_walk(skb, root)					\
		for (skb = skb_rb_first(root); skb != NULL;		\
		     skb = skb_rb_next(skb))

#define skb_rbtree_walk_from(skb)					\
		for (; skb != NULL;					\
		     skb = skb_rb_next(skb))

#define skb_rbtree_walk_from_safe(skb, tmp)				\
		for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \
		     skb = tmp)

#define skb_queue_walk_from_safe(queue, skb, tmp)			\
		for (tmp = skb->next;					\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb)				\
		for (skb = (queue)->prev;				\
		     skb != (struct sk_buff *)(queue);			\
		     skb = skb->prev)

#define skb_queue_reverse_walk_safe(queue, skb, tmp)			\
		for (skb = (queue)->prev, tmp = skb->prev;		\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->prev)

#define skb_queue_reverse_walk_from_safe(queue, skb, tmp)		\
		for (tmp = skb->prev;					\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->prev)
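
/* Usage sketch (not part of this header): a _safe variant must be used
 * when unlinking inside the loop, since skb->next changes under us;
 * match() is hypothetical and the caller holds the queue lock.
 *
 *	skb_queue_walk_safe(queue, skb, tmp) {
 *		if (match(skb)) {
 *			__skb_unlink(skb, queue);
 *			kfree_skb(skb);
 *		}
 *	}
 */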

static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

#define skb_walk_frags(skb, iter)	\
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)

int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
				int *err, long *timeo_p,
				const struct sk_buff *skb);
struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
					  struct sk_buff_head *queue,
					  unsigned int flags,
					  int *off, int *err,
					  struct sk_buff **last);
struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
					struct sk_buff_head *queue,
					unsigned int flags, int *off, int *err,
					struct sk_buff **last);
struct sk_buff *__skb_recv_datagram(struct sock *sk,
				    struct sk_buff_head *sk_queue,
				    unsigned int flags, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
				  int *err);
__poll_t datagram_poll(struct file *file, struct socket *sock,
		       struct poll_table_struct *wait);
int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
			   struct iov_iter *to, int size);
static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
					struct msghdr *msg, int size)
{
	return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
}
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
				   struct msghdr *msg);
int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
				    struct iov_iter *to, int len,
				    struct ahash_request *hash);
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
				struct iov_iter *from, int len);
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
static inline void skb_free_datagram_locked(struct sock *sk,
					    struct sk_buff *skb)
{
	__skb_free_datagram_locked(sk, skb, 0);
}
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
			      int len, __wsum csum);
int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int len,
		    unsigned int flags);
int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
			 int len);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
		 int len, int hlen);
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
void skb_scrub_packet(struct sk_buff *skb, bool xnet);
bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features,
				 unsigned int offset);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, int write_len);
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
		  int mac_len, bool ethernet);
int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
		 bool ethernet);
int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
int skb_mpls_dec_ttl(struct sk_buff *skb);
struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
			     gfp_t gfp);

static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
{
	return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT;
}

static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
{
	return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
}

struct skb_checksum_ops {
	__wsum (*update)(const void *mem, int len, __wsum wsum);
	__wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
};

extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly;

__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
		      __wsum csum, const struct skb_checksum_ops *ops);
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
		    __wsum csum);

static inline void * __must_check
__skb_header_pointer(const struct sk_buff *skb, int offset,
		     int len, void *data, int hlen, void *buffer)
{
	if (hlen - offset >= len)
		return data + offset;

	if (!skb ||
	    skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}

static inline void * __must_check
skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
{
	return __skb_header_pointer(skb, offset, len, skb->data,
				    skb_headlen(skb), buffer);
}
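
/* Usage sketch (not part of this header): read a header that may span
 * paged fragments; the on-stack copy is only used when the bytes are
 * not linearly available.
 *
 *	struct tcphdr _th;
 *	const struct tcphdr *th;
 *
 *	th = skb_header_pointer(skb, skb_transport_offset(skb),
 *				sizeof(_th), &_th);
 *	if (!th)
 *		goto drop;
 */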

/**
 * skb_needs_linearize - check if we need to linearize a given skb
 *			 depending on the given device features.
 * @skb: socket buffer to check
 * @features: net device features
 *
 * Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG.
 */
static inline bool skb_needs_linearize(struct sk_buff *skb,
				       netdev_features_t features)
{
	return skb_is_nonlinear(skb) &&
	       ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
		(skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
}

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}

void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}

/**
 * skb_get_timestamp - get timestamp from a skb
 * @skb: skb to get stamp from
 * @stamp: pointer to struct __kernel_old_timeval to store stamp in
 *
 * Timestamps are stored in the skb as offsets to a base timestamp.
 * This function converts the offset back to a struct timeval and stores
 * it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct __kernel_old_timeval *stamp)
{
	*stamp = ns_to_kernel_old_timeval(skb->tstamp);
}

static inline void skb_get_new_timestamp(const struct sk_buff *skb,
					 struct __kernel_sock_timeval *stamp)
{
	struct timespec64 ts = ktime_to_timespec64(skb->tstamp);

	stamp->tv_sec = ts.tv_sec;
	stamp->tv_usec = ts.tv_nsec / 1000;
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
				       struct __kernel_old_timespec *stamp)
{
	struct timespec64 ts = ktime_to_timespec64(skb->tstamp);

	stamp->tv_sec = ts.tv_sec;
	stamp->tv_nsec = ts.tv_nsec;
}

static inline void skb_get_new_timestampns(const struct sk_buff *skb,
					    struct __kernel_timespec *stamp)
{
	struct timespec64 ts = ktime_to_timespec64(skb->tstamp);

	stamp->tv_sec = ts.tv_sec;
	stamp->tv_nsec = ts.tv_nsec;
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return 0;
}

static inline u8 skb_metadata_len(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->meta_len;
}

static inline void *skb_metadata_end(const struct sk_buff *skb)
{
	return skb_mac_header(skb);
}

static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
					  const struct sk_buff *skb_b,
					  u8 meta_len)
{
	const void *a = skb_metadata_end(skb_a);
	const void *b = skb_metadata_end(skb_b);

	/* Using more efficient variant than plain call to memcmp(). */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	u64 diffs = 0;

	switch (meta_len) {
#define __it(x, op) (x -= sizeof(u##op))
#define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
	case 32: diffs |= __it_diff(a, b, 64);
		 fallthrough;
	case 24: diffs |= __it_diff(a, b, 64);
		 fallthrough;
	case 16: diffs |= __it_diff(a, b, 64);
		 fallthrough;
	case  8: diffs |= __it_diff(a, b, 64);
		break;
	case 28: diffs |= __it_diff(a, b, 64);
		 fallthrough;
	case 20: diffs |= __it_diff(a, b, 64);
		 fallthrough;
	case 12: diffs |= __it_diff(a, b, 64);
		 fallthrough;
	case  4: diffs |= __it_diff(a, b, 32);
		break;
	}
	return diffs;
#else
	return memcmp(a - meta_len, b - meta_len, meta_len);
#endif
}

static inline bool skb_metadata_differs(const struct sk_buff *skb_a,
					const struct sk_buff *skb_b)
{
	u8 len_a = skb_metadata_len(skb_a);
	u8 len_b = skb_metadata_len(skb_b);

	if (!(len_a | len_b))
		return false;

	return len_a != len_b ?
	       true : __skb_metadata_differs(skb_a, skb_b, len_a);
}

static inline void skb_metadata_set(struct sk_buff *skb, u8 meta_len)
{
	skb_shinfo(skb)->meta_len = meta_len;
}

static inline void skb_metadata_clear(struct sk_buff *skb)
{
	skb_metadata_set(skb, 0);
}

struct sk_buff *skb_clone_sk(struct sk_buff *skb);

#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING

void skb_clone_tx_timestamp(struct sk_buff *skb);
bool skb_defer_rx_timestamp(struct sk_buff *skb);

#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */

static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	return false;
}

#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */

/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb back to the stack with a
 * timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps
 */
void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps);

void __skb_tstamp_tx(struct sk_buff *orig_skb,
		     struct skb_shared_hwtstamps *hwtstamps,
		     struct sock *sk, int tstype);

/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb:	the original outgoing packet
 * @hwtstamps:	hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket.  Errors are silently ignored.
 */
void skb_tstamp_tx(struct sk_buff *orig_skb,
		   struct skb_shared_hwtstamps *hwtstamps);

/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC Drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * Specifically, one should make absolutely sure that this function is
 * called before TX completion of this packet can trigger.  Otherwise
 * the packet could potentially already be freed.
 *
 * @skb: A socket buffer.
 */
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	skb_clone_tx_timestamp(skb);
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
		skb_tstamp_tx(skb, NULL);
}

/**
 * skb_complete_wifi_ack - deliver skb with wifi status
 *
 * @skb: the original outgoing packet
 * @acked: ack status
 */
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
__sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
		skb->csum_valid ||
		(skb->ip_summed == CHECKSUM_PARTIAL &&
		 skb_checksum_start_offset(skb) >= 0));
}

/**
 * skb_checksum_complete - Calculate checksum of an entire packet
 * @skb: packet to process
 *
 * This function calculates the checksum over the entire packet plus
 * the value of skb->csum.  The latter can be used to supply the
 * checksum of a pseudo header as used by TCP/UDP.  It returns the
 * checksum.
 *
 * For protocols that contain complete checksums such as ICMP/TCP/UDP,
 * this function can be used to verify the checksum on received
 * packets.  In that case the function should return zero if the
 * checksum is correct.  In particular, this function will return zero
 * if skb->ip_summed is CHECKSUM_UNNECESSARY, which indicates that the
 * hardware has already verified the correctness of the checksum.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}

static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level == 0)
			skb->ip_summed = CHECKSUM_NONE;
		else
			skb->csum_level--;
	}
}

static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
			skb->csum_level++;
	} else if (skb->ip_summed == CHECKSUM_NONE) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = 0;
	}
}

static inline void __skb_reset_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		skb->ip_summed = CHECKSUM_NONE;
		skb->csum_level = 0;
	}
}

/* Check if we need to perform checksum complete validation.
 *
 * Returns true if checksum complete is needed, false otherwise
 * (either checksum is unnecessary or zero checksum is allowed).
 */
static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
						  bool zero_okay,
						  __sum16 check)
{
	if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
		skb->csum_valid = 1;
		__skb_decr_checksum_unnecessary(skb);
		return false;
	}

	return true;
}

/* For small packets <= CHECKSUM_BREAK, perform checksum complete directly
 * in checksum_init.
 */
#define CHECKSUM_BREAK 76

/* Unset checksum-complete
 *
 * Unset checksum complete can be done when packet is being modified
 * (uncompressed for instance) and checksum-complete value is
 * invalidated.
 */
static inline void skb_checksum_complete_unset(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/* Validate (init) checksum based on checksum complete.
 *
 * Return values:
 *   0: checksum is validated or an attempt will be made in
 *	__skb_checksum_complete. In the latter case ip_summed will not be
 *	CHECKSUM_UNNECESSARY and the pseudo header checksum is stored in
 *	skb->csum for use by __skb_checksum_complete
 *   non-zero: value of the invalid checksum
 */
static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
						       bool complete,
						       __wsum psum)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!csum_fold(csum_add(psum, skb->csum))) {
			skb->csum_valid = 1;
			return 0;
		}
	}

	skb->csum = psum;

	if (complete || skb->len <= CHECKSUM_BREAK) {
		__sum16 csum;

		csum = __skb_checksum_complete(skb);
		skb->csum_valid = !csum;
		return csum;
	}

	return 0;
}

static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
{
	return 0;
}

/* Perform checksum validate (init). Note that this is a macro since we only
 * want to call the pseudo header computation function if necessary. First we
 * try to validate without any computation (checksum unnecessary) and then
 * calculate based on checksum complete, calling the function to compute the
 * pseudo header.
 *
 * Return values:
 *   0: checksum is validated or an attempt will be made in
 *	skb_checksum_complete
 *   non-zero: value of the invalid checksum
 */
#define __skb_checksum_validate(skb, proto, complete,			\
				zero_okay, check, compute_pseudo)	\
({									\
	__sum16 __ret = 0;						\
	skb->csum_valid = 0;						\
	if (__skb_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_checksum_validate_complete(skb,		\
				complete, compute_pseudo(skb, proto));	\
	__ret;								\
})

#define skb_checksum_init(skb, proto, compute_pseudo)			\
	__skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)

#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \
	__skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)

#define skb_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)

#define skb_checksum_validate_zero_check(skb, proto, check,		\
					 compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)

#define skb_checksum_simple_validate(skb)				\
	__skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
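
/* Usage sketch (not part of this header), modeled on UDP over IPv4,
 * where a zero checksum means "not computed"; uh (the UDP header) and
 * the pseudo header helper are assumptions taken from the IPv4 code.
 *
 *	if (skb_checksum_init_zero_check(skb, IPPROTO_UDP, uh->check,
 *					 inet_compute_pseudo))
 *		goto csum_error;
 */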

static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
{
	return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid);
}

static inline void __skb_checksum_convert(struct sk_buff *skb, __wsum pseudo)
{
	skb->csum = ~pseudo;
	skb->ip_summed = CHECKSUM_COMPLETE;
}

#define skb_checksum_try_convert(skb, proto, compute_pseudo)		\
do {									\
	if (__skb_checksum_convert_check(skb))				\
		__skb_checksum_convert(skb, compute_pseudo(skb, proto)); \
} while (0)

static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
					      u16 start, u16 offset)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
	skb->csum_offset = offset - start;
}

/* Update skb and packet to reflect the remote checksum offload operation.
 * When called, ptr indicates the starting point for skb->csum when
 * ip_summed is CHECKSUM_COMPLETE. If we need to create checksum complete
 * here, skb_postpull_rcsum is done so skb->csum starts at ptr.
 */
static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
				       int start, int offset, bool nopartial)
{
	__wsum delta;

	if (!nopartial) {
		skb_remcsum_adjust_partial(skb, ptr, start, offset);
		return;
	}

	if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
		__skb_checksum_complete(skb);
		skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
	}

	delta = remcsum_adjust(ptr, skb->csum, start, offset);

	/* Adjust skb->csum since we changed the packet */
	skb->csum = csum_add(skb->csum, delta);
}

static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	return (void *)(skb->_nfct & NFCT_PTRMASK);
#else
	return NULL;
#endif
}

static inline unsigned long skb_get_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	return skb->_nfct;
#else
	return 0UL;
#endif
}

static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	skb->_nfct = nfct;
#endif
}

#ifdef CONFIG_SKB_EXTENSIONS
enum skb_ext_id {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	SKB_EXT_BRIDGE_NF,
#endif
#ifdef CONFIG_XFRM
	SKB_EXT_SEC_PATH,
#endif
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	TC_SKB_EXT,
#endif
#if IS_ENABLED(CONFIG_MPTCP)
	SKB_EXT_MPTCP,
#endif
	SKB_EXT_NUM, /* must be last */
};

/**
 * struct skb_ext - sk_buff extensions
 * @refcnt: 1 on allocation, deallocated on 0
 * @offset: offset to add to skb->extensions to get extension address
 * @chunks: size currently allocated, stored in 8 byte units
 * @data: start of extension data, variable sized
 *
 * Note: offsets/chunk size are stored in units of 8 bytes, hence the
 * offset[id] << 3 shift when locating an extension within @data.
 */
struct skb_ext {
	refcount_t refcnt;
	u8 offset[SKB_EXT_NUM]; /* in chunks of 8 bytes */
	u8 chunks;		/* same */
	char data[] __aligned(8);
};

struct skb_ext *__skb_ext_alloc(gfp_t flags);
void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
		    struct skb_ext *ext);
void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
void __skb_ext_put(struct skb_ext *ext);

static inline void skb_ext_put(struct sk_buff *skb)
{
	if (skb->active_extensions)
		__skb_ext_put(skb->extensions);
}

static inline void __skb_ext_copy(struct sk_buff *dst,
				  const struct sk_buff *src)
{
	dst->active_extensions = src->active_extensions;

	if (src->active_extensions) {
		struct skb_ext *ext = src->extensions;

		refcount_inc(&ext->refcnt);
		dst->extensions = ext;
	}
}

static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src)
{
	skb_ext_put(dst);
	__skb_ext_copy(dst, src);
}

static inline bool __skb_ext_exist(const struct skb_ext *ext, enum skb_ext_id i)
{
	return !!ext->offset[i];
}

static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id)
{
	return skb->active_extensions & (1 << id);
}

static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
{
	if (skb_ext_exist(skb, id))
		__skb_ext_del(skb, id);
}

static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id)
{
	if (skb_ext_exist(skb, id)) {
		struct skb_ext *ext = skb->extensions;

		return (void *)ext + (ext->offset[id] << 3);
	}

	return NULL;
}
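
/* Usage sketch (not part of this header): attach an extension, fill it
 * in, and look it up again later; chain_index is hypothetical.
 *
 *	struct tc_skb_ext *ext = skb_ext_add(skb, TC_SKB_EXT);
 *	if (!ext)
 *		goto drop;
 *	ext->chain = chain_index;
 *
 *	ext = skb_ext_find(skb, TC_SKB_EXT);
 *	if (ext)
 *		chain_index = ext->chain;
 */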

static inline void skb_ext_reset(struct sk_buff *skb)
{
	if (unlikely(skb->active_extensions)) {
		__skb_ext_put(skb->extensions);
		skb->active_extensions = 0;
	}
}

static inline bool skb_has_extensions(struct sk_buff *skb)
{
	return unlikely(skb->active_extensions);
}
#else /* !CONFIG_SKB_EXTENSIONS */
static inline void skb_ext_put(struct sk_buff *skb) {}
static inline void skb_ext_reset(struct sk_buff *skb) {}
static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
static inline bool skb_has_extensions(struct sk_buff *skb) { return false; }
#endif /* CONFIG_SKB_EXTENSIONS */

static inline void nf_reset_ct(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb_nfct(skb));
	skb->_nfct = 0;
#endif
}

static inline void nf_reset_trace(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
	skb->nf_trace = 0;
#endif
}

static inline void ipvs_reset(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IP_VS)
	skb->ipvs_property = 0;
#endif
}

/* Note: This doesn't put any conntrack info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
			     bool copy)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->_nfct = src->_nfct;
	nf_conntrack_get(skb_nfct(src));
#endif
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
	if (copy)
		dst->nf_trace = src->nf_trace;
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb_nfct(dst));
#endif
	__nf_copy(dst, src, true);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline int secpath_exists(const struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
	return skb_ext_exist(skb, SKB_EXT_SEC_PATH);
#else
	return 0;
#endif
}

static inline bool skb_irq_freeable(const struct sk_buff *skb)
{
	return !skb->destructor &&
	       !secpath_exists(skb) &&
	       !skb_nfct(skb) &&
	       !skb->_skb_refdst &&
	       !skb_has_frag_list(skb);
}

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}

static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val)
{
	skb->dst_pending_confirm = val;
}

static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
{
	return skb->dst_pending_confirm != 0;
}

static inline struct sec_path *skb_sec_path(const struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
	return skb_ext_find(skb, SKB_EXT_SEC_PATH);
#else
	return NULL;
#endif
}

/* Keeps track of mac header offset relative to skb->head.
 * It is useful for TSO of Tunneling protocol. e.g. GRE.
 * For non-tunnel skb it points to skb_mac_header() and for
 * tunnel skb it points to outer mac header.
 * Keeps track of level of encapsulation of network headers.
 */
struct skb_gso_cb {
	union {
		int	mac_offset;
		int	data_offset;
	};
	int	encap_level;
	__wsum	csum;
	__u16	csum_start;
};
#define SKB_GSO_CB_OFFSET	32
#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET))

static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
{
	return (skb_mac_header(inner_skb) - inner_skb->head) -
	       SKB_GSO_CB(inner_skb)->mac_offset;
}

static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
{
	int new_headroom, headroom;
	int ret;

	headroom = skb_headroom(skb);
	ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
	if (ret)
		return ret;

	new_headroom = skb_headroom(skb);
	SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
	return 0;
}

static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
{
	/* Do not update partial checksums if remote checksum is enabled. */
	if (skb->remcsum_offload)
		return;

	SKB_GSO_CB(skb)->csum = res;
	SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
}

/* Compute the checksum for a gso segment. First compute the checksum value
 * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
 * then add in skb->csum (checksum from csum_start to end of packet).
 * skb->csum and csum_start are then updated to reflect the checksum of the
 * resultant packet starting from the transport header-- the resultant checksum
 * is in the res argument (i.e. RAW checksum).
 */
static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
{
	unsigned char *csum_start = skb_transport_header(skb);
	int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
	__wsum partial = SKB_GSO_CB(skb)->csum;

	SKB_GSO_CB(skb)->csum = res;
	SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;

	return csum_fold(csum_partial(csum_start, plen, partial));
}

static inline bool skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
}

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
}

static inline void skb_gso_reset(struct sk_buff *skb)
{
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_segs = 0;
	skb_shinfo(skb)->gso_type = 0;
}

static inline void skb_increase_gso_size(struct skb_shared_info *shinfo,
					 u16 increment)
{
	if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
		return;
	shinfo->gso_size += increment;
}

static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo,
					 u16 decrement)
{
	if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
		return;
	shinfo->gso_size -= decrement;
}

void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set.
	 */
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one.  Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper, to document places where we make this assertion.
 */
static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
				     unsigned int transport_len,
				     __sum16(*skb_chkf)(struct sk_buff *skb));

/**
 * skb_head_is_locked - Determine if the skb->head is locked down
 * @skb: skb to check
 *
 * The head on skbs built around a head frag can be removed if they are
 * not cloned. This function returns true if the skb head is locked down
 * due to either being allocated via kmalloc, or by being a clone with
 * multiple references to the head.
 */
static inline bool skb_head_is_locked(const struct sk_buff *skb)
{
	return !skb->head_frag || skb_cloned(skb);
}

/* Local Checksum Offload.
 * Compute outer checksum based on the assumption that the
 * inner checksum will be offloaded later.
 * See Documentation/networking/checksum-offloads.rst for
 * explanation of how this works.
 * Fill in outer checksum adjustment (e.g. with sum of outer
 * pseudo-header) before calling.
 * Then, pass skb to lco_csum to fold in 'inner' csum.
 */
static inline __wsum lco_csum(struct sk_buff *skb)
{
	unsigned char *csum_start = skb_checksum_start(skb);
	unsigned char *l4_hdr = skb_transport_header(skb);
	__wsum partial;

	/* Start with complement of inner checksum adjustment */
	partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
						    skb->csum_offset));

	/* Add in checksum of our headers (incl. outer checksum
	 * adjustment filled in by caller) and return result.
	 */
	return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
}

static inline bool skb_is_redirected(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_REDIRECT
	return skb->redirected;
#else
	return false;
#endif
}

static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
{
#ifdef CONFIG_NET_REDIRECT
	skb->redirected = 1;
	skb->from_ingress = from_ingress;
	if (skb->from_ingress)
		skb->tstamp = 0;
#endif
}

static inline void skb_reset_redirect(struct sk_buff *skb)
{
#ifdef CONFIG_NET_REDIRECT
	skb->redirected = 0;
#endif
}

#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */