/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 */
#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/bvec.h>
#include <linux/cache.h>
#include <linux/rbtree.h>
#include <linux/socket.h>
#include <linux/refcount.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <net/flow_dissector.h>
#include <linux/splice.h>
#include <linux/in6.h>
#include <linux/if_packet.h>
#include <net/flow.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <linux/netfilter/nf_conntrack_common.h>
#endif

/*
 * Checksumming of received packets by device (skb->ip_summed, RX):
 *
 * CHECKSUM_NONE
 *   The device did not checksum this packet; the stack must verify any
 *   checksums itself.
 *
 * CHECKSUM_UNNECESSARY
 *   The hardware already verified the checksum(s). skb->csum_level
 *   indicates how many consecutive checksums were verified, starting
 *   from the outermost one.
 *
 * CHECKSUM_COMPLETE
 *   The device computed the checksum of the complete packet as seen by
 *   netif_rx() and stored it in skb->csum; the stack only needs to
 *   validate specific protocol checksums against that value.
 *
 * Checksumming on transmit (skb->ip_summed, TX):
 *
 * CHECKSUM_PARTIAL
 *   The stack filled in a pseudo-header checksum; the device (or the
 *   stack, via skb_checksum_help()) must finish the checksum over the
 *   range described by skb->csum_start and skb->csum_offset.
 *
 * CHECKSUM_NONE on TX means the packet is already fully checksummed or
 * needs no checksum.
 */
#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

/* Maximum value in skb->csum_level */
#define SKB_MAX_CSUM_LEVEL	3

#define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

struct net_device;
struct scatterlist;
struct pipe_inode_info;
struct iov_iter;
struct napi_struct;
struct bpf_prog;
union bpf_attr;
struct skb_ext;

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
	enum {
		BRNF_PROTO_UNCHANGED,
		BRNF_PROTO_8021Q,
		BRNF_PROTO_PPPOE
	} orig_proto:8;
	u8			pkt_otherhost:1;
	u8			in_prerouting:1;
	u8			bridged_dnat:1;
	__u16			frag_max_size;
	struct net_device	*physindev;

	/* always valid & non-NULL from FORWARD on, for physdev match */
	struct net_device	*physoutdev;
	union {
		/* prerouting: detect dnat in orig/reply direction */
		__be32		ipv4_daddr;
		struct in6_addr ipv6_daddr;

		/* after prerouting + nat detected: store original source
		 * mac since neigh resolution overwrites it, only used while
		 * skb is out in neigh layer.
		 */
		char neigh_header[8];
	};
};
#endif

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
/* Chain in tc_skb_ext will be used to share the tc chain with
 * ovs recirc_id. It will be set to the current chain by tc
 * and read by ovs to recirc_id.
 */
struct tc_skb_ext {
	__u32 chain;
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list we
 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
 * buffers which do not start on a page boundary.
 *
 * Since GRO uses frags we allocate at least 16 regardless of page
 * size.
 */
#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif
extern int sysctl_max_skb_frags;

/* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
 * segment using its current segmentation instead.
 */
#define GSO_BY_FRAGS	0xFFFF

typedef struct bio_vec skb_frag_t;

/**
 * skb_frag_size() - Returns the size of a skb fragment
 * @frag: skb fragment
 */
static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->bv_len;
}

/**
 * skb_frag_size_set() - Sets the size of a skb fragment
 * @frag: skb fragment
 * @size: size of fragment
 */
static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->bv_len = size;
}

/**
 * skb_frag_size_add() - Increments the size of a skb fragment by @delta
 * @frag: skb fragment
 * @delta: value to add
 */
static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->bv_len += delta;
}

/**
 * skb_frag_size_sub() - Decrements the size of a skb fragment by @delta
 * @frag: skb fragment
 * @delta: value to subtract
 */
static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->bv_len -= delta;
}

/**
 * skb_frag_must_loop - Test if %p is a high memory page
 * @p: fragment's page
 */
static inline bool skb_frag_must_loop(struct page *p)
{
#if defined(CONFIG_HIGHMEM)
	if (PageHighMem(p))
		return true;
#endif
	return false;
}

/**
 *	skb_frag_foreach_page - loop over pages in a fragment
 *
 *	@f:		skb frag to operate on
 *	@f_off:		offset from start of f->bv_page
 *	@f_len:		length from f_off to loop over
 *	@p:		(temp var) current page
 *	@p_off:		(temp var) offset from start of current page,
 *			non-zero only on first page.
 *	@p_len:		(temp var) length in current page,
 *			< PAGE_SIZE only on first and last page.
 *	@copied:	(temp var) length so far, excluding current p_len.
 *
 *	A fragment can hold a compound page, in which case per-page
 *	operations, notably kmap_atomic, must be called for each
 *	regular page.
 */
#define skb_frag_foreach_page(f, f_off, f_len, p, p_off, p_len, copied)	\
	for (p = skb_frag_page(f) + ((f_off) >> PAGE_SHIFT),		\
	     p_off = (f_off) & (PAGE_SIZE - 1),				\
	     p_len = skb_frag_must_loop(p) ?				\
	     min_t(u32, f_len, PAGE_SIZE - p_off) : f_len,		\
	     copied = 0;						\
	     copied < f_len;						\
	     copied += p_len, p++, p_off = 0,				\
	     p_len = min_t(u32, f_len - copied, PAGE_SIZE))

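/* Illustrative sketch (not part of the original header): walking a frag
 * that may span a compound highmem page. "frag" and "kaddr" are names
 * assumed for the example.
 *
 *	struct page *p;
 *	u32 p_off, p_len, copied;
 *
 *	skb_frag_foreach_page(frag, skb_frag_off(frag), skb_frag_size(frag),
 *			      p, p_off, p_len, copied) {
 *		void *kaddr = kmap_atomic(p);
 *		// operate on p_len bytes starting at kaddr + p_off
 *		kunmap_atomic(kaddr);
 *	}
 */
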
#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since arbitrary point in time
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
};

/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp when queueing packet to NIC */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* device driver supports TX zero-copy buffers */
	SKBTX_DEV_ZEROCOPY = 1 << 3,

	/* generate wifi status information (where possible) */
	SKBTX_WIFI_STATUS = 1 << 4,

	/* This indicates at least one fragment might be overwritten
	 * (as in vmsplice(), sendfile() ...)
	 * If we need to compute a TX checksum, we'll need to copy
	 * all frags to avoid possible bad checksum
	 */
	SKBTX_SHARED_FRAG = 1 << 5,

	/* generate software time stamp when entering packet scheduling */
	SKBTX_SCHED_TSTAMP = 1 << 6,
};

#define SKBTX_ZEROCOPY_FRAG	(SKBTX_DEV_ZEROCOPY | SKBTX_SHARED_FRAG)
#define SKBTX_ANY_SW_TSTAMP	(SKBTX_SW_TSTAMP    | \
				 SKBTX_SCHED_TSTAMP)
#define SKBTX_ANY_TSTAMP	(SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)

/*
 * The callback notifies userspace to release buffers when skb DMA is done in
 * lower device, the skb last reference should be 0 when calling this.
 * The zerocopy_success argument is true if zero copy transmit occurred,
 * false on data copy or out of memory error caused by data copy attempt.
 * The ctx field is used to track device context.
 * The desc field is used to track userspace buffer index.
 */
struct ubuf_info {
	void (*callback)(struct ubuf_info *, bool zerocopy_success);
	union {
		struct {
			unsigned long desc;
			void *ctx;
		};
		struct {
			u32 id;
			u16 len;
			u16 zerocopy:1;
			u32 bytelen;
		};
	};
	refcount_t refcnt;

	struct mmpin {
		struct user_struct *user;
		unsigned int num_pg;
	} mmp;
};

#define skb_uarg(SKB)	((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))

int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
void mm_unaccount_pinned_pages(struct mmpin *mmp);

struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size);
struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
					struct ubuf_info *uarg);

static inline void sock_zerocopy_get(struct ubuf_info *uarg)
{
	refcount_inc(&uarg->refcnt);
}

void sock_zerocopy_put(struct ubuf_info *uarg);
void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);

void sock_zerocopy_callback(struct ubuf_info *uarg, bool success);

int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len);
int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
			     struct msghdr *msg, int len,
			     struct ubuf_info *uarg);
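
/* Illustrative sketch (not part of the original header): attaching a
 * zerocopy completion context on the TX path, roughly in the style of
 * the MSG_ZEROCOPY senders. Error handling is omitted and "size"/"msg"
 * are assumed names.
 *
 *	struct ubuf_info *uarg;
 *
 *	uarg = sock_zerocopy_realloc(sk, size, skb_zcopy(skb));
 *	if (uarg) {
 *		err = skb_zerocopy_iter_stream(sk, skb, msg, size, uarg);
 *		sock_zerocopy_put(uarg);	// drop the caller's reference
 *	}
 */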

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	__u8		__unused;
	__u8		meta_len;
	__u8		nr_frags;
	__u8		tx_flags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	unsigned int	gso_type;
	u32		tskey;

	/*
	 * Warning : all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void		*destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};
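
/* Layout sketch (informational, not from the original header):
 *
 *	skb->head ... skb->data ... skb->tail ... skb->end
 *	                                          \-> struct skb_shared_info
 *
 * skb_shinfo(skb), defined further down, returns the skb_shared_info
 * that lives at skb_end_pointer(skb).
 */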

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)


enum {
	SKB_FCLONE_UNAVAILABLE,	/* skb has no fclone (from head_cache) */
	SKB_FCLONE_ORIG,	/* orig skb (from fclone_cache) */
	SKB_FCLONE_CLONE,	/* companion fclone skb (from fclone_cache) */
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 1,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 2,

	SKB_GSO_TCP_FIXEDID = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,

	SKB_GSO_GRE = 1 << 6,

	SKB_GSO_GRE_CSUM = 1 << 7,

	SKB_GSO_IPXIP4 = 1 << 8,

	SKB_GSO_IPXIP6 = 1 << 9,

	SKB_GSO_UDP_TUNNEL = 1 << 10,

	SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,

	SKB_GSO_PARTIAL = 1 << 12,

	SKB_GSO_TUNNEL_REMCSUM = 1 << 13,

	SKB_GSO_SCTP = 1 << 14,

	SKB_GSO_ESP = 1 << 15,

	SKB_GSO_UDP = 1 << 16,

	SKB_GSO_UDP_L4 = 1 << 17,

	SKB_GSO_FRAGLIST = 1 << 18,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

/*
 * struct sk_buff - socket buffer
 *
 * In brief: @next/@prev link buffers into lists, @sk is the owning
 * socket, @dev the device we arrived on/are leaving by, @tstamp the
 * arrival/departure time, @cb a 48-byte control buffer free for use by
 * every layer, @len and @data_len the total and paged data lengths, and
 * @head/@data/@tail/@end delimit the linear data area. The fields
 * between @headers_start and @headers_end are copied as one block when
 * an skb header is copied.
 */
struct sk_buff {
	union {
		struct {
			/* These two members must be first. */
			struct sk_buff		*next;
			struct sk_buff		*prev;

			union {
				struct net_device	*dev;
				/* Some protocols might use this space to store information,
				 * while device pointer would be NULL.
				 * UDP receive path is one user.
				 */
				unsigned long		dev_scratch;
			};
		};
		struct rb_node		rbnode; /* used in netem, ip4 defrag, and tcp stack */
		struct list_head	list;
	};

	union {
		struct sock		*sk;
		int			ip_defrag_offset;
	};

	union {
		ktime_t		tstamp;
		u64		skb_mstamp_ns; /* earliest departure time */
	};
	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	union {
		struct {
			unsigned long	_skb_refdst;
			void		(*destructor)(struct sk_buff *skb);
		};
		struct list_head	tcp_tsorted_anchor;
	};

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	unsigned long		 _nfct;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;

	/* Following fields are _not_ copied in __copy_skb_header()
	 * Note that queue_mapping is here mostly to fill a hole.
	 */
	__u16			queue_mapping;

/* if you move cloned around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define CLONED_MASK	(1 << 7)
#else
#define CLONED_MASK	1
#endif
#define CLONED_OFFSET()		offsetof(struct sk_buff, __cloned_offset)

	/* private: */
	__u8			__cloned_offset[0];
	/* public: */
	__u8			cloned:1,
				nohdr:1,
				fclone:2,
				peeked:1,
				head_frag:1,
				pfmemalloc:1;
#ifdef CONFIG_SKB_EXTENSIONS
	__u8			active_extensions;
#endif
	/* fields enclosed in headers_start/headers_end are copied
	 * using a single memcpy() in __copy_skb_header()
	 */
	/* private: */
	__u32			headers_start[0];
	/* public: */

/* if you move pkt_type around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_TYPE_MAX	(7 << 5)
#else
#define PKT_TYPE_MAX	7
#endif
#define PKT_TYPE_OFFSET()	offsetof(struct sk_buff, __pkt_type_offset)

	/* private: */
	__u8			__pkt_type_offset[0];
	/* public: */
	__u8			pkt_type:3;
	__u8			ignore_df:1;
	__u8			nf_trace:1;
	__u8			ip_summed:2;
	__u8			ooo_okay:1;

	__u8			l4_hash:1;
	__u8			sw_hash:1;
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;
	__u8			no_fcs:1;
	/* Indicates the inner headers are valid in the skbuff. */
	__u8			encapsulation:1;
	__u8			encap_hdr_csum:1;
	__u8			csum_valid:1;

#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_VLAN_PRESENT_BIT	7
#else
#define PKT_VLAN_PRESENT_BIT	0
#endif
#define PKT_VLAN_PRESENT_OFFSET()	offsetof(struct sk_buff, __pkt_vlan_present_offset)
	/* private: */
	__u8			__pkt_vlan_present_offset[0];
	/* public: */
	__u8			vlan_present:1;
	__u8			csum_complete_sw:1;
	__u8			csum_level:2;
	__u8			csum_not_inet:1;
	__u8			dst_pending_confirm:1;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif

	__u8			ipvs_property:1;
	__u8			inner_protocol_type:1;
	__u8			remcsum_offload:1;
#ifdef CONFIG_NET_SWITCHDEV
	__u8			offload_fwd_mark:1;
	__u8			offload_l3_fwd_mark:1;
#endif
#ifdef CONFIG_NET_CLS_ACT
	__u8			tc_skip_classify:1;
	__u8			tc_at_ingress:1;
#endif
#ifdef CONFIG_NET_REDIRECT
	__u8			redirected:1;
	__u8			from_ingress:1;
#endif
#ifdef CONFIG_TLS_DEVICE
	__u8			decrypted:1;
#endif

#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#endif

	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	int			skb_iif;
	__u32			hash;
	__be16			vlan_proto;
	__u16			vlan_tci;
#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
	union {
		unsigned int	napi_id;
		unsigned int	sender_cpu;
	};
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32		secmark;
#endif

	union {
		__u32		mark;
		__u32		reserved_tailroom;
	};

	union {
		__be16		inner_protocol;
		__u8		inner_ipproto;
	};

	__u16			inner_transport_header;
	__u16			inner_network_header;
	__u16			inner_mac_header;

	__be16			protocol;
	__u16			transport_header;
	__u16			network_header;
	__u16			mac_header;

	/* private: */
	__u32			headers_end[0];
	/* public: */

	/* These elements must be at the end, see alloc_skb() for details.  */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	refcount_t		users;

#ifdef CONFIG_SKB_EXTENSIONS
	/* only useable after checking ->active_extensions != 0 */
	struct skb_ext		*extensions;
#endif
};

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */

#define SKB_ALLOC_FCLONE	0x01
#define SKB_ALLOC_RX		0x02
#define SKB_ALLOC_NAPI		0x04

/**
 * skb_pfmemalloc - Test if the skb was allocated from PFMEMALLOC reserves
 * @skb: buffer
 */
static inline bool skb_pfmemalloc(const struct sk_buff *skb)
{
	return unlikely(skb->pfmemalloc);
}

/*
 * skb might have a dst attached, refcounted or not.
 * _skb_refdst low order bit is set if refcount was _not_ taken
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of reference taken or not.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check we still are in a
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}

/**
 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * If dst entry is cached, we do not take reference and dst_release
 * will be avoided by refdst_drop. If dst entry is not cached, we take
 * reference, so that last dst_release can destroy the dst immediately.
 */
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
}

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

/**
 * skb_rtable - Returns the skb &rtable
 * @skb: buffer
 */
static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

/* For mangling skb->pkt_type from user space side from applications
 * such as nft, tc, etc, we only allow a conservative subset of
 * possible pkt_types to be set.
 */
static inline bool skb_pkt_type_ok(u32 ptype)
{
	return ptype <= PACKET_OTHERHOST;
}

/**
 * skb_napi_id - Returns the skb's NAPI id
 * @skb: buffer
 */
static inline unsigned int skb_napi_id(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	return skb->napi_id;
#else
	return 0;
#endif
}

/**
 * skb_unref - decrement the skb's reference count
 * @skb: buffer
 *
 * Returns true if we can free the skb.
 */
static inline bool skb_unref(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return false;
	if (likely(refcount_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!refcount_dec_and_test(&skb->users)))
		return false;

	return true;
}

void skb_release_head_state(struct sk_buff *skb);
void kfree_skb(struct sk_buff *skb);
void kfree_skb_list(struct sk_buff *segs);
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
void skb_tx_error(struct sk_buff *skb);
void consume_skb(struct sk_buff *skb);
void __consume_stateless_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;

void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
		      bool *fragstolen, int *delta_truesize);

struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
			    int node);
struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size);

/**
 * alloc_skb - allocate a network buffer
 * @size: size to allocate
 * @priority: allocation mask
 *
 * This function is a convenient wrapper around __alloc_skb().
 */
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}
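
/* Illustrative sketch (not part of the original header): the common
 * allocate/reserve/fill pattern. The 128-byte size, GFP_KERNEL context
 * and the "payload" buffer are assumptions for the example.
 *
 *	struct sk_buff *skb = alloc_skb(NET_IP_ALIGN + 128, GFP_KERNEL);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, NET_IP_ALIGN);			// headroom for IP alignment
 *	skb_put_data(skb, payload, payload_len);	// append payload bytes
 */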

struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
				     unsigned long data_len,
				     int max_page_order,
				     int *errcode,
				     gfp_t gfp_mask);
struct sk_buff *alloc_skb_for_msg(struct sk_buff *first);

/* Layout of fast clones : [skb1][skb2][fclone_ref] */
struct sk_buff_fclones {
	struct sk_buff	skb1;

	struct sk_buff	skb2;

	refcount_t	fclone_ref;
};

/**
 *	skb_fclone_busy - check if fclone is busy
 *	@sk: socket
 *	@skb: buffer
 *
 * Returns true if skb is a fast clone, and its clone is not freed.
 * Some drivers call skb_orphan() in their ndo_start_xmit(),
 * so we also check that this didn't happen.
 */
static inline bool skb_fclone_busy(const struct sock *sk,
				   const struct sk_buff *skb)
{
	const struct sk_buff_fclones *fclones;

	fclones = container_of(skb, struct sk_buff_fclones, skb1);

	return skb->fclone == SKB_FCLONE_ORIG &&
	       refcount_read(&fclones->fclone_ref) > 1 &&
	       fclones->skb2.sk == sk;
}

/**
 * alloc_skb_fclone - allocate a network buffer from fclone cache
 * @size: size to allocate
 * @priority: allocation mask
 *
 * This function is a convenient wrapper around __alloc_skb().
 */
static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
}

struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
void skb_headers_offset_update(struct sk_buff *skb, int off);
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
void skb_copy_header(struct sk_buff *new, const struct sk_buff *old);
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone);
static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
					  gfp_t gfp_mask)
{
	return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
}

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
				     unsigned int headroom);
struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
				int newtailroom, gfp_t priority);
int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
				     int offset, int len);
int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
			      int offset, int len);
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);

/**
 *	skb_pad			-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error.
 */
static inline int skb_pad(struct sk_buff *skb, int pad)
{
	return __skb_pad(skb, pad, true);
}
#define dev_kfree_skb(a)	consume_skb(a)

int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
			 int offset, size_t size);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st);
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st);
void skb_abort_seq_read(struct skb_seq_state *st);

unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config);

/*
 * Packet hash types specify the type of hash in skb_set_hash.
 *
 * Hash types refer to the protocol layer addresses which are used to
 * construct a packet's hash. The hashes are used to differentiate or
 * identify flows of the protocol layer for the hash.
 *
 * An L4 hash (PKT_HASH_TYPE_L4) is derived from layer 4 ports as well as
 * network layer addresses and may uniquely identify a connection. An L3
 * hash (PKT_HASH_TYPE_L3) is derived from network layer addresses only,
 * an L2 hash (PKT_HASH_TYPE_L2) from link layer addresses.
 * PKT_HASH_TYPE_NONE indicates a hash of unknown derivation.
 */
enum pkt_hash_types {
	PKT_HASH_TYPE_NONE,	/* Undefined type */
	PKT_HASH_TYPE_L2,	/* Input: src_MAC, dest_MAC */
	PKT_HASH_TYPE_L3,	/* Input: src_IP, dst_IP */
	PKT_HASH_TYPE_L4,	/* Input: src_IP, dst_IP, src_port, dst_port */
};

static inline void skb_clear_hash(struct sk_buff *skb)
{
	skb->hash = 0;
	skb->sw_hash = 0;
	skb->l4_hash = 0;
}

static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
{
	if (!skb->l4_hash)
		skb_clear_hash(skb);
}

static inline void
__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
{
	skb->l4_hash = is_l4;
	skb->sw_hash = is_sw;
	skb->hash = hash;
}

static inline void
skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
{
	/* Used by drivers to set hash from HW */
	__skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
}

static inline void
__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
{
	__skb_set_hash(skb, hash, true, is_l4);
}

void __skb_get_hash(struct sk_buff *skb);
u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
u32 skb_get_poff(const struct sk_buff *skb);
u32 __skb_get_poff(const struct sk_buff *skb, void *data,
		   const struct flow_keys_basic *keys, int hlen);
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
			    void *data, int hlen_proto);

static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
					int thoff, u8 ip_proto)
{
	return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
}

void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count);

#ifdef CONFIG_NET
int skb_flow_dissector_prog_query(const union bpf_attr *attr,
				  union bpf_attr __user *uattr);
int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
				       struct bpf_prog *prog);

int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr);
#else
static inline int skb_flow_dissector_prog_query(const union bpf_attr *attr,
						union bpf_attr __user *uattr)
{
	return -EOPNOTSUPP;
}

static inline int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
						     struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}

static inline int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}
#endif

struct bpf_flow_dissector;
bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
		      __be16 proto, int nhoff, int hlen, unsigned int flags);

bool __skb_flow_dissect(const struct net *net,
			const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container,
			void *data, __be16 proto, int nhoff, int hlen,
			unsigned int flags);

static inline bool skb_flow_dissect(const struct sk_buff *skb,
				    struct flow_dissector *flow_dissector,
				    void *target_container, unsigned int flags)
{
	return __skb_flow_dissect(NULL, skb, flow_dissector,
				  target_container, NULL, 0, 0, 0, flags);
}

static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
					      struct flow_keys *flow,
					      unsigned int flags)
{
	memset(flow, 0, sizeof(*flow));
	return __skb_flow_dissect(NULL, skb, &flow_keys_dissector,
				  flow, NULL, 0, 0, 0, flags);
}

static inline bool
skb_flow_dissect_flow_keys_basic(const struct net *net,
				 const struct sk_buff *skb,
				 struct flow_keys_basic *flow, void *data,
				 __be16 proto, int nhoff, int hlen,
				 unsigned int flags)
{
	memset(flow, 0, sizeof(*flow));
	return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow,
				  data, proto, nhoff, hlen, flags);
}

void skb_flow_dissect_meta(const struct sk_buff *skb,
			   struct flow_dissector *flow_dissector,
			   void *target_container);

/* Gets a skb connection tracking info, ctinfo map should be a
 * map of mapsize to translate enum ip_conntrack_info states
 * to user states.
 */
void
skb_flow_dissect_ct(const struct sk_buff *skb,
		    struct flow_dissector *flow_dissector,
		    void *target_container,
		    u16 *ctinfo_map,
		    size_t mapsize);
void
skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
			     struct flow_dissector *flow_dissector,
			     void *target_container);

static inline __u32 skb_get_hash(struct sk_buff *skb)
{
	if (!skb->l4_hash && !skb->sw_hash)
		__skb_get_hash(skb);

	return skb->hash;
}

static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
{
	if (!skb->l4_hash && !skb->sw_hash) {
		struct flow_keys keys;
		__u32 hash = __get_hash_from_flowi6(fl6, &keys);

		__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
	}

	return skb->hash;
}

__u32 skb_get_hash_perturb(const struct sk_buff *skb,
			   const siphash_key_t *perturb);

static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
	return skb->hash;
}

static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
{
	to->hash = from->hash;
	to->sw_hash = from->sw_hash;
	to->l4_hash = from->l4_hash;
}

static inline void skb_copy_decrypted(struct sk_buff *to,
				      const struct sk_buff *from)
{
#ifdef CONFIG_TLS_DEVICE
	to->decrypted = from->decrypted;
#endif
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end - skb->head;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
{
	bool is_zcopy = skb && skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY;

	return is_zcopy ? skb_uarg(skb) : NULL;
}

static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
				 bool *have_ref)
{
	if (skb && uarg && !skb_zcopy(skb)) {
		if (unlikely(have_ref && *have_ref))
			*have_ref = false;
		else
			sock_zerocopy_get(uarg);
		skb_shinfo(skb)->destructor_arg = uarg;
		skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
	}
}

static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
{
	skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
	skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
}

static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
{
	return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL;
}

static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
{
	return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
}

/* Release a reference on a zerocopy structure */
static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
{
	struct ubuf_info *uarg = skb_zcopy(skb);

	if (uarg) {
		if (skb_zcopy_is_nouarg(skb)) {
			/* no notification callback */
		} else if (uarg->callback == sock_zerocopy_callback) {
			uarg->zerocopy = uarg->zerocopy && zerocopy;
			sock_zerocopy_put(uarg);
		} else {
			uarg->callback(uarg, zerocopy);
		}

		skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
	}
}

/* Abort a zerocopy operation and revert zckey on error in send syscall */
static inline void skb_zcopy_abort(struct sk_buff *skb)
{
	struct ubuf_info *uarg = skb_zcopy(skb);

	if (uarg) {
		sock_zerocopy_put_abort(uarg, false);
		skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
	}
}

static inline void skb_mark_not_on_list(struct sk_buff *skb)
{
	skb->next = NULL;
}

/* Iterate through singly-linked GSO fragments of an skb. */
#define skb_list_walk_safe(first, skb, next_skb)				\
	for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb);	\
	     (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL)

static inline void skb_list_del_init(struct sk_buff *skb)
{
	__list_del_entry(&skb->list);
	skb_mark_not_on_list(skb);
}
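
/* Illustrative sketch (not part of the original header): walking and
 * freeing each segment of a GSO list, e.g. one returned by
 * skb_segment(). "segs" is an assumed name.
 *
 *	struct sk_buff *skb, *next;
 *
 *	skb_list_walk_safe(segs, skb, next) {
 *		skb_mark_not_on_list(skb);	// detach before freeing
 *		consume_skb(skb);
 *	}
 */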

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (const struct sk_buff *) list;
}

/**
 *	skb_queue_empty_lockless - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 *	This variant can be used in lockless contexts.
 */
static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
{
	return READ_ONCE(list->next) == (const struct sk_buff *) list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (const struct sk_buff *) list;
}

/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (const struct sk_buff *) list;
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb.  It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb.  It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	refcount_inc(&skb->users);
	return skb;
}

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_header_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

/**
 * __skb_header_release - release reference to header
 * @skb: buffer to operate on
 */
static inline void __skb_header_release(struct sk_buff *skb)
{
	skb->nohdr = 1;
	atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return refcount_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt status or with spinlocks held pri must
 *	be GFP_ATOMIC.
 *
 *	NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);

		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);

		/* Free our shared copy */
		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->next;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	__skb_peek - peek at the head of a non-empty &sk_buff_head
 *	@list_: list to peek at
 *
 *	Like skb_peek(), but the caller knows that the list is not empty.
 */
static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_)
{
	return list_->next;
}

/**
 *	skb_peek_next - peek skb following the given one from a queue
 *	@skb: skb to start from
 *	@list_: list to peek at
 *
 *	Returns %NULL when the end of the list is met or a pointer to the
 *	next element. The reference count is not incremented and the
 *	reference is therefore volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
					    const struct sk_buff_head *list_)
{
	struct sk_buff *next = skb->next;

	if (next == (struct sk_buff *)list_)
		next = NULL;
	return next;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = READ_ONCE(list_->prev);

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	skb_queue_len_lockless	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 *	This variant can be used in lockless contexts.
 */
static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_)
{
	return READ_ONCE(list_->qlen);
}

/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object.  This allows to initialize the list
 *	aspects of an sk_buff_head without reinitializing things like
 *	the spinlock.  It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known to not be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers should need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
					     struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}
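
/* Illustrative sketch (not part of the original header): a private
 * producer/consumer queue. "rxq" and the surrounding context are
 * assumptions for the example.
 *
 *	struct sk_buff_head rxq;
 *	struct sk_buff *skb;
 *
 *	skb_queue_head_init(&rxq);
 *	skb_queue_tail(&rxq, skb);		// producer, takes rxq.lock
 *	while ((skb = skb_dequeue(&rxq)))	// consumer, NULL when empty
 *		kfree_skb(skb);
 */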

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	/* See skb_queue_empty_lockless() and skb_peek_tail()
	 * for the opposite READ_ONCE()
	 */
	WRITE_ONCE(newsk->next, next);
	WRITE_ONCE(newsk->prev, prev);
	WRITE_ONCE(next->prev, newsk);
	WRITE_ONCE(prev->next, newsk);
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	WRITE_ONCE(first->prev, prev);
	WRITE_ONCE(prev->next, first);

	WRITE_ONCE(last->next, next);
	WRITE_ONCE(next->prev, last);
}

/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	__skb_queue_after - queue a buffer at the list head
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	WRITE_ONCE(list->qlen, list->qlen - 1);
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	WRITE_ONCE(next->prev, prev);
	WRITE_ONCE(prev->next, next);
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
struct sk_buff *skb_dequeue(struct sk_buff_head *list);

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);

static inline bool skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline unsigned int __skb_pagelen(const struct sk_buff *skb)
{
	unsigned int i, len = 0;

	for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
	return len;
}

static inline unsigned int skb_pagelen(const struct sk_buff *skb)
{
	return skb_headlen(skb) + __skb_pagelen(skb);
}
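
/* Informational note (not from the original header): by definition
 * skb->len == skb_headlen(skb) + skb->data_len, where data_len covers
 * the paged frags and any frag_list; a linear skb has data_len == 0,
 * so skb_is_nonlinear() simply tests data_len.
 */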

/**
 * __skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data with @page
 * @size: the length of the data
 *
 * Initialises the @i'th fragment of @skb to point to &size bytes at
 * offset @off within @page.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	/*
	 * Propagate page pfmemalloc to the skb if we can. The problem is
	 * that not all callers have unique ownership of the page but rely
	 * on page_is_pfmemalloc working only on the head page.
	 */
	frag->bv_page	= page;
	frag->bv_offset	= off;
	skb_frag_size_set(frag, size);

	page = compound_head(page);
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc	= true;
}

/**
 * skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data with @page
 * @size: the length of the data
 *
 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
 * @skb to point to @size bytes at offset @off within @page. In
 * addition updates @skb such that @i is the last fragment.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	__skb_fill_page_desc(skb, i, page, off, size);
	skb_shinfo(skb)->nr_frags = i + 1;
}

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize);

#define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
void *skb_put(struct sk_buff *skb, unsigned int len);
static inline void *__skb_put(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = __skb_put(skb, len);

	memset(tmp, 0, len);
	return tmp;
}

static inline void *__skb_put_data(struct sk_buff *skb, const void *data,
				   unsigned int len)
{
	void *tmp = __skb_put(skb, len);

	memcpy(tmp, data, len);
	return tmp;
}

static inline void __skb_put_u8(struct sk_buff *skb, u8 val)
{
	*(u8 *)__skb_put(skb, 1) = val;
}

static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memset(tmp, 0, len);

	return tmp;
}

static inline void *skb_put_data(struct sk_buff *skb, const void *data,
				 unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memcpy(tmp, data, len);

	return tmp;
}

static inline void skb_put_u8(struct sk_buff *skb, u8 val)
{
	*(u8 *)skb_put(skb, 1) = val;
}

void *skb_push(struct sk_buff *skb, unsigned int len);
static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

void *skb_pull(struct sk_buff *skb, unsigned int len);
static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

void *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return true;
	if (unlikely(len > skb->len))
		return false;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
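
/* Illustrative sketch (not part of the original header): making sure a
 * header is in the linear area before dereferencing it. Using IPv4 and
 * assuming the network header starts at skb->data is just an example.
 *
 *	struct iphdr *iph;
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;			// header not available
 *	iph = (struct iphdr *)skb->data;	// linear bytes now valid
 */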

void skb_condense(struct sk_buff *skb);

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_availroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 *	allocated by sk_stream_alloc()
 */
static inline int skb_availroom(const struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb))
		return 0;

	return skb->end - skb->tail - skb->reserved_tailroom;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

/**
 *	skb_tailroom_reserve - adjust reserved_tailroom
 *	@skb: buffer to alter
 *	@mtu: maximum amount of headlen permitted
 *	@needed_tailroom: minimum amount of reserved_tailroom
 *
 *	Set reserved_tailroom so that headlen can be as large as possible but
 *	not larger than mtu and tailroom cannot be smaller than
 *	needed_tailroom.
 *	The required headroom should already have been reserved before using
 *	this function.
 */
static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
					unsigned int needed_tailroom)
{
	SKB_LINEAR_ASSERT(skb);
	if (mtu < skb_tailroom(skb) - needed_tailroom)
		/* use at most mtu */
		skb->reserved_tailroom = skb_tailroom(skb) - mtu;
	else
		/* use up to all available space */
		skb->reserved_tailroom = needed_tailroom;
}

#define ENCAP_TYPE_ETHER	0
#define ENCAP_TYPE_IPPROTO	1

static inline void skb_set_inner_protocol(struct sk_buff *skb,
					  __be16 protocol)
{
	skb->inner_protocol = protocol;
	skb->inner_protocol_type = ENCAP_TYPE_ETHER;
}

static inline void skb_set_inner_ipproto(struct sk_buff *skb,
					 __u8 ipproto)
{
	skb->inner_ipproto = ipproto;
	skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
}

static inline void skb_reset_inner_headers(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->mac_header;
	skb->inner_network_header = skb->network_header;
	skb->inner_transport_header = skb->transport_header;
}

static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}

static inline unsigned char *skb_inner_transport_header(const struct sk_buff
							*skb)
{
	return skb->head + skb->inner_transport_header;
}

static inline int skb_inner_transport_offset(const struct sk_buff *skb)
{
	return skb_inner_transport_header(skb) - skb->data;
}

static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
	skb->inner_transport_header = skb->data - skb->head;
}

static inline void skb_set_inner_transport_header(struct sk_buff *skb,
						  const int offset)
{
	skb_reset_inner_transport_header(skb);
	skb->inner_transport_header += offset;
}

static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_network_header;
}

static inline void skb_reset_inner_network_header(struct sk_buff *skb)
{
	skb->inner_network_header = skb->data - skb->head;
}

static inline void skb_set_inner_network_header(struct sk_buff *skb,
						const int offset)
{
	skb_reset_inner_network_header(skb);
	skb->inner_network_header += offset;
}

static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_mac_header;
}

static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->data - skb->head;
}

static inline void skb_set_inner_mac_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_inner_mac_header(skb);
	skb->inner_mac_header += offset;
}

static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
{
	return skb->transport_header != (typeof(skb->transport_header))~0U;
}

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_offset(const struct sk_buff *skb)
{
	return skb_mac_header(skb) - skb->data;
}

static inline u32 skb_mac_header_len(const struct sk_buff *skb)
{
	return skb->network_header - skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != (typeof(skb->mac_header))~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

static inline void skb_pop_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->network_header;
}

static inline void skb_probe_transport_header(struct sk_buff *skb)
{
	struct flow_keys_basic keys;

	if (skb_transport_header_was_set(skb))
		return;

	if (skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
					     NULL, 0, 0, 0, 0))
		skb_set_transport_header(skb, keys.control.thoff);
}

static inline void skb_mac_header_rebuild(struct sk_buff *skb)
{
	if (skb_mac_header_was_set(skb)) {
		const unsigned char *old_mac = skb_mac_header(skb);

		skb_set_mac_header(skb, -skb->mac_len);
		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
	}
}
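
/* Illustrative sketch (not part of the original header): typical RX
 * header bookkeeping once the link layer header has been parsed.
 * ETH_HLEN (from <linux/if_ether.h>) is assumed for the example.
 *
 *	skb_reset_mac_header(skb);	// MAC header starts at skb->data
 *	skb_pull(skb, ETH_HLEN);	// advance past the Ethernet header
 *	skb_reset_network_header(skb);	// network header now at skb->data
 */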

static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}

static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
{
	return skb->head + skb->csum_start;
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
{
	return skb->inner_transport_header - skb->inner_network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline int skb_inner_network_offset(const struct sk_buff *skb)
{
	return skb_inner_network_header(skb) - skb->data;
}

static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
	return pskb_may_pull(skb, skb_network_offset(skb) + len);
}

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix
 * it in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up
 * with the IP header at an unaligned offset. The IP header can be
 * aligned by shifting the start of the packet by 2 bytes. Drivers
 * should do this with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is
 * now unaligned. On some architectures the cost of an unaligned DMA is
 * high and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow
 * NET_IP_ALIGN to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 32 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom, you should not reduce this.
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif

int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
{
	if (WARN_ON(skb_is_nonlinear(skb)))
		return;
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	__skb_set_length(skb, len);
}

void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
{
	unsigned int diff = len - skb->len;

	if (skb_tailroom(skb) < diff) {
		int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
					   GFP_ATOMIC);
		if (ret)
			return ret;
	}
	__skb_set_length(skb, len);
	return 0;
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor) {
		skb->destructor(skb);
		skb->destructor = NULL;
		skb->sk		= NULL;
	} else {
		BUG_ON(skb->sk);
	}
}

/**
 *	skb_orphan_frags - orphan the frags contained in a buffer
 *	@skb: buffer to orphan frags from
 *	@gfp_mask: allocation mask for replacement pages
 *
 *	For each frag in the SKB which needs a destructor (i.e. has an
 *	owner) create a copy of that frag and release the original page by
 *	calling the destructor.
 */
static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (likely(!skb_zcopy(skb)))
		return 0;
	if (!skb_zcopy_is_nouarg(skb) &&
	    skb_uarg(skb)->callback == sock_zerocopy_callback)
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

/* Frags must be orphaned, even if refcounted, if skb might loop to rx path */
static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (likely(!skb_zcopy(skb)))
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
void skb_queue_purge(struct sk_buff_head *list);

unsigned int skb_rbtree_purge(struct rb_root *root);

void *netdev_alloc_frag(unsigned int fragsz);

struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
				   gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
					       unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

/* legacy helper around __netdev_alloc_skb() */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	return __netdev_alloc_skb(NULL, length, gfp_mask);
}

/* legacy helper around netdev_alloc_skb() */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return netdev_alloc_skb(NULL, length);
}

static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
							   unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
							unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}

static inline void skb_free_frag(void *addr)
{
	page_frag_free(addr);
}

void *napi_alloc_frag(unsigned int fragsz);
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
				 unsigned int length, gfp_t gfp_mask);
static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
					     unsigned int length)
{
	return __napi_alloc_skb(napi, length, GFP_ATOMIC);
}
void napi_consume_skb(struct sk_buff *skb, int budget);

void __kfree_skb_flush(void);
void __kfree_skb_defer(struct sk_buff *skb);

/**
 * __dev_alloc_pages - allocate page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 * @order: size of the allocation
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
					     unsigned int order)
{
	/* This piece of code contains several assumptions.
	 * 1.  This is for device Rx, therefor a cold page is preferred.
	 * 2.  The expectation is the user wants a compound page.
	 * 3.  If requesting a order 0 page it will not be compound
	 *     due to the check to see if order has a value in prep_new_page
	 * 4.  __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
	 *     code in gfp_to_alloc_flags that should be enforcing this.
	 */
	gfp_mask |= __GFP_COMP | __GFP_MEMALLOC;

	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
}

static inline struct page *dev_alloc_pages(unsigned int order)
{
	return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
}

/**
 * __dev_alloc_page - allocate a page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
{
	return __dev_alloc_pages(gfp_mask, 0);
}

static inline struct page *dev_alloc_page(void)
{
	return dev_alloc_pages(0);
}
2930
2931
2932
2933
2934
2935
2936static inline void skb_propagate_pfmemalloc(struct page *page,
2937 struct sk_buff *skb)
2938{
2939 if (page_is_pfmemalloc(page))
2940 skb->pfmemalloc = true;
2941}
2942
2943
2944
2945
2946
2947static inline unsigned int skb_frag_off(const skb_frag_t *frag)
2948{
2949 return frag->bv_offset;
2950}
2951
2952
2953
2954
2955
2956
2957static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
2958{
2959 frag->bv_offset += delta;
2960}
2961
2962
2963
2964
2965
2966
2967static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset)
2968{
2969 frag->bv_offset = offset;
2970}
2971
2972
2973
2974
2975
2976
2977static inline void skb_frag_off_copy(skb_frag_t *fragto,
2978 const skb_frag_t *fragfrom)
2979{
2980 fragto->bv_offset = fragfrom->bv_offset;
2981}
2982
2983
2984
2985
2986
2987
2988
2989static inline struct page *skb_frag_page(const skb_frag_t *frag)
2990{
2991 return frag->bv_page;
2992}
2993
2994
2995
2996
2997
2998
2999
3000static inline void __skb_frag_ref(skb_frag_t *frag)
3001{
3002 get_page(skb_frag_page(frag));
3003}
3004
3005
3006
3007
3008
3009
3010
3011
3012static inline void skb_frag_ref(struct sk_buff *skb, int f)
3013{
3014 __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
3015}
3016
3017
3018
3019
3020
3021
3022
3023static inline void __skb_frag_unref(skb_frag_t *frag)
3024{
3025 put_page(skb_frag_page(frag));
3026}
3027
3028
3029
3030
3031
3032
3033
3034
3035static inline void skb_frag_unref(struct sk_buff *skb, int f)
3036{
3037 __skb_frag_unref(&skb_shinfo(skb)->frags[f]);
3038}
3039
/**
 * skb_frag_address - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. The page must already
 * be mapped.
 */
3047static inline void *skb_frag_address(const skb_frag_t *frag)
3048{
3049 return page_address(skb_frag_page(frag)) + skb_frag_off(frag);
3050}
3051
/**
 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. Checks that the page
 * is mapped and returns %NULL otherwise.
 */
3059static inline void *skb_frag_address_safe(const skb_frag_t *frag)
3060{
3061 void *ptr = page_address(skb_frag_page(frag));
3062 if (unlikely(!ptr))
3063 return NULL;
3064
3065 return ptr + skb_frag_off(frag);
3066}
3067
/**
 * skb_frag_page_copy() - sets the page in a fragment from another fragment
 * @fragto: skb fragment where page is set
 * @fragfrom: skb fragment page is copied from
 */
3073static inline void skb_frag_page_copy(skb_frag_t *fragto,
3074 const skb_frag_t *fragfrom)
3075{
3076 fragto->bv_page = fragfrom->bv_page;
3077}
3078
/**
 * __skb_frag_set_page - sets the page contained in a paged fragment
 * @frag: the paged fragment
 * @page: the page to set
 *
 * Sets the fragment @frag to contain @page.
 */
3086static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
3087{
3088 frag->bv_page = page;
3089}
3090
/**
 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
 * @skb: the buffer
 * @f: the fragment offset
 * @page: the page to set
 *
 * Sets the @f'th fragment of @skb to contain @page.
 */
3099static inline void skb_frag_set_page(struct sk_buff *skb, int f,
3100 struct page *page)
3101{
3102 __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
3103}
3104
3105bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
3106
/**
 * skb_frag_dma_map - maps a paged fragment via the DMA API
 * @dev: the device to map the fragment to
 * @frag: the paged fragment to map
 * @offset: the offset within the fragment (starting at the
 *          fragment's own offset)
 * @size: the number of bytes to map
 * @dir: the direction of the mapping (``PCI_DMA_*``)
 *
 * Maps the page associated with @frag to @dev.
 */
3118static inline dma_addr_t skb_frag_dma_map(struct device *dev,
3119 const skb_frag_t *frag,
3120 size_t offset, size_t size,
3121 enum dma_data_direction dir)
3122{
3123 return dma_map_page(dev, skb_frag_page(frag),
3124 skb_frag_off(frag) + offset, size, dir);
3125}
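
/* Example (sketch): mapping every paged fragment of an skb for transmit DMA
 * in a hypothetical driver; mydrv_fill_tx_desc() is an assumption and error
 * unwinding is omitted for brevity.
 *
 *	const skb_frag_t *frag;
 *	dma_addr_t mapping;
 *	int i;
 *
 *	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 *		frag = &skb_shinfo(skb)->frags[i];
 *		mapping = skb_frag_dma_map(dev, frag, 0,
 *					   skb_frag_size(frag), DMA_TO_DEVICE);
 *		if (dma_mapping_error(dev, mapping))
 *			goto unwind;
 *		mydrv_fill_tx_desc(ring, mapping, skb_frag_size(frag));
 *	}
 */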
3126
3127static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
3128 gfp_t gfp_mask)
3129{
3130 return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
3131}
3132
3133
3134static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
3135 gfp_t gfp_mask)
3136{
3137 return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
3138}
3139
/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not require the data to be copied.
 */
3149static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
3150{
3151 return !skb_header_cloned(skb) &&
3152 skb_headroom(skb) + len <= skb->hdr_len;
3153}
3154
3155static inline int skb_try_make_writable(struct sk_buff *skb,
3156 unsigned int write_len)
3157{
3158 return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
3159 pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3160}
3161
3162static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
3163 int cloned)
3164{
3165 int delta = 0;
3166
3167 if (headroom > skb_headroom(skb))
3168 delta = headroom - skb_headroom(skb);
3169
3170 if (delta || cloned)
3171 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
3172 GFP_ATOMIC);
3173 return 0;
3174}
3175
/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and original skb is not changed.
 *
 *	The result is skb with writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
3188static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
3189{
3190 return __skb_cow(skb, headroom, skb_cloned(skb));
3191}
3192
/**
 *	skb_cow_head - skb_cow but only making the head writable
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	This function is identical to skb_cow except that we replace the
 *	skb_cloned check by skb_header_cloned.  It should be used when
 *	you only need to push on some header and do not need to modify
 *	the data.
 */
3203static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
3204{
3205 return __skb_cow(skb, headroom, skb_header_cloned(skb));
3206}
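
/* Example (sketch): making room for, then pushing, a VLAN-sized header on
 * the transmit path, in the style of skb_vlan_push(). Values are
 * illustrative.
 *
 *	if (skb_cow_head(skb, VLAN_HLEN))
 *		return -ENOMEM;
 *	__skb_push(skb, VLAN_HLEN);
 *	memmove(skb->data, skb->data + VLAN_HLEN, 2 * ETH_ALEN);
 */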
3207
/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
3218static inline int skb_padto(struct sk_buff *skb, unsigned int len)
3219{
3220 unsigned int size = skb->len;
3221 if (likely(size >= len))
3222 return 0;
3223 return skb_pad(skb, len - size);
3224}
3225
/**
 *	__skb_put_padto - increase size and pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *	@free_on_error: free buffer on error
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error if @free_on_error is true.
 */
3237static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
3238 bool free_on_error)
3239{
3240 unsigned int size = skb->len;
3241
3242 if (unlikely(size < len)) {
3243 len -= size;
3244 if (__skb_pad(skb, len, free_on_error))
3245 return -ENOMEM;
3246 __skb_put(skb, len);
3247 }
3248 return 0;
3249}
3250
/**
 *	skb_put_padto - increase size and pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
3261static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
3262{
3263 return __skb_put_padto(skb, len, true);
3264}
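
/* Example (sketch): ensuring a minimal Ethernet frame length before handing
 * the buffer to hardware; on error the skb has already been freed, so the
 * usual driver pattern is to report the packet as consumed.
 *
 *	if (skb_put_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;
 */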
3265
3266static inline int skb_add_data(struct sk_buff *skb,
3267 struct iov_iter *from, int copy)
3268{
3269 const int off = skb->len;
3270
3271 if (skb->ip_summed == CHECKSUM_NONE) {
3272 __wsum csum = 0;
3273 if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
3274 &csum, from)) {
3275 skb->csum = csum_block_add(skb->csum, csum, off);
3276 return 0;
3277 }
3278 } else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
3279 return 0;
3280
3281 __skb_trim(skb, off);
3282 return -EFAULT;
3283}
3284
3285static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
3286 const struct page *page, int off)
3287{
3288 if (skb_zcopy(skb))
3289 return false;
3290 if (i) {
3291 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
3292
3293 return page == skb_frag_page(frag) &&
3294 off == skb_frag_off(frag) + skb_frag_size(frag);
3295 }
3296 return false;
3297}
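
/* Example (sketch): the classic sendpage-style append, coalescing with the
 * last fragment when possible instead of consuming a new frag slot. "copy"
 * is the number of bytes being appended; accounting of skb->len and
 * skb->data_len is omitted.
 *
 *	int i = skb_shinfo(skb)->nr_frags;
 *
 *	if (skb_can_coalesce(skb, i, page, offset)) {
 *		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
 *	} else if (i < MAX_SKB_FRAGS) {
 *		get_page(page);
 *		skb_fill_page_desc(skb, i, page, offset, copy);
 *	} else {
 *		return -EMSGSIZE;
 *	}
 */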
3298
3299static inline int __skb_linearize(struct sk_buff *skb)
3300{
3301 return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
3302}
3303
/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
3311static inline int skb_linearize(struct sk_buff *skb)
3312{
3313 return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
3314}
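
/* Example (sketch): flattening a buffer before code that must inspect the
 * whole packet through skb->data; mydrv_inspect() is a hypothetical
 * consumer that cannot handle paged data.
 *
 *	if (skb_linearize(skb))
 *		goto drop;
 *	mydrv_inspect(skb->data, skb->len);
 */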
3315
/**
 * skb_has_shared_frag - can any frag be overwritten
 * @skb: buffer to test
 *
 * Return true if the skb has at least one frag that might be modified
 * by an external entity (as in vmsplice()/sendfile())
 */
3323static inline bool skb_has_shared_frag(const struct sk_buff *skb)
3324{
3325 return skb_is_nonlinear(skb) &&
3326 skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
3327}
3328
/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
3336static inline int skb_linearize_cow(struct sk_buff *skb)
3337{
3338 return skb_is_nonlinear(skb) || skb_cloned(skb) ?
3339 __skb_linearize(skb) : 0;
3340}
3341
3342static __always_inline void
3343__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
3344 unsigned int off)
3345{
3346 if (skb->ip_summed == CHECKSUM_COMPLETE)
3347 skb->csum = csum_block_sub(skb->csum,
3348 csum_partial(start, len, 0), off);
3349 else if (skb->ip_summed == CHECKSUM_PARTIAL &&
3350 skb_checksum_start_offset(skb) < 0)
3351 skb->ip_summed = CHECKSUM_NONE;
3352}
3353
/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */
3364static inline void skb_postpull_rcsum(struct sk_buff *skb,
3365 const void *start, unsigned int len)
3366{
3367 __skb_postpull_rcsum(skb, start, len, 0);
3368}
3369
3370static __always_inline void
3371__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
3372 unsigned int off)
3373{
3374 if (skb->ip_summed == CHECKSUM_COMPLETE)
3375 skb->csum = csum_block_add(skb->csum,
3376 csum_partial(start, len, 0), off);
3377}
3378
/**
 *	skb_postpush_rcsum - update checksum for received skb after push
 *	@skb: buffer to update
 *	@start: start of data after push
 *	@len: length of data pushed
 *
 *	After doing a push on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum.
 */
3388static inline void skb_postpush_rcsum(struct sk_buff *skb,
3389 const void *start, unsigned int len)
3390{
3391 __skb_postpush_rcsum(skb, start, len, 0);
3392}
3393
3394void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
3395
/**
 *	skb_push_rcsum - push skb and update receive checksum
 *	@skb: buffer to update
 *	@len: length of data pulled
 *
 *	This function performs an skb_push on the packet and updates
 *	the CHECKSUM_COMPLETE checksum.  It should be used on
 *	receive path processing instead of skb_push unless you know
 *	that the checksum difference is zero (e.g., a valid IP header)
 *	or you are setting ip_summed to CHECKSUM_NONE.
 */
3407static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
3408{
3409 skb_push(skb, len);
3410 skb_postpush_rcsum(skb, skb->data, len);
3411 return skb->data;
3412}
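
/* Example (sketch): temporarily pulling an outer header on the receive path
 * while keeping a CHECKSUM_COMPLETE value consistent, as a decapsulation
 * handler might do. HDR_LEN stands in for the real header size.
 *
 *	if (!pskb_may_pull(skb, HDR_LEN))
 *		goto drop;
 *	skb_pull_rcsum(skb, HDR_LEN);	// pull + checksum update
 *	...
 *	skb_push_rcsum(skb, HDR_LEN);	// undo on the error path
 */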
3413
3414int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
3415
/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets are still valid after the operation.
 *	It can change skb pointers.
 */
3425static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
3426{
3427 if (likely(len >= skb->len))
3428 return 0;
3429 return pskb_trim_rcsum_slow(skb, len);
3430}
3431
3432static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
3433{
3434 if (skb->ip_summed == CHECKSUM_COMPLETE)
3435 skb->ip_summed = CHECKSUM_NONE;
3436 __skb_trim(skb, len);
3437 return 0;
3438}
3439
3440static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
3441{
3442 if (skb->ip_summed == CHECKSUM_COMPLETE)
3443 skb->ip_summed = CHECKSUM_NONE;
3444 return __skb_grow(skb, len);
3445}
3446
3447#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
3448#define skb_rb_first(root) rb_to_skb(rb_first(root))
3449#define skb_rb_last(root) rb_to_skb(rb_last(root))
3450#define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode))
3451#define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode))
3452
3453#define skb_queue_walk(queue, skb) \
3454 for (skb = (queue)->next; \
3455 skb != (struct sk_buff *)(queue); \
3456 skb = skb->next)
3457
3458#define skb_queue_walk_safe(queue, skb, tmp) \
3459 for (skb = (queue)->next, tmp = skb->next; \
3460 skb != (struct sk_buff *)(queue); \
3461 skb = tmp, tmp = skb->next)
3462
3463#define skb_queue_walk_from(queue, skb) \
3464 for (; skb != (struct sk_buff *)(queue); \
3465 skb = skb->next)
3466
3467#define skb_rbtree_walk(skb, root) \
3468 for (skb = skb_rb_first(root); skb != NULL; \
3469 skb = skb_rb_next(skb))
3470
3471#define skb_rbtree_walk_from(skb) \
3472 for (; skb != NULL; \
3473 skb = skb_rb_next(skb))
3474
3475#define skb_rbtree_walk_from_safe(skb, tmp) \
3476 for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \
3477 skb = tmp)
3478
3479#define skb_queue_walk_from_safe(queue, skb, tmp) \
3480 for (tmp = skb->next; \
3481 skb != (struct sk_buff *)(queue); \
3482 skb = tmp, tmp = skb->next)
3483
3484#define skb_queue_reverse_walk(queue, skb) \
3485 for (skb = (queue)->prev; \
3486 skb != (struct sk_buff *)(queue); \
3487 skb = skb->prev)
3488
3489#define skb_queue_reverse_walk_safe(queue, skb, tmp) \
3490 for (skb = (queue)->prev, tmp = skb->prev; \
3491 skb != (struct sk_buff *)(queue); \
3492 skb = tmp, tmp = skb->prev)
3493
3494#define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \
3495 for (tmp = skb->prev; \
3496 skb != (struct sk_buff *)(queue); \
3497 skb = tmp, tmp = skb->prev)
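
/* Example (sketch): draining entries from a struct sk_buff_head; the _safe
 * walker is required because each skb is unlinked inside the loop.
 * mydrv_skb_expired() is a hypothetical predicate.
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_queue_walk_safe(queue, skb, tmp) {
 *		if (mydrv_skb_expired(skb)) {
 *			__skb_unlink(skb, queue);
 *			kfree_skb(skb);
 *		}
 *	}
 */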
3498
3499static inline bool skb_has_frag_list(const struct sk_buff *skb)
3500{
3501 return skb_shinfo(skb)->frag_list != NULL;
3502}
3503
3504static inline void skb_frag_list_init(struct sk_buff *skb)
3505{
3506 skb_shinfo(skb)->frag_list = NULL;
3507}
3508
3509#define skb_walk_frags(skb, iter) \
3510 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
3511
3512
3513int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
3514 int *err, long *timeo_p,
3515 const struct sk_buff *skb);
3516struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
3517 struct sk_buff_head *queue,
3518 unsigned int flags,
3519 int *off, int *err,
3520 struct sk_buff **last);
3521struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
3522 struct sk_buff_head *queue,
3523 unsigned int flags, int *off, int *err,
3524 struct sk_buff **last);
3525struct sk_buff *__skb_recv_datagram(struct sock *sk,
3526 struct sk_buff_head *sk_queue,
3527 unsigned int flags, int *off, int *err);
3528struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
3529 int *err);
3530__poll_t datagram_poll(struct file *file, struct socket *sock,
3531 struct poll_table_struct *wait);
3532int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
3533 struct iov_iter *to, int size);
3534static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
3535 struct msghdr *msg, int size)
3536{
3537 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
3538}
3539int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
3540 struct msghdr *msg);
3541int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
3542 struct iov_iter *to, int len,
3543 struct ahash_request *hash);
3544int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
3545 struct iov_iter *from, int len);
3546int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
3547void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
3548void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
3549static inline void skb_free_datagram_locked(struct sock *sk,
3550 struct sk_buff *skb)
3551{
3552 __skb_free_datagram_locked(sk, skb, 0);
3553}
3554int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
3555int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
3556int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
3557__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
3558 int len, __wsum csum);
3559int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
3560 struct pipe_inode_info *pipe, unsigned int len,
3561 unsigned int flags);
3562int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
3563 int len);
3564void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
3565unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
3566int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
3567 int len, int hlen);
3568void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
3569int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
3570void skb_scrub_packet(struct sk_buff *skb, bool xnet);
3571bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
3572bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
3573struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
3574struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features,
3575 unsigned int offset);
3576struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
3577int skb_ensure_writable(struct sk_buff *skb, int write_len);
3578int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
3579int skb_vlan_pop(struct sk_buff *skb);
3580int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
3581int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
3582 int mac_len, bool ethernet);
3583int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
3584 bool ethernet);
3585int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
3586int skb_mpls_dec_ttl(struct sk_buff *skb);
3587struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
3588 gfp_t gfp);
3589
3590static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
3591{
3592 return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT;
3593}
3594
3595static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
3596{
3597 return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
3598}
3599
3600struct skb_checksum_ops {
3601 __wsum (*update)(const void *mem, int len, __wsum wsum);
3602 __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
3603};
3604
3605extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly;
3606
3607__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
3608 __wsum csum, const struct skb_checksum_ops *ops);
3609__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
3610 __wsum csum);
3611
3612static inline void * __must_check
3613__skb_header_pointer(const struct sk_buff *skb, int offset,
3614 int len, void *data, int hlen, void *buffer)
3615{
3616 if (hlen - offset >= len)
3617 return data + offset;
3618
3619 if (!skb ||
3620 skb_copy_bits(skb, offset, buffer, len) < 0)
3621 return NULL;
3622
3623 return buffer;
3624}
3625
3626static inline void * __must_check
3627skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
3628{
3629 return __skb_header_pointer(skb, offset, len, skb->data,
3630 skb_headlen(skb), buffer);
3631}
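
/* Example (sketch): safely reading a transport header that may span paged
 * data, falling back to a stack copy when the header is not in the linear
 * area. "thoff" is an assumed transport-header offset.
 *
 *	struct tcphdr _tcph;
 *	const struct tcphdr *th;
 *
 *	th = skb_header_pointer(skb, thoff, sizeof(_tcph), &_tcph);
 *	if (!th)
 *		goto drop;	// truncated packet
 */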
3632
/**
 *	skb_needs_linearize - check if we need to linearize a given skb
 *			      depending on the given device features.
 *	@skb: socket buffer to check
 *	@features: net device features
 *
 *	Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG.
 */
3643static inline bool skb_needs_linearize(struct sk_buff *skb,
3644 netdev_features_t features)
3645{
3646 return skb_is_nonlinear(skb) &&
3647 ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
3648 (skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
3649}
3650
3651static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
3652 void *to,
3653 const unsigned int len)
3654{
3655 memcpy(to, skb->data, len);
3656}
3657
3658static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
3659 const int offset, void *to,
3660 const unsigned int len)
3661{
3662 memcpy(to, skb->data + offset, len);
3663}
3664
3665static inline void skb_copy_to_linear_data(struct sk_buff *skb,
3666 const void *from,
3667 const unsigned int len)
3668{
3669 memcpy(skb->data, from, len);
3670}
3671
3672static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
3673 const int offset,
3674 const void *from,
3675 const unsigned int len)
3676{
3677 memcpy(skb->data + offset, from, len);
3678}
3679
3680void skb_init(void);
3681
3682static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
3683{
3684 return skb->tstamp;
3685}
3686
/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct __kernel_old_timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and stores
 *	it in stamp.
 */
3696static inline void skb_get_timestamp(const struct sk_buff *skb,
3697 struct __kernel_old_timeval *stamp)
3698{
3699 *stamp = ns_to_kernel_old_timeval(skb->tstamp);
3700}
3701
3702static inline void skb_get_new_timestamp(const struct sk_buff *skb,
3703 struct __kernel_sock_timeval *stamp)
3704{
3705 struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
3706
3707 stamp->tv_sec = ts.tv_sec;
3708 stamp->tv_usec = ts.tv_nsec / 1000;
3709}
3710
3711static inline void skb_get_timestampns(const struct sk_buff *skb,
3712 struct __kernel_old_timespec *stamp)
3713{
3714 struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
3715
3716 stamp->tv_sec = ts.tv_sec;
3717 stamp->tv_nsec = ts.tv_nsec;
3718}
3719
3720static inline void skb_get_new_timestampns(const struct sk_buff *skb,
3721 struct __kernel_timespec *stamp)
3722{
3723 struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
3724
3725 stamp->tv_sec = ts.tv_sec;
3726 stamp->tv_nsec = ts.tv_nsec;
3727}
3728
3729static inline void __net_timestamp(struct sk_buff *skb)
3730{
3731 skb->tstamp = ktime_get_real();
3732}
3733
3734static inline ktime_t net_timedelta(ktime_t t)
3735{
3736 return ktime_sub(ktime_get_real(), t);
3737}
3738
3739static inline ktime_t net_invalid_timestamp(void)
3740{
3741 return 0;
3742}
3743
3744static inline u8 skb_metadata_len(const struct sk_buff *skb)
3745{
3746 return skb_shinfo(skb)->meta_len;
3747}
3748
3749static inline void *skb_metadata_end(const struct sk_buff *skb)
3750{
3751 return skb_mac_header(skb);
3752}
3753
3754static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
3755 const struct sk_buff *skb_b,
3756 u8 meta_len)
3757{
3758 const void *a = skb_metadata_end(skb_a);
3759 const void *b = skb_metadata_end(skb_b);
3760
3761#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
3762 u64 diffs = 0;
3763
3764 switch (meta_len) {
3765#define __it(x, op) (x -= sizeof(u##op))
3766#define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
 case 32: diffs |= __it_diff(a, b, 64);
	  /* fall through */
 case 24: diffs |= __it_diff(a, b, 64);
	  /* fall through */
 case 16: diffs |= __it_diff(a, b, 64);
	  /* fall through */
 case  8: diffs |= __it_diff(a, b, 64);
	break;
 case 28: diffs |= __it_diff(a, b, 64);
	  /* fall through */
 case 20: diffs |= __it_diff(a, b, 64);
	  /* fall through */
 case 12: diffs |= __it_diff(a, b, 64);
	  /* fall through */
 case  4: diffs |= __it_diff(a, b, 32);
	break;
3783 }
3784 return diffs;
3785#else
3786 return memcmp(a - meta_len, b - meta_len, meta_len);
3787#endif
3788}
3789
3790static inline bool skb_metadata_differs(const struct sk_buff *skb_a,
3791 const struct sk_buff *skb_b)
3792{
3793 u8 len_a = skb_metadata_len(skb_a);
3794 u8 len_b = skb_metadata_len(skb_b);
3795
3796 if (!(len_a | len_b))
3797 return false;
3798
3799 return len_a != len_b ?
3800 true : __skb_metadata_differs(skb_a, skb_b, len_a);
3801}
3802
3803static inline void skb_metadata_set(struct sk_buff *skb, u8 meta_len)
3804{
3805 skb_shinfo(skb)->meta_len = meta_len;
3806}
3807
3808static inline void skb_metadata_clear(struct sk_buff *skb)
3809{
3810 skb_metadata_set(skb, 0);
3811}
3812
3813struct sk_buff *skb_clone_sk(struct sk_buff *skb);
3814
3815#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
3816
3817void skb_clone_tx_timestamp(struct sk_buff *skb);
3818bool skb_defer_rx_timestamp(struct sk_buff *skb);
3819
3820#else
3821
3822static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
3823{
3824}
3825
3826static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
3827{
3828 return false;
3829}
3830
3831#endif
3832
/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb back to the stack with a
 * timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps
 *
 */
3845void skb_complete_tx_timestamp(struct sk_buff *skb,
3846 struct skb_shared_hwtstamps *hwtstamps);
3847
3848void __skb_tstamp_tx(struct sk_buff *orig_skb,
3849 struct skb_shared_hwtstamps *hwtstamps,
3850 struct sock *sk, int tstype);
3851
/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb:	the original outgoing packet
 * @hwtstamps:	hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket.  Errors are silently ignored.
 */
3863void skb_tstamp_tx(struct sk_buff *orig_skb,
3864 struct skb_shared_hwtstamps *hwtstamps);
3865
/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC Drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * Specifically, one should make absolutely sure that this function is
 * called before TX completion of this packet can trigger.  Otherwise
 * the packet could potentially already be freed.
 *
 * @skb: A socket buffer.
 */
3878static inline void skb_tx_timestamp(struct sk_buff *skb)
3879{
3880 skb_clone_tx_timestamp(skb);
3881 if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
3882 skb_tstamp_tx(skb, NULL);
3883}
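
/* Example (sketch): the canonical call site in a hypothetical driver's
 * ndo_start_xmit, after the descriptors are set up but before the doorbell
 * write that lets hardware (and thus TX completion) see the packet.
 * mydrv_ring_doorbell() is an assumption.
 *
 *	skb_tx_timestamp(skb);
 *	mydrv_ring_doorbell(ring);
 *	return NETDEV_TX_OK;
 */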
3884
/**
 * skb_complete_wifi_ack - deliver skb with wifi status
 *
 * @skb: the original outgoing packet
 * @acked: ack status
 *
 */
3892void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
3893
3894__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
3895__sum16 __skb_checksum_complete(struct sk_buff *skb);
3896
3897static inline int skb_csum_unnecessary(const struct sk_buff *skb)
3898{
3899 return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
3900 skb->csum_valid ||
3901 (skb->ip_summed == CHECKSUM_PARTIAL &&
3902 skb_checksum_start_offset(skb) >= 0));
3903}
3904
/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.
 *	It returns the checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify that checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is not complete.  In case of CHECKSUM_COMPLETE, this
 *	function will verify that the checksum is correct and
 *	return zero in that case.
 */
3921static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
3922{
3923 return skb_csum_unnecessary(skb) ?
3924 0 : __skb_checksum_complete(skb);
3925}
3926
3927static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
3928{
3929 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3930 if (skb->csum_level == 0)
3931 skb->ip_summed = CHECKSUM_NONE;
3932 else
3933 skb->csum_level--;
3934 }
3935}
3936
3937static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
3938{
3939 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3940 if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
3941 skb->csum_level++;
3942 } else if (skb->ip_summed == CHECKSUM_NONE) {
3943 skb->ip_summed = CHECKSUM_UNNECESSARY;
3944 skb->csum_level = 0;
3945 }
3946}
3947
/* Check if we need to perform checksum complete validation.
 *
 * Returns true if checksum complete is needed, false otherwise
 * (either checksum is unnecessary or zero checksum is allowed).
 */
3953static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
3954 bool zero_okay,
3955 __sum16 check)
3956{
3957 if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
3958 skb->csum_valid = 1;
3959 __skb_decr_checksum_unnecessary(skb);
3960 return false;
3961 }
3962
3963 return true;
3964}
3965
/* For small packets <= CHECKSUM_BREAK perform checksum complete directly
 * in checksum_init.
 */
3969#define CHECKSUM_BREAK 76
3970
/* Unset checksum-complete
 *
 * Unset checksum complete can be done when packet is being modified
 * (uncompressed for instance) and checksum-complete value is
 * invalidated.
 */
3977static inline void skb_checksum_complete_unset(struct sk_buff *skb)
3978{
3979 if (skb->ip_summed == CHECKSUM_COMPLETE)
3980 skb->ip_summed = CHECKSUM_NONE;
3981}
3982
/* Validate (init) checksum based on checksum complete.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete. In the latter
 *	case the ip_summed will not be CHECKSUM_UNNECESSARY and the pseudo
 *	checksum is stored in skb->csum for use in __skb_checksum_complete
 *   non-zero: value of invalid checksum
 *
 */
3992static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
3993 bool complete,
3994 __wsum psum)
3995{
3996 if (skb->ip_summed == CHECKSUM_COMPLETE) {
3997 if (!csum_fold(csum_add(psum, skb->csum))) {
3998 skb->csum_valid = 1;
3999 return 0;
4000 }
4001 }
4002
4003 skb->csum = psum;
4004
4005 if (complete || skb->len <= CHECKSUM_BREAK) {
4006 __sum16 csum;
4007
4008 csum = __skb_checksum_complete(skb);
4009 skb->csum_valid = !csum;
4010 return csum;
4011 }
4012
4013 return 0;
4014}
4015
4016static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
4017{
4018 return 0;
4019}
4020
/* Perform checksum validate (init). Note that this is a macro since we only
 * want to calculate the pseudo header which is an input function if necessary.
 * First we try to validate without any computation (checksum unnecessary) and
 * then calculate based on checksum complete calling the function to compute
 * pseudo header.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete
 *   non-zero: value of invalid checksum
 */
4031#define __skb_checksum_validate(skb, proto, complete, \
4032 zero_okay, check, compute_pseudo) \
4033({ \
4034 __sum16 __ret = 0; \
4035 skb->csum_valid = 0; \
4036 if (__skb_checksum_validate_needed(skb, zero_okay, check)) \
4037 __ret = __skb_checksum_validate_complete(skb, \
4038 complete, compute_pseudo(skb, proto)); \
4039 __ret; \
4040})
4041
4042#define skb_checksum_init(skb, proto, compute_pseudo) \
4043 __skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)
4044
4045#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \
4046 __skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)
4047
4048#define skb_checksum_validate(skb, proto, compute_pseudo) \
4049 __skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)
4050
4051#define skb_checksum_validate_zero_check(skb, proto, check, \
4052 compute_pseudo) \
4053 __skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)
4054
4055#define skb_checksum_simple_validate(skb) \
4056 __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
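
/* Example (sketch): a UDP-style receive path validating a checksum that
 * covers a pseudo header, in the spirit of udp4_csum_init(); the udphdr
 * lookup is for illustration.
 *
 *	struct udphdr *uh = udp_hdr(skb);
 *
 *	if (skb_checksum_init_zero_check(skb, IPPROTO_UDP, uh->check,
 *					 inet_compute_pseudo))
 *		goto csum_error;
 */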
4057
4058static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
4059{
4060 return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid);
4061}
4062
4063static inline void __skb_checksum_convert(struct sk_buff *skb, __wsum pseudo)
4064{
4065 skb->csum = ~pseudo;
4066 skb->ip_summed = CHECKSUM_COMPLETE;
4067}
4068
4069#define skb_checksum_try_convert(skb, proto, compute_pseudo) \
4070do { \
4071 if (__skb_checksum_convert_check(skb)) \
4072 __skb_checksum_convert(skb, compute_pseudo(skb, proto)); \
4073} while (0)
4074
4075static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
4076 u16 start, u16 offset)
4077{
4078 skb->ip_summed = CHECKSUM_PARTIAL;
4079 skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
4080 skb->csum_offset = offset - start;
4081}
4082
/* Update skb and packet to reflect the remote checksum offload operation.
 * When called, ptr indicates the starting point for skb->csum when
 * ip_summed is CHECKSUM_COMPLETE. If we need to create checksum complete
 * here, skb_postpull_rcsum is done so skb->csum start is ptr.
 */
4088static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
4089 int start, int offset, bool nopartial)
4090{
4091 __wsum delta;
4092
4093 if (!nopartial) {
4094 skb_remcsum_adjust_partial(skb, ptr, start, offset);
4095 return;
4096 }
4097
4098 if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
4099 __skb_checksum_complete(skb);
4100 skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
4101 }
4102
4103 delta = remcsum_adjust(ptr, skb->csum, start, offset);
4104
 /* Adjust skb->csum since we changed the packet */
4106 skb->csum = csum_add(skb->csum, delta);
4107}
4108
4109static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
4110{
4111#if IS_ENABLED(CONFIG_NF_CONNTRACK)
4112 return (void *)(skb->_nfct & NFCT_PTRMASK);
4113#else
4114 return NULL;
4115#endif
4116}
4117
4118static inline unsigned long skb_get_nfct(const struct sk_buff *skb)
4119{
4120#if IS_ENABLED(CONFIG_NF_CONNTRACK)
4121 return skb->_nfct;
4122#else
4123 return 0UL;
4124#endif
4125}
4126
4127static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct)
4128{
4129#if IS_ENABLED(CONFIG_NF_CONNTRACK)
4130 skb->_nfct = nfct;
4131#endif
4132}
4133
4134#ifdef CONFIG_SKB_EXTENSIONS
4135enum skb_ext_id {
4136#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
4137 SKB_EXT_BRIDGE_NF,
4138#endif
4139#ifdef CONFIG_XFRM
4140 SKB_EXT_SEC_PATH,
4141#endif
4142#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
4143 TC_SKB_EXT,
4144#endif
4145#if IS_ENABLED(CONFIG_MPTCP)
4146 SKB_EXT_MPTCP,
4147#endif
4148 SKB_EXT_NUM,
4149};
4150
/**
 *	struct skb_ext - sk_buff extensions
 *	@refcnt: 1 on allocation, deallocated on 0
 *	@offset: offset to add to @data to obtain extension address
 *	@chunks: size currently allocated, stored in SKB_EXT_ALIGN_SHIFT units
 *	@data: start of extension data, variable sized
 *
 *	Note: offsets/lengths are stored in chunks of 8 bytes, this allows
 *	to use 'u8' types while allowing up to 2kb worth of extension data.
 */
4161struct skb_ext {
4162 refcount_t refcnt;
4163 u8 offset[SKB_EXT_NUM];
4164 u8 chunks;
4165 char data[] __aligned(8);
4166};
4167
4168struct skb_ext *__skb_ext_alloc(void);
4169void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
4170 struct skb_ext *ext);
4171void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
4172void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
4173void __skb_ext_put(struct skb_ext *ext);
4174
4175static inline void skb_ext_put(struct sk_buff *skb)
4176{
4177 if (skb->active_extensions)
4178 __skb_ext_put(skb->extensions);
4179}
4180
4181static inline void __skb_ext_copy(struct sk_buff *dst,
4182 const struct sk_buff *src)
4183{
4184 dst->active_extensions = src->active_extensions;
4185
4186 if (src->active_extensions) {
4187 struct skb_ext *ext = src->extensions;
4188
4189 refcount_inc(&ext->refcnt);
4190 dst->extensions = ext;
4191 }
4192}
4193
4194static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src)
4195{
4196 skb_ext_put(dst);
4197 __skb_ext_copy(dst, src);
4198}
4199
4200static inline bool __skb_ext_exist(const struct skb_ext *ext, enum skb_ext_id i)
4201{
4202 return !!ext->offset[i];
4203}
4204
4205static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id)
4206{
4207 return skb->active_extensions & (1 << id);
4208}
4209
4210static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
4211{
4212 if (skb_ext_exist(skb, id))
4213 __skb_ext_del(skb, id);
4214}
4215
4216static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id)
4217{
4218 if (skb_ext_exist(skb, id)) {
4219 struct skb_ext *ext = skb->extensions;
4220
4221 return (void *)ext + (ext->offset[id] << 3);
4222 }
4223
4224 return NULL;
4225}
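
/* Example (sketch): attaching and later looking up a TC chain extension,
 * assuming CONFIG_NET_TC_SKB_EXT is enabled; "chain_index" is an assumed
 * local variable.
 *
 *	struct tc_skb_ext *ext;
 *
 *	ext = skb_ext_add(skb, TC_SKB_EXT);
 *	if (!ext)
 *		return -ENOMEM;
 *	ext->chain = chain_index;
 *	...
 *	ext = skb_ext_find(skb, TC_SKB_EXT);
 *	if (ext)
 *		chain_index = ext->chain;
 */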
4226
4227static inline void skb_ext_reset(struct sk_buff *skb)
4228{
4229 if (unlikely(skb->active_extensions)) {
4230 __skb_ext_put(skb->extensions);
4231 skb->active_extensions = 0;
4232 }
4233}
4234
4235static inline bool skb_has_extensions(struct sk_buff *skb)
4236{
4237 return unlikely(skb->active_extensions);
4238}
4239#else
4240static inline void skb_ext_put(struct sk_buff *skb) {}
4241static inline void skb_ext_reset(struct sk_buff *skb) {}
4242static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
4243static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
4244static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
4245static inline bool skb_has_extensions(struct sk_buff *skb) { return false; }
4246#endif
4247
4248static inline void nf_reset_ct(struct sk_buff *skb)
4249{
4250#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
4251 nf_conntrack_put(skb_nfct(skb));
4252 skb->_nfct = 0;
4253#endif
4254}
4255
4256static inline void nf_reset_trace(struct sk_buff *skb)
4257{
4258#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
4259 skb->nf_trace = 0;
4260#endif
4261}
4262
4263static inline void ipvs_reset(struct sk_buff *skb)
4264{
4265#if IS_ENABLED(CONFIG_IP_VS)
4266 skb->ipvs_property = 0;
4267#endif
4268}
4269
/* Note: This doesn't put any conntrack info in dst. */
4271static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
4272 bool copy)
4273{
4274#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
4275 dst->_nfct = src->_nfct;
4276 nf_conntrack_get(skb_nfct(src));
4277#endif
4278#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
4279 if (copy)
4280 dst->nf_trace = src->nf_trace;
4281#endif
4282}
4283
4284static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
4285{
4286#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
4287 nf_conntrack_put(skb_nfct(dst));
4288#endif
4289 __nf_copy(dst, src, true);
4290}
4291
4292#ifdef CONFIG_NETWORK_SECMARK
4293static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
4294{
4295 to->secmark = from->secmark;
4296}
4297
4298static inline void skb_init_secmark(struct sk_buff *skb)
4299{
4300 skb->secmark = 0;
4301}
4302#else
4303static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
4304{ }
4305
4306static inline void skb_init_secmark(struct sk_buff *skb)
4307{ }
4308#endif
4309
4310static inline int secpath_exists(const struct sk_buff *skb)
4311{
4312#ifdef CONFIG_XFRM
4313 return skb_ext_exist(skb, SKB_EXT_SEC_PATH);
4314#else
4315 return 0;
4316#endif
4317}
4318
4319static inline bool skb_irq_freeable(const struct sk_buff *skb)
4320{
4321 return !skb->destructor &&
4322 !secpath_exists(skb) &&
4323 !skb_nfct(skb) &&
4324 !skb->_skb_refdst &&
4325 !skb_has_frag_list(skb);
4326}
4327
4328static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
4329{
4330 skb->queue_mapping = queue_mapping;
4331}
4332
4333static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
4334{
4335 return skb->queue_mapping;
4336}
4337
4338static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
4339{
4340 to->queue_mapping = from->queue_mapping;
4341}
4342
4343static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
4344{
4345 skb->queue_mapping = rx_queue + 1;
4346}
4347
4348static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
4349{
4350 return skb->queue_mapping - 1;
4351}
4352
4353static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
4354{
4355 return skb->queue_mapping != 0;
4356}
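
/* Example (sketch): a multi-queue driver recording the Rx queue on ingress
 * so the stack can steer replies back to the same queue pair; "ring" is a
 * hypothetical per-queue structure.
 *
 *	skb_record_rx_queue(skb, ring->queue_index);
 *	napi_gro_receive(&ring->napi, skb);
 */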
4357
4358static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val)
4359{
4360 skb->dst_pending_confirm = val;
4361}
4362
4363static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
4364{
4365 return skb->dst_pending_confirm != 0;
4366}
4367
4368static inline struct sec_path *skb_sec_path(const struct sk_buff *skb)
4369{
4370#ifdef CONFIG_XFRM
4371 return skb_ext_find(skb, SKB_EXT_SEC_PATH);
4372#else
4373 return NULL;
4374#endif
4375}
4376
/* Keeps track of mac header offset relative to skb->head.
 * It is useful for TSO of Tunneling protocol. e.g. GRE.
 * For non-tunnel skb it points to skb_mac_header() and for
 * tunnel skb it points to outer mac header.
 * Keeps track of level of encapsulation of network headers.
 */
4383struct skb_gso_cb {
4384 union {
4385 int mac_offset;
4386 int data_offset;
4387 };
4388 int encap_level;
4389 __wsum csum;
4390 __u16 csum_start;
4391};
4392#define SKB_GSO_CB_OFFSET 32
4393#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET))
4394
4395static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
4396{
4397 return (skb_mac_header(inner_skb) - inner_skb->head) -
4398 SKB_GSO_CB(inner_skb)->mac_offset;
4399}
4400
4401static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
4402{
4403 int new_headroom, headroom;
4404 int ret;
4405
4406 headroom = skb_headroom(skb);
4407 ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
4408 if (ret)
4409 return ret;
4410
4411 new_headroom = skb_headroom(skb);
4412 SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
4413 return 0;
4414}
4415
4416static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
4417{
 /* Do not update partial checksums if remote checksum is enabled. */
4419 if (skb->remcsum_offload)
4420 return;
4421
4422 SKB_GSO_CB(skb)->csum = res;
4423 SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
4424}
4425
/* Compute the checksum for a gso segment. First compute the checksum value
 * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
 * then add in skb->csum (checksum from csum_start to end of packet).
 * skb->csum and csum_start are then updated to reflect the checksum of the
 * resultant packet starting from the transport header -- the resultant
 * checksum is in the res argument (i.e. the payload checksum is not added
 * in here).
 */
4434static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
4435{
4436 unsigned char *csum_start = skb_transport_header(skb);
4437 int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
4438 __wsum partial = SKB_GSO_CB(skb)->csum;
4439
4440 SKB_GSO_CB(skb)->csum = res;
4441 SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;
4442
4443 return csum_fold(csum_partial(csum_start, plen, partial));
4444}
4445
4446static inline bool skb_is_gso(const struct sk_buff *skb)
4447{
4448 return skb_shinfo(skb)->gso_size;
4449}
4450
/* Note: Should be called only if skb_is_gso(skb) is true */
4452static inline bool skb_is_gso_v6(const struct sk_buff *skb)
4453{
4454 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
4455}
4456
/* Note: Should be called only if skb_is_gso(skb) is true */
4458static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
4459{
4460 return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
4461}
4462
/* Note: Should be called only if skb_is_gso(skb) is true */
4464static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
4465{
4466 return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
4467}
4468
4469static inline void skb_gso_reset(struct sk_buff *skb)
4470{
4471 skb_shinfo(skb)->gso_size = 0;
4472 skb_shinfo(skb)->gso_segs = 0;
4473 skb_shinfo(skb)->gso_type = 0;
4474}
4475
4476static inline void skb_increase_gso_size(struct skb_shared_info *shinfo,
4477 u16 increment)
4478{
4479 if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
4480 return;
4481 shinfo->gso_size += increment;
4482}
4483
4484static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo,
4485 u16 decrement)
4486{
4487 if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
4488 return;
4489 shinfo->gso_size -= decrement;
4490}
4491
4492void __skb_warn_lro_forwarding(const struct sk_buff *skb);
4493
4494static inline bool skb_warn_if_lro(const struct sk_buff *skb)
4495{
 /* LRO sets gso_size but not gso_type, whereas if GSO is really
  * wanted then gso_type will be set. */
4498 const struct skb_shared_info *shinfo = skb_shinfo(skb);
4499
4500 if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
4501 unlikely(shinfo->gso_type == 0)) {
4502 __skb_warn_lro_forwarding(skb);
4503 return true;
4504 }
4505 return false;
4506}
4507
4508static inline void skb_forward_csum(struct sk_buff *skb)
4509{
 /* Unfortunately we don't support this one.  Any brave souls? */
4511 if (skb->ip_summed == CHECKSUM_COMPLETE)
4512 skb->ip_summed = CHECKSUM_NONE;
4513}
4514
/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper, to document places where we make this assertion.
 */
4523static inline void skb_checksum_none_assert(const struct sk_buff *skb)
4524{
4525#ifdef DEBUG
4526 BUG_ON(skb->ip_summed != CHECKSUM_NONE);
4527#endif
4528}
4529
4530bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
4531
4532int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
4533struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
4534 unsigned int transport_len,
4535 __sum16(*skb_chkf)(struct sk_buff *skb));
4536
/**
 * skb_head_is_locked - Determine if the skb->head is locked down
 * @skb: skb to check
 *
 * The head on skbs built around a head frag can be removed if they are
 * not cloned. This function returns true if the skb head is locked down
 * due to either being allocated via kmalloc, or by being a clone with
 * multiple references to the head.
 */
4546static inline bool skb_head_is_locked(const struct sk_buff *skb)
4547{
4548 return !skb->head_frag || skb_cloned(skb);
4549}
4550
/* Local Checksum Offload.
 * Compute outer checksum based on the assumption that the
 * inner checksum will be offloaded later.
 * See Documentation/networking/checksum-offloads.rst for
 * explanation of how this works.
 * Fill in outer checksum adjustment (e.g. with incremental
 * update) before calling this function.
 */
4560static inline __wsum lco_csum(struct sk_buff *skb)
4561{
4562 unsigned char *csum_start = skb_checksum_start(skb);
4563 unsigned char *l4_hdr = skb_transport_header(skb);
4564 __wsum partial;
4565
 /* Start with complement of inner checksum adjustment */
4567 partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
4568 skb->csum_offset));
4569
 /* Add in checksum of our headers (incl. outer checksum
  * adjustment filled in by caller) and return result.
  */
4573 return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
4574}
4575
4576static inline bool skb_is_redirected(const struct sk_buff *skb)
4577{
4578#ifdef CONFIG_NET_REDIRECT
4579 return skb->redirected;
4580#else
4581 return false;
4582#endif
4583}
4584
4585static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
4586{
4587#ifdef CONFIG_NET_REDIRECT
4588 skb->redirected = 1;
4589 skb->from_ingress = from_ingress;
4590 if (skb->from_ingress)
4591 skb->tstamp = 0;
4592#endif
4593}
4594
4595static inline void skb_reset_redirect(struct sk_buff *skb)
4596{
4597#ifdef CONFIG_NET_REDIRECT
4598 skb->redirected = 0;
4599#endif
4600}
4601
4602#endif
#endif	/* _LINUX_SKBUFF_H */