#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/rbtree.h>
#include <linux/socket.h>
#include <linux/refcount.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <net/flow_dissector.h>
#include <linux/splice.h>
#include <linux/in6.h>
#include <linux/if_packet.h>
#include <net/flow.h>
#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

/* Maximum value in skb->csum_level */
#define SKB_MAX_CSUM_LEVEL	3

#define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
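/*
 * Example (illustrative): a driver that wants the largest linear buffer
 * that still fits in one page sizes it with SKB_WITH_OVERHEAD(), which
 * subtracts the struct skb_shared_info that lives at the end of the
 * buffer, and accounts the memory charged to a socket with SKB_TRUESIZE():
 *
 *	unsigned int buf_len  = SKB_WITH_OVERHEAD(PAGE_SIZE);
 *	unsigned int truesize = SKB_TRUESIZE(buf_len);
 */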
240
241struct net_device;
242struct scatterlist;
243struct pipe_inode_info;
244struct iov_iter;
245struct napi_struct;
246
247#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
248struct nf_conntrack {
249 atomic_t use;
250};
251#endif
252
253#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
254struct nf_bridge_info {
255 refcount_t use;
256 enum {
257 BRNF_PROTO_UNCHANGED,
258 BRNF_PROTO_8021Q,
259 BRNF_PROTO_PPPOE
260 } orig_proto:8;
261 u8 pkt_otherhost:1;
262 u8 in_prerouting:1;
263 u8 bridged_dnat:1;
264 __u16 frag_max_size;
265 struct net_device *physindev;
266
267
268 struct net_device *physoutdev;
269 union {
270
271 __be32 ipv4_daddr;
272 struct in6_addr ipv6_daddr;
273
274
275
276
277
278 char neigh_header[8];
279 };
280};
281#endif
282
struct sk_buff_head {
	/* These two members must be first (they mirror struct sk_buff). */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};
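/*
 * Example (illustrative): an sk_buff_head is a doubly linked packet queue
 * with its own lock and length, e.g. a per-device or per-socket backlog.
 * A typical producer/consumer looks like this (rxq is a placeholder name):
 *
 *	struct sk_buff_head rxq;
 *	struct sk_buff *skb;
 *
 *	skb_queue_head_init(&rxq);
 *	skb_queue_tail(&rxq, skb);
 *	skb = skb_dequeue(&rxq);
 *	skb_queue_purge(&rxq);
 *
 * skb_dequeue() returns NULL once the queue is empty; skb_queue_purge()
 * frees anything still queued.
 */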
291
292struct sk_buff;
301#if (65536/PAGE_SIZE + 1) < 16
302#define MAX_SKB_FRAGS 16UL
303#else
304#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
305#endif
306extern int sysctl_max_skb_frags;
307
308
309
310
311#define GSO_BY_FRAGS 0xFFFF
312
typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct {
		struct page *p;
	} page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
	__u32 size;
#else
	__u16 page_offset;
	__u16 size;
#endif
};
327
328static inline unsigned int skb_frag_size(const skb_frag_t *frag)
329{
330 return frag->size;
331}
332
333static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
334{
335 frag->size = size;
336}
337
338static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
339{
340 frag->size += delta;
341}
342
343static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
344{
345 frag->size -= delta;
346}
347
348static inline bool skb_frag_must_loop(struct page *p)
349{
350#if defined(CONFIG_HIGHMEM)
351 if (PageHighMem(p))
352 return true;
353#endif
354 return false;
355}
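/*
 * Example (illustrative): walking the paged part of a non-linear skb and
 * summing the fragment sizes (skb_shinfo() is defined further down in this
 * header; the sum it computes is what skb->data_len already tracks):
 *
 *	struct skb_shared_info *shinfo = skb_shinfo(skb);
 *	unsigned int i, paged = 0;
 *
 *	for (i = 0; i < shinfo->nr_frags; i++)
 *		paged += skb_frag_size(&shinfo->frags[i]);
 */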
#define skb_frag_foreach_page(f, f_off, f_len, p, p_off, p_len, copied)	\
	for (p = skb_frag_page(f) + ((f_off) >> PAGE_SHIFT),		\
	     p_off = (f_off) & (PAGE_SIZE - 1),				\
	     p_len = skb_frag_must_loop(p) ?				\
	     min_t(u32, f_len, PAGE_SIZE - p_off) : f_len,		\
	     copied = 0;						\
	     copied < f_len;						\
	     copied += p_len, p++, p_off = 0,				\
	     p_len = min_t(u32, f_len - copied, PAGE_SIZE))
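/*
 * Example (illustrative): copying a fragment out page by page, which is
 * needed when the fragment may sit in a highmem compound page. dst is a
 * placeholder for a caller-provided buffer; kmap_atomic() comes from
 * <linux/highmem.h>:
 *
 *	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
 *	struct page *p;
 *	u32 p_off, p_len, copied;
 *
 *	skb_frag_foreach_page(frag, frag->page_offset, skb_frag_size(frag),
 *			      p, p_off, p_len, copied) {
 *		void *vaddr = kmap_atomic(p);
 *
 *		memcpy(dst + copied, vaddr + p_off, p_len);
 *		kunmap_atomic(vaddr);
 *	}
 */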
384#define HAVE_HW_TIME_STAMP
400struct skb_shared_hwtstamps {
401 ktime_t hwtstamp;
402};
403
404
405enum {
406
407 SKBTX_HW_TSTAMP = 1 << 0,
408
409
410 SKBTX_SW_TSTAMP = 1 << 1,
411
412
413 SKBTX_IN_PROGRESS = 1 << 2,
414
415
416 SKBTX_DEV_ZEROCOPY = 1 << 3,
417
418
419 SKBTX_WIFI_STATUS = 1 << 4,
420
421
422
423
424
425
426 SKBTX_SHARED_FRAG = 1 << 5,
427
428
429 SKBTX_SCHED_TSTAMP = 1 << 6,
430};
431
432#define SKBTX_ZEROCOPY_FRAG (SKBTX_DEV_ZEROCOPY | SKBTX_SHARED_FRAG)
433#define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \
434 SKBTX_SCHED_TSTAMP)
435#define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)
445struct ubuf_info {
446 void (*callback)(struct ubuf_info *, bool zerocopy_success);
447 union {
448 struct {
449 unsigned long desc;
450 void *ctx;
451 };
452 struct {
453 u32 id;
454 u16 len;
455 u16 zerocopy:1;
456 u32 bytelen;
457 };
458 };
459 refcount_t refcnt;
460
461 struct mmpin {
462 struct user_struct *user;
463 unsigned int num_pg;
464 } mmp;
465};
466
467#define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))
468
469struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size);
470struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
471 struct ubuf_info *uarg);
472
473static inline void sock_zerocopy_get(struct ubuf_info *uarg)
474{
475 refcount_inc(&uarg->refcnt);
476}
477
478void sock_zerocopy_put(struct ubuf_info *uarg);
479void sock_zerocopy_put_abort(struct ubuf_info *uarg);
480
481void sock_zerocopy_callback(struct ubuf_info *uarg, bool success);
482
483int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
484 struct msghdr *msg, int len,
485 struct ubuf_info *uarg);
486
487
488
489
490struct skb_shared_info {
491 __u8 __unused;
492 __u8 meta_len;
493 __u8 nr_frags;
494 __u8 tx_flags;
495 unsigned short gso_size;
496
497 unsigned short gso_segs;
498 struct sk_buff *frag_list;
499 struct skb_shared_hwtstamps hwtstamps;
500 unsigned int gso_type;
501 u32 tskey;
502
503
504
505
506 atomic_t dataref;
507
508
509
510 void * destructor_arg;
511
512
513 skb_frag_t frags[MAX_SKB_FRAGS];
514};
527#define SKB_DATAREF_SHIFT 16
528#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
529
530
531enum {
532 SKB_FCLONE_UNAVAILABLE,
533 SKB_FCLONE_ORIG,
534 SKB_FCLONE_CLONE,
535};
536
537enum {
538 SKB_GSO_TCPV4 = 1 << 0,
539
540
541 SKB_GSO_DODGY = 1 << 1,
542
543
544 SKB_GSO_TCP_ECN = 1 << 2,
545
546 SKB_GSO_TCP_FIXEDID = 1 << 3,
547
548 SKB_GSO_TCPV6 = 1 << 4,
549
550 SKB_GSO_FCOE = 1 << 5,
551
552 SKB_GSO_GRE = 1 << 6,
553
554 SKB_GSO_GRE_CSUM = 1 << 7,
555
556 SKB_GSO_IPXIP4 = 1 << 8,
557
558 SKB_GSO_IPXIP6 = 1 << 9,
559
560 SKB_GSO_UDP_TUNNEL = 1 << 10,
561
562 SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
563
564 SKB_GSO_PARTIAL = 1 << 12,
565
566 SKB_GSO_TUNNEL_REMCSUM = 1 << 13,
567
568 SKB_GSO_SCTP = 1 << 14,
569
570 SKB_GSO_ESP = 1 << 15,
571
572 SKB_GSO_UDP = 1 << 16,
573};
574
575#if BITS_PER_LONG > 32
576#define NET_SKBUFF_DATA_USES_OFFSET 1
577#endif
578
579#ifdef NET_SKBUFF_DATA_USES_OFFSET
580typedef unsigned int sk_buff_data_t;
581#else
582typedef unsigned char *sk_buff_data_t;
583#endif
658struct sk_buff {
659 union {
660 struct {
661
662 struct sk_buff *next;
663 struct sk_buff *prev;
664
665 union {
666 struct net_device *dev;
667
668
669
670
671 unsigned long dev_scratch;
672 };
673 };
674 struct rb_node rbnode;
675 };
676 struct sock *sk;
677
678 union {
679 ktime_t tstamp;
680 u64 skb_mstamp;
681 };
682
683
684
685
686
687
688 char cb[48] __aligned(8);
689
690 union {
691 struct {
692 unsigned long _skb_refdst;
693 void (*destructor)(struct sk_buff *skb);
694 };
695 struct list_head tcp_tsorted_anchor;
696 };
697
698#ifdef CONFIG_XFRM
699 struct sec_path *sp;
700#endif
701#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
702 unsigned long _nfct;
703#endif
704#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
705 struct nf_bridge_info *nf_bridge;
706#endif
707 unsigned int len,
708 data_len;
709 __u16 mac_len,
710 hdr_len;
711
712
713
714
715 __u16 queue_mapping;
716
717
718#ifdef __BIG_ENDIAN_BITFIELD
719#define CLONED_MASK (1 << 7)
720#else
721#define CLONED_MASK 1
722#endif
723#define CLONED_OFFSET() offsetof(struct sk_buff, __cloned_offset)
724
725 __u8 __cloned_offset[0];
726 __u8 cloned:1,
727 nohdr:1,
728 fclone:2,
729 peeked:1,
730 head_frag:1,
731 xmit_more:1,
732 __unused:1;
733
734
735
736
737
738 __u32 headers_start[0];
739
740
741
742#ifdef __BIG_ENDIAN_BITFIELD
743#define PKT_TYPE_MAX (7 << 5)
744#else
745#define PKT_TYPE_MAX 7
746#endif
747#define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset)
748
749 __u8 __pkt_type_offset[0];
750 __u8 pkt_type:3;
751 __u8 pfmemalloc:1;
752 __u8 ignore_df:1;
753
754 __u8 nf_trace:1;
755 __u8 ip_summed:2;
756 __u8 ooo_okay:1;
757 __u8 l4_hash:1;
758 __u8 sw_hash:1;
759 __u8 wifi_acked_valid:1;
760 __u8 wifi_acked:1;
761
762 __u8 no_fcs:1;
763
764 __u8 encapsulation:1;
765 __u8 encap_hdr_csum:1;
766 __u8 csum_valid:1;
767 __u8 csum_complete_sw:1;
768 __u8 csum_level:2;
769 __u8 csum_not_inet:1;
770
771 __u8 dst_pending_confirm:1;
772#ifdef CONFIG_IPV6_NDISC_NODETYPE
773 __u8 ndisc_nodetype:2;
774#endif
775 __u8 ipvs_property:1;
776 __u8 inner_protocol_type:1;
777 __u8 remcsum_offload:1;
778#ifdef CONFIG_NET_SWITCHDEV
779 __u8 offload_fwd_mark:1;
780 __u8 offload_mr_fwd_mark:1;
781#endif
782#ifdef CONFIG_NET_CLS_ACT
783 __u8 tc_skip_classify:1;
784 __u8 tc_at_ingress:1;
785 __u8 tc_redirected:1;
786 __u8 tc_from_ingress:1;
787#endif
788
789#ifdef CONFIG_NET_SCHED
790 __u16 tc_index;
791#endif
792
793 union {
794 __wsum csum;
795 struct {
796 __u16 csum_start;
797 __u16 csum_offset;
798 };
799 };
800 __u32 priority;
801 int skb_iif;
802 __u32 hash;
803 __be16 vlan_proto;
804 __u16 vlan_tci;
805#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
806 union {
807 unsigned int napi_id;
808 unsigned int sender_cpu;
809 };
810#endif
811#ifdef CONFIG_NETWORK_SECMARK
812 __u32 secmark;
813#endif
814
815 union {
816 __u32 mark;
817 __u32 reserved_tailroom;
818 };
819
820 union {
821 __be16 inner_protocol;
822 __u8 inner_ipproto;
823 };
824
825 __u16 inner_transport_header;
826 __u16 inner_network_header;
827 __u16 inner_mac_header;
828
829 __be16 protocol;
830 __u16 transport_header;
831 __u16 network_header;
832 __u16 mac_header;
833
834
835 __u32 headers_end[0];
836
837
838
839 sk_buff_data_t tail;
840 sk_buff_data_t end;
841 unsigned char *head,
842 *data;
843 unsigned int truesize;
844 refcount_t users;
845};
846
847#ifdef __KERNEL__
848
849
850
851#include <linux/slab.h>
852
853
854#define SKB_ALLOC_FCLONE 0x01
855#define SKB_ALLOC_RX 0x02
856#define SKB_ALLOC_NAPI 0x04
857
858
859static inline bool skb_pfmemalloc(const struct sk_buff *skb)
860{
861 return unlikely(skb->pfmemalloc);
862}
863
864
865
866
867
868#define SKB_DST_NOREF 1UL
869#define SKB_DST_PTRMASK ~(SKB_DST_NOREF)
870
871#define SKB_NFCT_PTRMASK ~(7UL)
872
873
874
875
876
877
878static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
879{
880
881
882
883 WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
884 !rcu_read_lock_held() &&
885 !rcu_read_lock_bh_held());
886 return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
887}
897static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
898{
899 skb->_skb_refdst = (unsigned long)dst;
900}
912static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
913{
914 WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
915 skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
916}
917
918
919
920
921
922static inline bool skb_dst_is_noref(const struct sk_buff *skb)
923{
924 return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
925}
926
927static inline struct rtable *skb_rtable(const struct sk_buff *skb)
928{
929 return (struct rtable *)skb_dst(skb);
930}
931
932
933
934
935
936static inline bool skb_pkt_type_ok(u32 ptype)
937{
938 return ptype <= PACKET_OTHERHOST;
939}
940
941static inline unsigned int skb_napi_id(const struct sk_buff *skb)
942{
943#ifdef CONFIG_NET_RX_BUSY_POLL
944 return skb->napi_id;
945#else
946 return 0;
947#endif
948}
949
950
951static inline bool skb_unref(struct sk_buff *skb)
952{
953 if (unlikely(!skb))
954 return false;
955 if (likely(refcount_read(&skb->users) == 1))
956 smp_rmb();
957 else if (likely(!refcount_dec_and_test(&skb->users)))
958 return false;
959
960 return true;
961}
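/*
 * Example (illustrative sketch of the pattern used by kfree_skb() and
 * consume_skb() below): drop one reference and only release the skb when
 * the last user is gone. my_free_skb is a hypothetical name:
 *
 *	void my_free_skb(struct sk_buff *skb)
 *	{
 *		if (!skb_unref(skb))
 *			return;
 *		__kfree_skb(skb);
 *	}
 */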
962
963void skb_release_head_state(struct sk_buff *skb);
964void kfree_skb(struct sk_buff *skb);
965void kfree_skb_list(struct sk_buff *segs);
966void skb_tx_error(struct sk_buff *skb);
967void consume_skb(struct sk_buff *skb);
968void __consume_stateless_skb(struct sk_buff *skb);
969void __kfree_skb(struct sk_buff *skb);
970extern struct kmem_cache *skbuff_head_cache;
971
972void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
973bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
974 bool *fragstolen, int *delta_truesize);
975
976struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
977 int node);
978struct sk_buff *__build_skb(void *data, unsigned int frag_size);
979struct sk_buff *build_skb(void *data, unsigned int frag_size);
980static inline struct sk_buff *alloc_skb(unsigned int size,
981 gfp_t priority)
982{
983 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
984}
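/*
 * Example (illustrative): allocating a linear skb, reserving headroom for
 * the lower-layer headers and copying in a payload. payload/len are
 * placeholders; ETH_HLEN comes from <linux/if_ether.h> and NET_SKB_PAD is
 * defined further down in this header:
 *
 *	struct sk_buff *skb = alloc_skb(NET_SKB_PAD + ETH_HLEN + len,
 *					GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, NET_SKB_PAD + ETH_HLEN);
 *	skb_put_data(skb, payload, len);
 */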
985
986struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
987 unsigned long data_len,
988 int max_page_order,
989 int *errcode,
990 gfp_t gfp_mask);
991
992
993struct sk_buff_fclones {
994 struct sk_buff skb1;
995
996 struct sk_buff skb2;
997
998 refcount_t fclone_ref;
999};
1010static inline bool skb_fclone_busy(const struct sock *sk,
1011 const struct sk_buff *skb)
1012{
1013 const struct sk_buff_fclones *fclones;
1014
1015 fclones = container_of(skb, struct sk_buff_fclones, skb1);
1016
1017 return skb->fclone == SKB_FCLONE_ORIG &&
1018 refcount_read(&fclones->fclone_ref) > 1 &&
1019 fclones->skb2.sk == sk;
1020}
1021
1022static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
1023 gfp_t priority)
1024{
1025 return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
1026}
1027
1028struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
1029int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
1030struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
1031struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
1032struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
1033 gfp_t gfp_mask, bool fclone);
1034static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
1035 gfp_t gfp_mask)
1036{
1037 return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
1038}
1039
1040int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
1041struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
1042 unsigned int headroom);
1043struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
1044 int newtailroom, gfp_t priority);
1045int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
1046 int offset, int len);
1047int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
1048 int offset, int len);
1049int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
1050int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);
1063static inline int skb_pad(struct sk_buff *skb, int pad)
1064{
1065 return __skb_pad(skb, pad, true);
1066}
1067#define dev_kfree_skb(a) consume_skb(a)
1068
1069int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
1070 int getfrag(void *from, char *to, int offset,
1071 int len, int odd, struct sk_buff *skb),
1072 void *from, int length);
1073
1074int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
1075 int offset, size_t size);
1076
1077struct skb_seq_state {
1078 __u32 lower_offset;
1079 __u32 upper_offset;
1080 __u32 frag_idx;
1081 __u32 stepped_offset;
1082 struct sk_buff *root_skb;
1083 struct sk_buff *cur_skb;
1084 __u8 *frag_data;
1085};
1086
1087void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
1088 unsigned int to, struct skb_seq_state *st);
1089unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
1090 struct skb_seq_state *st);
1091void skb_abort_seq_read(struct skb_seq_state *st);
1092
1093unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
1094 unsigned int to, struct ts_config *config);
1122enum pkt_hash_types {
1123 PKT_HASH_TYPE_NONE,
1124 PKT_HASH_TYPE_L2,
1125 PKT_HASH_TYPE_L3,
1126 PKT_HASH_TYPE_L4,
1127};
1128
1129static inline void skb_clear_hash(struct sk_buff *skb)
1130{
1131 skb->hash = 0;
1132 skb->sw_hash = 0;
1133 skb->l4_hash = 0;
1134}
1135
1136static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
1137{
1138 if (!skb->l4_hash)
1139 skb_clear_hash(skb);
1140}
1141
1142static inline void
1143__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
1144{
1145 skb->l4_hash = is_l4;
1146 skb->sw_hash = is_sw;
1147 skb->hash = hash;
1148}
1149
1150static inline void
1151skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
1152{
1153
1154 __skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
1155}
1156
1157static inline void
1158__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
1159{
1160 __skb_set_hash(skb, hash, true, is_l4);
1161}
1162
1163void __skb_get_hash(struct sk_buff *skb);
1164u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
1165u32 skb_get_poff(const struct sk_buff *skb);
1166u32 __skb_get_poff(const struct sk_buff *skb, void *data,
1167 const struct flow_keys *keys, int hlen);
1168__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
1169 void *data, int hlen_proto);
1170
1171static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
1172 int thoff, u8 ip_proto)
1173{
1174 return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
1175}
1176
1177void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
1178 const struct flow_dissector_key *key,
1179 unsigned int key_count);
1180
1181bool __skb_flow_dissect(const struct sk_buff *skb,
1182 struct flow_dissector *flow_dissector,
1183 void *target_container,
1184 void *data, __be16 proto, int nhoff, int hlen,
1185 unsigned int flags);
1186
1187static inline bool skb_flow_dissect(const struct sk_buff *skb,
1188 struct flow_dissector *flow_dissector,
1189 void *target_container, unsigned int flags)
1190{
1191 return __skb_flow_dissect(skb, flow_dissector, target_container,
1192 NULL, 0, 0, 0, flags);
1193}
1194
1195static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
1196 struct flow_keys *flow,
1197 unsigned int flags)
1198{
1199 memset(flow, 0, sizeof(*flow));
1200 return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
1201 NULL, 0, 0, 0, flags);
1202}
1203
1204static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
1205 void *data, __be16 proto,
1206 int nhoff, int hlen,
1207 unsigned int flags)
1208{
1209 memset(flow, 0, sizeof(*flow));
1210 return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
1211 data, proto, nhoff, hlen, flags);
1212}
1213
1214static inline __u32 skb_get_hash(struct sk_buff *skb)
1215{
1216 if (!skb->l4_hash && !skb->sw_hash)
1217 __skb_get_hash(skb);
1218
1219 return skb->hash;
1220}
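/*
 * Example (illustrative): spreading packets over nr_queues worker queues
 * using the flow hash (nr_queues is a placeholder). skb_get_hash() only
 * runs the flow dissector when no hardware- or software-computed hash is
 * already present:
 *
 *	u32 idx = skb_get_hash(skb) % nr_queues;
 */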
1221
1222static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
1223{
1224 if (!skb->l4_hash && !skb->sw_hash) {
1225 struct flow_keys keys;
1226 __u32 hash = __get_hash_from_flowi6(fl6, &keys);
1227
1228 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
1229 }
1230
1231 return skb->hash;
1232}
1233
1234__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
1235
1236static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
1237{
1238 return skb->hash;
1239}
1240
1241static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
1242{
1243 to->hash = from->hash;
1244 to->sw_hash = from->sw_hash;
1245 to->l4_hash = from->l4_hash;
}
1247
1248#ifdef NET_SKBUFF_DATA_USES_OFFSET
1249static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
1250{
1251 return skb->head + skb->end;
1252}
1253
1254static inline unsigned int skb_end_offset(const struct sk_buff *skb)
1255{
1256 return skb->end;
1257}
1258#else
1259static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
1260{
1261 return skb->end;
1262}
1263
1264static inline unsigned int skb_end_offset(const struct sk_buff *skb)
1265{
1266 return skb->end - skb->head;
1267}
1268#endif
1269
1270
1271#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
1272
1273static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
1274{
1275 return &skb_shinfo(skb)->hwtstamps;
1276}
1277
1278static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
1279{
1280 bool is_zcopy = skb && skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY;
1281
1282 return is_zcopy ? skb_uarg(skb) : NULL;
1283}
1284
1285static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg)
1286{
1287 if (skb && uarg && !skb_zcopy(skb)) {
1288 sock_zerocopy_get(uarg);
1289 skb_shinfo(skb)->destructor_arg = uarg;
1290 skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
1291 }
1292}
1293
1294
1295static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
1296{
1297 struct ubuf_info *uarg = skb_zcopy(skb);
1298
1299 if (uarg) {
1300 if (uarg->callback == sock_zerocopy_callback) {
1301 uarg->zerocopy = uarg->zerocopy && zerocopy;
1302 sock_zerocopy_put(uarg);
1303 } else {
1304 uarg->callback(uarg, zerocopy);
1305 }
1306
1307 skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
1308 }
1309}
1310
1311
1312static inline void skb_zcopy_abort(struct sk_buff *skb)
1313{
1314 struct ubuf_info *uarg = skb_zcopy(skb);
1315
1316 if (uarg) {
1317 sock_zerocopy_put_abort(uarg);
1318 skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
1319 }
1320}
1328static inline int skb_queue_empty(const struct sk_buff_head *list)
1329{
1330 return list->next == (const struct sk_buff *) list;
1331}
1340static inline bool skb_queue_is_last(const struct sk_buff_head *list,
1341 const struct sk_buff *skb)
1342{
1343 return skb->next == (const struct sk_buff *) list;
1344}
1353static inline bool skb_queue_is_first(const struct sk_buff_head *list,
1354 const struct sk_buff *skb)
1355{
1356 return skb->prev == (const struct sk_buff *) list;
1357}
1367static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
1368 const struct sk_buff *skb)
1369{
1370
1371
1372
1373 BUG_ON(skb_queue_is_last(list, skb));
1374 return skb->next;
1375}
1385static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
1386 const struct sk_buff *skb)
1387{
1388
1389
1390
1391 BUG_ON(skb_queue_is_first(list, skb));
1392 return skb->prev;
1393}
1402static inline struct sk_buff *skb_get(struct sk_buff *skb)
1403{
1404 refcount_inc(&skb->users);
1405 return skb;
1406}
1420static inline int skb_cloned(const struct sk_buff *skb)
1421{
1422 return skb->cloned &&
1423 (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
1424}
1425
1426static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
1427{
1428 might_sleep_if(gfpflags_allow_blocking(pri));
1429
1430 if (skb_cloned(skb))
1431 return pskb_expand_head(skb, 0, 0, pri);
1432
1433 return 0;
1434}
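/*
 * Example (illustrative): before rewriting packet contents in place, make
 * sure the data is not shared with a clone. On success the payload is
 * private and may be modified:
 *
 *	if (skb_unclone(skb, GFP_ATOMIC))
 *		return -ENOMEM;
 */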
1443static inline int skb_header_cloned(const struct sk_buff *skb)
1444{
1445 int dataref;
1446
1447 if (!skb->cloned)
1448 return 0;
1449
1450 dataref = atomic_read(&skb_shinfo(skb)->dataref);
1451 dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
1452 return dataref != 1;
1453}
1454
1455static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
1456{
1457 might_sleep_if(gfpflags_allow_blocking(pri));
1458
1459 if (skb_header_cloned(skb))
1460 return pskb_expand_head(skb, 0, 0, pri);
1461
1462 return 0;
1463}
1464
1465
1466
1467
1468
1469static inline void __skb_header_release(struct sk_buff *skb)
1470{
1471 skb->nohdr = 1;
1472 atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
1473}
1483static inline int skb_shared(const struct sk_buff *skb)
1484{
1485 return refcount_read(&skb->users) != 1;
1486}
1501static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
1502{
1503 might_sleep_if(gfpflags_allow_blocking(pri));
1504 if (skb_shared(skb)) {
1505 struct sk_buff *nskb = skb_clone(skb, pri);
1506
1507 if (likely(nskb))
1508 consume_skb(skb);
1509 else
1510 kfree_skb(skb);
1511 skb = nskb;
1512 }
1513 return skb;
1514}
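/*
 * Example (illustrative): a receive handler that may modify the skb first
 * makes sure it holds the only reference. NET_RX_DROP comes from
 * <linux/netdevice.h>:
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return NET_RX_DROP;
 *
 * Afterwards the handler owns the (possibly cloned) skb; the original
 * reference has already been dropped.
 */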
1536static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
1537 gfp_t pri)
1538{
1539 might_sleep_if(gfpflags_allow_blocking(pri));
1540 if (skb_cloned(skb)) {
1541 struct sk_buff *nskb = skb_copy(skb, pri);
1542
1543
1544 if (likely(nskb))
1545 consume_skb(skb);
1546 else
1547 kfree_skb(skb);
1548 skb = nskb;
1549 }
1550 return skb;
1551}
1566static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
1567{
1568 struct sk_buff *skb = list_->next;
1569
1570 if (skb == (struct sk_buff *)list_)
1571 skb = NULL;
1572 return skb;
1573}
1584static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
1585 const struct sk_buff_head *list_)
1586{
1587 struct sk_buff *next = skb->next;
1588
1589 if (next == (struct sk_buff *)list_)
1590 next = NULL;
1591 return next;
1592}
1607static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
1608{
1609 struct sk_buff *skb = list_->prev;
1610
1611 if (skb == (struct sk_buff *)list_)
1612 skb = NULL;
1613 return skb;
1614
1615}
1623static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
1624{
1625 return list_->qlen;
1626}
1638static inline void __skb_queue_head_init(struct sk_buff_head *list)
1639{
1640 list->prev = list->next = (struct sk_buff *)list;
1641 list->qlen = 0;
1642}
1652static inline void skb_queue_head_init(struct sk_buff_head *list)
1653{
1654 spin_lock_init(&list->lock);
1655 __skb_queue_head_init(list);
1656}
1657
1658static inline void skb_queue_head_init_class(struct sk_buff_head *list,
1659 struct lock_class_key *class)
1660{
1661 skb_queue_head_init(list);
1662 lockdep_set_class(&list->lock, class);
1663}
1671void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
1672 struct sk_buff_head *list);
1673static inline void __skb_insert(struct sk_buff *newsk,
1674 struct sk_buff *prev, struct sk_buff *next,
1675 struct sk_buff_head *list)
1676{
1677 newsk->next = next;
1678 newsk->prev = prev;
1679 next->prev = prev->next = newsk;
1680 list->qlen++;
1681}
1682
1683static inline void __skb_queue_splice(const struct sk_buff_head *list,
1684 struct sk_buff *prev,
1685 struct sk_buff *next)
1686{
1687 struct sk_buff *first = list->next;
1688 struct sk_buff *last = list->prev;
1689
1690 first->prev = prev;
1691 prev->next = first;
1692
1693 last->next = next;
1694 next->prev = last;
1695}
1696
1697
1698
1699
1700
1701
1702static inline void skb_queue_splice(const struct sk_buff_head *list,
1703 struct sk_buff_head *head)
1704{
1705 if (!skb_queue_empty(list)) {
1706 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
1707 head->qlen += list->qlen;
1708 }
1709}
1718static inline void skb_queue_splice_init(struct sk_buff_head *list,
1719 struct sk_buff_head *head)
1720{
1721 if (!skb_queue_empty(list)) {
1722 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
1723 head->qlen += list->qlen;
1724 __skb_queue_head_init(list);
1725 }
1726}
1727
1728
1729
1730
1731
1732
1733static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
1734 struct sk_buff_head *head)
1735{
1736 if (!skb_queue_empty(list)) {
1737 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
1738 head->qlen += list->qlen;
1739 }
1740}
1750static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
1751 struct sk_buff_head *head)
1752{
1753 if (!skb_queue_empty(list)) {
1754 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
1755 head->qlen += list->qlen;
1756 __skb_queue_head_init(list);
1757 }
1758}
1771static inline void __skb_queue_after(struct sk_buff_head *list,
1772 struct sk_buff *prev,
1773 struct sk_buff *newsk)
1774{
1775 __skb_insert(newsk, prev, prev->next, list);
1776}
1777
1778void skb_append(struct sk_buff *old, struct sk_buff *newsk,
1779 struct sk_buff_head *list);
1780
1781static inline void __skb_queue_before(struct sk_buff_head *list,
1782 struct sk_buff *next,
1783 struct sk_buff *newsk)
1784{
1785 __skb_insert(newsk, next->prev, next, list);
1786}
1798void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
1799static inline void __skb_queue_head(struct sk_buff_head *list,
1800 struct sk_buff *newsk)
1801{
1802 __skb_queue_after(list, (struct sk_buff *)list, newsk);
1803}
1815void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
1816static inline void __skb_queue_tail(struct sk_buff_head *list,
1817 struct sk_buff *newsk)
1818{
1819 __skb_queue_before(list, (struct sk_buff *)list, newsk);
1820}
1821
1822
1823
1824
1825
1826void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
1827static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
1828{
1829 struct sk_buff *next, *prev;
1830
1831 list->qlen--;
1832 next = skb->next;
1833 prev = skb->prev;
1834 skb->next = skb->prev = NULL;
1835 next->prev = prev;
1836 prev->next = next;
1837}
1847struct sk_buff *skb_dequeue(struct sk_buff_head *list);
1848static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
1849{
1850 struct sk_buff *skb = skb_peek(list);
1851 if (skb)
1852 __skb_unlink(skb, list);
1853 return skb;
1854}
1864struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
1865static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
1866{
1867 struct sk_buff *skb = skb_peek_tail(list);
1868 if (skb)
1869 __skb_unlink(skb, list);
1870 return skb;
1871}
1872
1873
1874static inline bool skb_is_nonlinear(const struct sk_buff *skb)
1875{
1876 return skb->data_len;
1877}
1878
1879static inline unsigned int skb_headlen(const struct sk_buff *skb)
1880{
1881 return skb->len - skb->data_len;
1882}
1883
1884static inline unsigned int __skb_pagelen(const struct sk_buff *skb)
1885{
1886 unsigned int i, len = 0;
1887
1888 for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
1889 len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
1890 return len;
1891}
1892
1893static inline unsigned int skb_pagelen(const struct sk_buff *skb)
1894{
1895 return skb_headlen(skb) + __skb_pagelen(skb);
1896}
1911static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
1912 struct page *page, int off, int size)
1913{
1914 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1915
1916
1917
1918
1919
1920
1921 frag->page.p = page;
1922 frag->page_offset = off;
1923 skb_frag_size_set(frag, size);
1924
1925 page = compound_head(page);
1926 if (page_is_pfmemalloc(page))
1927 skb->pfmemalloc = true;
1928}
1944static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
1945 struct page *page, int off, int size)
1946{
1947 __skb_fill_page_desc(skb, i, page, off, size);
1948 skb_shinfo(skb)->nr_frags = i + 1;
1949}
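/*
 * Example (illustrative): attaching one page of payload as fragment 0 and
 * updating the length accounting by hand. skb_add_rx_frag() below does the
 * same and also adjusts truesize, so it is usually preferred:
 *
 *	skb_fill_page_desc(skb, 0, page, offset, size);
 *	skb->len      += size;
 *	skb->data_len += size;
 *	skb->truesize += PAGE_SIZE;
 */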
1950
1951void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
1952 int size, unsigned int truesize);
1953
1954void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
1955 unsigned int truesize);
1956
1957#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags)
1958#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frag_list(skb))
1959#define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
1960
1961#ifdef NET_SKBUFF_DATA_USES_OFFSET
1962static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
1963{
1964 return skb->head + skb->tail;
1965}
1966
1967static inline void skb_reset_tail_pointer(struct sk_buff *skb)
1968{
1969 skb->tail = skb->data - skb->head;
1970}
1971
1972static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
1973{
1974 skb_reset_tail_pointer(skb);
1975 skb->tail += offset;
1976}
1977
1978#else
1979static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
1980{
1981 return skb->tail;
1982}
1983
1984static inline void skb_reset_tail_pointer(struct sk_buff *skb)
1985{
1986 skb->tail = skb->data;
1987}
1988
1989static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
1990{
1991 skb->tail = skb->data + offset;
1992}
1993
1994#endif
1995
1996
1997
1998
1999void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
2000void *skb_put(struct sk_buff *skb, unsigned int len);
2001static inline void *__skb_put(struct sk_buff *skb, unsigned int len)
2002{
2003 void *tmp = skb_tail_pointer(skb);
2004 SKB_LINEAR_ASSERT(skb);
2005 skb->tail += len;
2006 skb->len += len;
2007 return tmp;
2008}
2009
2010static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len)
2011{
2012 void *tmp = __skb_put(skb, len);
2013
2014 memset(tmp, 0, len);
2015 return tmp;
2016}
2017
2018static inline void *__skb_put_data(struct sk_buff *skb, const void *data,
2019 unsigned int len)
2020{
2021 void *tmp = __skb_put(skb, len);
2022
2023 memcpy(tmp, data, len);
2024 return tmp;
2025}
2026
2027static inline void __skb_put_u8(struct sk_buff *skb, u8 val)
2028{
2029 *(u8 *)__skb_put(skb, 1) = val;
2030}
2031
2032static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
2033{
2034 void *tmp = skb_put(skb, len);
2035
2036 memset(tmp, 0, len);
2037
2038 return tmp;
2039}
2040
2041static inline void *skb_put_data(struct sk_buff *skb, const void *data,
2042 unsigned int len)
2043{
2044 void *tmp = skb_put(skb, len);
2045
2046 memcpy(tmp, data, len);
2047
2048 return tmp;
2049}
2050
2051static inline void skb_put_u8(struct sk_buff *skb, u8 val)
2052{
2053 *(u8 *)skb_put(skb, 1) = val;
2054}
2055
2056void *skb_push(struct sk_buff *skb, unsigned int len);
2057static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
2058{
2059 skb->data -= len;
2060 skb->len += len;
2061 return skb->data;
2062}
2063
2064void *skb_pull(struct sk_buff *skb, unsigned int len);
2065static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
2066{
2067 skb->len -= len;
2068 BUG_ON(skb->len < skb->data_len);
2069 return skb->data += len;
2070}
2071
2072static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len)
2073{
2074 return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
2075}
2076
2077void *__pskb_pull_tail(struct sk_buff *skb, int delta);
2078
2079static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len)
2080{
2081 if (len > skb_headlen(skb) &&
2082 !__pskb_pull_tail(skb, len - skb_headlen(skb)))
2083 return NULL;
2084 skb->len -= len;
2085 return skb->data += len;
2086}
2087
2088static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
2089{
2090 return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
2091}
2092
2093static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
2094{
2095 if (likely(len <= skb_headlen(skb)))
2096 return 1;
2097 if (unlikely(len > skb->len))
2098 return 0;
2099 return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
2100}
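/*
 * Example (illustrative): make sure the IPv4 header is in the linear area
 * before dereferencing it, assuming skb->data currently points at the IP
 * header (struct iphdr comes from <linux/ip.h>):
 *
 *	const struct iphdr *iph;
 *
 *	if (!pskb_may_pull(skb, sizeof(*iph)))
 *		return -EINVAL;
 *	iph = (const struct iphdr *)skb->data;
 */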
2101
2102void skb_condense(struct sk_buff *skb);
2110static inline unsigned int skb_headroom(const struct sk_buff *skb)
2111{
2112 return skb->data - skb->head;
2113}
2121static inline int skb_tailroom(const struct sk_buff *skb)
2122{
2123 return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
2124}
2133static inline int skb_availroom(const struct sk_buff *skb)
2134{
2135 if (skb_is_nonlinear(skb))
2136 return 0;
2137
2138 return skb->end - skb->tail - skb->reserved_tailroom;
2139}
2149static inline void skb_reserve(struct sk_buff *skb, int len)
2150{
2151 skb->data += len;
2152 skb->tail += len;
2153}
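/*
 * Example (illustrative): the usual pointer dance when building an
 * outgoing frame. Headroom is reserved first, the payload is appended with
 * skb_put_data() and the header is prepended with skb_push(). payload/len
 * are placeholders; struct ethhdr and ETH_HLEN come from
 * <linux/if_ether.h>:
 *
 *	struct ethhdr *eth;
 *
 *	skb_reserve(skb, ETH_HLEN);
 *	skb_put_data(skb, payload, len);
 *	eth = skb_push(skb, ETH_HLEN);
 */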
2167static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
2168 unsigned int needed_tailroom)
2169{
2170 SKB_LINEAR_ASSERT(skb);
2171 if (mtu < skb_tailroom(skb) - needed_tailroom)
2172
2173 skb->reserved_tailroom = skb_tailroom(skb) - mtu;
2174 else
2175
2176 skb->reserved_tailroom = needed_tailroom;
2177}
2178
2179#define ENCAP_TYPE_ETHER 0
2180#define ENCAP_TYPE_IPPROTO 1
2181
2182static inline void skb_set_inner_protocol(struct sk_buff *skb,
2183 __be16 protocol)
2184{
2185 skb->inner_protocol = protocol;
2186 skb->inner_protocol_type = ENCAP_TYPE_ETHER;
2187}
2188
2189static inline void skb_set_inner_ipproto(struct sk_buff *skb,
2190 __u8 ipproto)
2191{
2192 skb->inner_ipproto = ipproto;
2193 skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
2194}
2195
2196static inline void skb_reset_inner_headers(struct sk_buff *skb)
2197{
2198 skb->inner_mac_header = skb->mac_header;
2199 skb->inner_network_header = skb->network_header;
2200 skb->inner_transport_header = skb->transport_header;
2201}
2202
2203static inline void skb_reset_mac_len(struct sk_buff *skb)
2204{
2205 skb->mac_len = skb->network_header - skb->mac_header;
2206}
2207
2208static inline unsigned char *skb_inner_transport_header(const struct sk_buff
2209 *skb)
2210{
2211 return skb->head + skb->inner_transport_header;
2212}
2213
2214static inline int skb_inner_transport_offset(const struct sk_buff *skb)
2215{
2216 return skb_inner_transport_header(skb) - skb->data;
2217}
2218
2219static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
2220{
2221 skb->inner_transport_header = skb->data - skb->head;
2222}
2223
2224static inline void skb_set_inner_transport_header(struct sk_buff *skb,
2225 const int offset)
2226{
2227 skb_reset_inner_transport_header(skb);
2228 skb->inner_transport_header += offset;
2229}
2230
2231static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
2232{
2233 return skb->head + skb->inner_network_header;
2234}
2235
2236static inline void skb_reset_inner_network_header(struct sk_buff *skb)
2237{
2238 skb->inner_network_header = skb->data - skb->head;
2239}
2240
2241static inline void skb_set_inner_network_header(struct sk_buff *skb,
2242 const int offset)
2243{
2244 skb_reset_inner_network_header(skb);
2245 skb->inner_network_header += offset;
2246}
2247
2248static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
2249{
2250 return skb->head + skb->inner_mac_header;
2251}
2252
2253static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
2254{
2255 skb->inner_mac_header = skb->data - skb->head;
2256}
2257
2258static inline void skb_set_inner_mac_header(struct sk_buff *skb,
2259 const int offset)
2260{
2261 skb_reset_inner_mac_header(skb);
2262 skb->inner_mac_header += offset;
2263}
2264static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
2265{
2266 return skb->transport_header != (typeof(skb->transport_header))~0U;
2267}
2268
2269static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
2270{
2271 return skb->head + skb->transport_header;
2272}
2273
2274static inline void skb_reset_transport_header(struct sk_buff *skb)
2275{
2276 skb->transport_header = skb->data - skb->head;
2277}
2278
2279static inline void skb_set_transport_header(struct sk_buff *skb,
2280 const int offset)
2281{
2282 skb_reset_transport_header(skb);
2283 skb->transport_header += offset;
2284}
2285
2286static inline unsigned char *skb_network_header(const struct sk_buff *skb)
2287{
2288 return skb->head + skb->network_header;
2289}
2290
2291static inline void skb_reset_network_header(struct sk_buff *skb)
2292{
2293 skb->network_header = skb->data - skb->head;
2294}
2295
2296static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
2297{
2298 skb_reset_network_header(skb);
2299 skb->network_header += offset;
2300}
2301
2302static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
2303{
2304 return skb->head + skb->mac_header;
2305}
2306
2307static inline int skb_mac_offset(const struct sk_buff *skb)
2308{
2309 return skb_mac_header(skb) - skb->data;
2310}
2311
2312static inline u32 skb_mac_header_len(const struct sk_buff *skb)
2313{
2314 return skb->network_header - skb->mac_header;
2315}
2316
2317static inline int skb_mac_header_was_set(const struct sk_buff *skb)
2318{
2319 return skb->mac_header != (typeof(skb->mac_header))~0U;
2320}
2321
2322static inline void skb_reset_mac_header(struct sk_buff *skb)
2323{
2324 skb->mac_header = skb->data - skb->head;
2325}
2326
2327static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
2328{
2329 skb_reset_mac_header(skb);
2330 skb->mac_header += offset;
2331}
2332
2333static inline void skb_pop_mac_header(struct sk_buff *skb)
2334{
2335 skb->mac_header = skb->network_header;
2336}
2337
2338static inline void skb_probe_transport_header(struct sk_buff *skb,
2339 const int offset_hint)
2340{
2341 struct flow_keys keys;
2342
2343 if (skb_transport_header_was_set(skb))
2344 return;
2345 else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
2346 skb_set_transport_header(skb, keys.control.thoff);
2347 else
2348 skb_set_transport_header(skb, offset_hint);
2349}
2350
2351static inline void skb_mac_header_rebuild(struct sk_buff *skb)
2352{
2353 if (skb_mac_header_was_set(skb)) {
2354 const unsigned char *old_mac = skb_mac_header(skb);
2355
2356 skb_set_mac_header(skb, -skb->mac_len);
2357 memmove(skb_mac_header(skb), old_mac, skb->mac_len);
2358 }
2359}
2360
2361static inline int skb_checksum_start_offset(const struct sk_buff *skb)
2362{
2363 return skb->csum_start - skb_headroom(skb);
2364}
2365
2366static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
2367{
2368 return skb->head + skb->csum_start;
2369}
2370
2371static inline int skb_transport_offset(const struct sk_buff *skb)
2372{
2373 return skb_transport_header(skb) - skb->data;
2374}
2375
2376static inline u32 skb_network_header_len(const struct sk_buff *skb)
2377{
2378 return skb->transport_header - skb->network_header;
2379}
2380
2381static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
2382{
2383 return skb->inner_transport_header - skb->inner_network_header;
2384}
2385
2386static inline int skb_network_offset(const struct sk_buff *skb)
2387{
2388 return skb_network_header(skb) - skb->data;
2389}
2390
2391static inline int skb_inner_network_offset(const struct sk_buff *skb)
2392{
2393 return skb_inner_network_header(skb) - skb->data;
2394}
2395
2396static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
2397{
2398 return pskb_may_pull(skb, skb_network_offset(skb) + len);
2399}
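/*
 * Example (illustrative): the header offset helpers let protocol code
 * locate headers without caching pointers across head reallocations.
 * Assuming skb->data points at the start of the IP header on receive
 * (ip_hdr() is from <linux/ip.h>, tcp_hdr() from <linux/tcp.h>):
 *
 *	skb_reset_network_header(skb);
 *	iph = ip_hdr(skb);
 *	skb_set_transport_header(skb, iph->ihl * 4);
 *	th = tcp_hdr(skb);
 */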
2421#ifndef NET_IP_ALIGN
2422#define NET_IP_ALIGN 2
2423#endif
2445#ifndef NET_SKB_PAD
2446#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
2447#endif
2448
2449int ___pskb_trim(struct sk_buff *skb, unsigned int len);
2450
2451static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
2452{
2453 if (unlikely(skb_is_nonlinear(skb))) {
2454 WARN_ON(1);
2455 return;
2456 }
2457 skb->len = len;
2458 skb_set_tail_pointer(skb, len);
2459}
2460
2461static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
2462{
2463 __skb_set_length(skb, len);
2464}
2465
2466void skb_trim(struct sk_buff *skb, unsigned int len);
2467
2468static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
2469{
2470 if (skb->data_len)
2471 return ___pskb_trim(skb, len);
2472 __skb_trim(skb, len);
2473 return 0;
2474}
2475
2476static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
2477{
2478 return (len < skb->len) ? __pskb_trim(skb, len) : 0;
2479}
2490static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
2491{
2492 int err = pskb_trim(skb, len);
2493 BUG_ON(err);
2494}
2495
2496static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
2497{
2498 unsigned int diff = len - skb->len;
2499
2500 if (skb_tailroom(skb) < diff) {
2501 int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
2502 GFP_ATOMIC);
2503 if (ret)
2504 return ret;
2505 }
2506 __skb_set_length(skb, len);
2507 return 0;
2508}
2518static inline void skb_orphan(struct sk_buff *skb)
2519{
2520 if (skb->destructor) {
2521 skb->destructor(skb);
2522 skb->destructor = NULL;
2523 skb->sk = NULL;
2524 } else {
2525 BUG_ON(skb->sk);
2526 }
2527}
2538static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
2539{
2540 if (likely(!skb_zcopy(skb)))
2541 return 0;
2542 if (skb_uarg(skb)->callback == sock_zerocopy_callback)
2543 return 0;
2544 return skb_copy_ubufs(skb, gfp_mask);
2545}
2546
2547
2548static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
2549{
2550 if (likely(!skb_zcopy(skb)))
2551 return 0;
2552 return skb_copy_ubufs(skb, gfp_mask);
2553}
2563void skb_queue_purge(struct sk_buff_head *list);
2564static inline void __skb_queue_purge(struct sk_buff_head *list)
2565{
2566 struct sk_buff *skb;
2567 while ((skb = __skb_dequeue(list)) != NULL)
2568 kfree_skb(skb);
2569}
2570
2571void skb_rbtree_purge(struct rb_root *root);
2572
2573void *netdev_alloc_frag(unsigned int fragsz);
2574
2575struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
2576 gfp_t gfp_mask);
2591static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
2592 unsigned int length)
2593{
2594 return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
2595}
2596
2597
2598static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
2599 gfp_t gfp_mask)
2600{
2601 return __netdev_alloc_skb(NULL, length, gfp_mask);
2602}
2603
2604
2605static inline struct sk_buff *dev_alloc_skb(unsigned int length)
2606{
2607 return netdev_alloc_skb(NULL, length);
2608}
2609
2610
2611static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
2612 unsigned int length, gfp_t gfp)
2613{
2614 struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
2615
2616 if (NET_IP_ALIGN && skb)
2617 skb_reserve(skb, NET_IP_ALIGN);
2618 return skb;
2619}
2620
2621static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
2622 unsigned int length)
2623{
2624 return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
2625}
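/*
 * Example (illustrative): a simple driver RX path that allocates an
 * IP-aligned skb, copies the received frame into it and hands it to the
 * stack. rx_buf/pkt_len are placeholders for the device's receive buffer;
 * eth_type_trans() is from <linux/etherdevice.h>:
 *
 *	skb = netdev_alloc_skb_ip_align(dev, pkt_len);
 *	if (unlikely(!skb))
 *		return;
 *	skb_put_data(skb, rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */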
2626
2627static inline void skb_free_frag(void *addr)
2628{
2629 page_frag_free(addr);
2630}
2631
2632void *napi_alloc_frag(unsigned int fragsz);
2633struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
2634 unsigned int length, gfp_t gfp_mask);
2635static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
2636 unsigned int length)
2637{
2638 return __napi_alloc_skb(napi, length, GFP_ATOMIC);
2639}
2640void napi_consume_skb(struct sk_buff *skb, int budget);
2641
2642void __kfree_skb_flush(void);
2643void __kfree_skb_defer(struct sk_buff *skb);
2654static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
2655 unsigned int order)
2656{
2665 gfp_mask |= __GFP_COMP | __GFP_MEMALLOC;
2666
2667 return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
2668}
2669
2670static inline struct page *dev_alloc_pages(unsigned int order)
2671{
2672 return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
2673}
2683static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
2684{
2685 return __dev_alloc_pages(gfp_mask, 0);
2686}
2687
2688static inline struct page *dev_alloc_page(void)
2689{
2690 return dev_alloc_pages(0);
2691}
2692
2693
2694
2695
2696
2697
2698static inline void skb_propagate_pfmemalloc(struct page *page,
2699 struct sk_buff *skb)
2700{
2701 if (page_is_pfmemalloc(page))
2702 skb->pfmemalloc = true;
2703}
2711static inline struct page *skb_frag_page(const skb_frag_t *frag)
2712{
2713 return frag->page.p;
2714}
2722static inline void __skb_frag_ref(skb_frag_t *frag)
2723{
2724 get_page(skb_frag_page(frag));
2725}
2734static inline void skb_frag_ref(struct sk_buff *skb, int f)
2735{
2736 __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
2737}
2745static inline void __skb_frag_unref(skb_frag_t *frag)
2746{
2747 put_page(skb_frag_page(frag));
2748}
2757static inline void skb_frag_unref(struct sk_buff *skb, int f)
2758{
2759 __skb_frag_unref(&skb_shinfo(skb)->frags[f]);
2760}
2769static inline void *skb_frag_address(const skb_frag_t *frag)
2770{
2771 return page_address(skb_frag_page(frag)) + frag->page_offset;
2772}
2781static inline void *skb_frag_address_safe(const skb_frag_t *frag)
2782{
2783 void *ptr = page_address(skb_frag_page(frag));
2784 if (unlikely(!ptr))
2785 return NULL;
2786
2787 return ptr + frag->page_offset;
2788}
2797static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
2798{
2799 frag->page.p = page;
2800}
2810static inline void skb_frag_set_page(struct sk_buff *skb, int f,
2811 struct page *page)
2812{
2813 __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
2814}
2815
2816bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
2829static inline dma_addr_t skb_frag_dma_map(struct device *dev,
2830 const skb_frag_t *frag,
2831 size_t offset, size_t size,
2832 enum dma_data_direction dir)
2833{
2834 return dma_map_page(dev, skb_frag_page(frag),
2835 frag->page_offset + offset, size, dir);
2836}
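/*
 * Example (illustrative): mapping every fragment of an skb for transmit
 * DMA. dev is the NIC's DMA-capable struct device and the unwind label is
 * a placeholder for the driver's unmap-on-error path:
 *
 *	unsigned int i;
 *
 *	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 *		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 *		dma_addr_t dma;
 *
 *		dma = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
 *				       DMA_TO_DEVICE);
 *		if (dma_mapping_error(dev, dma))
 *			goto unwind;
 *	}
 */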
2837
2838static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
2839 gfp_t gfp_mask)
2840{
2841 return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
2842}
2843
2844
2845static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
2846 gfp_t gfp_mask)
2847{
2848 return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
2849}
2860static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
2861{
2862 return !skb_header_cloned(skb) &&
2863 skb_headroom(skb) + len <= skb->hdr_len;
2864}
2865
2866static inline int skb_try_make_writable(struct sk_buff *skb,
2867 unsigned int write_len)
2868{
2869 return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
2870 pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2871}
2872
2873static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
2874 int cloned)
2875{
2876 int delta = 0;
2877
2878 if (headroom > skb_headroom(skb))
2879 delta = headroom - skb_headroom(skb);
2880
2881 if (delta || cloned)
2882 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
2883 GFP_ATOMIC);
2884 return 0;
2885}
2899static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
2900{
2901 return __skb_cow(skb, headroom, skb_cloned(skb));
2902}
2914static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
2915{
2916 return __skb_cow(skb, headroom, skb_header_cloned(skb));
2917}
2929static inline int skb_padto(struct sk_buff *skb, unsigned int len)
2930{
2931 unsigned int size = skb->len;
2932 if (likely(size >= len))
2933 return 0;
2934 return skb_pad(skb, len - size);
2935}
2948static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
2949 bool free_on_error)
2950{
2951 unsigned int size = skb->len;
2952
2953 if (unlikely(size < len)) {
2954 len -= size;
2955 if (__skb_pad(skb, len, free_on_error))
2956 return -ENOMEM;
2957 __skb_put(skb, len);
2958 }
2959 return 0;
2960}
2972static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
2973{
2974 return __skb_put_padto(skb, len, true);
2975}
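/*
 * Example (illustrative): Ethernet drivers commonly pad runt frames to the
 * minimum frame size before queueing them to hardware. ETH_ZLEN and
 * NETDEV_TX_OK come from <linux/if_ether.h> and <linux/netdevice.h>; on
 * failure the skb has already been freed:
 *
 *	if (skb_put_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;
 *
 * skb_padto() only grows the buffer; skb_put_padto() also advances
 * skb->len so the zeroed padding is actually transmitted.
 */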
2976
2977static inline int skb_add_data(struct sk_buff *skb,
2978 struct iov_iter *from, int copy)
2979{
2980 const int off = skb->len;
2981
2982 if (skb->ip_summed == CHECKSUM_NONE) {
2983 __wsum csum = 0;
2984 if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
2985 &csum, from)) {
2986 skb->csum = csum_block_add(skb->csum, csum, off);
2987 return 0;
2988 }
2989 } else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
2990 return 0;
2991
2992 __skb_trim(skb, off);
2993 return -EFAULT;
2994}
2995
2996static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
2997 const struct page *page, int off)
2998{
2999 if (skb_zcopy(skb))
3000 return false;
3001 if (i) {
3002 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
3003
3004 return page == skb_frag_page(frag) &&
3005 off == frag->page_offset + skb_frag_size(frag);
3006 }
3007 return false;
3008}
3009
3010static inline int __skb_linearize(struct sk_buff *skb)
3011{
3012 return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
3013}
3022static inline int skb_linearize(struct sk_buff *skb)
3023{
3024 return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
3025}
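/*
 * Example (illustrative): code that needs to scan the whole packet through
 * skb->data first flattens any paged fragments into the linear buffer;
 * afterwards skb_headlen(skb) equals skb->len:
 *
 *	if (skb_linearize(skb))
 *		return -ENOMEM;
 */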
3034static inline bool skb_has_shared_frag(const struct sk_buff *skb)
3035{
3036 return skb_is_nonlinear(skb) &&
3037 skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
3038}
3039
/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
3047static inline int skb_linearize_cow(struct sk_buff *skb)
3048{
3049 return skb_is_nonlinear(skb) || skb_cloned(skb) ?
3050 __skb_linearize(skb) : 0;
3051}
3052
3053static __always_inline void
3054__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
3055 unsigned int off)
3056{
3057 if (skb->ip_summed == CHECKSUM_COMPLETE)
3058 skb->csum = csum_block_sub(skb->csum,
3059 csum_partial(start, len, 0), off);
3060 else if (skb->ip_summed == CHECKSUM_PARTIAL &&
3061 skb_checksum_start_offset(skb) < 0)
3062 skb->ip_summed = CHECKSUM_NONE;
3063}
3064
/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */
3075static inline void skb_postpull_rcsum(struct sk_buff *skb,
3076 const void *start, unsigned int len)
3077{
3078 __skb_postpull_rcsum(skb, start, len, 0);
3079}
3080
3081static __always_inline void
3082__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
3083 unsigned int off)
3084{
3085 if (skb->ip_summed == CHECKSUM_COMPLETE)
3086 skb->csum = csum_block_add(skb->csum,
3087 csum_partial(start, len, 0), off);
3088}
3089
/**
 *	skb_postpush_rcsum - update checksum for received skb after push
 *	@skb: buffer to update
 *	@start: start of data after push
 *	@len: length of data pushed
 *
 *	After doing a push on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum.
 */
3099static inline void skb_postpush_rcsum(struct sk_buff *skb,
3100 const void *start, unsigned int len)
3101{
3102 __skb_postpush_rcsum(skb, start, len, 0);
3103}
3104
3105void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
3106
/**
 *	skb_push_rcsum - push skb and update receive checksum
 *	@skb: buffer to update
 *	@len: length of data pushed
 *
 *	This function performs an skb_push on the packet and updates
 *	the CHECKSUM_COMPLETE checksum.  It should be used on
 *	receive path processing instead of skb_push unless you know
 *	that the checksum difference is zero (e.g., a valid IP header)
 *	or you are setting ip_summed to CHECKSUM_NONE.
 */
3118static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
3119{
3120 skb_push(skb, len);
3121 skb_postpush_rcsum(skb, skb->data, len);
3122 return skb->data;
3123}
3124
/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets is still valid after the operation.
 */
3134static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
3135{
3136 if (likely(len >= skb->len))
3137 return 0;
3138 if (skb->ip_summed == CHECKSUM_COMPLETE)
3139 skb->ip_summed = CHECKSUM_NONE;
3140 return __pskb_trim(skb, len);
3141}
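
/* Illustrative usage sketch: a receive path trimming link-layer padding
 * back to the length advertised by the network header.  "tot_len" is a
 * hypothetical, already-validated length taken from that header, and
 * "drop" is a hypothetical error label.
 *
 *	if (pskb_trim_rcsum(skb, tot_len))
 *		goto drop;
 */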
3142
3143static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
3144{
3145 if (skb->ip_summed == CHECKSUM_COMPLETE)
3146 skb->ip_summed = CHECKSUM_NONE;
3147 __skb_trim(skb, len);
3148 return 0;
3149}
3150
3151static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
3152{
3153 if (skb->ip_summed == CHECKSUM_COMPLETE)
3154 skb->ip_summed = CHECKSUM_NONE;
3155 return __skb_grow(skb, len);
3156}
3157
3158#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
3159#define skb_rb_first(root) rb_to_skb(rb_first(root))
3160#define skb_rb_last(root) rb_to_skb(rb_last(root))
3161#define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode))
3162#define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode))
3163
3164#define skb_queue_walk(queue, skb) \
3165 for (skb = (queue)->next; \
3166 skb != (struct sk_buff *)(queue); \
3167 skb = skb->next)
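
/* Illustrative usage sketch: walking a queue under its lock without
 * unlinking entries; use the _safe variant if entries may be removed
 * while iterating.  "total" and "flags" are hypothetical locals.
 *
 *	spin_lock_irqsave(&list->lock, flags);
 *	skb_queue_walk(list, skb)
 *		total += skb->len;
 *	spin_unlock_irqrestore(&list->lock, flags);
 */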
3168
3169#define skb_queue_walk_safe(queue, skb, tmp) \
3170 for (skb = (queue)->next, tmp = skb->next; \
3171 skb != (struct sk_buff *)(queue); \
3172 skb = tmp, tmp = skb->next)
3173
3174#define skb_queue_walk_from(queue, skb) \
3175 for (; skb != (struct sk_buff *)(queue); \
3176 skb = skb->next)
3177
3178#define skb_rbtree_walk(skb, root) \
3179 for (skb = skb_rb_first(root); skb != NULL; \
3180 skb = skb_rb_next(skb))
3181
3182#define skb_rbtree_walk_from(skb) \
3183 for (; skb != NULL; \
3184 skb = skb_rb_next(skb))
3185
3186#define skb_rbtree_walk_from_safe(skb, tmp) \
3187 for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \
3188 skb = tmp)
3189
3190#define skb_queue_walk_from_safe(queue, skb, tmp) \
3191 for (tmp = skb->next; \
3192 skb != (struct sk_buff *)(queue); \
3193 skb = tmp, tmp = skb->next)
3194
3195#define skb_queue_reverse_walk(queue, skb) \
3196 for (skb = (queue)->prev; \
3197 skb != (struct sk_buff *)(queue); \
3198 skb = skb->prev)
3199
3200#define skb_queue_reverse_walk_safe(queue, skb, tmp) \
3201 for (skb = (queue)->prev, tmp = skb->prev; \
3202 skb != (struct sk_buff *)(queue); \
3203 skb = tmp, tmp = skb->prev)
3204
3205#define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \
3206 for (tmp = skb->prev; \
3207 skb != (struct sk_buff *)(queue); \
3208 skb = tmp, tmp = skb->prev)
3209
3210static inline bool skb_has_frag_list(const struct sk_buff *skb)
3211{
3212 return skb_shinfo(skb)->frag_list != NULL;
3213}
3214
3215static inline void skb_frag_list_init(struct sk_buff *skb)
3216{
3217 skb_shinfo(skb)->frag_list = NULL;
3218}
3219
3220#define skb_walk_frags(skb, iter) \
3221 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
3222
3223
3224int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
3225 const struct sk_buff *skb);
3226struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
3227 struct sk_buff_head *queue,
3228 unsigned int flags,
3229 void (*destructor)(struct sock *sk,
3230 struct sk_buff *skb),
3231 int *peeked, int *off, int *err,
3232 struct sk_buff **last);
3233struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
3234 void (*destructor)(struct sock *sk,
3235 struct sk_buff *skb),
3236 int *peeked, int *off, int *err,
3237 struct sk_buff **last);
3238struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
3239 void (*destructor)(struct sock *sk,
3240 struct sk_buff *skb),
3241 int *peeked, int *off, int *err);
3242struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
3243 int *err);
3244unsigned int datagram_poll(struct file *file, struct socket *sock,
3245 struct poll_table_struct *wait);
3246int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
3247 struct iov_iter *to, int size);
3248static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
3249 struct msghdr *msg, int size)
3250{
3251 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
3252}
3253int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
3254 struct msghdr *msg);
3255int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
3256 struct iov_iter *from, int len);
3257int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
3258void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
3259void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
3260static inline void skb_free_datagram_locked(struct sock *sk,
3261 struct sk_buff *skb)
3262{
3263 __skb_free_datagram_locked(sk, skb, 0);
3264}
3265int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
3266int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
3267int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
3268__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
3269 int len, __wsum csum);
3270int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
3271 struct pipe_inode_info *pipe, unsigned int len,
3272 unsigned int flags);
3273int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
3274 int len);
3275int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len);
3276void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
3277unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
3278int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
3279 int len, int hlen);
3280void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
3281int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
3282void skb_scrub_packet(struct sk_buff *skb, bool xnet);
3283unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
3284bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
3285struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
3286struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
3287int skb_ensure_writable(struct sk_buff *skb, int write_len);
3288int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
3289int skb_vlan_pop(struct sk_buff *skb);
3290int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
3291struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
3292 gfp_t gfp);
3293
3294static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
3295{
3296 return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT;
3297}
3298
3299static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
3300{
3301 return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
3302}
3303
3304struct skb_checksum_ops {
3305 __wsum (*update)(const void *mem, int len, __wsum wsum);
3306 __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
3307};
3308
3309extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly;
3310
3311__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
3312 __wsum csum, const struct skb_checksum_ops *ops);
3313__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
3314 __wsum csum);
3315
3316static inline void * __must_check
3317__skb_header_pointer(const struct sk_buff *skb, int offset,
3318 int len, void *data, int hlen, void *buffer)
3319{
3320 if (hlen - offset >= len)
3321 return data + offset;
3322
3323 if (!skb ||
3324 skb_copy_bits(skb, offset, buffer, len) < 0)
3325 return NULL;
3326
3327 return buffer;
3328}
3329
3330static inline void * __must_check
3331skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
3332{
3333 return __skb_header_pointer(skb, offset, len, skb->data,
3334 skb_headlen(skb), buffer);
3335}
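
/* Illustrative usage sketch: read a header that may not be linear by
 * letting skb_header_pointer() fall back to copying it into a local
 * buffer.  "offset" is a hypothetical offset of the header within the
 * skb, and "drop" is a hypothetical error label.
 *
 *	struct udphdr _uh;
 *	const struct udphdr *uh;
 *
 *	uh = skb_header_pointer(skb, offset, sizeof(_uh), &_uh);
 *	if (!uh)
 *		goto drop;
 */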
3336
/**
 *	skb_needs_linearize - check if we need to linearize a given skb
 *			      depending on the given device features.
 *	@skb: socket buffer to check
 *	@features: net device features
 *
 *	Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG.
 */
3347static inline bool skb_needs_linearize(struct sk_buff *skb,
3348 netdev_features_t features)
3349{
3350 return skb_is_nonlinear(skb) &&
3351 ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
3352 (skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
3353}
3354
3355static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
3356 void *to,
3357 const unsigned int len)
3358{
3359 memcpy(to, skb->data, len);
3360}
3361
3362static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
3363 const int offset, void *to,
3364 const unsigned int len)
3365{
3366 memcpy(to, skb->data + offset, len);
3367}
3368
3369static inline void skb_copy_to_linear_data(struct sk_buff *skb,
3370 const void *from,
3371 const unsigned int len)
3372{
3373 memcpy(skb->data, from, len);
3374}
3375
3376static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
3377 const int offset,
3378 const void *from,
3379 const unsigned int len)
3380{
3381 memcpy(skb->data + offset, from, len);
3382}
3383
3384void skb_init(void);
3385
3386static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
3387{
3388 return skb->tstamp;
3389}
3390
/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct timeval to store stamp in
 *
 *	This function converts the skb's ktime_t timestamp to a
 *	struct timeval and stores it in @stamp.
 */
3400static inline void skb_get_timestamp(const struct sk_buff *skb,
3401 struct timeval *stamp)
3402{
3403 *stamp = ktime_to_timeval(skb->tstamp);
3404}
3405
3406static inline void skb_get_timestampns(const struct sk_buff *skb,
3407 struct timespec *stamp)
3408{
3409 *stamp = ktime_to_timespec(skb->tstamp);
3410}
3411
3412static inline void __net_timestamp(struct sk_buff *skb)
3413{
3414 skb->tstamp = ktime_get_real();
3415}
3416
3417static inline ktime_t net_timedelta(ktime_t t)
3418{
3419 return ktime_sub(ktime_get_real(), t);
3420}
3421
3422static inline ktime_t net_invalid_timestamp(void)
3423{
3424 return 0;
3425}
3426
3427static inline u8 skb_metadata_len(const struct sk_buff *skb)
3428{
3429 return skb_shinfo(skb)->meta_len;
3430}
3431
3432static inline void *skb_metadata_end(const struct sk_buff *skb)
3433{
3434 return skb_mac_header(skb);
3435}
3436
3437static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
3438 const struct sk_buff *skb_b,
3439 u8 meta_len)
3440{
3441 const void *a = skb_metadata_end(skb_a);
3442 const void *b = skb_metadata_end(skb_b);
3443
3444#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
3445 u64 diffs = 0;
3446
3447 switch (meta_len) {
3448#define __it(x, op) (x -= sizeof(u##op))
3449#define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
3450 case 32: diffs |= __it_diff(a, b, 64);
3451 case 24: diffs |= __it_diff(a, b, 64);
3452 case 16: diffs |= __it_diff(a, b, 64);
3453 case 8: diffs |= __it_diff(a, b, 64);
3454 break;
3455 case 28: diffs |= __it_diff(a, b, 64);
3456 case 20: diffs |= __it_diff(a, b, 64);
3457 case 12: diffs |= __it_diff(a, b, 64);
3458 case 4: diffs |= __it_diff(a, b, 32);
3459 break;
3460 }
3461 return diffs;
3462#else
3463 return memcmp(a - meta_len, b - meta_len, meta_len);
3464#endif
3465}
3466
3467static inline bool skb_metadata_differs(const struct sk_buff *skb_a,
3468 const struct sk_buff *skb_b)
3469{
3470 u8 len_a = skb_metadata_len(skb_a);
3471 u8 len_b = skb_metadata_len(skb_b);
3472
3473 if (!(len_a | len_b))
3474 return false;
3475
3476 return len_a != len_b ?
3477 true : __skb_metadata_differs(skb_a, skb_b, len_a);
3478}
3479
3480static inline void skb_metadata_set(struct sk_buff *skb, u8 meta_len)
3481{
3482 skb_shinfo(skb)->meta_len = meta_len;
3483}
3484
3485static inline void skb_metadata_clear(struct sk_buff *skb)
3486{
3487 skb_metadata_set(skb, 0);
3488}
3489
3490struct sk_buff *skb_clone_sk(struct sk_buff *skb);
3491
3492#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
3493
3494void skb_clone_tx_timestamp(struct sk_buff *skb);
3495bool skb_defer_rx_timestamp(struct sk_buff *skb);
3496
3497#else
3498
3499static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
3500{
3501}
3502
3503static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
3504{
3505 return false;
3506}
3507
3508#endif
3509
/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb back to the stack with a
 * timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps, may be NULL if not available
 *
 */
3522void skb_complete_tx_timestamp(struct sk_buff *skb,
3523 struct skb_shared_hwtstamps *hwtstamps);
3524
3525void __skb_tstamp_tx(struct sk_buff *orig_skb,
3526 struct skb_shared_hwtstamps *hwtstamps,
3527 struct sock *sk, int tstype);
3528
/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb:	the original outgoing packet
 * @hwtstamps:	hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket.  Errors are silently ignored.
 */
3540void skb_tstamp_tx(struct sk_buff *orig_skb,
3541 struct skb_shared_hwtstamps *hwtstamps);
3542
/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC Drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * Specifically, one should make absolutely sure that this function is
 * called before TX completion of this packet can trigger.  Otherwise
 * the packet could potentially already be freed.
 *
 * @skb: A socket buffer.
 */
3555static inline void skb_tx_timestamp(struct sk_buff *skb)
3556{
3557 skb_clone_tx_timestamp(skb);
3558 if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
3559 skb_tstamp_tx(skb, NULL);
3560}
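
/* Illustrative usage sketch: in a driver's ndo_start_xmit() routine the
 * call is placed immediately before the descriptor is handed to the
 * hardware.  "my_hw_submit" is a hypothetical driver helper.
 *
 *	skb_tx_timestamp(skb);
 *	my_hw_submit(ring, skb);
 */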
3561
/**
 * skb_complete_wifi_ack - deliver skb with wifi status
 *
 * @skb: the original outgoing packet
 * @acked: ack status
 *
 */
3569void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
3570
3571__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
3572__sum16 __skb_checksum_complete(struct sk_buff *skb);
3573
3574static inline int skb_csum_unnecessary(const struct sk_buff *skb)
3575{
3576 return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
3577 skb->csum_valid ||
3578 (skb->ip_summed == CHECKSUM_PARTIAL &&
3579 skb_checksum_start_offset(skb) >= 0));
3580}
3581
/**
 *	skb_checksum_complete - calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP,
 *	this function can be used to verify the checksum on received
 *	packets.  Zero is returned if the checksum is already known to
 *	be unnecessary or if the computed checksum is valid; otherwise
 *	the non-zero folded checksum is returned.
 */
3598static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
3599{
3600 return skb_csum_unnecessary(skb) ?
3601 0 : __skb_checksum_complete(skb);
3602}
3603
3604static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
3605{
3606 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3607 if (skb->csum_level == 0)
3608 skb->ip_summed = CHECKSUM_NONE;
3609 else
3610 skb->csum_level--;
3611 }
3612}
3613
3614static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
3615{
3616 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3617 if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
3618 skb->csum_level++;
3619 } else if (skb->ip_summed == CHECKSUM_NONE) {
3620 skb->ip_summed = CHECKSUM_UNNECESSARY;
3621 skb->csum_level = 0;
3622 }
3623}
3624
/* Check if we need to perform checksum complete validation.
 *
 * Returns true if checksum complete is needed, false otherwise
 * (either checksum is unnecessary or zero checksum is allowed).
 */
3630static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
3631 bool zero_okay,
3632 __sum16 check)
3633{
3634 if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
3635 skb->csum_valid = 1;
3636 __skb_decr_checksum_unnecessary(skb);
3637 return false;
3638 }
3639
3640 return true;
3641}
3642
/* For small packets <= CHECKSUM_BREAK, perform checksum complete
 * directly in the checksum_init path.
 */
3646#define CHECKSUM_BREAK 76
3647
/* Unset checksum-complete
 *
 * Unset checksum complete can be done when the packet is being
 * modified (decompressed for instance) and the checksum-complete
 * value is no longer valid.
 */
3654static inline void skb_checksum_complete_unset(struct sk_buff *skb)
3655{
3656 if (skb->ip_summed == CHECKSUM_COMPLETE)
3657 skb->ip_summed = CHECKSUM_NONE;
3658}
3659
/* Validate (init) checksum based on checksum complete.
 *
 * Return values:
 *   0: checksum is validated or deferred to skb_checksum_complete. In
 *      the latter case ip_summed is not CHECKSUM_UNNECESSARY and the
 *      pseudo checksum is stored in skb->csum for use by
 *      __skb_checksum_complete
 *   non-zero: value of the invalid checksum
 */
3669static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
3670 bool complete,
3671 __wsum psum)
3672{
3673 if (skb->ip_summed == CHECKSUM_COMPLETE) {
3674 if (!csum_fold(csum_add(psum, skb->csum))) {
3675 skb->csum_valid = 1;
3676 return 0;
3677 }
3678 }
3679
3680 skb->csum = psum;
3681
3682 if (complete || skb->len <= CHECKSUM_BREAK) {
3683 __sum16 csum;
3684
3685 csum = __skb_checksum_complete(skb);
3686 skb->csum_valid = !csum;
3687 return csum;
3688 }
3689
3690 return 0;
3691}
3692
3693static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
3694{
3695 return 0;
3696}
3697
/* Perform checksum validate (init). Note that this is a macro since we
 * only want to call the pseudo-header helper when it is actually needed.
 * First we try to validate without any computation (checksum unnecessary)
 * and then fall back to checksum complete, calling the supplied function
 * to compute the pseudo header.
 *
 * Return values:
 *   0: checksum is validated or deferred to skb_checksum_complete
 *   non-zero: value of the invalid checksum
 */
3708#define __skb_checksum_validate(skb, proto, complete, \
3709 zero_okay, check, compute_pseudo) \
3710({ \
3711 __sum16 __ret = 0; \
3712 skb->csum_valid = 0; \
3713 if (__skb_checksum_validate_needed(skb, zero_okay, check)) \
3714 __ret = __skb_checksum_validate_complete(skb, \
3715 complete, compute_pseudo(skb, proto)); \
3716 __ret; \
3717})
3718
3719#define skb_checksum_init(skb, proto, compute_pseudo) \
3720 __skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)
3721
3722#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \
3723 __skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)
3724
3725#define skb_checksum_validate(skb, proto, compute_pseudo) \
3726 __skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)
3727
3728#define skb_checksum_validate_zero_check(skb, proto, check, \
3729 compute_pseudo) \
3730 __skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)
3731
3732#define skb_checksum_simple_validate(skb) \
3733 __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
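
/* Illustrative usage sketch: an L4 receive handler validating its
 * checksum with a protocol-specific pseudo-header helper.  The helper
 * depends on the protocol (inet_compute_pseudo() is the usual choice
 * for IPv4); "csum_error" is a hypothetical error label.
 *
 *	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
 *		goto csum_error;
 */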
3734
3735static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
3736{
3737 return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid);
3738}
3739
3740static inline void __skb_checksum_convert(struct sk_buff *skb,
3741 __sum16 check, __wsum pseudo)
3742{
3743 skb->csum = ~pseudo;
3744 skb->ip_summed = CHECKSUM_COMPLETE;
3745}
3746
3747#define skb_checksum_try_convert(skb, proto, check, compute_pseudo) \
3748do { \
3749 if (__skb_checksum_convert_check(skb)) \
3750 __skb_checksum_convert(skb, check, \
3751 compute_pseudo(skb, proto)); \
3752} while (0)
3753
3754static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
3755 u16 start, u16 offset)
3756{
3757 skb->ip_summed = CHECKSUM_PARTIAL;
3758 skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
3759 skb->csum_offset = offset - start;
3760}
3761
/* Update the skb and packet to reflect a remote checksum offload
 * operation.  When called, @ptr indicates the starting point of
 * skb->csum when ip_summed is CHECKSUM_COMPLETE.  If checksum complete
 * has to be created here, skb_postpull_rcsum() is applied so that
 * skb->csum starts at @ptr.
 */
3767static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
3768 int start, int offset, bool nopartial)
3769{
3770 __wsum delta;
3771
3772 if (!nopartial) {
3773 skb_remcsum_adjust_partial(skb, ptr, start, offset);
3774 return;
3775 }
3776
3777 if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
3778 __skb_checksum_complete(skb);
3779 skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
3780 }
3781
3782 delta = remcsum_adjust(ptr, skb->csum, start, offset);
3783
	/* Adjust skb->csum since we changed the packet */
3785 skb->csum = csum_add(skb->csum, delta);
3786}
3787
3788static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
3789{
3790#if IS_ENABLED(CONFIG_NF_CONNTRACK)
3791 return (void *)(skb->_nfct & SKB_NFCT_PTRMASK);
3792#else
3793 return NULL;
3794#endif
3795}
3796
3797#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
3798void nf_conntrack_destroy(struct nf_conntrack *nfct);
3799static inline void nf_conntrack_put(struct nf_conntrack *nfct)
3800{
3801 if (nfct && atomic_dec_and_test(&nfct->use))
3802 nf_conntrack_destroy(nfct);
3803}
3804static inline void nf_conntrack_get(struct nf_conntrack *nfct)
3805{
3806 if (nfct)
3807 atomic_inc(&nfct->use);
3808}
3809#endif
3810#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
3811static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
3812{
3813 if (nf_bridge && refcount_dec_and_test(&nf_bridge->use))
3814 kfree(nf_bridge);
3815}
3816static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
3817{
3818 if (nf_bridge)
3819 refcount_inc(&nf_bridge->use);
3820}
3821#endif
3822static inline void nf_reset(struct sk_buff *skb)
3823{
3824#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
3825 nf_conntrack_put(skb_nfct(skb));
3826 skb->_nfct = 0;
3827#endif
3828#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
3829 nf_bridge_put(skb->nf_bridge);
3830 skb->nf_bridge = NULL;
3831#endif
3832}
3833
3834static inline void nf_reset_trace(struct sk_buff *skb)
3835{
3836#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
3837 skb->nf_trace = 0;
3838#endif
3839}
3840
3841static inline void ipvs_reset(struct sk_buff *skb)
3842{
3843#if IS_ENABLED(CONFIG_IP_VS)
3844 skb->ipvs_property = 0;
3845#endif
3846}
3847
/* Note: unlike nf_copy(), this does not drop (put) any conntrack or
 * bridge references already held by @dst; callers must do that first.
 */
3849static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
3850 bool copy)
3851{
3852#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
3853 dst->_nfct = src->_nfct;
3854 nf_conntrack_get(skb_nfct(src));
3855#endif
3856#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
3857 dst->nf_bridge = src->nf_bridge;
3858 nf_bridge_get(src->nf_bridge);
3859#endif
3860#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
3861 if (copy)
3862 dst->nf_trace = src->nf_trace;
3863#endif
3864}
3865
3866static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
3867{
3868#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
3869 nf_conntrack_put(skb_nfct(dst));
3870#endif
3871#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
3872 nf_bridge_put(dst->nf_bridge);
3873#endif
3874 __nf_copy(dst, src, true);
3875}
3876
3877#ifdef CONFIG_NETWORK_SECMARK
3878static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
3879{
3880 to->secmark = from->secmark;
3881}
3882
3883static inline void skb_init_secmark(struct sk_buff *skb)
3884{
3885 skb->secmark = 0;
3886}
3887#else
3888static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
3889{ }
3890
3891static inline void skb_init_secmark(struct sk_buff *skb)
3892{ }
3893#endif
3894
3895static inline bool skb_irq_freeable(const struct sk_buff *skb)
3896{
3897 return !skb->destructor &&
3898#if IS_ENABLED(CONFIG_XFRM)
3899 !skb->sp &&
3900#endif
3901 !skb_nfct(skb) &&
3902 !skb->_skb_refdst &&
3903 !skb_has_frag_list(skb);
3904}
3905
3906static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
3907{
3908 skb->queue_mapping = queue_mapping;
3909}
3910
3911static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
3912{
3913 return skb->queue_mapping;
3914}
3915
3916static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
3917{
3918 to->queue_mapping = from->queue_mapping;
3919}
3920
3921static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
3922{
3923 skb->queue_mapping = rx_queue + 1;
3924}
3925
3926static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
3927{
3928 return skb->queue_mapping - 1;
3929}
3930
3931static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
3932{
3933 return skb->queue_mapping != 0;
3934}
3935
3936static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val)
3937{
3938 skb->dst_pending_confirm = val;
3939}
3940
3941static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
3942{
3943 return skb->dst_pending_confirm != 0;
3944}
3945
3946static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
3947{
3948#ifdef CONFIG_XFRM
3949 return skb->sp;
3950#else
3951 return NULL;
3952#endif
3953}
3954
/* Keeps track of the mac header offset relative to skb->head.
 * It is useful for TSO of tunneling protocols, e.g. GRE.
 * For a non-tunnel skb it points to skb_mac_header().
 * For a tunnel skb it points to the outer mac header.
 * Also keeps track of the level of encapsulation of network headers.
 */
3961struct skb_gso_cb {
3962 union {
3963 int mac_offset;
3964 int data_offset;
3965 };
3966 int encap_level;
3967 __wsum csum;
3968 __u16 csum_start;
3969};
3970#define SKB_SGO_CB_OFFSET 32
3971#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))
3972
3973static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
3974{
3975 return (skb_mac_header(inner_skb) - inner_skb->head) -
3976 SKB_GSO_CB(inner_skb)->mac_offset;
3977}
3978
3979static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
3980{
3981 int new_headroom, headroom;
3982 int ret;
3983
3984 headroom = skb_headroom(skb);
3985 ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
3986 if (ret)
3987 return ret;
3988
3989 new_headroom = skb_headroom(skb);
3990 SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
3991 return 0;
3992}
3993
3994static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
3995{
	/* Do not update partial checksums if remote checksum is enabled. */
3997 if (skb->remcsum_offload)
3998 return;
3999
4000 SKB_GSO_CB(skb)->csum = res;
4001 SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
4002}
4003
/* Compute the checksum for a gso segment.  First compute the checksum
 * value from the start of the transport header up to
 * SKB_GSO_CB(skb)->csum_start, seeded with SKB_GSO_CB(skb)->csum
 * (the checksum from csum_start to the end of the packet).  csum and
 * csum_start in the GSO control block are then updated so that @res
 * seeds the checksum of the next segment, starting at the transport
 * header.
 */
4012static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
4013{
4014 unsigned char *csum_start = skb_transport_header(skb);
4015 int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
4016 __wsum partial = SKB_GSO_CB(skb)->csum;
4017
4018 SKB_GSO_CB(skb)->csum = res;
4019 SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;
4020
4021 return csum_fold(csum_partial(csum_start, plen, partial));
4022}
4023
4024static inline bool skb_is_gso(const struct sk_buff *skb)
4025{
4026 return skb_shinfo(skb)->gso_size;
4027}
4028
/* Note: Should be called only if skb_is_gso(skb) is true */
4030static inline bool skb_is_gso_v6(const struct sk_buff *skb)
4031{
4032 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
4033}
4034
4035static inline void skb_gso_reset(struct sk_buff *skb)
4036{
4037 skb_shinfo(skb)->gso_size = 0;
4038 skb_shinfo(skb)->gso_segs = 0;
4039 skb_shinfo(skb)->gso_type = 0;
4040}
4041
4042void __skb_warn_lro_forwarding(const struct sk_buff *skb);
4043
4044static inline bool skb_warn_if_lro(const struct sk_buff *skb)
4045{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set as well. */
4048 const struct skb_shared_info *shinfo = skb_shinfo(skb);
4049
4050 if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
4051 unlikely(shinfo->gso_type == 0)) {
4052 __skb_warn_lro_forwarding(skb);
4053 return true;
4054 }
4055 return false;
4056}
4057
4058static inline void skb_forward_csum(struct sk_buff *skb)
4059{
	/* CHECKSUM_COMPLETE is not supported on the forwarding path, so
	 * fall back to CHECKSUM_NONE and let it be recomputed if needed. */
4061 if (skb->ip_summed == CHECKSUM_COMPLETE)
4062 skb->ip_summed = CHECKSUM_NONE;
4063}
4064
/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * Fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper, to document places where we make this assertion.
 */
4073static inline void skb_checksum_none_assert(const struct sk_buff *skb)
4074{
4075#ifdef DEBUG
4076 BUG_ON(skb->ip_summed != CHECKSUM_NONE);
4077#endif
4078}
4079
4080bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
4081
4082int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
4083struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
4084 unsigned int transport_len,
4085 __sum16(*skb_chkf)(struct sk_buff *skb));
4086
/**
 * skb_head_is_locked - Determine if the skb->head is locked down
 * @skb: skb to check
 *
 * The head on skbs built around a head_frag can be removed if they are
 * not cloned.  This function returns true if the skb head is locked down
 * due to either being allocated via kmalloc, or by being a clone with
 * multiple references to the head.
 */
4096static inline bool skb_head_is_locked(const struct sk_buff *skb)
4097{
4098 return !skb->head_frag || skb_cloned(skb);
4099}
4100
/**
 * skb_gso_network_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_network_seglen is used to determine the real size of the
 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
 *
 * The MAC/L2 header is not accounted for.
 */
4111static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
4112{
4113 unsigned int hdr_len = skb_transport_header(skb) -
4114 skb_network_header(skb);
4115 return hdr_len + skb_gso_transport_seglen(skb);
4116}
4117
/* Local Checksum Offload.
 * Compute the outer checksum based on the assumption that the
 * inner checksum will be offloaded later.
 * See Documentation/networking/checksum-offloads.txt for an
 * explanation of how this works.
 * Fill in the outer checksum adjustment (e.g. with the sum of the
 * outer pseudo-header) before calling.
 * Also ensure that the inner checksum is in CHECKSUM_PARTIAL format.
 */
4127static inline __wsum lco_csum(struct sk_buff *skb)
4128{
4129 unsigned char *csum_start = skb_checksum_start(skb);
4130 unsigned char *l4_hdr = skb_transport_header(skb);
4131 __wsum partial;
4132
	/* Start with complement of inner checksum adjustment */
4134 partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
4135 skb->csum_offset));
4136
	/* Add in checksum of our headers (incl. outer checksum
	 * adjustment filled in by caller) and return result.
	 */
4140 return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
4141}
4142
4143#endif
4144#endif
4145