/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/rbtree.h>
#include <linux/socket.h>
#include <linux/refcount.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <net/flow_dissector.h>
#include <linux/splice.h>
#include <linux/in6.h>
#include <linux/if_packet.h>
#include <net/flow.h>
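
/* Checksum states for skb->ip_summed:
 *
 * CHECKSUM_NONE:	the device did not checksum this packet;
 *			skb->csum is undefined.
 * CHECKSUM_UNNECESSARY:the hardware already verified the checksum(s);
 *			skb->csum_level says how many consecutive checksums
 *			(up to SKB_MAX_CSUM_LEVEL) were verified.
 * CHECKSUM_COMPLETE:	on receive, the device provided the checksum over
 *			the complete packet in skb->csum.
 * CHECKSUM_PARTIAL:	on transmit, the stack computed the checksum up to
 *			skb->csum_start; the device must checksum from
 *			csum_start and store the result at
 *			csum_start + csum_offset.
 */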
#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

/* Maximum value in skb->csum_level */
#define SKB_MAX_CSUM_LEVEL	3

#define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
struct net_device;
struct scatterlist;
struct pipe_inode_info;
struct iov_iter;
struct napi_struct;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
	refcount_t		use;
	enum {
		BRNF_PROTO_UNCHANGED,
		BRNF_PROTO_8021Q,
		BRNF_PROTO_PPPOE
	} orig_proto:8;
	u8			pkt_otherhost:1;
	u8			in_prerouting:1;
	u8			bridged_dnat:1;
	__u16			frag_max_size;
	struct net_device	*physindev;

	/* always valid & non-NULL from FORWARD on, for physdev match */
	struct net_device	*physoutdev;
	union {
		/* prerouting: detect dnat in orig/reply direction */
		__be32          ipv4_daddr;
		struct in6_addr ipv6_daddr;

		/* after prerouting + nat detected: store original source
		 * mac since neigh resolution overwrites it, only used while
		 * skb is out in neigh layer.
		 */
		char neigh_header[8];
	};
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;
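
/* To allow 64K frame to be packed as single skb without frag_list we
 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
 * buffers which do not start on a page boundary.
 *
 * Since GRO uses frags we allocate at least 16 regardless of page
 * size.
 */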
#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif
extern int sysctl_max_skb_frags;
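
/* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
 * segment using its current segmentation instead.
 */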
#define GSO_BY_FRAGS	0xFFFF

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct {
		struct page *p;
	} page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
	__u32 size;
#else
	__u16 page_offset;
	__u16 size;
#endif
};

static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->size;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->size = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->size += delta;
}

static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->size -= delta;
}

static inline bool skb_frag_must_loop(struct page *p)
{
#if defined(CONFIG_HIGHMEM)
	if (PageHighMem(p))
		return true;
#endif
	return false;
}
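
/**
 * skb_frag_foreach_page - loop over pages in a fragment
 *
 * @f:		skb frag to operate on
 * @f_off:	offset from start of f->page.p
 * @f_len:	length from f_off to loop over
 * @p:		(temp var) current page
 * @p_off:	(temp var) offset from start of current page,
 *                         non-zero only on first page.
 * @p_len:	(temp var) length in current page,
 *                         < PAGE_SIZE only on first and last page.
 * @copied:	(temp var) length so far, excluding current p_len.
 *
 * A fragment can hold a compound page, in which case per-page
 * operations, notably kmap_atomic, must be called for each
 * regular page.
 */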
#define skb_frag_foreach_page(f, f_off, f_len, p, p_off, p_len, copied)	\
	for (p = skb_frag_page(f) + ((f_off) >> PAGE_SHIFT),		\
	     p_off = (f_off) & (PAGE_SIZE - 1),				\
	     p_len = skb_frag_must_loop(p) ?				\
	     min_t(u32, f_len, PAGE_SIZE - p_off) : f_len,		\
	     copied = 0;						\
	     copied < f_len;						\
	     copied += p_len, p++, p_off = 0,				\
	     p_len = min_t(u32, f_len - copied, PAGE_SIZE))		\

#define HAVE_HW_TIME_STAMP
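
/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since arbitrary point in time
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */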
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
};

/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp when queueing packet to NIC */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* device driver supports TX zero-copy buffers */
	SKBTX_DEV_ZEROCOPY = 1 << 3,

	/* generate wifi status information (where possible) */
	SKBTX_WIFI_STATUS = 1 << 4,

	/* This indicates at least one fragment might be overwritten
	 * (as in vmsplice(), sendfile() ...)
	 * If we need to compute a TX checksum, we'll need to copy
	 * all frags to avoid possible bad checksum
	 */
	SKBTX_SHARED_FRAG = 1 << 5,

	/* generate software time stamp when entering packet scheduling */
	SKBTX_SCHED_TSTAMP = 1 << 6,
};

#define SKBTX_ZEROCOPY_FRAG	(SKBTX_DEV_ZEROCOPY | SKBTX_SHARED_FRAG)
#define SKBTX_ANY_SW_TSTAMP	(SKBTX_SW_TSTAMP    | \
				 SKBTX_SCHED_TSTAMP)
#define SKBTX_ANY_TSTAMP	(SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)

/*
 * The callback notifies userspace to release buffers when skb DMA is done in
 * lower device, the skb last reference should be 0 when calling this.
 * The zerocopy_success argument is true if zero copy transmit occurred,
 * false on data copy or out of memory error caused by data copy attempt.
 * The ctx field is used to track device context.
 * The desc field is used to track userspace buffer index.
 */
struct ubuf_info {
	void (*callback)(struct ubuf_info *, bool zerocopy_success);
	union {
		struct {
			unsigned long desc;
			void *ctx;
		};
		struct {
			u32 id;
			u16 len;
			u16 zerocopy:1;
			u32 bytelen;
		};
	};
	refcount_t refcnt;

	struct mmpin {
		struct user_struct *user;
		unsigned int num_pg;
	} mmp;
};

#define skb_uarg(SKB)	((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))

int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
void mm_unaccount_pinned_pages(struct mmpin *mmp);

struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size);
struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
					struct ubuf_info *uarg);

static inline void sock_zerocopy_get(struct ubuf_info *uarg)
{
	refcount_inc(&uarg->refcnt);
}

void sock_zerocopy_put(struct ubuf_info *uarg);
void sock_zerocopy_put_abort(struct ubuf_info *uarg);

void sock_zerocopy_callback(struct ubuf_info *uarg, bool success);

int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
			     struct msghdr *msg, int len,
			     struct ubuf_info *uarg);
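
/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */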
struct skb_shared_info {
	__u8		__unused;
	__u8		meta_len;
	__u8		nr_frags;
	__u8		tx_flags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	unsigned int	gso_type;
	u32		tskey;

	/*
	 * Warning : all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void		*destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)


enum {
	SKB_FCLONE_UNAVAILABLE,	/* skb has no fclone (from head_cache) */
	SKB_FCLONE_ORIG,	/* orig skb (from fclone_cache) */
	SKB_FCLONE_CLONE,	/* companion fclone skb (from fclone_cache) */
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 1,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 2,

	SKB_GSO_TCP_FIXEDID = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,

	SKB_GSO_GRE = 1 << 6,

	SKB_GSO_GRE_CSUM = 1 << 7,

	SKB_GSO_IPXIP4 = 1 << 8,

	SKB_GSO_IPXIP6 = 1 << 9,

	SKB_GSO_UDP_TUNNEL = 1 << 10,

	SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,

	SKB_GSO_PARTIAL = 1 << 12,

	SKB_GSO_TUNNEL_REMCSUM = 1 << 13,

	SKB_GSO_SCTP = 1 << 14,

	SKB_GSO_ESP = 1 << 15,

	SKB_GSO_UDP = 1 << 16,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif
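
/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@rbnode: RB tree node, alternative to next/prev for netem/tcp stack
 *	@sk: Socket we are owned by
 *	@tstamp: Time we arrived (or time packet was sent)
 *	@dev: Device we arrived on/are leaving by
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@_skb_refdst: destination entry (with norefcount bit)
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@hdr_len: writable header length of cloned skb
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
 *	@priority: Packet queueing priority
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@ip_summed: Driver fed us an IP checksum
 *	@nohdr: Payload reference only, must not modify header
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@protocol: Packet protocol from driver
 *	@truesize: Buffer size
 *	@users: User count - see {datagram,tcp}.c
 */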
struct sk_buff {
	union {
		struct {
			/* These two members must be first. */
			struct sk_buff		*next;
			struct sk_buff		*prev;

			union {
				struct net_device	*dev;
				/* Some protocols might use this space to store information,
				 * while device pointer would be NULL.
				 * UDP receive path is one user.
				 */
				unsigned long		dev_scratch;
				int			ip_defrag_offset;
			};
		};
		struct rb_node	rbnode; /* used in netem & tcp stack */
	};
	struct sock		*sk;

	union {
		ktime_t		tstamp;
		u64		skb_mstamp;
	};
	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	union {
		struct {
			unsigned long	_skb_refdst;
			void		(*destructor)(struct sk_buff *skb);
		};
		struct list_head	tcp_tsorted_anchor;
	};

#ifdef CONFIG_XFRM
	struct	sec_path	*sp;
#endif
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	unsigned long		 _nfct;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	struct nf_bridge_info	*nf_bridge;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;

	/* Following fields are _not_ copied in __copy_skb_header()
	 * Note that queue_mapping is here mostly to fill a hole.
	 */
	__u16			queue_mapping;

/* if you move cloned around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define CLONED_MASK	(1 << 7)
#else
#define CLONED_MASK	1
#endif
#define CLONED_OFFSET()		offsetof(struct sk_buff, __cloned_offset)

	__u8			__cloned_offset[0];
	__u8			cloned:1,
				nohdr:1,
				fclone:2,
				peeked:1,
				head_frag:1,
				xmit_more:1,
				__unused:1; /* one bit hole */

	/* fields enclosed in headers_start/headers_end are copied
	 * using a single memcpy() in __copy_skb_header()
	 */
	/* private: */
	__u32			headers_start[0];
	/* public: */

/* if you move pkt_type around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_TYPE_MAX	(7 << 5)
#else
#define PKT_TYPE_MAX	7
#endif
#define PKT_TYPE_OFFSET()	offsetof(struct sk_buff, __pkt_type_offset)

	__u8			__pkt_type_offset[0];
	__u8			pkt_type:3;
	__u8			pfmemalloc:1;
	__u8			ignore_df:1;

	__u8			nf_trace:1;
	__u8			ip_summed:2;
	__u8			ooo_okay:1;
	__u8			l4_hash:1;
	__u8			sw_hash:1;
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;

	__u8			no_fcs:1;
	/* Indicates the inner headers are valid in the skbuff. */
	__u8			encapsulation:1;
	__u8			encap_hdr_csum:1;
	__u8			csum_valid:1;
	__u8			csum_complete_sw:1;
	__u8			csum_level:2;
	__u8			csum_not_inet:1;

	__u8			dst_pending_confirm:1;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif
	__u8			ipvs_property:1;
	__u8			inner_protocol_type:1;
	__u8			remcsum_offload:1;
#ifdef CONFIG_NET_SWITCHDEV
	__u8			offload_fwd_mark:1;
	__u8			offload_mr_fwd_mark:1;
#endif
#ifdef CONFIG_NET_CLS_ACT
	__u8			tc_skip_classify:1;
	__u8			tc_at_ingress:1;
	__u8			tc_redirected:1;
	__u8			tc_from_ingress:1;
#endif

#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#endif

	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	int			skb_iif;
	__u32			hash;
	__be16			vlan_proto;
	__u16			vlan_tci;
#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
	union {
		unsigned int	napi_id;
		unsigned int	sender_cpu;
	};
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32		secmark;
#endif

	union {
		__u32		mark;
		__u32		reserved_tailroom;
	};

	union {
		__be16		inner_protocol;
		__u8		inner_ipproto;
	};

	__u16			inner_transport_header;
	__u16			inner_network_header;
	__u16			inner_mac_header;

	__be16			protocol;
	__u16			transport_header;
	__u16			network_header;
	__u16			mac_header;

	/* private: */
	__u32			headers_end[0];
	/* public: */

	/* These elements must be at the end, see alloc_skb() for details.  */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	refcount_t		users;
};

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>


#define SKB_ALLOC_FCLONE	0x01
#define SKB_ALLOC_RX		0x02
#define SKB_ALLOC_NAPI		0x04

/* Returns true if the skb was allocated from PFMEMALLOC reserves */
static inline bool skb_pfmemalloc(const struct sk_buff *skb)
{
	return unlikely(skb->pfmemalloc);
}

/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if refcount was _not_ taken
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

#define SKB_NFCT_PTRMASK	~(7UL)
/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of reference taken or not.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check we still are in a
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}

/**
 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * If dst entry is cached, we do not take reference and dst_release
 * will be avoided by refdst_drop. If dst entry is not cached, we take
 * reference, so that last dst_release can destroy the dst immediately.
 */
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
}

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

/* For mangling skb->pkt_type from user space side from applications
 * such as nft, tc, etc, we only allow a conservative subset of
 * possible pkt_types to be set.
*/
static inline bool skb_pkt_type_ok(u32 ptype)
{
	return ptype <= PACKET_OTHERHOST;
}

static inline unsigned int skb_napi_id(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	return skb->napi_id;
#else
	return 0;
#endif
}

/* decrement the reference count and return true if we can free the skb */
static inline bool skb_unref(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return false;
	if (likely(refcount_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!refcount_dec_and_test(&skb->users)))
		return false;

	return true;
}

void skb_release_head_state(struct sk_buff *skb);
void kfree_skb(struct sk_buff *skb);
void kfree_skb_list(struct sk_buff *segs);
void skb_tx_error(struct sk_buff *skb);
void consume_skb(struct sk_buff *skb);
void __consume_stateless_skb(struct sk_buff *skb);
void  __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;

void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
		      bool *fragstolen, int *delta_truesize);

struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
			    int node);
struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}

struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
				     unsigned long data_len,
				     int max_page_order,
				     int *errcode,
				     gfp_t gfp_mask);

/* Layout of fast clones : [skb1][skb2][fclone_ref] */
struct sk_buff_fclones {
	struct sk_buff	skb1;

	struct sk_buff	skb2;

	refcount_t	fclone_ref;
};

/**
 *	skb_fclone_busy - check if fclone is busy
 *	@sk: socket
 *	@skb: buffer
 *
 * Returns true if skb is a fast clone, and its clone is not freed.
 * Some drivers call skb_orphan() in their ndo_start_xmit(),
 * so we also check that this didnt happen.
 */
static inline bool skb_fclone_busy(const struct sock *sk,
				   const struct sk_buff *skb)
{
	const struct sk_buff_fclones *fclones;

	fclones = container_of(skb, struct sk_buff_fclones, skb1);

	return skb->fclone == SKB_FCLONE_ORIG &&
	       refcount_read(&fclones->fclone_ref) > 1 &&
	       fclones->skb2.sk == sk;
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
}

struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone);
static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
					  gfp_t gfp_mask)
{
	return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
}

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
				     unsigned int headroom);
struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
				int newtailroom, gfp_t priority);
int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
				     int offset, int len);
int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
			      int offset, int len);
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);

/**
 *	skb_pad			-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error.
 */
static inline int skb_pad(struct sk_buff *skb, int pad)
{
	return __skb_pad(skb, pad, true);
}
#define dev_kfree_skb(a)	consume_skb(a)

int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length);

int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
			 int offset, size_t size);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st);
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st);
void skb_abort_seq_read(struct skb_seq_state *st);

unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config);
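
/*
 * Packet hash types specify the type of hash in skb_set_hash.
 *
 * Hash types refer to the protocol layer addresses which are used to
 * construct a packet's hash. The hashes are used to differentiate or
 * identify flows of the protocol layer for the hash.
 *
 * Hash types are either L2, L3 or L4: an L4 hash is computed over the
 * layer four addresses (e.g. TCP/UDP ports), an L3 hash over the layer
 * three addresses (e.g. IP src/dst), an L2 hash over link layer
 * addresses.  Two packets of the same flow should have the same hash.
 */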
enum pkt_hash_types {
	PKT_HASH_TYPE_NONE,	/* Undefined type */
	PKT_HASH_TYPE_L2,	/* Input: src_MAC, dest_MAC */
	PKT_HASH_TYPE_L3,	/* Input: src_IP, dst_IP */
	PKT_HASH_TYPE_L4,	/* Input: src_IP, dst_IP, src_port, dst_port */
};

static inline void skb_clear_hash(struct sk_buff *skb)
{
	skb->hash = 0;
	skb->sw_hash = 0;
	skb->l4_hash = 0;
}

static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
{
	if (!skb->l4_hash)
		skb_clear_hash(skb);
}

static inline void
__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
{
	skb->l4_hash = is_l4;
	skb->sw_hash = is_sw;
	skb->hash = hash;
}

static inline void
skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
{
	/* Used by drivers to set hash from HW */
	__skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
}

static inline void
__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
{
	__skb_set_hash(skb, hash, true, is_l4);
}

void __skb_get_hash(struct sk_buff *skb);
u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
u32 skb_get_poff(const struct sk_buff *skb);
u32 __skb_get_poff(const struct sk_buff *skb, void *data,
		   const struct flow_keys *keys, int hlen);
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
			    void *data, int hlen_proto);

static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
					int thoff, u8 ip_proto)
{
	return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
}

void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count);

bool __skb_flow_dissect(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container,
			void *data, __be16 proto, int nhoff, int hlen,
			unsigned int flags);

static inline bool skb_flow_dissect(const struct sk_buff *skb,
				    struct flow_dissector *flow_dissector,
				    void *target_container, unsigned int flags)
{
	return __skb_flow_dissect(skb, flow_dissector, target_container,
				  NULL, 0, 0, 0, flags);
}

static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
					      struct flow_keys *flow,
					      unsigned int flags)
{
	memset(flow, 0, sizeof(*flow));
	return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
				  NULL, 0, 0, 0, flags);
}

static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
						  void *data, __be16 proto,
						  int nhoff, int hlen,
						  unsigned int flags)
{
	memset(flow, 0, sizeof(*flow));
	return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
				  data, proto, nhoff, hlen, flags);
}

void
skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
			     struct flow_dissector *flow_dissector,
			     void *target_container);

static inline __u32 skb_get_hash(struct sk_buff *skb)
{
	if (!skb->l4_hash && !skb->sw_hash)
		__skb_get_hash(skb);

	return skb->hash;
}

static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
{
	if (!skb->l4_hash && !skb->sw_hash) {
		struct flow_keys keys;
		__u32 hash = __get_hash_from_flowi6(fl6, &keys);

		__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
	}

	return skb->hash;
}

__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);

static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
	return skb->hash;
}

static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
{
	to->hash = from->hash;
	to->sw_hash = from->sw_hash;
	to->l4_hash = from->l4_hash;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end - skb->head;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
{
	bool is_zcopy = skb && skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY;

	return is_zcopy ? skb_uarg(skb) : NULL;
}

static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg)
{
	if (skb && uarg && !skb_zcopy(skb)) {
		sock_zerocopy_get(uarg);
		skb_shinfo(skb)->destructor_arg = uarg;
		skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
	}
}

/* Release a reference on a zerocopy structure */
static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
{
	struct ubuf_info *uarg = skb_zcopy(skb);

	if (uarg) {
		if (uarg->callback == sock_zerocopy_callback) {
			uarg->zerocopy = uarg->zerocopy && zerocopy;
			sock_zerocopy_put(uarg);
		} else {
			uarg->callback(uarg, zerocopy);
		}

		skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
	}
}

/* Abort a zerocopy operation and revert zckey on error in send syscall */
static inline void skb_zcopy_abort(struct sk_buff *skb)
{
	struct ubuf_info *uarg = skb_zcopy(skb);

	if (uarg) {
		sock_zerocopy_put_abort(uarg);
		skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
	}
}

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (const struct sk_buff *) list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (const struct sk_buff *) list;
}

/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (const struct sk_buff *) list;
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb.  It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb.  It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	refcount_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant atomic changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_header_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

/**
 *	__skb_header_release - release reference to header
 *	@skb: buffer to operate on
 */
static inline void __skb_header_release(struct sk_buff *skb)
{
	skb->nohdr = 1;
	atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return refcount_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt status or with spinlocks held pri must
 *	be GFP_ATOMIC.
 *
 *	NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);

		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet thats being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);

		/* Free our shared copy */
		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->next;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	skb_peek_next - peek skb following the given one from a queue
 *	@skb: skb to start from
 *	@list_: list to peek at
 *
 *	Returns %NULL when the end of the list is met or a pointer to the
 *	next element. The reference count is not incremented and the
 *	reference is therefore volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
		const struct sk_buff_head *list_)
{
	struct sk_buff *next = skb->next;

	if (next == (struct sk_buff *)list_)
		next = NULL;
	return next;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->prev;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object.  This allows to initialize the list
 *	aspects of an sk_buff_head without reinitializing things like
 *	the spinlock.  It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is not needed.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers should need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
		struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
		struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	__skb_queue_after - queue a buffer at the list head
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer int the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				   struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known..
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}


static inline bool skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline unsigned int __skb_pagelen(const struct sk_buff *skb)
{
	unsigned int i, len = 0;

	for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
	return len;
}

static inline unsigned int skb_pagelen(const struct sk_buff *skb)
{
	return skb_headlen(skb) + __skb_pagelen(skb);
}

/**
 * __skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data with @page
 * @size: the length of the data
 *
 * Initialises the @i'th fragment of @skb to point to &size bytes at
 * offset @off within @page.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	/*
	 * Propagate page pfmemalloc to the skb if we can. The problem is
	 * that not all callers have unique ownership of the page but rely
	 * on page_is_pfmemalloc working only on the head page.
	 */
	frag->page.p		  = page;
	frag->page_offset	  = off;
	skb_frag_size_set(frag, size);

	page = compound_head(page);
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc	= true;
}

/**
 * skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data with @page
 * @size: the length of the data
 *
 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
 * @skb to point to @size bytes at offset @off within @page. In
 * addition updates @skb such that @i is the last fragment.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	__skb_fill_page_desc(skb, i, page, off, size);
	skb_shinfo(skb)->nr_frags = i + 1;
}

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize);

#define SKB_PAGE_ASSERT(skb) 	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb) 	BUG_ON(skb_has_frag_list(skb))
#define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
void *skb_put(struct sk_buff *skb, unsigned int len);
static inline void *__skb_put(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = __skb_put(skb, len);

	memset(tmp, 0, len);
	return tmp;
}

static inline void *__skb_put_data(struct sk_buff *skb, const void *data,
				   unsigned int len)
{
	void *tmp = __skb_put(skb, len);

	memcpy(tmp, data, len);
	return tmp;
}

static inline void __skb_put_u8(struct sk_buff *skb, u8 val)
{
	*(u8 *)__skb_put(skb, 1) = val;
}

static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memset(tmp, 0, len);

	return tmp;
}

static inline void *skb_put_data(struct sk_buff *skb, const void *data,
				 unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memcpy(tmp, data, len);

	return tmp;
}

static inline void skb_put_u8(struct sk_buff *skb, u8 val)
{
	*(u8 *)skb_put(skb, 1) = val;
}

void *skb_push(struct sk_buff *skb, unsigned int len);
static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

void *skb_pull(struct sk_buff *skb, unsigned int len);
static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

void *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}

void skb_condense(struct sk_buff *skb);

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_availroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 *	allocated by sk_stream_alloc()
 */
static inline int skb_availroom(const struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb))
		return 0;

	return skb->end - skb->tail - skb->reserved_tailroom;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

/**
 *	skb_tailroom_reserve - adjust reserved_tailroom
 *	@skb: buffer to alter
 *	@mtu: maximum amount of headlen permitted
 *	@needed_tailroom: minimum amount of reserved_tailroom
 *
 *	Set reserved_tailroom so that headlen can be as large as possible but
 *	not larger than mtu and tailroom cannot be smaller than
 *	needed_tailroom.
 *	The required headroom should already have been reserved before using
 *	this function.
 */
static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
					unsigned int needed_tailroom)
{
	SKB_LINEAR_ASSERT(skb);
	if (mtu < skb_tailroom(skb) - needed_tailroom)
		/* use at most mtu */
		skb->reserved_tailroom = skb_tailroom(skb) - mtu;
	else
		/* use up to all available space */
		skb->reserved_tailroom = needed_tailroom;
}

#define ENCAP_TYPE_ETHER	0
#define ENCAP_TYPE_IPPROTO	1

static inline void skb_set_inner_protocol(struct sk_buff *skb,
					  __be16 protocol)
{
	skb->inner_protocol = protocol;
	skb->inner_protocol_type = ENCAP_TYPE_ETHER;
}

static inline void skb_set_inner_ipproto(struct sk_buff *skb,
					 __u8 ipproto)
{
	skb->inner_ipproto = ipproto;
	skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
}

static inline void skb_reset_inner_headers(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->mac_header;
	skb->inner_network_header = skb->network_header;
	skb->inner_transport_header = skb->transport_header;
}

static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}

static inline unsigned char *skb_inner_transport_header(const struct sk_buff
							*skb)
{
	return skb->head + skb->inner_transport_header;
}

static inline int skb_inner_transport_offset(const struct sk_buff *skb)
{
	return skb_inner_transport_header(skb) - skb->data;
}

static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
	skb->inner_transport_header = skb->data - skb->head;
}

static inline void skb_set_inner_transport_header(struct sk_buff *skb,
						   const int offset)
{
	skb_reset_inner_transport_header(skb);
	skb->inner_transport_header += offset;
}

static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_network_header;
}

static inline void skb_reset_inner_network_header(struct sk_buff *skb)
{
	skb->inner_network_header = skb->data - skb->head;
}

static inline void skb_set_inner_network_header(struct sk_buff *skb,
						const int offset)
{
	skb_reset_inner_network_header(skb);
	skb->inner_network_header += offset;
}

static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_mac_header;
}

static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->data - skb->head;
}

static inline void skb_set_inner_mac_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_inner_mac_header(skb);
	skb->inner_mac_header += offset;
}

static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
{
	return skb->transport_header != (typeof(skb->transport_header))~0U;
}

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_offset(const struct sk_buff *skb)
{
	return skb_mac_header(skb) - skb->data;
}

static inline u32 skb_mac_header_len(const struct sk_buff *skb)
{
	return skb->network_header - skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != (typeof(skb->mac_header))~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

static inline void skb_pop_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->network_header;
}

static inline void skb_probe_transport_header(struct sk_buff *skb,
					      const int offset_hint)
{
	struct flow_keys keys;

	if (skb_transport_header_was_set(skb))
		return;
	else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
		skb_set_transport_header(skb, keys.control.thoff);
	else
		skb_set_transport_header(skb, offset_hint);
}

static inline void skb_mac_header_rebuild(struct sk_buff *skb)
{
	if (skb_mac_header_was_set(skb)) {
		const unsigned char *old_mac = skb_mac_header(skb);

		skb_set_mac_header(skb, -skb->mac_len);
		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
	}
}

static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}

static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
{
	return skb->head + skb->csum_start;
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
{
	return skb->inner_transport_header - skb->inner_network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline int skb_inner_network_offset(const struct sk_buff *skb)
{
	return skb_inner_network_header(skb) - skb->data;
}

static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
	return pskb_may_pull(skb, skb_network_offset(skb) + len);
}

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 32 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * for some types of NIC hardware.
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom, you should not reduce this.
 *
 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
 * to reduce average number of cache lines per packet.
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif

int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb_is_nonlinear(skb))) {
		WARN_ON(1);
		return;
	}
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	__skb_set_length(skb, len);
}

void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
{
	unsigned int diff = len - skb->len;

	if (skb_tailroom(skb) < diff) {
		int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
					   GFP_ATOMIC);
		if (ret)
			return ret;
	}
	__skb_set_length(skb, len);
	return 0;
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor) {
		skb->destructor(skb);
		skb->destructor = NULL;
		skb->sk		= NULL;
	} else {
		BUG_ON(skb->sk);
	}
}

/**
 *	skb_orphan_frags - orphan the frags contained in a buffer
 *	@skb: buffer to orphan frags from
 *	@gfp_mask: allocation mask for replacement pages
 *
 *	For each frag in the SKB which needs a destructor (i.e. has an
 *	owner) create a copy of that frag and release the original
 *	page by calling the destructor.
 */
static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (likely(!skb_zcopy(skb)))
		return 0;
	if (skb_uarg(skb)->callback == sock_zerocopy_callback)
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

/* Frags must be orphaned, even if refcounted, if skb might loop to rx path */
static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (likely(!skb_zcopy(skb)))
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

void skb_rbtree_purge(struct rb_root *root);

void *netdev_alloc_frag(unsigned int fragsz);

struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
				   gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
					       unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

/* legacy helper around __netdev_alloc_skb() */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	return __netdev_alloc_skb(NULL, length, gfp_mask);
}

/* legacy helper around netdev_alloc_skb() */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return netdev_alloc_skb(NULL, length);
}


static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}

static inline void skb_free_frag(void *addr)
{
	page_frag_free(addr);
}

void *napi_alloc_frag(unsigned int fragsz);
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
				 unsigned int length, gfp_t gfp_mask);
static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
					     unsigned int length)
{
	return __napi_alloc_skb(napi, length, GFP_ATOMIC);
}
void napi_consume_skb(struct sk_buff *skb, int budget);

void __kfree_skb_flush(void);
void __kfree_skb_defer(struct sk_buff *skb);

/**
 * __dev_alloc_pages - allocate page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 * @order: size of the allocation
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
*/
static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
					     unsigned int order)
{
	/* This piece of code contains several assumptions.
	 * 1.  This is for device Rx, therefor a cold page is preferred.
	 * 2.  The expectation is the user wants a compound page.
	 * 3.  If requesting a order 0 page it will not be compound
	 *     due to the check to see if order has a value in prep_new_page
	 * 4.  __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
	 *     code in gfp_to_alloc_flags that should be enforcing this.
	 */
	gfp_mask |= __GFP_COMP | __GFP_MEMALLOC;

	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
}

static inline struct page *dev_alloc_pages(unsigned int order)
{
	return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
}

/**
 * __dev_alloc_page - allocate a page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
{
	return __dev_alloc_pages(gfp_mask, 0);
}

static inline struct page *dev_alloc_page(void)
{
	return dev_alloc_pages(0);
}

/**
 *	skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
 *	@page: The page that was allocated from skb_alloc_page
 *	@skb: The skb that may need pfmemalloc set
 */
static inline void skb_propagate_pfmemalloc(struct page *page,
					     struct sk_buff *skb)
{
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc = true;
}

/**
 * skb_frag_page - retrieve the page referred to by a paged fragment
 * @frag: the paged fragment
 *
 * Returns the &struct page associated with @frag.
 */
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
	return frag->page.p;
}

/**
 * __skb_frag_ref - take an addition reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Takes an additional reference on the paged fragment @frag.
 */
static inline void __skb_frag_ref(skb_frag_t *frag)
{
	get_page(skb_frag_page(frag));
}

/**
 * skb_frag_ref - take an addition reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset.
 *
 * Takes an additional reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_ref(struct sk_buff *skb, int f)
{
	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}

/**
 * __skb_frag_unref - release a reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Releases a reference on the paged fragment @frag.
 */
static inline void __skb_frag_unref(skb_frag_t *frag)
{
	put_page(skb_frag_page(frag));
}

/**
 * skb_frag_unref - release a reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset
 *
 * Releases a reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_unref(struct sk_buff *skb, int f)
{
	__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
}

/**
 * skb_frag_address - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. The page must already
 * be mapped.
 */
static inline void *skb_frag_address(const skb_frag_t *frag)
{
	return page_address(skb_frag_page(frag)) + frag->page_offset;
}

/**
 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. Checks that the page
 * is mapped and returns %NULL otherwise.
 */
static inline void *skb_frag_address_safe(const skb_frag_t *frag)
{
	void *ptr = page_address(skb_frag_page(frag));
	if (unlikely(!ptr))
		return NULL;

	return ptr + frag->page_offset;
}

/**
 * __skb_frag_set_page - sets the page contained in a paged fragment
 * @frag: the paged fragment
 * @page: the page to set
 *
 * Sets the fragment @frag to contain @page.
 */
static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
	frag->page.p = page;
}

/**
 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
 * @skb: the buffer
 * @f: the fragment offset
 * @page: the page to set
 *
 * Sets the @f'th fragment of @skb to contain @page.
 */
static inline void skb_frag_set_page(struct sk_buff *skb, int f,
				     struct page *page)
{
	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
}

bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);

/**
 * skb_frag_dma_map - maps a paged fragment via the DMA API
 * @dev: the device to map the fragment to
 * @frag: the paged fragment to map
 * @offset: the offset within the fragment (starting at the
 *          fragment's own offset)
 * @size: the number of bytes to map
 * @dir: the direction of the mapping (``PCI_DMA_*``)
 *
 * Maps the page associated with @frag to @device.
 */
static inline dma_addr_t skb_frag_dma_map(struct device *dev,
					  const skb_frag_t *frag,
					  size_t offset, size_t size,
					  enum dma_data_direction dir)
{
	return dma_map_page(dev, skb_frag_page(frag),
			    frag->page_offset + offset, size, dir);
}

static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
					gfp_t gfp_mask)
{
	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
}

static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
						  gfp_t gfp_mask)
{
	return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
}

/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not requires the data to be copied.
 */
static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int skb_try_make_writable(struct sk_buff *skb,
					unsigned int write_len)
{
	return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
	       pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and original skb is not changed.
 *
 *	The result is skb with writable area skb->head...skb->end
 *	and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 *	skb_cow_head - skb_cow but only making the head writable
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	This function is identical to skb_cow except that we replace the
 *	skb_cloned check by skb_header_cloned.  It should be used when
 *	you only need to push on some header and do not need to modify
 *	the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}

/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

/**
 *	__skb_put_padto - increase size and pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *	@free_on_error: free buffer on error
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error if @free_on_error is true.
 */
static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
				  bool free_on_error)
{
	unsigned int size = skb->len;

	if (unlikely(size < len)) {
		len -= size;
		if (__skb_pad(skb, len, free_on_error))
			return -ENOMEM;
		__skb_put(skb, len);
	}
	return 0;
}

/**
 *	skb_put_padto - increase size and pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
{
	return __skb_put_padto(skb, len, true);
}

static inline int skb_add_data(struct sk_buff *skb,
			       struct iov_iter *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		__wsum csum = 0;
		if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
					         &csum, from)) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
				    const struct page *page, int off)
{
	if (skb_zcopy(skb))
		return false;
	if (i) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == skb_frag_page(frag) &&
		       off == frag->page_offset + skb_frag_size(frag);
	}
	return false;
}
3018
3019static inline int __skb_linearize(struct sk_buff *skb)
3020{
3021 return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
3022}
3023
/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
3031static inline int skb_linearize(struct sk_buff *skb)
3032{
3033 return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
3034}
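
/* Usage sketch (illustrative; scan() is made up): code that walks the
 * whole packet through skb->data alone must linearize first.
 *
 *	if (skb_linearize(skb))
 *		goto drop;
 *	scan(skb->data, skb->len);
 */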
3035
/**
 *	skb_has_shared_frag - can any frag be overwritten
 *	@skb: buffer to test
 *
 *	Return true if the skb has at least one frag that might be modified
 *	by an external entity (as in vmsplice()/sendfile()).
 */
3043static inline bool skb_has_shared_frag(const struct sk_buff *skb)
3044{
3045 return skb_is_nonlinear(skb) &&
3046 skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
3047}
3048
/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
3056static inline int skb_linearize_cow(struct sk_buff *skb)
3057{
3058 return skb_is_nonlinear(skb) || skb_cloned(skb) ?
3059 __skb_linearize(skb) : 0;
3060}
3061
3062static __always_inline void
3063__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
3064 unsigned int off)
3065{
3066 if (skb->ip_summed == CHECKSUM_COMPLETE)
3067 skb->csum = csum_block_sub(skb->csum,
3068 csum_partial(start, len, 0), off);
3069 else if (skb->ip_summed == CHECKSUM_PARTIAL &&
3070 skb_checksum_start_offset(skb) < 0)
3071 skb->ip_summed = CHECKSUM_NONE;
3072}
3073
/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */
3084static inline void skb_postpull_rcsum(struct sk_buff *skb,
3085 const void *start, unsigned int len)
3086{
3087 __skb_postpull_rcsum(skb, start, len, 0);
3088}
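
/* Usage sketch (the same pattern skb_pull_rcsum() wraps): remember the
 * old data pointer, pull the header, then repair the checksum.
 *
 *	void *data = skb->data;
 *
 *	__skb_pull(skb, VLAN_HLEN);
 *	skb_postpull_rcsum(skb, data, VLAN_HLEN);
 */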
3089
3090static __always_inline void
3091__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
3092 unsigned int off)
3093{
3094 if (skb->ip_summed == CHECKSUM_COMPLETE)
3095 skb->csum = csum_block_add(skb->csum,
3096 csum_partial(start, len, 0), off);
3097}
3098
/**
 *	skb_postpush_rcsum - update checksum for received skb after push
 *	@skb: buffer to update
 *	@start: start of data after push
 *	@len: length of data pushed
 *
 *	After doing a push on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum.
 */
3108static inline void skb_postpush_rcsum(struct sk_buff *skb,
3109 const void *start, unsigned int len)
3110{
3111 __skb_postpush_rcsum(skb, start, len, 0);
3112}
3113
3114void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
3115
/**
 *	skb_push_rcsum - push skb and update receive checksum
 *	@skb: buffer to update
 *	@len: length of data pushed
 *
 *	This function performs an skb_push on the packet and updates
 *	the CHECKSUM_COMPLETE checksum.  It should be used on
 *	receive path processing instead of skb_push unless you know
 *	that the checksum difference is zero (e.g., a valid IP header)
 *	or you are setting ip_summed to CHECKSUM_NONE.
 */
3127static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
3128{
3129 skb_push(skb, len);
3130 skb_postpush_rcsum(skb, skb->data, len);
3131 return skb->data;
3132}
3133
/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets is still valid after the operation.
 */
3143static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
3144{
3145 if (likely(len >= skb->len))
3146 return 0;
3147 if (skb->ip_summed == CHECKSUM_COMPLETE)
3148 skb->ip_summed = CHECKSUM_NONE;
3149 return __pskb_trim(skb, len);
3150}
3151
3152static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
3153{
3154 if (skb->ip_summed == CHECKSUM_COMPLETE)
3155 skb->ip_summed = CHECKSUM_NONE;
3156 __skb_trim(skb, len);
3157 return 0;
3158}
3159
3160static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
3161{
3162 if (skb->ip_summed == CHECKSUM_COMPLETE)
3163 skb->ip_summed = CHECKSUM_NONE;
3164 return __skb_grow(skb, len);
3165}
3166
3167#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
3168#define skb_rb_first(root) rb_to_skb(rb_first(root))
3169#define skb_rb_last(root) rb_to_skb(rb_last(root))
3170#define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode))
3171#define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode))
3172
3173#define skb_queue_walk(queue, skb) \
3174 for (skb = (queue)->next; \
3175 skb != (struct sk_buff *)(queue); \
3176 skb = skb->next)
3177
3178#define skb_queue_walk_safe(queue, skb, tmp) \
3179 for (skb = (queue)->next, tmp = skb->next; \
3180 skb != (struct sk_buff *)(queue); \
3181 skb = tmp, tmp = skb->next)
3182
3183#define skb_queue_walk_from(queue, skb) \
3184 for (; skb != (struct sk_buff *)(queue); \
3185 skb = skb->next)
3186
3187#define skb_rbtree_walk(skb, root) \
3188 for (skb = skb_rb_first(root); skb != NULL; \
3189 skb = skb_rb_next(skb))
3190
3191#define skb_rbtree_walk_from(skb) \
3192 for (; skb != NULL; \
3193 skb = skb_rb_next(skb))
3194
3195#define skb_rbtree_walk_from_safe(skb, tmp) \
3196 for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \
3197 skb = tmp)
3198
3199#define skb_queue_walk_from_safe(queue, skb, tmp) \
3200 for (tmp = skb->next; \
3201 skb != (struct sk_buff *)(queue); \
3202 skb = tmp, tmp = skb->next)
3203
3204#define skb_queue_reverse_walk(queue, skb) \
3205 for (skb = (queue)->prev; \
3206 skb != (struct sk_buff *)(queue); \
3207 skb = skb->prev)
3208
3209#define skb_queue_reverse_walk_safe(queue, skb, tmp) \
3210 for (skb = (queue)->prev, tmp = skb->prev; \
3211 skb != (struct sk_buff *)(queue); \
3212 skb = tmp, tmp = skb->prev)
3213
3214#define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \
3215 for (tmp = skb->prev; \
3216 skb != (struct sk_buff *)(queue); \
3217 skb = tmp, tmp = skb->prev)
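
/* Usage sketch (illustrative; should_drop() is made up): unlinking the
 * current element requires the _safe walker, and the caller must hold
 * the queue lock or otherwise own the queue.
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_queue_walk_safe(queue, skb, tmp) {
 *		if (should_drop(skb)) {
 *			__skb_unlink(skb, queue);
 *			kfree_skb(skb);
 *		}
 *	}
 */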
3218
3219static inline bool skb_has_frag_list(const struct sk_buff *skb)
3220{
3221 return skb_shinfo(skb)->frag_list != NULL;
3222}
3223
3224static inline void skb_frag_list_init(struct sk_buff *skb)
3225{
3226 skb_shinfo(skb)->frag_list = NULL;
3227}
3228
3229#define skb_walk_frags(skb, iter) \
3230 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
3231
3232
3233int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
3234 const struct sk_buff *skb);
3235struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
3236 struct sk_buff_head *queue,
3237 unsigned int flags,
3238 void (*destructor)(struct sock *sk,
3239 struct sk_buff *skb),
3240 int *peeked, int *off, int *err,
3241 struct sk_buff **last);
3242struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
3243 void (*destructor)(struct sock *sk,
3244 struct sk_buff *skb),
3245 int *peeked, int *off, int *err,
3246 struct sk_buff **last);
3247struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
3248 void (*destructor)(struct sock *sk,
3249 struct sk_buff *skb),
3250 int *peeked, int *off, int *err);
3251struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
3252 int *err);
3253__poll_t datagram_poll(struct file *file, struct socket *sock,
3254 struct poll_table_struct *wait);
3255int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
3256 struct iov_iter *to, int size);
3257static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
3258 struct msghdr *msg, int size)
3259{
3260 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
3261}
3262int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
3263 struct msghdr *msg);
3264int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
3265 struct iov_iter *from, int len);
3266int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
3267void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
3268void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
3269static inline void skb_free_datagram_locked(struct sock *sk,
3270 struct sk_buff *skb)
3271{
3272 __skb_free_datagram_locked(sk, skb, 0);
3273}
3274int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
3275int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
3276int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
3277__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
3278 int len, __wsum csum);
3279int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
3280 struct pipe_inode_info *pipe, unsigned int len,
3281 unsigned int flags);
3282int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
3283 int len);
3284int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len);
3285void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
3286unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
3287int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
3288 int len, int hlen);
3289void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
3290int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
3291void skb_scrub_packet(struct sk_buff *skb, bool xnet);
3292bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
3293bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
3294struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
3295struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
3296int skb_ensure_writable(struct sk_buff *skb, int write_len);
3297int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
3298int skb_vlan_pop(struct sk_buff *skb);
3299int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
3300struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
3301 gfp_t gfp);
3302
3303static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
3304{
3305 return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT;
3306}
3307
3308static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
3309{
3310 return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
3311}
3312
3313struct skb_checksum_ops {
3314 __wsum (*update)(const void *mem, int len, __wsum wsum);
3315 __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
3316};
3317
3318extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly;
3319
3320__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
3321 __wsum csum, const struct skb_checksum_ops *ops);
3322__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
3323 __wsum csum);
3324
3325static inline void * __must_check
3326__skb_header_pointer(const struct sk_buff *skb, int offset,
3327 int len, void *data, int hlen, void *buffer)
3328{
3329 if (hlen - offset >= len)
3330 return data + offset;
3331
3332 if (!skb ||
3333 skb_copy_bits(skb, offset, buffer, len) < 0)
3334 return NULL;
3335
3336 return buffer;
3337}
3338
3339static inline void * __must_check
3340skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
3341{
3342 return __skb_header_pointer(skb, offset, len, skb->data,
3343 skb_headlen(skb), buffer);
3344}
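
/* Usage sketch (the canonical pattern, as used by protocol parsers;
 * thoff stands in for the caller's transport header offset): read a
 * header from a possibly non-linear skb, falling back to an on-stack
 * copy when the bytes are paged.
 *
 *	struct tcphdr _tcph;
 *	const struct tcphdr *th;
 *
 *	th = skb_header_pointer(skb, thoff, sizeof(_tcph), &_tcph);
 *	if (!th)
 *		goto truncated;
 */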
3345
/**
 *	skb_needs_linearize - check if we need to linearize a given skb
 *			      depending on the given device features.
 *	@skb: socket buffer to check
 *	@features: net device features
 *
 *	Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG.
 */
3356static inline bool skb_needs_linearize(struct sk_buff *skb,
3357 netdev_features_t features)
3358{
3359 return skb_is_nonlinear(skb) &&
3360 ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
3361 (skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
3362}
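
/* Usage sketch (simplified from typical transmit validation logic):
 *
 *	if (skb_needs_linearize(skb, features) && __skb_linearize(skb))
 *		goto drop;
 */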
3363
3364static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
3365 void *to,
3366 const unsigned int len)
3367{
3368 memcpy(to, skb->data, len);
3369}
3370
3371static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
3372 const int offset, void *to,
3373 const unsigned int len)
3374{
3375 memcpy(to, skb->data + offset, len);
3376}
3377
3378static inline void skb_copy_to_linear_data(struct sk_buff *skb,
3379 const void *from,
3380 const unsigned int len)
3381{
3382 memcpy(skb->data, from, len);
3383}
3384
3385static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
3386 const int offset,
3387 const void *from,
3388 const unsigned int len)
3389{
3390 memcpy(skb->data + offset, from, len);
3391}
3392
3393void skb_init(void);
3394
3395static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
3396{
3397 return skb->tstamp;
3398}
3399
/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and
 *	stores it in @stamp.
 */
3409static inline void skb_get_timestamp(const struct sk_buff *skb,
3410 struct timeval *stamp)
3411{
3412 *stamp = ktime_to_timeval(skb->tstamp);
3413}
3414
3415static inline void skb_get_timestampns(const struct sk_buff *skb,
3416 struct timespec *stamp)
3417{
3418 *stamp = ktime_to_timespec(skb->tstamp);
3419}
3420
3421static inline void __net_timestamp(struct sk_buff *skb)
3422{
3423 skb->tstamp = ktime_get_real();
3424}
3425
3426static inline ktime_t net_timedelta(ktime_t t)
3427{
3428 return ktime_sub(ktime_get_real(), t);
3429}
3430
3431static inline ktime_t net_invalid_timestamp(void)
3432{
3433 return 0;
3434}
3435
3436static inline u8 skb_metadata_len(const struct sk_buff *skb)
3437{
3438 return skb_shinfo(skb)->meta_len;
3439}
3440
3441static inline void *skb_metadata_end(const struct sk_buff *skb)
3442{
3443 return skb_mac_header(skb);
3444}
3445
3446static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
3447 const struct sk_buff *skb_b,
3448 u8 meta_len)
3449{
3450 const void *a = skb_metadata_end(skb_a);
3451 const void *b = skb_metadata_end(skb_b);
3452
3453#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
3454 u64 diffs = 0;
3455
3456 switch (meta_len) {
3457#define __it(x, op) (x -= sizeof(u##op))
3458#define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
 case 32: diffs |= __it_diff(a, b, 64);
 /* fall through */
 case 24: diffs |= __it_diff(a, b, 64);
 /* fall through */
 case 16: diffs |= __it_diff(a, b, 64);
 /* fall through */
 case 8: diffs |= __it_diff(a, b, 64);
 break;
 case 28: diffs |= __it_diff(a, b, 64);
 /* fall through */
 case 20: diffs |= __it_diff(a, b, 64);
 /* fall through */
 case 12: diffs |= __it_diff(a, b, 64);
 /* fall through */
 case 4: diffs |= __it_diff(a, b, 32);
3468 break;
3469 }
3470 return diffs;
3471#else
3472 return memcmp(a - meta_len, b - meta_len, meta_len);
3473#endif
3474}
3475
3476static inline bool skb_metadata_differs(const struct sk_buff *skb_a,
3477 const struct sk_buff *skb_b)
3478{
3479 u8 len_a = skb_metadata_len(skb_a);
3480 u8 len_b = skb_metadata_len(skb_b);
3481
3482 if (!(len_a | len_b))
3483 return false;
3484
3485 return len_a != len_b ?
3486 true : __skb_metadata_differs(skb_a, skb_b, len_a);
3487}
3488
3489static inline void skb_metadata_set(struct sk_buff *skb, u8 meta_len)
3490{
3491 skb_shinfo(skb)->meta_len = meta_len;
3492}
3493
3494static inline void skb_metadata_clear(struct sk_buff *skb)
3495{
3496 skb_metadata_set(skb, 0);
3497}
3498
3499struct sk_buff *skb_clone_sk(struct sk_buff *skb);
3500
3501#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
3502
3503void skb_clone_tx_timestamp(struct sk_buff *skb);
3504bool skb_defer_rx_timestamp(struct sk_buff *skb);
3505
3506#else
3507
3508static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
3509{
3510}
3511
3512static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
3513{
3514 return false;
3515}
3516
3517#endif
3518
/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb back to the stack with a
 * timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps
 */
3531void skb_complete_tx_timestamp(struct sk_buff *skb,
3532 struct skb_shared_hwtstamps *hwtstamps);
3533
3534void __skb_tstamp_tx(struct sk_buff *orig_skb,
3535 struct skb_shared_hwtstamps *hwtstamps,
3536 struct sock *sk, int tstype);
3537
/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb:	the original outgoing packet
 * @hwtstamps:	hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket.  Errors are silently ignored.
 */
3549void skb_tstamp_tx(struct sk_buff *orig_skb,
3550 struct skb_shared_hwtstamps *hwtstamps);
3551
/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * Specifically, one should make absolutely sure that this function is
 * called before TX completion of this packet can trigger.  Otherwise
 * the packet could potentially already be freed.
 *
 * @skb: A socket buffer.
 */
3564static inline void skb_tx_timestamp(struct sk_buff *skb)
3565{
3566 skb_clone_tx_timestamp(skb);
3567 if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
3568 skb_tstamp_tx(skb, NULL);
3569}
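
/* Usage sketch (hypothetical ndo_start_xmit; my_hw_kick_tx() and ring
 * are made up): timestamp after the descriptors are written but before
 * the doorbell, so a TX completion cannot free the skb first.
 *
 *	skb_tx_timestamp(skb);
 *	my_hw_kick_tx(ring);
 */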
3570
/**
 * skb_complete_wifi_ack - deliver skb with wifi status
 *
 * @skb: the original outgoing packet
 * @acked: ack status
 */
3578void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
3579
3580__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
3581__sum16 __skb_checksum_complete(struct sk_buff *skb);
3582
3583static inline int skb_csum_unnecessary(const struct sk_buff *skb)
3584{
3585 return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
3586 skb->csum_valid ||
3587 (skb->ip_summed == CHECKSUM_PARTIAL &&
3588 skb_checksum_start_offset(skb) >= 0));
3589}
3590
/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify the checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is not complete.  In the CHECKSUM_COMPLETE case no
 *	conversion is needed; that has been done before the call.
 */
3607static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
3608{
3609 return skb_csum_unnecessary(skb) ?
3610 0 : __skb_checksum_complete(skb);
3611}
3612
3613static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
3614{
3615 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3616 if (skb->csum_level == 0)
3617 skb->ip_summed = CHECKSUM_NONE;
3618 else
3619 skb->csum_level--;
3620 }
3621}
3622
3623static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
3624{
3625 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3626 if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
3627 skb->csum_level++;
3628 } else if (skb->ip_summed == CHECKSUM_NONE) {
3629 skb->ip_summed = CHECKSUM_UNNECESSARY;
3630 skb->csum_level = 0;
3631 }
3632}
3633
/* Check if we need to perform checksum complete validation.
 *
 * Returns true if checksum complete is needed, false otherwise
 * (either checksum is unnecessary or zero checksum is allowed).
 */
3639static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
3640 bool zero_okay,
3641 __sum16 check)
3642{
3643 if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
3644 skb->csum_valid = 1;
3645 __skb_decr_checksum_unnecessary(skb);
3646 return false;
3647 }
3648
3649 return true;
3650}
3651
/* For small packets <= CHECKSUM_BREAK, perform checksum complete
 * directly in checksum_init.
 */
3655#define CHECKSUM_BREAK 76
3656
/* Unset checksum-complete
 *
 * Unsetting checksum complete can be done when a packet is being
 * modified (decompressed for instance) and the checksum-complete
 * value is therefore invalidated.
 */
3663static inline void skb_checksum_complete_unset(struct sk_buff *skb)
3664{
3665 if (skb->ip_summed == CHECKSUM_COMPLETE)
3666 skb->ip_summed = CHECKSUM_NONE;
3667}
3668
/* Validate (init) checksum based on checksum complete.
 *
 * Return values:
 *   0: checksum is validated or we will try to in skb_checksum_complete.
 *	In the latter case ip_summed will not be CHECKSUM_UNNECESSARY and
 *	the pseudo checksum is stored in skb->csum for use in
 *	__skb_checksum_complete.
 *   non-zero: value of the invalid checksum
 */
3678static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
3679 bool complete,
3680 __wsum psum)
3681{
3682 if (skb->ip_summed == CHECKSUM_COMPLETE) {
3683 if (!csum_fold(csum_add(psum, skb->csum))) {
3684 skb->csum_valid = 1;
3685 return 0;
3686 }
3687 }
3688
3689 skb->csum = psum;
3690
3691 if (complete || skb->len <= CHECKSUM_BREAK) {
3692 __sum16 csum;
3693
3694 csum = __skb_checksum_complete(skb);
3695 skb->csum_valid = !csum;
3696 return csum;
3697 }
3698
3699 return 0;
3700}
3701
3702static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
3703{
3704 return 0;
3705}
3706
/* Perform checksum validate (init). Note that this is a macro since we only
 * want to call the pseudo-header computation function when it is actually
 * needed. First we try to validate without any computation (checksum
 * unnecessary), and then fall back to checksum complete, calling the
 * function to compute the pseudo header.
 *
 * Return values:
 *   0: checksum is validated or we will try to in skb_checksum_complete
 *   non-zero: value of the invalid checksum
 */
3717#define __skb_checksum_validate(skb, proto, complete, \
3718 zero_okay, check, compute_pseudo) \
3719({ \
3720 __sum16 __ret = 0; \
3721 skb->csum_valid = 0; \
3722 if (__skb_checksum_validate_needed(skb, zero_okay, check)) \
3723 __ret = __skb_checksum_validate_complete(skb, \
3724 complete, compute_pseudo(skb, proto)); \
3725 __ret; \
3726})
3727
3728#define skb_checksum_init(skb, proto, compute_pseudo) \
3729 __skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)
3730
3731#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \
3732 __skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)
3733
3734#define skb_checksum_validate(skb, proto, compute_pseudo) \
3735 __skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)
3736
3737#define skb_checksum_validate_zero_check(skb, proto, check, \
3738 compute_pseudo) \
3739 __skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)
3740
3741#define skb_checksum_simple_validate(skb) \
3742 __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
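
/* Usage sketch (modelled on TCP/UDP receive paths; the surrounding
 * function and error label are assumptions): validate using the
 * protocol's pseudo-header helper.
 *
 *	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
 *		goto csum_error;
 */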
3743
3744static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
3745{
3746 return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid);
3747}
3748
3749static inline void __skb_checksum_convert(struct sk_buff *skb,
3750 __sum16 check, __wsum pseudo)
3751{
3752 skb->csum = ~pseudo;
3753 skb->ip_summed = CHECKSUM_COMPLETE;
3754}
3755
3756#define skb_checksum_try_convert(skb, proto, check, compute_pseudo) \
3757do { \
3758 if (__skb_checksum_convert_check(skb)) \
3759 __skb_checksum_convert(skb, check, \
3760 compute_pseudo(skb, proto)); \
3761} while (0)
3762
3763static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
3764 u16 start, u16 offset)
3765{
3766 skb->ip_summed = CHECKSUM_PARTIAL;
3767 skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
3768 skb->csum_offset = offset - start;
3769}
3770
/* Update the skb and packet to reflect the remote checksum offload
 * operation. When called, @ptr indicates the starting point for
 * skb->csum when ip_summed is CHECKSUM_COMPLETE. If we need to create
 * checksum complete here, skb_postpull_rcsum() is done so that the
 * skb->csum start is @ptr.
 */
3776static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
3777 int start, int offset, bool nopartial)
3778{
3779 __wsum delta;
3780
3781 if (!nopartial) {
3782 skb_remcsum_adjust_partial(skb, ptr, start, offset);
3783 return;
3784 }
3785
3786 if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
3787 __skb_checksum_complete(skb);
3788 skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
3789 }
3790
3791 delta = remcsum_adjust(ptr, skb->csum, start, offset);
3792
 /* Adjust skb->csum since we changed the packet */
3794 skb->csum = csum_add(skb->csum, delta);
3795}
3796
3797static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
3798{
3799#if IS_ENABLED(CONFIG_NF_CONNTRACK)
3800 return (void *)(skb->_nfct & SKB_NFCT_PTRMASK);
3801#else
3802 return NULL;
3803#endif
3804}
3805
3806#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
3807void nf_conntrack_destroy(struct nf_conntrack *nfct);
3808static inline void nf_conntrack_put(struct nf_conntrack *nfct)
3809{
3810 if (nfct && atomic_dec_and_test(&nfct->use))
3811 nf_conntrack_destroy(nfct);
3812}
3813static inline void nf_conntrack_get(struct nf_conntrack *nfct)
3814{
3815 if (nfct)
3816 atomic_inc(&nfct->use);
3817}
3818#endif
3819#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
3820static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
3821{
3822 if (nf_bridge && refcount_dec_and_test(&nf_bridge->use))
3823 kfree(nf_bridge);
3824}
3825static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
3826{
3827 if (nf_bridge)
3828 refcount_inc(&nf_bridge->use);
3829}
3830#endif
3831static inline void nf_reset(struct sk_buff *skb)
3832{
3833#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
3834 nf_conntrack_put(skb_nfct(skb));
3835 skb->_nfct = 0;
3836#endif
3837#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
3838 nf_bridge_put(skb->nf_bridge);
3839 skb->nf_bridge = NULL;
3840#endif
3841}
3842
3843static inline void nf_reset_trace(struct sk_buff *skb)
3844{
3845#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
3846 skb->nf_trace = 0;
3847#endif
3848}
3849
3850static inline void ipvs_reset(struct sk_buff *skb)
3851{
3852#if IS_ENABLED(CONFIG_IP_VS)
3853 skb->ipvs_property = 0;
3854#endif
3855}
3856
/* Note: This doesn't put any conntrack and bridge info in dst. */
3858static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
3859 bool copy)
3860{
3861#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
3862 dst->_nfct = src->_nfct;
3863 nf_conntrack_get(skb_nfct(src));
3864#endif
3865#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
3866 dst->nf_bridge = src->nf_bridge;
3867 nf_bridge_get(src->nf_bridge);
3868#endif
3869#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
3870 if (copy)
3871 dst->nf_trace = src->nf_trace;
3872#endif
3873}
3874
3875static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
3876{
3877#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
3878 nf_conntrack_put(skb_nfct(dst));
3879#endif
3880#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
3881 nf_bridge_put(dst->nf_bridge);
3882#endif
3883 __nf_copy(dst, src, true);
3884}
3885
3886#ifdef CONFIG_NETWORK_SECMARK
3887static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
3888{
3889 to->secmark = from->secmark;
3890}
3891
3892static inline void skb_init_secmark(struct sk_buff *skb)
3893{
3894 skb->secmark = 0;
3895}
3896#else
3897static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
3898{ }
3899
3900static inline void skb_init_secmark(struct sk_buff *skb)
3901{ }
3902#endif
3903
3904static inline bool skb_irq_freeable(const struct sk_buff *skb)
3905{
3906 return !skb->destructor &&
3907#if IS_ENABLED(CONFIG_XFRM)
3908 !skb->sp &&
3909#endif
3910 !skb_nfct(skb) &&
3911 !skb->_skb_refdst &&
3912 !skb_has_frag_list(skb);
3913}
3914
3915static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
3916{
3917 skb->queue_mapping = queue_mapping;
3918}
3919
3920static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
3921{
3922 return skb->queue_mapping;
3923}
3924
3925static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
3926{
3927 to->queue_mapping = from->queue_mapping;
3928}
3929
3930static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
3931{
3932 skb->queue_mapping = rx_queue + 1;
3933}
3934
3935static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
3936{
3937 return skb->queue_mapping - 1;
3938}
3939
3940static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
3941{
3942 return skb->queue_mapping != 0;
3943}
3944
3945static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val)
3946{
3947 skb->dst_pending_confirm = val;
3948}
3949
3950static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
3951{
3952 return skb->dst_pending_confirm != 0;
3953}
3954
3955static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
3956{
3957#ifdef CONFIG_XFRM
3958 return skb->sp;
3959#else
3960 return NULL;
3961#endif
3962}
3963
/* Keeps track of mac header offset relative to skb->head.
 * It is useful for TSO of Tunneling protocol, e.g. GRE.
 * For non-tunnel skb it points to skb_mac_header() and for
 * tunnel skb it points to the outer mac header.
 * Also keeps track of the level of encapsulation of network headers.
 */
3970struct skb_gso_cb {
3971 union {
3972 int mac_offset;
3973 int data_offset;
3974 };
3975 int encap_level;
3976 __wsum csum;
3977 __u16 csum_start;
3978};
3979#define SKB_SGO_CB_OFFSET 32
3980#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))
3981
3982static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
3983{
3984 return (skb_mac_header(inner_skb) - inner_skb->head) -
3985 SKB_GSO_CB(inner_skb)->mac_offset;
3986}
3987
3988static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
3989{
3990 int new_headroom, headroom;
3991 int ret;
3992
3993 headroom = skb_headroom(skb);
3994 ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
3995 if (ret)
3996 return ret;
3997
3998 new_headroom = skb_headroom(skb);
3999 SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
4000 return 0;
4001}
4002
4003static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
4004{
 /* Do not update partial checksums if remote checksum is enabled. */
4006 if (skb->remcsum_offload)
4007 return;
4008
4009 SKB_GSO_CB(skb)->csum = res;
4010 SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
4011}
4012
/* Compute the checksum for a gso segment. First compute the checksum value
 * from the start of the transport header to SKB_GSO_CB(skb)->csum_start, and
 * then add in skb->csum (checksum from csum_start to end of packet).
 * skb->csum and csum_start are then updated to reflect the checksum of the
 * resultant packet starting from the transport header -- the resultant
 * checksum is in the res argument (i.e. RFC 1624).
 */
4021static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
4022{
4023 unsigned char *csum_start = skb_transport_header(skb);
4024 int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
4025 __wsum partial = SKB_GSO_CB(skb)->csum;
4026
4027 SKB_GSO_CB(skb)->csum = res;
4028 SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;
4029
4030 return csum_fold(csum_partial(csum_start, plen, partial));
4031}
4032
4033static inline bool skb_is_gso(const struct sk_buff *skb)
4034{
4035 return skb_shinfo(skb)->gso_size;
4036}
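
/* Usage sketch (mirrors common forwarding-path MTU checks): a GSO
 * skb's skb->len exceeds any single on-wire frame, so test the
 * segment size via the validate helpers instead.
 *
 *	if (skb_is_gso(skb)) {
 *		if (!skb_gso_validate_network_len(skb, mtu))
 *			goto frag_needed;
 *	} else if (skb->len > mtu) {
 *		goto frag_needed;
 *	}
 */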
4037
/* Note: Should be called only if skb_is_gso(skb) is true */
4039static inline bool skb_is_gso_v6(const struct sk_buff *skb)
4040{
4041 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
4042}
4043
/* Note: Should be called only if skb_is_gso(skb) is true */
4045static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
4046{
4047 return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
4048}
4049
4050static inline void skb_gso_reset(struct sk_buff *skb)
4051{
4052 skb_shinfo(skb)->gso_size = 0;
4053 skb_shinfo(skb)->gso_segs = 0;
4054 skb_shinfo(skb)->gso_type = 0;
4055}
4056
4057static inline void skb_increase_gso_size(struct skb_shared_info *shinfo,
4058 u16 increment)
4059{
4060 if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
4061 return;
4062 shinfo->gso_size += increment;
4063}
4064
4065static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo,
4066 u16 decrement)
4067{
4068 if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
4069 return;
4070 shinfo->gso_size -= decrement;
4071}
4072
4073void __skb_warn_lro_forwarding(const struct sk_buff *skb);
4074
4075static inline bool skb_warn_if_lro(const struct sk_buff *skb)
4076{
 /* LRO sets gso_size but not gso_type, whereas if GSO is really
  * wanted then gso_type will be set.
  */
4079 const struct skb_shared_info *shinfo = skb_shinfo(skb);
4080
4081 if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
4082 unlikely(shinfo->gso_type == 0)) {
4083 __skb_warn_lro_forwarding(skb);
4084 return true;
4085 }
4086 return false;
4087}
4088
4089static inline void skb_forward_csum(struct sk_buff *skb)
4090{
 /* Unfortunately we don't support this one.  Any brave souls? */
4092 if (skb->ip_summed == CHECKSUM_COMPLETE)
4093 skb->ip_summed = CHECKSUM_NONE;
4094}
4095
/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * Fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper to document places where we make this assertion.
 */
4104static inline void skb_checksum_none_assert(const struct sk_buff *skb)
4105{
4106#ifdef DEBUG
4107 BUG_ON(skb->ip_summed != CHECKSUM_NONE);
4108#endif
4109}
4110
4111bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
4112
4113int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
4114struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
4115 unsigned int transport_len,
4116 __sum16(*skb_chkf)(struct sk_buff *skb));
4117
/**
 * skb_head_is_locked - Determine if the skb->head is locked down
 * @skb: skb to check
 *
 * The head on skbs built around a head frag can be removed if they are
 * not cloned.  This function returns true if the skb head is locked down
 * due to either being allocated via kmalloc, or by being a clone with
 * multiple references to the head.
 */
4127static inline bool skb_head_is_locked(const struct sk_buff *skb)
4128{
4129 return !skb->head_frag || skb_cloned(skb);
4130}
4131
/* Local Checksum Offload.
 * Compute the outer checksum based on the assumption that the
 * inner checksum will be offloaded later.
 * See Documentation/networking/checksum-offloads.txt for
 * explanation of how this works.
 * The caller fills in the outer checksum adjustment (e.g. with ~0)
 * before calling; the result is the checksum of the outer headers
 * combined with the complement of the inner (pseudo-header) value.
 */
4141static inline __wsum lco_csum(struct sk_buff *skb)
4142{
4143 unsigned char *csum_start = skb_checksum_start(skb);
4144 unsigned char *l4_hdr = skb_transport_header(skb);
4145 __wsum partial;
4146
 /* Start with complement of inner checksum adjustment */
4148 partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
4149 skb->csum_offset));
4150
 /* Add in checksum of our headers (incl. outer checksum
  * adjustment filled in by caller) and return result.
  */
4154 return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
4155}
4156
4157#endif
4158#endif
4159