/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/rbtree.h>
#include <linux/socket.h>
#include <linux/refcount.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <net/flow_dissector.h>
#include <linux/splice.h>
#include <linux/in6.h>
#include <linux/if_packet.h>
#include <net/flow.h>

/* The skb->ip_summed field tells how the packet checksum is to be
 * interpreted: how far the device or the stack has verified checksums
 * on receive (CHECKSUM_NONE, CHECKSUM_UNNECESSARY, CHECKSUM_COMPLETE),
 * and whether checksum computation is being offloaded on transmit
 * (CHECKSUM_PARTIAL, using csum_start/csum_offset below).
 */
#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

/* Maximum value in skb->csum_level */
#define SKB_MAX_CSUM_LEVEL	3

#define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

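/* Note: SKB_TRUESIZE() is what socket memory accounting charges for one
 * buffer - the payload bytes plus the cache-line-aligned sk_buff and
 * skb_shared_info metadata that accompany them.
 */
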
struct net_device;
struct scatterlist;
struct pipe_inode_info;
struct iov_iter;
struct napi_struct;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
	refcount_t		use;
	enum {
		BRNF_PROTO_UNCHANGED,
		BRNF_PROTO_8021Q,
		BRNF_PROTO_PPPOE
	} orig_proto:8;
	u8			pkt_otherhost:1;
	u8			in_prerouting:1;
	u8			bridged_dnat:1;
	__u16			frag_max_size;
	struct net_device	*physindev;

	/* always valid & non-NULL from FORWARD on, for physdev match */
	struct net_device	*physoutdev;
	union {
		/* prerouting: detect dnat in orig/reply direction */
		__be32		ipv4_daddr;
		struct in6_addr	ipv6_daddr;

		/* after prerouting + nat detected: store original source
		 * mac since neigh resolution overwrites it, only used while
		 * skb is out in neigh layer.
		 */
		char		neigh_header[8];
	};
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list we
 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
 * buffers which do not start on a page boundary.
 *
 * Since GRO uses frags we allocate at least 16 regardless of page
 * size.
 */
#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif
extern int sysctl_max_skb_frags;

/* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
 * segment using its current segmentation instead.
 */
#define GSO_BY_FRAGS	0xFFFF

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct {
		struct page *p;
	} page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
	__u32 size;
#else
	__u16 page_offset;
	__u16 size;
#endif
};

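/* Note: all users should go through these size accessors rather than
 * touching frag->size directly, so the field's width can differ between
 * configs (see the #if above) without churning call sites.
 */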
static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->size;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->size = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->size += delta;
}

static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->size -= delta;
}

static inline bool skb_frag_must_loop(struct page *p)
{
#if defined(CONFIG_HIGHMEM)
	if (PageHighMem(p))
		return true;
#endif
	return false;
}

/**
 *	skb_frag_foreach_page - loop over pages in a fragment
 *
 *	@f:		skb frag to operate on
 *	@f_off:		offset from start of f->page.p
 *	@f_len:		length from f_off to loop over
 *	@p:		(temp var) current page
 *	@p_off:		(temp var) offset from start of current page,
 *			non-zero only on first page.
 *	@p_len:		(temp var) length in current page,
 *			< PAGE_SIZE only on first and last page.
 *	@copied:	(temp var) length so far, excluding current p_len.
 *
 *	A fragment can hold a compound page, in which case per-page
 *	operations, notably kmap_atomic, must be called for each
 *	regular page.
 */
#define skb_frag_foreach_page(f, f_off, f_len, p, p_off, p_len, copied)	\
	for (p = skb_frag_page(f) + ((f_off) >> PAGE_SHIFT),		\
	     p_off = (f_off) & (PAGE_SIZE - 1),				\
	     p_len = skb_frag_must_loop(p) ?				\
	     min_t(u32, f_len, PAGE_SIZE - p_off) : f_len,		\
	     copied = 0;						\
	     copied < f_len;						\
	     copied += p_len, p++, p_off = 0,				\
	     p_len = min_t(u32, f_len - copied, PAGE_SIZE))		\

#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since arbitrary point in time
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
};

/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp when queueing packet to NIC */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* device driver supports TX zero-copy buffers */
	SKBTX_DEV_ZEROCOPY = 1 << 3,

	/* generate wifi status information (where possible) */
	SKBTX_WIFI_STATUS = 1 << 4,

	/* This indicates at least one fragment might be overwritten
	 * (as in vmsplice(), sendfile() ...)
	 * If we need to compute a TX checksum, we'll need to copy
	 * all frags to avoid possible bad checksum
	 */
	SKBTX_SHARED_FRAG = 1 << 5,

	/* generate software time stamp when entering packet scheduling */
	SKBTX_SCHED_TSTAMP = 1 << 6,
};

#define SKBTX_ZEROCOPY_FRAG	(SKBTX_DEV_ZEROCOPY | SKBTX_SHARED_FRAG)
#define SKBTX_ANY_SW_TSTAMP	(SKBTX_SW_TSTAMP | \
				 SKBTX_SCHED_TSTAMP)
#define SKBTX_ANY_TSTAMP	(SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)

/* The callback notifies userspace to release buffers when skb DMA is done in
 * lower device, the skb last reference should be 0 when calling this.
 * The zerocopy_success argument is true if zero copy transmit occurred,
 * false on data copy or out of memory error caused by data copy attempt.
 * The ctx field is used to track device context.
 * The desc field is used to track userspace buffer index.
 */
struct ubuf_info {
	void (*callback)(struct ubuf_info *, bool zerocopy_success);
	union {
		struct {
			unsigned long desc;
			void *ctx;
		};
		struct {
			u32 id;
			u16 len;
			u16 zerocopy:1;
			u32 bytelen;
		};
	};
	refcount_t refcnt;

	struct mmpin {
		struct user_struct *user;
		unsigned int num_pg;
	} mmp;
};

#define skb_uarg(SKB)	((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))

int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
void mm_unaccount_pinned_pages(struct mmpin *mmp);

struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size);
struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
					struct ubuf_info *uarg);

static inline void sock_zerocopy_get(struct ubuf_info *uarg)
{
	refcount_inc(&uarg->refcnt);
}

void sock_zerocopy_put(struct ubuf_info *uarg);
void sock_zerocopy_put_abort(struct ubuf_info *uarg);

void sock_zerocopy_callback(struct ubuf_info *uarg, bool success);

int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
			     struct msghdr *msg, int len,
			     struct ubuf_info *uarg);
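
/* Note: each skb that shares a zerocopy request's user pages holds a
 * reference on the ubuf_info; the completion callback fires from
 * sock_zerocopy_put() once the last reference is dropped.
 */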

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	__u8		__unused;
	__u8		meta_len;
	__u8		nr_frags;
	__u8		tx_flags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	unsigned int	gso_type;
	u32		tskey;

	/*
	 * Warning : all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void		*destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
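
/* Note: skb_header_cloned() further below compares the two halves of
 * dataref to decide whether the header part of skb->data may still be
 * rewritten in place.
 */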

enum {
	SKB_FCLONE_UNAVAILABLE,	/* skb has no fclone (from head_cache) */
	SKB_FCLONE_ORIG,	/* orig skb (from fclone_cache) */
	SKB_FCLONE_CLONE,	/* companion fclone skb (from fclone_cache) */
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 1,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 2,

	SKB_GSO_TCP_FIXEDID = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,

	SKB_GSO_GRE = 1 << 6,

	SKB_GSO_GRE_CSUM = 1 << 7,

	SKB_GSO_IPXIP4 = 1 << 8,

	SKB_GSO_IPXIP6 = 1 << 9,

	SKB_GSO_UDP_TUNNEL = 1 << 10,

	SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,

	SKB_GSO_PARTIAL = 1 << 12,

	SKB_GSO_TUNNEL_REMCSUM = 1 << 13,

	SKB_GSO_SCTP = 1 << 14,

	SKB_GSO_ESP = 1 << 15,

	SKB_GSO_UDP = 1 << 16,

	SKB_GSO_UDP_L4 = 1 << 17,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif
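
/* Note: on 64-bit, keeping tail/end as offsets from skb->head rather
 * than as pointers saves space in struct sk_buff, and offsets stay
 * valid when the head buffer is reallocated or copied.
 */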

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@rbnode: RB tree node, alternative to next/prev for netem/tcp
 *	@sk: Socket we are owned by
 *	@tstamp: Time we arrived/left
 *	@dev: Device we arrived on/are leaving by
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@truesize: Buffer size
 *	@users: User count - see {datagram,tcp}.c
 *	(remaining fields are described by the inline comments below)
 */
struct sk_buff {
	union {
		struct {
			/* These two members must be first. */
			struct sk_buff		*next;
			struct sk_buff		*prev;

			union {
				struct net_device	*dev;
				/* Some protocols might use this space to store
				 * information, while device pointer would be
				 * NULL. UDP receive path is one user.
				 */
				unsigned long		dev_scratch;
				int			ip_defrag_offset;
			};
		};
		struct rb_node	rbnode; /* used in netem & tcp stack */
	};
	struct sock		*sk;

	union {
		ktime_t		tstamp;
		u64		skb_mstamp;
	};
	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	union {
		struct {
			unsigned long	_skb_refdst;
			void		(*destructor)(struct sk_buff *skb);
		};
		struct list_head	tcp_tsorted_anchor;
	};

#ifdef CONFIG_XFRM
	struct	sec_path	*sp;
#endif
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	unsigned long		 _nfct;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	struct nf_bridge_info	*nf_bridge;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;

	/* Following fields are _not_ copied in __copy_skb_header()
	 * Note that queue_mapping is here mostly to fill a hole.
	 */
	__u16			queue_mapping;

/* if you move cloned around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define CLONED_MASK	(1 << 7)
#else
#define CLONED_MASK	1
#endif
#define CLONED_OFFSET()		offsetof(struct sk_buff, __cloned_offset)

	__u8			__cloned_offset[0];
	__u8			cloned:1,
				nohdr:1,
				fclone:2,
				peeked:1,
				head_frag:1,
				xmit_more:1,
				pfmemalloc:1;

	/* fields enclosed in headers_start/headers_end are copied
	 * using a single memcpy() in __copy_skb_header()
	 */
	/* private: */
	__u32			headers_start[0];
	/* public: */

/* if you move pkt_type around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_TYPE_MAX	(7 << 5)
#else
#define PKT_TYPE_MAX	7
#endif
#define PKT_TYPE_OFFSET()	offsetof(struct sk_buff, __pkt_type_offset)

	__u8			__pkt_type_offset[0];
	__u8			pkt_type:3;
	__u8			ignore_df:1;
	__u8			nf_trace:1;
	__u8			ip_summed:2;
	__u8			ooo_okay:1;

	__u8			l4_hash:1;
	__u8			sw_hash:1;
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;
	__u8			no_fcs:1;
	/* Indicates the inner headers are valid in the skbuff. */
	__u8			encapsulation:1;
	__u8			encap_hdr_csum:1;
	__u8			csum_valid:1;

	__u8			csum_complete_sw:1;
	__u8			csum_level:2;
	__u8			csum_not_inet:1;
	__u8			dst_pending_confirm:1;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif
	__u8			ipvs_property:1;

	__u8			inner_protocol_type:1;
	__u8			remcsum_offload:1;
#ifdef CONFIG_NET_SWITCHDEV
	__u8			offload_fwd_mark:1;
	__u8			offload_mr_fwd_mark:1;
#endif
#ifdef CONFIG_NET_CLS_ACT
	__u8			tc_skip_classify:1;
	__u8			tc_at_ingress:1;
	__u8			tc_redirected:1;
	__u8			tc_from_ingress:1;
#endif

#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#endif

	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	int			skb_iif;
	__u32			hash;
	__be16			vlan_proto;
	__u16			vlan_tci;
#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
	union {
		unsigned int	napi_id;
		unsigned int	sender_cpu;
	};
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif

	union {
		__u32		mark;
		__u32		reserved_tailroom;
	};

	union {
		__be16		inner_protocol;
		__u8		inner_ipproto;
	};

	__u16			inner_transport_header;
	__u16			inner_network_header;
	__u16			inner_mac_header;

	__be16			protocol;
	__u16			transport_header;
	__u16			network_header;
	__u16			mac_header;

	/* private: */
	__u32			headers_end[0];
	/* public: */

	/* These elements must be at the end, see alloc_skb() for details.  */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	refcount_t		users;
};

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>

#define SKB_ALLOC_FCLONE	0x01 /* allocate skbs from fclone cache */
#define SKB_ALLOC_RX		0x02 /* allocate skbs from pfmemalloc reserves */
#define SKB_ALLOC_NAPI		0x04 /* allocate skbs for NAPI rx */

/* Returns true if the skb was allocated from PFMEMALLOC reserves */
static inline bool skb_pfmemalloc(const struct sk_buff *skb)
{
	return unlikely(skb->pfmemalloc);
}

/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if refcount was _not_ taken
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

#define SKB_NFCT_PTRMASK	~(7UL)

/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of reference taken or not.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check we still are in a
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}

/**
 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * No reference is taken and no dst_release will be called. While for
 * cached dsts deferred reclaim is a basic feature, for entries that are
 * not cached it is caller's job to guarantee that last dst_release for
 * provided dst happens when nobody uses it, eg. after a RCU grace period.
 */
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
}

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}
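
/* Note: a noref dst stored by skb_dst_set_noref() is only guaranteed
 * valid within the RCU section in which it was attached, hence the
 * rcu_read_lock_held() checks in skb_dst().
 */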

/* For mangling skb->pkt_type from user space side from applications
 * such as nft, tc, etc, we only allow a conservative subset of
 * possible pkt_types to be set.
 */
static inline bool skb_pkt_type_ok(u32 ptype)
{
	return ptype <= PACKET_OTHERHOST;
}

static inline unsigned int skb_napi_id(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	return skb->napi_id;
#else
	return 0;
#endif
}

/* decrement the reference count and return true if we can free the skb */
static inline bool skb_unref(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return false;
	if (likely(refcount_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!refcount_dec_and_test(&skb->users)))
		return false;

	return true;
}

void skb_release_head_state(struct sk_buff *skb);
void kfree_skb(struct sk_buff *skb);
void kfree_skb_list(struct sk_buff *segs);
void skb_tx_error(struct sk_buff *skb);
void consume_skb(struct sk_buff *skb);
void __consume_stateless_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;

void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
		      bool *fragstolen, int *delta_truesize);

struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
			    int node);
struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}

struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
				     unsigned long data_len,
				     int max_page_order,
				     int *errcode,
				     gfp_t gfp_mask);

/* Layout of fast clones : [skb1][skb2][fclone_ref] */
struct sk_buff_fclones {
	struct sk_buff	skb1;

	struct sk_buff	skb2;

	refcount_t	fclone_ref;
};

/**
 *	skb_fclone_busy - check if fclone is busy
 *	@sk: socket
 *	@skb: buffer
 *
 * Returns true if skb is a fast clone, and its clone is not freed.
 * Some drivers call skb_orphan() in their ndo_start_xmit(),
 * so we also check that this didnt happen.
 */
static inline bool skb_fclone_busy(const struct sock *sk,
				   const struct sk_buff *skb)
{
	const struct sk_buff_fclones *fclones;

	fclones = container_of(skb, struct sk_buff_fclones, skb1);

	return skb->fclone == SKB_FCLONE_ORIG &&
	       refcount_read(&fclones->fclone_ref) > 1 &&
	       fclones->skb2.sk == sk;
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
}
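
/* Note: skb1, skb2 and fclone_ref live in one fclone_cache slab object,
 * so the first skb_clone() of such an skb (the common TCP transmit
 * case) needs no separate allocation.
 */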

struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
void skb_copy_header(struct sk_buff *new, const struct sk_buff *old);
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone);
static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
					  gfp_t gfp_mask)
{
	return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
}

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
				     unsigned int headroom);
struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
				int newtailroom, gfp_t priority);
int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
				     int offset, int len);
int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
			      int offset, int len);
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);

/**
 *	skb_pad			-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error.
 */
static inline int skb_pad(struct sk_buff *skb, int pad)
{
	return __skb_pad(skb, pad, true);
}
#define dev_kfree_skb(a)	consume_skb(a)

int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length);

int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
			 int offset, size_t size);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st);
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st);
void skb_abort_seq_read(struct skb_seq_state *st);

unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config);

/*
 * Packet hash types specify the type of hash in skb_set_hash.
 *
 * Hash types refer to the protocol layer addresses which are used to
 * construct a packet's hash. The hashes are used to differentiate or identify
 * flows of the protocol layer for the hash.
 *
 * Properties of hashes:
 *
 * 1) Two packets in different flows have different hash values
 * 2) Two packets in the same flow have identical hash values
 *
 * A hash at a higher layer implies the hash at lower layers (e.g. an L4
 * hash implies an L3 hash); setting a lower-layer hash type makes no
 * claim about higher layers.
 */
enum pkt_hash_types {
	PKT_HASH_TYPE_NONE,	/* Undefined type */
	PKT_HASH_TYPE_L2,	/* Input: src_MAC, dest_MAC */
	PKT_HASH_TYPE_L3,	/* Input: src_IP, dst_IP */
	PKT_HASH_TYPE_L4,	/* Input: src_IP, dst_IP, src_port, dst_port */
};

static inline void skb_clear_hash(struct sk_buff *skb)
{
	skb->hash = 0;
	skb->sw_hash = 0;
	skb->l4_hash = 0;
}

static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
{
	if (!skb->l4_hash)
		skb_clear_hash(skb);
}

static inline void
__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
{
	skb->l4_hash = is_l4;
	skb->sw_hash = is_sw;
	skb->hash = hash;
}

static inline void
skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
{
	/* Used by drivers to set hash from HW */
	__skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
}

static inline void
__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
{
	__skb_set_hash(skb, hash, true, is_l4);
}
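
/* Note: in the helpers above, l4_hash records that the hash covers the
 * 4-tuple (usable for flow steering), while sw_hash records that the
 * stack rather than the device computed it.
 */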

void __skb_get_hash(struct sk_buff *skb);
u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
u32 skb_get_poff(const struct sk_buff *skb);
u32 __skb_get_poff(const struct sk_buff *skb, void *data,
		   const struct flow_keys_basic *keys, int hlen);
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
			    void *data, int hlen_proto);

static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
					int thoff, u8 ip_proto)
{
	return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
}

void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count);

bool __skb_flow_dissect(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container,
			void *data, __be16 proto, int nhoff, int hlen,
			unsigned int flags);

static inline bool skb_flow_dissect(const struct sk_buff *skb,
				    struct flow_dissector *flow_dissector,
				    void *target_container, unsigned int flags)
{
	return __skb_flow_dissect(skb, flow_dissector, target_container,
				  NULL, 0, 0, 0, flags);
}

static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
					      struct flow_keys *flow,
					      unsigned int flags)
{
	memset(flow, 0, sizeof(*flow));
	return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
				  NULL, 0, 0, 0, flags);
}

static inline bool
skb_flow_dissect_flow_keys_basic(const struct sk_buff *skb,
				 struct flow_keys_basic *flow, void *data,
				 __be16 proto, int nhoff, int hlen,
				 unsigned int flags)
{
	memset(flow, 0, sizeof(*flow));
	return __skb_flow_dissect(skb, &flow_keys_basic_dissector, flow,
				  data, proto, nhoff, hlen, flags);
}

void
skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
			     struct flow_dissector *flow_dissector,
			     void *target_container);

static inline __u32 skb_get_hash(struct sk_buff *skb)
{
	if (!skb->l4_hash && !skb->sw_hash)
		__skb_get_hash(skb);

	return skb->hash;
}
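
/* Note: skb_get_hash() is lazy - a device-provided or previously
 * computed hash is reused as-is, otherwise the flow dissector runs once
 * and the result is cached in skb->hash.
 */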

static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
{
	if (!skb->l4_hash && !skb->sw_hash) {
		struct flow_keys keys;
		__u32 hash = __get_hash_from_flowi6(fl6, &keys);

		__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
	}

	return skb->hash;
}

__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);

static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
	return skb->hash;
}

static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
{
	to->hash = from->hash;
	to->sw_hash = from->sw_hash;
	to->l4_hash = from->l4_hash;
};

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end - skb->head;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
{
	bool is_zcopy = skb && skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY;

	return is_zcopy ? skb_uarg(skb) : NULL;
}

static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg)
{
	if (skb && uarg && !skb_zcopy(skb)) {
		sock_zerocopy_get(uarg);
		skb_shinfo(skb)->destructor_arg = uarg;
		skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
	}
}

/* Release a reference on a zerocopy structure */
static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
{
	struct ubuf_info *uarg = skb_zcopy(skb);

	if (uarg) {
		if (uarg->callback == sock_zerocopy_callback) {
			uarg->zerocopy = uarg->zerocopy && zerocopy;
			sock_zerocopy_put(uarg);
		} else {
			uarg->callback(uarg, zerocopy);
		}

		skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
	}
}

/* Abort a zerocopy operation on error in a send syscall */
static inline void skb_zcopy_abort(struct sk_buff *skb)
{
	struct ubuf_info *uarg = skb_zcopy(skb);

	if (uarg) {
		sock_zerocopy_put_abort(uarg);
		skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
	}
}

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (const struct sk_buff *) list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (const struct sk_buff *) list;
}

/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (const struct sk_buff *) list;
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb.  It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb.  It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	refcount_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant atomic
 * changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_header_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

/**
 *	__skb_header_release - release reference to header
 *	@skb: buffer to operate on
 */
static inline void __skb_header_release(struct sk_buff *skb)
{
	skb->nohdr = 1;
	atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
}
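
/* Note: after __skb_header_release() the skb promises not to modify its
 * header again; dataref is seeded with an extra payload-only reference
 * in its upper half so clones may share the header safely.
 */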

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return refcount_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt status or with spinlocks held pri must
 *	be GFP_ATOMIC.
 *
 *	NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);

		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet thats being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);

		/* Free our shared copy */
		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->next;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	skb_peek_next - peek skb following the given one from a queue
 *	@skb: skb to start from
 *	@list_: list to peek at
 *
 *	Returns %NULL when the end of the list is met or a pointer to the
 *	next element. The reference count is not incremented and the
 *	reference is therefore volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
					    const struct sk_buff_head *list_)
{
	struct sk_buff *next = skb->next;

	if (next == (struct sk_buff *)list_)
		next = NULL;
	return next;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->prev;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;

}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object.  This allows to initialize the list
 *	aspects of an sk_buff_head without reinitializing things like
 *	the spinlock.  It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known to not be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers should need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
					     struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}
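
/* Note: the double-underscore queue primitives below assume the caller
 * already holds list->lock (or otherwise has exclusive access); the
 * non-underscore variants declared alongside them take the lock.
 */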

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
		struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	__skb_queue_after - queue a buffer after a given one
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

static inline bool skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline unsigned int __skb_pagelen(const struct sk_buff *skb)
{
	unsigned int i, len = 0;

	for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
	return len;
}

static inline unsigned int skb_pagelen(const struct sk_buff *skb)
{
	return skb_headlen(skb) + __skb_pagelen(skb);
}

/**
 * __skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data with @page
 * @size: the length of the data
 *
 * Initialises the @i'th fragment of @skb to point to &size bytes at
 * offset @off within @page.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	/*
	 * Propagate page pfmemalloc to the skb if we can. The problem is
	 * that not all callers have unique ownership of the page but rely
	 * on page_is_pfmemalloc doing the right thing(tm).
	 */
	frag->page.p		  = page;
	frag->page_offset	  = off;
	skb_frag_size_set(frag, size);

	page = compound_head(page);
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc	= true;
}

/**
 * skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data with @page
 * @size: the length of the data
 *
 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
 * @skb to point to @size bytes at offset @off within @page. In
 * addition updates @skb such that @i is the last fragment.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	__skb_fill_page_desc(skb, i, page, off, size);
	skb_shinfo(skb)->nr_frags = i + 1;
}

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize);

#define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_has_frag_list(skb))
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
void *skb_put(struct sk_buff *skb, unsigned int len);
static inline void *__skb_put(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = __skb_put(skb, len);

	memset(tmp, 0, len);
	return tmp;
}

static inline void *__skb_put_data(struct sk_buff *skb, const void *data,
				   unsigned int len)
{
	void *tmp = __skb_put(skb, len);

	memcpy(tmp, data, len);
	return tmp;
}

static inline void __skb_put_u8(struct sk_buff *skb, u8 val)
{
	*(u8 *)__skb_put(skb, 1) = val;
}

static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memset(tmp, 0, len);

	return tmp;
}

static inline void *skb_put_data(struct sk_buff *skb, const void *data,
				 unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memcpy(tmp, data, len);

	return tmp;
}

static inline void skb_put_u8(struct sk_buff *skb, u8 val)
{
	*(u8 *)skb_put(skb, 1) = val;
}

void *skb_push(struct sk_buff *skb, unsigned int len);
static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

void *skb_pull(struct sk_buff *skb, unsigned int len);
static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

void *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
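
/* Note: pskb_may_pull() only guarantees that len bytes are linear at
 * skb->data. It may rearrange the buffer to achieve that, so cached
 * pointers into the skb must be reloaded after a successful call.
 */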

void skb_condense(struct sk_buff *skb);

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_availroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 *	allocated by sk_stream_alloc()
 */
static inline int skb_availroom(const struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb))
		return 0;

	return skb->end - skb->tail - skb->reserved_tailroom;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}
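
/* Note: typical driver usage is skb_reserve(skb, NET_IP_ALIGN) right
 * after allocation, so that the IP header lands on an aligned boundary
 * (see the NET_IP_ALIGN discussion further below).
 */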

/**
 *	skb_tailroom_reserve - adjust reserved_tailroom
 *	@skb: buffer to alter
 *	@mtu: maximum amount of headlen permitted
 *	@needed_tailroom: minimum amount of reserved_tailroom
 *
 *	Set reserved_tailroom so that headlen can be as large as possible but
 *	not larger than mtu and tailroom cannot be smaller than
 *	needed_tailroom.
 *	The required headroom should already have been reserved before using
 *	this function.
 */
static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
					unsigned int needed_tailroom)
{
	SKB_LINEAR_ASSERT(skb);
	if (mtu < skb_tailroom(skb) - needed_tailroom)
		/* use at most mtu */
		skb->reserved_tailroom = skb_tailroom(skb) - mtu;
	else
		/* use up to all available space */
		skb->reserved_tailroom = needed_tailroom;
}

#define ENCAP_TYPE_ETHER	0
#define ENCAP_TYPE_IPPROTO	1

static inline void skb_set_inner_protocol(struct sk_buff *skb,
					  __be16 protocol)
{
	skb->inner_protocol = protocol;
	skb->inner_protocol_type = ENCAP_TYPE_ETHER;
}

static inline void skb_set_inner_ipproto(struct sk_buff *skb,
					 __u8 ipproto)
{
	skb->inner_ipproto = ipproto;
	skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
}

static inline void skb_reset_inner_headers(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->mac_header;
	skb->inner_network_header = skb->network_header;
	skb->inner_transport_header = skb->transport_header;
}

static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}

static inline unsigned char *skb_inner_transport_header(const struct sk_buff
							*skb)
{
	return skb->head + skb->inner_transport_header;
}

static inline int skb_inner_transport_offset(const struct sk_buff *skb)
{
	return skb_inner_transport_header(skb) - skb->data;
}

static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
	skb->inner_transport_header = skb->data - skb->head;
}

static inline void skb_set_inner_transport_header(struct sk_buff *skb,
						  const int offset)
{
	skb_reset_inner_transport_header(skb);
	skb->inner_transport_header += offset;
}

static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_network_header;
}

static inline void skb_reset_inner_network_header(struct sk_buff *skb)
{
	skb->inner_network_header = skb->data - skb->head;
}

static inline void skb_set_inner_network_header(struct sk_buff *skb,
						const int offset)
{
	skb_reset_inner_network_header(skb);
	skb->inner_network_header += offset;
}

static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_mac_header;
}

static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->data - skb->head;
}

static inline void skb_set_inner_mac_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_inner_mac_header(skb);
	skb->inner_mac_header += offset;
}

static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
{
	return skb->transport_header != (typeof(skb->transport_header))~0U;
}

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_offset(const struct sk_buff *skb)
{
	return skb_mac_header(skb) - skb->data;
}

static inline u32 skb_mac_header_len(const struct sk_buff *skb)
{
	return skb->network_header - skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != (typeof(skb->mac_header))~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

static inline void skb_pop_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->network_header;
}

static inline void skb_probe_transport_header(struct sk_buff *skb,
					      const int offset_hint)
{
	struct flow_keys_basic keys;

	if (skb_transport_header_was_set(skb))
		return;

	if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
		skb_set_transport_header(skb, keys.control.thoff);
	else
		skb_set_transport_header(skb, offset_hint);
}

static inline void skb_mac_header_rebuild(struct sk_buff *skb)
{
	if (skb_mac_header_was_set(skb)) {
		const unsigned char *old_mac = skb_mac_header(skb);

		skb_set_mac_header(skb, -skb->mac_len);
		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
	}
}

static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}

static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
{
	return skb->head + skb->csum_start;
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
{
	return skb->inner_transport_header - skb->inner_network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline int skb_inner_network_offset(const struct sk_buff *skb)
{
	return skb_inner_network_header(skb) - skb->data;
}

static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
	return pskb_may_pull(skb, skb_network_offset(skb) + len);
}

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 32 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom, you should not reduce this.
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif

int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb_is_nonlinear(skb))) {
		WARN_ON(1);
		return;
	}
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	__skb_set_length(skb, len);
}

void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
{
	unsigned int diff = len - skb->len;

	if (skb_tailroom(skb) < diff) {
		int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
					   GFP_ATOMIC);
		if (ret)
			return ret;
	}
	__skb_set_length(skb, len);
	return 0;
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor) {
		skb->destructor(skb);
		skb->destructor = NULL;
		skb->sk		= NULL;
	} else {
		BUG_ON(skb->sk);
	}
}

/**
 *	skb_orphan_frags - orphan the frags contained in a buffer
 *	@skb: buffer to orphan frags from
 *	@gfp_mask: allocation mask for replacement pages
 *
 *	For each frag in the SKB which needs a destructor (i.e. has an
 *	owner) create a copy of that frag and release the original page by
 *	calling the destructor.
 */
static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (likely(!skb_zcopy(skb)))
		return 0;
	if (skb_uarg(skb)->callback == sock_zerocopy_callback)
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

/* Frags must be orphaned, even if refcounted, if skb might loop to rx path */
static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (likely(!skb_zcopy(skb)))
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

void skb_rbtree_purge(struct rb_root *root);

void *netdev_alloc_frag(unsigned int fragsz);

struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
				   gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
					       unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

/* legacy helper around __netdev_alloc_skb() */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	return __netdev_alloc_skb(NULL, length, gfp_mask);
}

/* legacy helper around netdev_alloc_skb() */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return netdev_alloc_skb(NULL, length);
}

static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}

static inline void skb_free_frag(void *addr)
{
	page_frag_free(addr);
}

void *napi_alloc_frag(unsigned int fragsz);
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
				 unsigned int length, gfp_t gfp_mask);
static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
					     unsigned int length)
{
	return __napi_alloc_skb(napi, length, GFP_ATOMIC);
}
void napi_consume_skb(struct sk_buff *skb, int budget);

void __kfree_skb_flush(void);
void __kfree_skb_defer(struct sk_buff *skb);

/**
 * __dev_alloc_pages - allocate page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 * @order: size of the allocation
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
					     unsigned int order)
{
	/* This piece of code contains several assumptions.
	 * 1.  This is for device Rx, therefore a cold page is preferred.
	 * 2.  The expectation is the user wants a compound page.
	 * 3.  If requesting a order 0 page it will not be compound
	 *     due to the check to see if order has a value in prep_new_page
	 * 4.  __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
	 *     code in gfp_to_alloc_flags that should be enforcing this.
	 */
	gfp_mask |= __GFP_COMP | __GFP_MEMALLOC;

	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
}

static inline struct page *dev_alloc_pages(unsigned int order)
{
	return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
}

/**
 * __dev_alloc_page - allocate a page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
{
	return __dev_alloc_pages(gfp_mask, 0);
}

static inline struct page *dev_alloc_page(void)
{
	return dev_alloc_pages(0);
}

/**
 *	skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
 *	@page: The page that was allocated from skb_alloc_page
 *	@skb: The skb that may need pfmemalloc set
 */
static inline void skb_propagate_pfmemalloc(struct page *page,
					    struct sk_buff *skb)
{
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc = true;
}

/**
 * skb_frag_page - retrieve the page referred to by a paged fragment
 * @frag: the paged fragment
 *
 * Returns the &struct page associated with @frag.
 */
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
	return frag->page.p;
}

/**
 * __skb_frag_ref - take an addition reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Takes an additional reference on the paged fragment @frag.
 */
static inline void __skb_frag_ref(skb_frag_t *frag)
{
	get_page(skb_frag_page(frag));
}

/**
 * skb_frag_ref - take an addition reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset.
 *
 * Takes an additional reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_ref(struct sk_buff *skb, int f)
{
	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}

/**
 * __skb_frag_unref - release a reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Releases a reference on the paged fragment @frag.
 */
static inline void __skb_frag_unref(skb_frag_t *frag)
{
	put_page(skb_frag_page(frag));
}

/**
 * skb_frag_unref - release a reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset
 *
 * Releases a reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_unref(struct sk_buff *skb, int f)
{
	__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
}

/**
 * skb_frag_address - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. The page must already
 * be mapped.
 */
static inline void *skb_frag_address(const skb_frag_t *frag)
{
	return page_address(skb_frag_page(frag)) + frag->page_offset;
}

/**
 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. Checks that the page
 * is mapped and returns %NULL otherwise.
 */
static inline void *skb_frag_address_safe(const skb_frag_t *frag)
{
	void *ptr = page_address(skb_frag_page(frag));
	if (unlikely(!ptr))
		return NULL;

	return ptr + frag->page_offset;
}

/**
 * __skb_frag_set_page - sets the page contained in a paged fragment
 * @frag: the paged fragment
 * @page: the page to set
 *
 * Sets the fragment @frag to contain @page.
 */
static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
	frag->page.p = page;
}

/**
 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
 * @skb: the buffer
 * @f: the fragment offset
 * @page: the page to set
 *
 * Sets the @f'th fragment of @skb to contain @page.
 */
static inline void skb_frag_set_page(struct sk_buff *skb, int f,
				     struct page *page)
{
	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
}

bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);

/**
 * skb_frag_dma_map - maps a paged fragment via the DMA API
 * @dev: the device to map the fragment to
 * @frag: the paged fragment to map
 * @offset: the offset within the fragment (starting at the
 *          fragment's own offset)
 * @size: the number of bytes to map
 * @dir: the direction of the mapping
 *
 * Maps the page associated with @frag to @dev.
 */
static inline dma_addr_t skb_frag_dma_map(struct device *dev,
					  const skb_frag_t *frag,
					  size_t offset, size_t size,
					  enum dma_data_direction dir)
{
	return dma_map_page(dev, skb_frag_page(frag),
			    frag->page_offset + offset, size, dir);
}
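
/* Note: the address returned by skb_frag_dma_map() must be checked with
 * dma_mapping_error() before the device uses it, since the mapping can
 * fail (e.g. when an IOMMU runs out of space).
 */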

static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
					gfp_t gfp_mask)
{
	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
}

static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
						  gfp_t gfp_mask)
{
	return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
}

/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not requires the data to be copied.
 */
static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int skb_try_make_writable(struct sk_buff *skb,
					unsigned int write_len)
{
	return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
	       pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and original skb is not changed.
 *
 *	The result is skb with writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 *	skb_cow_head - skb_cow but only making the head writable
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	This function is identical to skb_cow except that we replace the
 *	skb_cloned check by skb_header_cloned.  It should be used when
 *	you only need to push on some header and do not need to modify
 *	the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}

/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

/**
 *	__skb_put_padto - increase size and pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *	@free_on_error: free buffer on error
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error if @free_on_error is true.
 */
static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
				  bool free_on_error)
{
	unsigned int size = skb->len;

	if (unlikely(size < len)) {
		len -= size;
		if (__skb_pad(skb, len, free_on_error))
			return -ENOMEM;
		__skb_put(skb, len);
	}
	return 0;
}

/**
 *	skb_put_padto - increase size and pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
{
	return __skb_put_padto(skb, len, true);
}

static inline int skb_add_data(struct sk_buff *skb,
			       struct iov_iter *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		__wsum csum = 0;
		if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
						 &csum, from)) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
				    const struct page *page, int off)
{
	if (skb_zcopy(skb))
		return false;
	if (i) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == skb_frag_page(frag) &&
		       off == frag->page_offset + skb_frag_size(frag);
	}
	return false;
}
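
/* Note: coalescing is only safe when the new chunk directly extends the
 * last fragment within the same page, and never on zerocopy skbs, whose
 * user pages are tracked via the ubuf_info machinery.
 */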
3021
3022static inline int __skb_linearize(struct sk_buff *skb)
3023{
3024 return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
3025}
3026
/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}
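
/* Usage sketch (illustrative only): code that must scan the whole packet
 * through skb->data, e.g. when feeding hardware that cannot do
 * scatter/gather, flattens any paged data first ("my_parse" is a
 * hypothetical helper):
 *
 *	if (skb_linearize(skb))
 *		goto drop;
 *	my_parse(skb->data, skb->len);
 */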

/**
 * skb_has_shared_frag - can any frag be overwritten
 * @skb: buffer to test
 *
 * Return true if the skb has at least one frag that might be modified
 * by an external entity (as in vmsplice()/sendfile()).
 */
static inline bool skb_has_shared_frag(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) &&
	       skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

static __always_inline void
__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
		     unsigned int off)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_block_sub(skb->csum,
					   csum_partial(start, len, 0), off);
	else if (skb->ip_summed == CHECKSUM_PARTIAL &&
		 skb_checksum_start_offset(skb) < 0)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */
static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	__skb_postpull_rcsum(skb, start, len, 0);
}

static __always_inline void
__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
		     unsigned int off)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_block_add(skb->csum,
					   csum_partial(start, len, 0), off);
}

/**
 *	skb_postpush_rcsum - update checksum for received skb after push
 *	@skb: buffer to update
 *	@start: start of data after push
 *	@len: length of data pushed
 *
 *	After doing a push on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum.
 */
static inline void skb_postpush_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	__skb_postpush_rcsum(skb, start, len, 0);
}

void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *	skb_push_rcsum - push skb and update receive checksum
 *	@skb: buffer to update
 *	@len: length of data pulled
 *
 *	This function performs an skb_push on the packet and updates
 *	the CHECKSUM_COMPLETE checksum.  It should be used on
 *	receive path processing instead of skb_push unless you know
 *	that the checksum difference is zero (e.g., a valid IP header)
 *	or you are setting ip_summed to CHECKSUM_NONE.
 */
static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
{
	skb_push(skb, len);
	skb_postpush_rcsum(skb, skb->data, len);
	return skb->data;
}

int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets is still valid after the operation.
 *	It can change skb pointers.
 */
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	return pskb_trim_rcsum_slow(skb, len);
}
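
/* Usage sketch (illustrative only): a receive path trimming the buffer
 * down to the length claimed by the protocol header ("tot_len" stands
 * for that hypothetical value); unlike a bare trim, this keeps ip_summed
 * consistent when CHECKSUM_COMPLETE bytes are dropped:
 *
 *	if (pskb_trim_rcsum(skb, tot_len))
 *		goto drop;
 */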

static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	__skb_trim(skb, len);
	return 0;
}

static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __skb_grow(skb, len);
}

#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
#define skb_rb_first(root) rb_to_skb(rb_first(root))
#define skb_rb_last(root)  rb_to_skb(rb_last(root))
#define skb_rb_next(skb)   rb_to_skb(rb_next(&(skb)->rbnode))
#define skb_rb_prev(skb)   rb_to_skb(rb_prev(&(skb)->rbnode))

#define skb_queue_walk(queue, skb)					\
		for (skb = (queue)->next;				\
		     skb != (struct sk_buff *)(queue);			\
		     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)				\
		for (skb = (queue)->next, tmp = skb->next;		\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->next)

#define skb_queue_walk_from(queue, skb)					\
		for (; skb != (struct sk_buff *)(queue);		\
		     skb = skb->next)

#define skb_rbtree_walk(skb, root)					\
		for (skb = skb_rb_first(root); skb != NULL;		\
		     skb = skb_rb_next(skb))

#define skb_rbtree_walk_from(skb)					\
		for (; skb != NULL;					\
		     skb = skb_rb_next(skb))

#define skb_rbtree_walk_from_safe(skb, tmp)				\
		for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \
		     skb = tmp)

#define skb_queue_walk_from_safe(queue, skb, tmp)			\
		for (tmp = skb->next;					\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb)				\
		for (skb = (queue)->prev;				\
		     skb != (struct sk_buff *)(queue);			\
		     skb = skb->prev)

#define skb_queue_reverse_walk_safe(queue, skb, tmp)			\
		for (skb = (queue)->prev, tmp = skb->prev;		\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->prev)

#define skb_queue_reverse_walk_from_safe(queue, skb, tmp)		\
		for (tmp = skb->prev;					\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->prev)
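
/* Usage sketch (illustrative only): selectively draining a queue that the
 * caller has locked; the _safe variants keep a lookahead pointer so the
 * current skb may be unlinked ("my_match" is a hypothetical predicate):
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_queue_walk_safe(&sk->sk_receive_queue, skb, tmp) {
 *		if (my_match(skb)) {
 *			__skb_unlink(skb, &sk->sk_receive_queue);
 *			kfree_skb(skb);
 *		}
 *	}
 */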

static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

#define skb_walk_frags(skb, iter)	\
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)

int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
				const struct sk_buff *skb);
struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
					  struct sk_buff_head *queue,
					  unsigned int flags,
					  void (*destructor)(struct sock *sk,
							     struct sk_buff *skb),
					  int *peeked, int *off, int *err,
					  struct sk_buff **last);
struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
					void (*destructor)(struct sock *sk,
							   struct sk_buff *skb),
					int *peeked, int *off, int *err,
					struct sk_buff **last);
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
				    void (*destructor)(struct sock *sk,
						       struct sk_buff *skb),
				    int *peeked, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
				  int *err);
__poll_t datagram_poll(struct file *file, struct socket *sock,
		       struct poll_table_struct *wait);
int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
			   struct iov_iter *to, int size);
static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
					struct msghdr *msg, int size)
{
	return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
}
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
				   struct msghdr *msg);
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
				struct iov_iter *from, int len);
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
static inline void skb_free_datagram_locked(struct sock *sk,
					    struct sk_buff *skb)
{
	__skb_free_datagram_locked(sk, skb, 0);
}
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
			      int len, __wsum csum);
int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int len,
		    unsigned int flags);
int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
			 int len);
int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
		 int len, int hlen);
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
void skb_scrub_packet(struct sk_buff *skb, bool xnet);
bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, int write_len);
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
			     gfp_t gfp);

static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
{
	return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT;
}

static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
{
	return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
}

struct skb_checksum_ops {
	__wsum (*update)(const void *mem, int len, __wsum wsum);
	__wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
};

extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly;

__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
		      __wsum csum, const struct skb_checksum_ops *ops);
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
		    __wsum csum);

static inline void * __must_check
__skb_header_pointer(const struct sk_buff *skb, int offset,
		     int len, void *data, int hlen, void *buffer)
{
	if (hlen - offset >= len)
		return data + offset;

	if (!skb ||
	    skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}

static inline void * __must_check
skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
{
	return __skb_header_pointer(skb, offset, len, skb->data,
				    skb_headlen(skb), buffer);
}
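
/* Usage sketch (illustrative only): the canonical way to read a header
 * that may or may not sit in the linear area.  The result points either
 * into the skb or into the caller's buffer, so it must be consumed
 * before the buffer goes out of scope ("thoff" is a hypothetical
 * transport header offset):
 *
 *	struct tcphdr _tcph;
 *	const struct tcphdr *th;
 *
 *	th = skb_header_pointer(skb, thoff, sizeof(_tcph), &_tcph);
 *	if (!th)
 *		return -EINVAL;
 */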

/**
 *	skb_needs_linearize - check if we need to linearize a given skb
 *			      depending on the given device features.
 *	@skb: socket buffer to check
 *	@features: net device features
 *
 *	Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG.
 */
static inline bool skb_needs_linearize(struct sk_buff *skb,
				       netdev_features_t features)
{
	return skb_is_nonlinear(skb) &&
	       ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
		(skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
}

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}

void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and
 *	stores it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct timeval *stamp)
{
	*stamp = ktime_to_timeval(skb->tstamp);
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
				       struct timespec *stamp)
{
	*stamp = ktime_to_timespec(skb->tstamp);
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return 0;
}

static inline u8 skb_metadata_len(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->meta_len;
}

static inline void *skb_metadata_end(const struct sk_buff *skb)
{
	return skb_mac_header(skb);
}

static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
					  const struct sk_buff *skb_b,
					  u8 meta_len)
{
	const void *a = skb_metadata_end(skb_a);
	const void *b = skb_metadata_end(skb_b);

#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	u64 diffs = 0;

	/* Compare a word at a time, walking backwards from the end of the
	 * metadata area; the missing break statements are intentional,
	 * each case falls through to compare the remaining words.
	 */
	switch (meta_len) {
#define __it(x, op) (x -= sizeof(u##op))
#define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
	case 32: diffs |= __it_diff(a, b, 64);
		 /* fall through */
	case 24: diffs |= __it_diff(a, b, 64);
		 /* fall through */
	case 16: diffs |= __it_diff(a, b, 64);
		 /* fall through */
	case  8: diffs |= __it_diff(a, b, 64);
		break;
	case 28: diffs |= __it_diff(a, b, 64);
		 /* fall through */
	case 20: diffs |= __it_diff(a, b, 64);
		 /* fall through */
	case 12: diffs |= __it_diff(a, b, 64);
		 /* fall through */
	case  4: diffs |= __it_diff(a, b, 32);
		break;
	}
	return diffs;
#else
	return memcmp(a - meta_len, b - meta_len, meta_len);
#endif
}

static inline bool skb_metadata_differs(const struct sk_buff *skb_a,
					const struct sk_buff *skb_b)
{
	u8 len_a = skb_metadata_len(skb_a);
	u8 len_b = skb_metadata_len(skb_b);

	if (!(len_a | len_b))
		return false;

	return len_a != len_b ?
	       true : __skb_metadata_differs(skb_a, skb_b, len_a);
}

static inline void skb_metadata_set(struct sk_buff *skb, u8 meta_len)
{
	skb_shinfo(skb)->meta_len = meta_len;
}

static inline void skb_metadata_clear(struct sk_buff *skb)
{
	skb_metadata_set(skb, 0);
}
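
/* Usage sketch (illustrative only): a driver building an skb from an XDP
 * buffer records how much metadata the BPF program reserved in front of
 * the packet so that GRO can compare it later:
 *
 *	metasize = xdp->data - xdp->data_meta;
 *	if (metasize)
 *		skb_metadata_set(skb, metasize);
 */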

struct sk_buff *skb_clone_sk(struct sk_buff *skb);

#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING

void skb_clone_tx_timestamp(struct sk_buff *skb);
bool skb_defer_rx_timestamp(struct sk_buff *skb);

#else

static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	return false;
}

#endif

/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb back to the stack with a
 * timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps
 *
 */
void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps);

void __skb_tstamp_tx(struct sk_buff *orig_skb,
		     struct skb_shared_hwtstamps *hwtstamps,
		     struct sock *sk, int tstype);

/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb:	the original outgoing packet
 * @hwtstamps:	hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamps (if non-NULL) or generates a
 * software time stamp (otherwise), then queues the clone to the error
 * queue of the socket.  Errors are silently ignored.
 */
void skb_tstamp_tx(struct sk_buff *orig_skb,
		   struct skb_shared_hwtstamps *hwtstamps);

/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC Drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * Specifically, one should make absolutely sure that this function is
 * called before TX completion of this packet can trigger.  Otherwise
 * the packet could potentially already be freed.
 *
 * @skb: A socket buffer.
 */
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	skb_clone_tx_timestamp(skb);
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
		skb_tstamp_tx(skb, NULL);
}
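
/* Usage sketch (illustrative only): in a driver's ndo_start_xmit(),
 * immediately before posting the buffer to the hardware ring
 * ("my_hw_post_tx" is a stand-in for the device-specific descriptor
 * write); the call must happen before TX completion could free the skb:
 *
 *	skb_tx_timestamp(skb);
 *	my_hw_post_tx(priv, skb);
 */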

/**
 * skb_complete_wifi_ack - deliver skb with wifi status
 *
 * @skb: the original outgoing packet
 * @acked: ack status
 *
 */
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
__sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
		skb->csum_valid ||
		(skb->ip_summed == CHECKSUM_PARTIAL &&
		 skb_checksum_start_offset(skb) >= 0));
}

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP,
 *	this function can be used to verify that checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is not complete (i.e., skb->csum_valid is false), or
 *	the correct checksum otherwise.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}

static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level == 0)
			skb->ip_summed = CHECKSUM_NONE;
		else
			skb->csum_level--;
	}
}

static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
			skb->csum_level++;
	} else if (skb->ip_summed == CHECKSUM_NONE) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = 0;
	}
}

/* Check if we need to perform checksum complete validation.
 *
 * Returns true if checksum complete is needed, false otherwise
 * (either checksum is unnecessary or zero checksum is allowed).
 */
static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
						  bool zero_okay,
						  __sum16 check)
{
	if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
		skb->csum_valid = 1;
		__skb_decr_checksum_unnecessary(skb);
		return false;
	}

	return true;
}

/* For small packets <= CHECKSUM_BREAK, perform checksum complete directly
 * in the checksum initialization path.
 */
#define CHECKSUM_BREAK 76

/* Unset checksum-complete
 *
 * Unsetting checksum complete can be done when a packet is being
 * modified (uncompressed for instance) and the checksum-complete value
 * is therefore invalidated.
 */
static inline void skb_checksum_complete_unset(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/* Validate (init) checksum based on checksum complete.
 *
 * Return values:
 *   0: checksum is validated, or an attempt will be made in
 *	skb_checksum_complete; in the latter case the device checksum is
 *	stored in skb->csum for use by __skb_checksum_complete
 *   non-zero: value of the invalid checksum
 */
static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
						       bool complete,
						       __wsum psum)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!csum_fold(csum_add(psum, skb->csum))) {
			skb->csum_valid = 1;
			return 0;
		}
	}

	skb->csum = psum;

	if (complete || skb->len <= CHECKSUM_BREAK) {
		__sum16 csum;

		csum = __skb_checksum_complete(skb);
		skb->csum_valid = !csum;
		return csum;
	}

	return 0;
}

static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
{
	return 0;
}

/* Perform checksum validate (init). Note that this is a macro since we only
 * want to call the pseudo-header computation function when it is actually
 * necessary. First we try to validate without any computation (checksum
 * unnecessary), then fall back to checksum complete, calling the function
 * to compute the pseudo header.
 *
 * Return values:
 *   0: checksum is validated or an attempt will be made in skb_checksum_complete
 *   non-zero: value of the invalid checksum
 */
#define __skb_checksum_validate(skb, proto, complete,			\
				zero_okay, check, compute_pseudo)	\
({									\
	__sum16 __ret = 0;						\
	skb->csum_valid = 0;						\
	if (__skb_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_checksum_validate_complete(skb,		\
				complete, compute_pseudo(skb, proto));	\
	__ret;								\
})

#define skb_checksum_init(skb, proto, compute_pseudo)			\
	__skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)

#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \
	__skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)

#define skb_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)

#define skb_checksum_validate_zero_check(skb, proto, check,		\
					 compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)

#define skb_checksum_simple_validate(skb)				\
	__skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
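
/* Usage sketch (illustrative only): a receive handler for a protocol with
 * a mandatory checksum validates against its pseudo-header, where
 * "my_compute_pseudo" stands for the protocol's pseudo-header routine; a
 * nonzero result is the folded value of the invalid checksum:
 *
 *	if (skb_checksum_validate(skb, IPPROTO_UDP, my_compute_pseudo))
 *		goto csum_error;
 */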

static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
{
	return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid);
}

static inline void __skb_checksum_convert(struct sk_buff *skb,
					  __sum16 check, __wsum pseudo)
{
	skb->csum = ~pseudo;
	skb->ip_summed = CHECKSUM_COMPLETE;
}

#define skb_checksum_try_convert(skb, proto, check, compute_pseudo)	\
do {									\
	if (__skb_checksum_convert_check(skb))				\
		__skb_checksum_convert(skb, check,			\
				       compute_pseudo(skb, proto));	\
} while (0)

static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
					      u16 start, u16 offset)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
	skb->csum_offset = offset - start;
}

/* Update the skb and packet to reflect the remote checksum offload
 * operation. When called, ptr indicates the starting point for skb->csum
 * when ip_summed is CHECKSUM_COMPLETE. If checksum complete needs to be
 * created here, skb_postpull_rcsum() is done and skb->ip_summed is set
 * to CHECKSUM_COMPLETE.
 */
static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
				       int start, int offset, bool nopartial)
{
	__wsum delta;

	if (!nopartial) {
		skb_remcsum_adjust_partial(skb, ptr, start, offset);
		return;
	}

	if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
		__skb_checksum_complete(skb);
		skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
	}

	delta = remcsum_adjust(ptr, skb->csum, start, offset);

	/* Adjust skb->csum since we changed the packet */
	skb->csum = csum_add(skb->csum, delta);
}

static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	return (void *)(skb->_nfct & SKB_NFCT_PTRMASK);
#else
	return NULL;
#endif
}

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && refcount_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		refcount_inc(&nf_bridge->use);
}
#endif
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb_nfct(skb));
	skb->_nfct = 0;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

static inline void nf_reset_trace(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
	skb->nf_trace = 0;
#endif
}

static inline void ipvs_reset(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IP_VS)
	skb->ipvs_property = 0;
#endif
}

/* Note: This doesn't put any conntrack and bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
			     bool copy)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->_nfct = src->_nfct;
	nf_conntrack_get(skb_nfct(src));
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	dst->nf_bridge = src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
	if (copy)
		dst->nf_trace = src->nf_trace;
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb_nfct(dst));
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src, true);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline bool skb_irq_freeable(const struct sk_buff *skb)
{
	return !skb->destructor &&
#if IS_ENABLED(CONFIG_XFRM)
	       !skb->sp &&
#endif
	       !skb_nfct(skb) &&
	       !skb->_skb_refdst &&
	       !skb_has_frag_list(skb);
}

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}
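
/* Usage sketch (illustrative only): multiqueue drivers tag each received
 * skb with the hardware queue it arrived on so transmit queue selection
 * and flow steering can reuse it ("rx_ring->queue_index" stands for the
 * driver's per-ring index).  Note the +1/-1 encoding above: a
 * queue_mapping of zero simply means "not recorded".
 *
 *	skb_record_rx_queue(skb, rx_ring->queue_index);
 */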

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}

static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val)
{
	skb->dst_pending_confirm = val;
}

static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
{
	return skb->dst_pending_confirm != 0;
}

static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
	return skb->sp;
#else
	return NULL;
#endif
}

/* Keeps track of the mac header offset relative to skb->head, which is
 * useful for TSO of tunneling protocols such as GRE: for a non-tunnel
 * skb it points at skb_mac_header(), for a tunnel skb at the outer mac
 * header.  encap_level counts nested encapsulations; csum and csum_start
 * carry the partial checksum state of the segments being built.
 */
struct skb_gso_cb {
	union {
		int	mac_offset;
		int	data_offset;
	};
	int	encap_level;
	__wsum	csum;
	__u16	csum_start;
};
#define SKB_SGO_CB_OFFSET	32
#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))

static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
{
	return (skb_mac_header(inner_skb) - inner_skb->head) -
	       SKB_GSO_CB(inner_skb)->mac_offset;
}

static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
{
	int new_headroom, headroom;
	int ret;

	headroom = skb_headroom(skb);
	ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
	if (ret)
		return ret;

	new_headroom = skb_headroom(skb);
	SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
	return 0;
}

static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
{
	/* Do not update partial checksums if remote checksum is enabled. */
	if (skb->remcsum_offload)
		return;

	SKB_GSO_CB(skb)->csum = res;
	SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
}

/* Compute the checksum for a gso segment. First compute the checksum value
 * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
 * then add in skb->csum (checksum from csum_start to end of packet).
 * skb->csum and csum_start are then updated to reflect the checksum of the
 * resultant packet starting from the transport header -- the resultant
 * checksum is in the res argument (i.e. RFC 1624).
 */
static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
{
	unsigned char *csum_start = skb_transport_header(skb);
	int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
	__wsum partial = SKB_GSO_CB(skb)->csum;

	SKB_GSO_CB(skb)->csum = res;
	SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;

	return csum_fold(csum_partial(csum_start, plen, partial));
}

static inline bool skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
}

static inline void skb_gso_reset(struct sk_buff *skb)
{
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_segs = 0;
	skb_shinfo(skb)->gso_type = 0;
}

static inline void skb_increase_gso_size(struct skb_shared_info *shinfo,
					 u16 increment)
{
	if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
		return;
	shinfo->gso_size += increment;
}

static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo,
					 u16 decrement)
{
	if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
		return;
	shinfo->gso_size -= decrement;
}

void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set. */
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one.  Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper, to document places where we make this assertion.
 */
static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
				     unsigned int transport_len,
				     __sum16(*skb_chkf)(struct sk_buff *skb));

/**
 * skb_head_is_locked - determine if the skb->head is locked down
 * @skb: skb to check
 *
 * The head on skbs built around a head frag can be removed if they are
 * not cloned.  This function returns true if the skb head is locked down
 * due to either being allocated via kmalloc, or by being a clone with
 * multiple references to the head.
 */
static inline bool skb_head_is_locked(const struct sk_buff *skb)
{
	return !skb->head_frag || skb_cloned(skb);
}

/* Local Checksum Offload.
 * Compute outer checksum based on the assumption that the
 * inner checksum will be offloaded later.
 * See Documentation/networking/checksum-offloads.txt for
 * explanation of how this works.
 * Fill in outer checksum adjustment (e.g. with sum of outer
 * pseudo-header) before calling.
 * Also ensure that skb->csum_start and skb->csum_offset still
 * describe the inner checksum field at this point.
 */
static inline __wsum lco_csum(struct sk_buff *skb)
{
	unsigned char *csum_start = skb_checksum_start(skb);
	unsigned char *l4_hdr = skb_transport_header(skb);
	__wsum partial;

	/* Start with complement of inner checksum adjustment */
	partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
						    skb->csum_offset));

	/* Add in checksum of our headers (incl. outer checksum
	 * adjustment filled in by caller) and return result.
	 */
	return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
}
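
/* Usage sketch (illustrative only): a UDP tunnel computing the outer UDP
 * checksum while the inner checksum is left to hardware, as in the
 * kernel's udp_set_csum(); "uh" is the outer UDP header whose check field
 * was seeded with the outer pseudo-header sum:
 *
 *	uh->check = csum_fold(lco_csum(skb));
 *	if (uh->check == 0)
 *		uh->check = CSUM_MANGLED_0;
 */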

#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */