1
2
3
4
5
6
7
8
9
10
11
12
13
14#ifndef _LINUX_SKBUFF_H
15#define _LINUX_SKBUFF_H
16
17#include <linux/kernel.h>
18#include <linux/kmemcheck.h>
19#include <linux/compiler.h>
20#include <linux/time.h>
21#include <linux/bug.h>
22#include <linux/cache.h>
23#include <linux/rbtree.h>
24#include <linux/socket.h>
25#include <linux/refcount.h>
26
27#include <linux/atomic.h>
28#include <asm/types.h>
29#include <linux/spinlock.h>
30#include <linux/net.h>
31#include <linux/textsearch.h>
32#include <net/checksum.h>
33#include <linux/rcupdate.h>
34#include <linux/hrtimer.h>
35#include <linux/dma-mapping.h>
36#include <linux/netdev_features.h>
37#include <linux/sched.h>
38#include <linux/sched/clock.h>
39#include <net/flow_dissector.h>
40#include <linux/splice.h>
41#include <linux/in6.h>
42#include <linux/if_packet.h>
43#include <net/flow.h>
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221#define CHECKSUM_NONE 0
222#define CHECKSUM_UNNECESSARY 1
223#define CHECKSUM_COMPLETE 2
224#define CHECKSUM_PARTIAL 3
225
226
227#define SKB_MAX_CSUM_LEVEL 3
228
229#define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES)
230#define SKB_WITH_OVERHEAD(X) \
231 ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
232#define SKB_MAX_ORDER(X, ORDER) \
233 SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
234#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0))
235#define SKB_MAX_ALLOC (SKB_MAX_ORDER(0, 2))
236
237
238#define SKB_TRUESIZE(X) ((X) + \
239 SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \
240 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
241
242struct net_device;
243struct scatterlist;
244struct pipe_inode_info;
245struct iov_iter;
246struct napi_struct;
247
248#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
249struct nf_conntrack {
250 atomic_t use;
251};
252#endif
253
254#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
255struct nf_bridge_info {
256 refcount_t use;
257 enum {
258 BRNF_PROTO_UNCHANGED,
259 BRNF_PROTO_8021Q,
260 BRNF_PROTO_PPPOE
261 } orig_proto:8;
262 u8 pkt_otherhost:1;
263 u8 in_prerouting:1;
264 u8 bridged_dnat:1;
265 __u16 frag_max_size;
266 struct net_device *physindev;
267
268
269 struct net_device *physoutdev;
270 union {
271
272 __be32 ipv4_daddr;
273 struct in6_addr ipv6_daddr;
274
275
276
277
278
279 char neigh_header[8];
280 };
281};
282#endif
283
284struct sk_buff_head {
285
286 struct sk_buff *next;
287 struct sk_buff *prev;
288
289 __u32 qlen;
290 spinlock_t lock;
291};
292
293struct sk_buff;
294
295
296
297
298
299
300
301
302#if (65536/PAGE_SIZE + 1) < 16
303#define MAX_SKB_FRAGS 16UL
304#else
305#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
306#endif
307extern int sysctl_max_skb_frags;
308
309
310
311
312#define GSO_BY_FRAGS 0xFFFF
313
314typedef struct skb_frag_struct skb_frag_t;
315
316struct skb_frag_struct {
317 struct {
318 struct page *p;
319 } page;
320#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
321 __u32 page_offset;
322 __u32 size;
323#else
324 __u16 page_offset;
325 __u16 size;
326#endif
327};
328
329static inline unsigned int skb_frag_size(const skb_frag_t *frag)
330{
331 return frag->size;
332}
333
334static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
335{
336 frag->size = size;
337}
338
339static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
340{
341 frag->size += delta;
342}
343
344static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
345{
346 frag->size -= delta;
347}
348
349static inline bool skb_frag_must_loop(struct page *p)
350{
351#if defined(CONFIG_HIGHMEM)
352 if (PageHighMem(p))
353 return true;
354#endif
355 return false;
356}
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375#define skb_frag_foreach_page(f, f_off, f_len, p, p_off, p_len, copied) \
376 for (p = skb_frag_page(f) + ((f_off) >> PAGE_SHIFT), \
377 p_off = (f_off) & (PAGE_SIZE - 1), \
378 p_len = skb_frag_must_loop(p) ? \
379 min_t(u32, f_len, PAGE_SIZE - p_off) : f_len, \
380 copied = 0; \
381 copied < f_len; \
382 copied += p_len, p++, p_off = 0, \
383 p_len = min_t(u32, f_len - copied, PAGE_SIZE)) \
384
385#define HAVE_HW_TIME_STAMP
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401struct skb_shared_hwtstamps {
402 ktime_t hwtstamp;
403};
404
405
406enum {
407
408 SKBTX_HW_TSTAMP = 1 << 0,
409
410
411 SKBTX_SW_TSTAMP = 1 << 1,
412
413
414 SKBTX_IN_PROGRESS = 1 << 2,
415
416
417 SKBTX_DEV_ZEROCOPY = 1 << 3,
418
419
420 SKBTX_WIFI_STATUS = 1 << 4,
421
422
423
424
425
426
427 SKBTX_SHARED_FRAG = 1 << 5,
428
429
430 SKBTX_SCHED_TSTAMP = 1 << 6,
431};
432
433#define SKBTX_ZEROCOPY_FRAG (SKBTX_DEV_ZEROCOPY | SKBTX_SHARED_FRAG)
434#define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \
435 SKBTX_SCHED_TSTAMP)
436#define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)
437
438
439
440
441
442
443
444
445
446struct ubuf_info {
447 void (*callback)(struct ubuf_info *, bool zerocopy_success);
448 union {
449 struct {
450 unsigned long desc;
451 void *ctx;
452 };
453 struct {
454 u32 id;
455 u16 len;
456 u16 zerocopy:1;
457 u32 bytelen;
458 };
459 };
460 refcount_t refcnt;
461
462 struct mmpin {
463 struct user_struct *user;
464 unsigned int num_pg;
465 } mmp;
466};
467
468#define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))
469
470struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size);
471struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
472 struct ubuf_info *uarg);
473
474static inline void sock_zerocopy_get(struct ubuf_info *uarg)
475{
476 refcount_inc(&uarg->refcnt);
477}
478
479void sock_zerocopy_put(struct ubuf_info *uarg);
480void sock_zerocopy_put_abort(struct ubuf_info *uarg);
481
482void sock_zerocopy_callback(struct ubuf_info *uarg, bool success);
483
484int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
485 struct msghdr *msg, int len,
486 struct ubuf_info *uarg);
487
488
489
490
491struct skb_shared_info {
492 unsigned short _unused;
493 unsigned char nr_frags;
494 __u8 tx_flags;
495 unsigned short gso_size;
496
497 unsigned short gso_segs;
498 struct sk_buff *frag_list;
499 struct skb_shared_hwtstamps hwtstamps;
500 unsigned int gso_type;
501 u32 tskey;
502 __be32 ip6_frag_id;
503
504
505
506
507 atomic_t dataref;
508
509
510
511 void * destructor_arg;
512
513
514 skb_frag_t frags[MAX_SKB_FRAGS];
515};
516
517
518
519
520
521
522
523
524
525
526
527
528#define SKB_DATAREF_SHIFT 16
529#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
530
531
532enum {
533 SKB_FCLONE_UNAVAILABLE,
534 SKB_FCLONE_ORIG,
535 SKB_FCLONE_CLONE,
536};
537
538enum {
539 SKB_GSO_TCPV4 = 1 << 0,
540
541
542 SKB_GSO_DODGY = 1 << 1,
543
544
545 SKB_GSO_TCP_ECN = 1 << 2,
546
547 SKB_GSO_TCP_FIXEDID = 1 << 3,
548
549 SKB_GSO_TCPV6 = 1 << 4,
550
551 SKB_GSO_FCOE = 1 << 5,
552
553 SKB_GSO_GRE = 1 << 6,
554
555 SKB_GSO_GRE_CSUM = 1 << 7,
556
557 SKB_GSO_IPXIP4 = 1 << 8,
558
559 SKB_GSO_IPXIP6 = 1 << 9,
560
561 SKB_GSO_UDP_TUNNEL = 1 << 10,
562
563 SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
564
565 SKB_GSO_PARTIAL = 1 << 12,
566
567 SKB_GSO_TUNNEL_REMCSUM = 1 << 13,
568
569 SKB_GSO_SCTP = 1 << 14,
570
571 SKB_GSO_ESP = 1 << 15,
572};
573
574#if BITS_PER_LONG > 32
575#define NET_SKBUFF_DATA_USES_OFFSET 1
576#endif
577
578#ifdef NET_SKBUFF_DATA_USES_OFFSET
579typedef unsigned int sk_buff_data_t;
580#else
581typedef unsigned char *sk_buff_data_t;
582#endif
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656struct sk_buff {
657 union {
658 struct {
659
660 struct sk_buff *next;
661 struct sk_buff *prev;
662
663 union {
664 ktime_t tstamp;
665 u64 skb_mstamp;
666 };
667 };
668 struct rb_node rbnode;
669 };
670 struct sock *sk;
671
672 union {
673 struct net_device *dev;
674
675
676
677
678 unsigned long dev_scratch;
679 };
680
681
682
683
684
685
686 char cb[48] __aligned(8);
687
688 unsigned long _skb_refdst;
689 void (*destructor)(struct sk_buff *skb);
690#ifdef CONFIG_XFRM
691 struct sec_path *sp;
692#endif
693#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
694 unsigned long _nfct;
695#endif
696#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
697 struct nf_bridge_info *nf_bridge;
698#endif
699 unsigned int len,
700 data_len;
701 __u16 mac_len,
702 hdr_len;
703
704
705
706
707 kmemcheck_bitfield_begin(flags1);
708 __u16 queue_mapping;
709
710
711#ifdef __BIG_ENDIAN_BITFIELD
712#define CLONED_MASK (1 << 7)
713#else
714#define CLONED_MASK 1
715#endif
716#define CLONED_OFFSET() offsetof(struct sk_buff, __cloned_offset)
717
718 __u8 __cloned_offset[0];
719 __u8 cloned:1,
720 nohdr:1,
721 fclone:2,
722 peeked:1,
723 head_frag:1,
724 xmit_more:1,
725 __unused:1;
726 kmemcheck_bitfield_end(flags1);
727
728
729
730
731
732 __u32 headers_start[0];
733
734
735
736#ifdef __BIG_ENDIAN_BITFIELD
737#define PKT_TYPE_MAX (7 << 5)
738#else
739#define PKT_TYPE_MAX 7
740#endif
741#define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset)
742
743 __u8 __pkt_type_offset[0];
744 __u8 pkt_type:3;
745 __u8 pfmemalloc:1;
746 __u8 ignore_df:1;
747
748 __u8 nf_trace:1;
749 __u8 ip_summed:2;
750 __u8 ooo_okay:1;
751 __u8 l4_hash:1;
752 __u8 sw_hash:1;
753 __u8 wifi_acked_valid:1;
754 __u8 wifi_acked:1;
755
756 __u8 no_fcs:1;
757
758 __u8 encapsulation:1;
759 __u8 encap_hdr_csum:1;
760 __u8 csum_valid:1;
761 __u8 csum_complete_sw:1;
762 __u8 csum_level:2;
763 __u8 csum_not_inet:1;
764
765 __u8 dst_pending_confirm:1;
766#ifdef CONFIG_IPV6_NDISC_NODETYPE
767 __u8 ndisc_nodetype:2;
768#endif
769 __u8 ipvs_property:1;
770 __u8 inner_protocol_type:1;
771 __u8 remcsum_offload:1;
772#ifdef CONFIG_NET_SWITCHDEV
773 __u8 offload_fwd_mark:1;
774#endif
775#ifdef CONFIG_NET_CLS_ACT
776 __u8 tc_skip_classify:1;
777 __u8 tc_at_ingress:1;
778 __u8 tc_redirected:1;
779 __u8 tc_from_ingress:1;
780#endif
781
782#ifdef CONFIG_NET_SCHED
783 __u16 tc_index;
784#endif
785
786 union {
787 __wsum csum;
788 struct {
789 __u16 csum_start;
790 __u16 csum_offset;
791 };
792 };
793 __u32 priority;
794 int skb_iif;
795 __u32 hash;
796 __be16 vlan_proto;
797 __u16 vlan_tci;
798#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
799 union {
800 unsigned int napi_id;
801 unsigned int sender_cpu;
802 };
803#endif
804#ifdef CONFIG_NETWORK_SECMARK
805 __u32 secmark;
806#endif
807
808 union {
809 __u32 mark;
810 __u32 reserved_tailroom;
811 };
812
813 union {
814 __be16 inner_protocol;
815 __u8 inner_ipproto;
816 };
817
818 __u16 inner_transport_header;
819 __u16 inner_network_header;
820 __u16 inner_mac_header;
821
822 __be16 protocol;
823 __u16 transport_header;
824 __u16 network_header;
825 __u16 mac_header;
826
827
828 __u32 headers_end[0];
829
830
831
832 sk_buff_data_t tail;
833 sk_buff_data_t end;
834 unsigned char *head,
835 *data;
836 unsigned int truesize;
837 refcount_t users;
838};
839
840#ifdef __KERNEL__
841
842
843
844#include <linux/slab.h>
845
846
847#define SKB_ALLOC_FCLONE 0x01
848#define SKB_ALLOC_RX 0x02
849#define SKB_ALLOC_NAPI 0x04
850
851
852static inline bool skb_pfmemalloc(const struct sk_buff *skb)
853{
854 return unlikely(skb->pfmemalloc);
855}
856
857
858
859
860
861#define SKB_DST_NOREF 1UL
862#define SKB_DST_PTRMASK ~(SKB_DST_NOREF)
863
864#define SKB_NFCT_PTRMASK ~(7UL)
865
866
867
868
869
870
871static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
872{
873
874
875
876 WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
877 !rcu_read_lock_held() &&
878 !rcu_read_lock_bh_held());
879 return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
880}
881
882
883
884
885
886
887
888
889
890static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
891{
892 skb->_skb_refdst = (unsigned long)dst;
893}
894
895
896
897
898
899
900
901
902
903
904
905static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
906{
907 WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
908 skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
909}
910
911
912
913
914
915static inline bool skb_dst_is_noref(const struct sk_buff *skb)
916{
917 return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
918}
919
920static inline struct rtable *skb_rtable(const struct sk_buff *skb)
921{
922 return (struct rtable *)skb_dst(skb);
923}
924
925
926
927
928
929static inline bool skb_pkt_type_ok(u32 ptype)
930{
931 return ptype <= PACKET_OTHERHOST;
932}
933
934static inline unsigned int skb_napi_id(const struct sk_buff *skb)
935{
936#ifdef CONFIG_NET_RX_BUSY_POLL
937 return skb->napi_id;
938#else
939 return 0;
940#endif
941}
942
943
944static inline bool skb_unref(struct sk_buff *skb)
945{
946 if (unlikely(!skb))
947 return false;
948 if (likely(refcount_read(&skb->users) == 1))
949 smp_rmb();
950 else if (likely(!refcount_dec_and_test(&skb->users)))
951 return false;
952
953 return true;
954}
955
956void skb_release_head_state(struct sk_buff *skb);
957void kfree_skb(struct sk_buff *skb);
958void kfree_skb_list(struct sk_buff *segs);
959void skb_tx_error(struct sk_buff *skb);
960void consume_skb(struct sk_buff *skb);
961void __consume_stateless_skb(struct sk_buff *skb);
962void __kfree_skb(struct sk_buff *skb);
963extern struct kmem_cache *skbuff_head_cache;
964
965void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
966bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
967 bool *fragstolen, int *delta_truesize);
968
969struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
970 int node);
971struct sk_buff *__build_skb(void *data, unsigned int frag_size);
972struct sk_buff *build_skb(void *data, unsigned int frag_size);
973static inline struct sk_buff *alloc_skb(unsigned int size,
974 gfp_t priority)
975{
976 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
977}
978
979struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
980 unsigned long data_len,
981 int max_page_order,
982 int *errcode,
983 gfp_t gfp_mask);
984
985
986struct sk_buff_fclones {
987 struct sk_buff skb1;
988
989 struct sk_buff skb2;
990
991 refcount_t fclone_ref;
992};
993
994
995
996
997
998
999
1000
1001
1002
1003static inline bool skb_fclone_busy(const struct sock *sk,
1004 const struct sk_buff *skb)
1005{
1006 const struct sk_buff_fclones *fclones;
1007
1008 fclones = container_of(skb, struct sk_buff_fclones, skb1);
1009
1010 return skb->fclone == SKB_FCLONE_ORIG &&
1011 refcount_read(&fclones->fclone_ref) > 1 &&
1012 fclones->skb2.sk == sk;
1013}
1014
1015static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
1016 gfp_t priority)
1017{
1018 return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
1019}
1020
1021struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
1022int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
1023struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
1024struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
1025struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
1026 gfp_t gfp_mask, bool fclone);
1027static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
1028 gfp_t gfp_mask)
1029{
1030 return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
1031}
1032
1033int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
1034struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
1035 unsigned int headroom);
1036struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
1037 int newtailroom, gfp_t priority);
1038int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
1039 int offset, int len);
1040int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
1041 int offset, int len);
1042int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
1043int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056static inline int skb_pad(struct sk_buff *skb, int pad)
1057{
1058 return __skb_pad(skb, pad, true);
1059}
1060#define dev_kfree_skb(a) consume_skb(a)
1061
1062int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
1063 int getfrag(void *from, char *to, int offset,
1064 int len, int odd, struct sk_buff *skb),
1065 void *from, int length);
1066
1067int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
1068 int offset, size_t size);
1069
1070struct skb_seq_state {
1071 __u32 lower_offset;
1072 __u32 upper_offset;
1073 __u32 frag_idx;
1074 __u32 stepped_offset;
1075 struct sk_buff *root_skb;
1076 struct sk_buff *cur_skb;
1077 __u8 *frag_data;
1078};
1079
1080void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
1081 unsigned int to, struct skb_seq_state *st);
1082unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
1083 struct skb_seq_state *st);
1084void skb_abort_seq_read(struct skb_seq_state *st);
1085
1086unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
1087 unsigned int to, struct ts_config *config);
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115enum pkt_hash_types {
1116 PKT_HASH_TYPE_NONE,
1117 PKT_HASH_TYPE_L2,
1118 PKT_HASH_TYPE_L3,
1119 PKT_HASH_TYPE_L4,
1120};
1121
1122static inline void skb_clear_hash(struct sk_buff *skb)
1123{
1124 skb->hash = 0;
1125 skb->sw_hash = 0;
1126 skb->l4_hash = 0;
1127}
1128
1129static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
1130{
1131 if (!skb->l4_hash)
1132 skb_clear_hash(skb);
1133}
1134
1135static inline void
1136__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
1137{
1138 skb->l4_hash = is_l4;
1139 skb->sw_hash = is_sw;
1140 skb->hash = hash;
1141}
1142
1143static inline void
1144skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
1145{
1146
1147 __skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
1148}
1149
1150static inline void
1151__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
1152{
1153 __skb_set_hash(skb, hash, true, is_l4);
1154}
1155
1156void __skb_get_hash(struct sk_buff *skb);
1157u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
1158u32 skb_get_poff(const struct sk_buff *skb);
1159u32 __skb_get_poff(const struct sk_buff *skb, void *data,
1160 const struct flow_keys *keys, int hlen);
1161__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
1162 void *data, int hlen_proto);
1163
1164static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
1165 int thoff, u8 ip_proto)
1166{
1167 return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
1168}
1169
1170void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
1171 const struct flow_dissector_key *key,
1172 unsigned int key_count);
1173
1174bool __skb_flow_dissect(const struct sk_buff *skb,
1175 struct flow_dissector *flow_dissector,
1176 void *target_container,
1177 void *data, __be16 proto, int nhoff, int hlen,
1178 unsigned int flags);
1179
1180static inline bool skb_flow_dissect(const struct sk_buff *skb,
1181 struct flow_dissector *flow_dissector,
1182 void *target_container, unsigned int flags)
1183{
1184 return __skb_flow_dissect(skb, flow_dissector, target_container,
1185 NULL, 0, 0, 0, flags);
1186}
1187
1188static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
1189 struct flow_keys *flow,
1190 unsigned int flags)
1191{
1192 memset(flow, 0, sizeof(*flow));
1193 return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
1194 NULL, 0, 0, 0, flags);
1195}
1196
1197static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
1198 void *data, __be16 proto,
1199 int nhoff, int hlen,
1200 unsigned int flags)
1201{
1202 memset(flow, 0, sizeof(*flow));
1203 return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
1204 data, proto, nhoff, hlen, flags);
1205}
1206
1207static inline __u32 skb_get_hash(struct sk_buff *skb)
1208{
1209 if (!skb->l4_hash && !skb->sw_hash)
1210 __skb_get_hash(skb);
1211
1212 return skb->hash;
1213}
1214
1215static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
1216{
1217 if (!skb->l4_hash && !skb->sw_hash) {
1218 struct flow_keys keys;
1219 __u32 hash = __get_hash_from_flowi6(fl6, &keys);
1220
1221 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
1222 }
1223
1224 return skb->hash;
1225}
1226
1227__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
1228
1229static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
1230{
1231 return skb->hash;
1232}
1233
1234static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
1235{
1236 to->hash = from->hash;
1237 to->sw_hash = from->sw_hash;
1238 to->l4_hash = from->l4_hash;
1239};
1240
1241#ifdef NET_SKBUFF_DATA_USES_OFFSET
1242static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
1243{
1244 return skb->head + skb->end;
1245}
1246
1247static inline unsigned int skb_end_offset(const struct sk_buff *skb)
1248{
1249 return skb->end;
1250}
1251#else
1252static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
1253{
1254 return skb->end;
1255}
1256
1257static inline unsigned int skb_end_offset(const struct sk_buff *skb)
1258{
1259 return skb->end - skb->head;
1260}
1261#endif
1262
1263
1264#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
1265
1266static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
1267{
1268 return &skb_shinfo(skb)->hwtstamps;
1269}
1270
1271static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
1272{
1273 bool is_zcopy = skb && skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY;
1274
1275 return is_zcopy ? skb_uarg(skb) : NULL;
1276}
1277
1278static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg)
1279{
1280 if (skb && uarg && !skb_zcopy(skb)) {
1281 sock_zerocopy_get(uarg);
1282 skb_shinfo(skb)->destructor_arg = uarg;
1283 skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
1284 }
1285}
1286
1287
1288static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
1289{
1290 struct ubuf_info *uarg = skb_zcopy(skb);
1291
1292 if (uarg) {
1293 if (uarg->callback == sock_zerocopy_callback) {
1294 uarg->zerocopy = uarg->zerocopy && zerocopy;
1295 sock_zerocopy_put(uarg);
1296 } else {
1297 uarg->callback(uarg, zerocopy);
1298 }
1299
1300 skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
1301 }
1302}
1303
1304
1305static inline void skb_zcopy_abort(struct sk_buff *skb)
1306{
1307 struct ubuf_info *uarg = skb_zcopy(skb);
1308
1309 if (uarg) {
1310 sock_zerocopy_put_abort(uarg);
1311 skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
1312 }
1313}
1314
1315
1316
1317
1318
1319
1320
1321static inline int skb_queue_empty(const struct sk_buff_head *list)
1322{
1323 return list->next == (const struct sk_buff *) list;
1324}
1325
1326
1327
1328
1329
1330
1331
1332
1333static inline bool skb_queue_is_last(const struct sk_buff_head *list,
1334 const struct sk_buff *skb)
1335{
1336 return skb->next == (const struct sk_buff *) list;
1337}
1338
1339
1340
1341
1342
1343
1344
1345
1346static inline bool skb_queue_is_first(const struct sk_buff_head *list,
1347 const struct sk_buff *skb)
1348{
1349 return skb->prev == (const struct sk_buff *) list;
1350}
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
1361 const struct sk_buff *skb)
1362{
1363
1364
1365
1366 BUG_ON(skb_queue_is_last(list, skb));
1367 return skb->next;
1368}
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
1379 const struct sk_buff *skb)
1380{
1381
1382
1383
1384 BUG_ON(skb_queue_is_first(list, skb));
1385 return skb->prev;
1386}
1387
1388
1389
1390
1391
1392
1393
1394
1395static inline struct sk_buff *skb_get(struct sk_buff *skb)
1396{
1397 refcount_inc(&skb->users);
1398 return skb;
1399}
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414static inline int skb_cloned(const struct sk_buff *skb)
1415{
1416 return skb->cloned &&
1417 (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
1418}
1419
1420static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
1421{
1422 might_sleep_if(gfpflags_allow_blocking(pri));
1423
1424 if (skb_cloned(skb))
1425 return pskb_expand_head(skb, 0, 0, pri);
1426
1427 return 0;
1428}
1429
1430
1431
1432
1433
1434
1435
1436
1437static inline int skb_header_cloned(const struct sk_buff *skb)
1438{
1439 int dataref;
1440
1441 if (!skb->cloned)
1442 return 0;
1443
1444 dataref = atomic_read(&skb_shinfo(skb)->dataref);
1445 dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
1446 return dataref != 1;
1447}
1448
1449static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
1450{
1451 might_sleep_if(gfpflags_allow_blocking(pri));
1452
1453 if (skb_header_cloned(skb))
1454 return pskb_expand_head(skb, 0, 0, pri);
1455
1456 return 0;
1457}
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468static inline void skb_header_release(struct sk_buff *skb)
1469{
1470 BUG_ON(skb->nohdr);
1471 skb->nohdr = 1;
1472 atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
1473}
1474
1475
1476
1477
1478
1479
1480
1481
1482static inline void __skb_header_release(struct sk_buff *skb)
1483{
1484 skb->nohdr = 1;
1485 atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
1486}
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496static inline int skb_shared(const struct sk_buff *skb)
1497{
1498 return refcount_read(&skb->users) != 1;
1499}
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
1515{
1516 might_sleep_if(gfpflags_allow_blocking(pri));
1517 if (skb_shared(skb)) {
1518 struct sk_buff *nskb = skb_clone(skb, pri);
1519
1520 if (likely(nskb))
1521 consume_skb(skb);
1522 else
1523 kfree_skb(skb);
1524 skb = nskb;
1525 }
1526 return skb;
1527}
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
1550 gfp_t pri)
1551{
1552 might_sleep_if(gfpflags_allow_blocking(pri));
1553 if (skb_cloned(skb)) {
1554 struct sk_buff *nskb = skb_copy(skb, pri);
1555
1556
1557 if (likely(nskb))
1558 consume_skb(skb);
1559 else
1560 kfree_skb(skb);
1561 skb = nskb;
1562 }
1563 return skb;
1564}
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
1580{
1581 struct sk_buff *skb = list_->next;
1582
1583 if (skb == (struct sk_buff *)list_)
1584 skb = NULL;
1585 return skb;
1586}
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
1598 const struct sk_buff_head *list_)
1599{
1600 struct sk_buff *next = skb->next;
1601
1602 if (next == (struct sk_buff *)list_)
1603 next = NULL;
1604 return next;
1605}
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
1621{
1622 struct sk_buff *skb = list_->prev;
1623
1624 if (skb == (struct sk_buff *)list_)
1625 skb = NULL;
1626 return skb;
1627
1628}
1629
1630
1631
1632
1633
1634
1635
1636static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
1637{
1638 return list_->qlen;
1639}
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651static inline void __skb_queue_head_init(struct sk_buff_head *list)
1652{
1653 list->prev = list->next = (struct sk_buff *)list;
1654 list->qlen = 0;
1655}
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665static inline void skb_queue_head_init(struct sk_buff_head *list)
1666{
1667 spin_lock_init(&list->lock);
1668 __skb_queue_head_init(list);
1669}
1670
1671static inline void skb_queue_head_init_class(struct sk_buff_head *list,
1672 struct lock_class_key *class)
1673{
1674 skb_queue_head_init(list);
1675 lockdep_set_class(&list->lock, class);
1676}
1677
1678
1679
1680
1681
1682
1683
1684void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
1685 struct sk_buff_head *list);
1686static inline void __skb_insert(struct sk_buff *newsk,
1687 struct sk_buff *prev, struct sk_buff *next,
1688 struct sk_buff_head *list)
1689{
1690 newsk->next = next;
1691 newsk->prev = prev;
1692 next->prev = prev->next = newsk;
1693 list->qlen++;
1694}
1695
1696static inline void __skb_queue_splice(const struct sk_buff_head *list,
1697 struct sk_buff *prev,
1698 struct sk_buff *next)
1699{
1700 struct sk_buff *first = list->next;
1701 struct sk_buff *last = list->prev;
1702
1703 first->prev = prev;
1704 prev->next = first;
1705
1706 last->next = next;
1707 next->prev = last;
1708}
1709
1710
1711
1712
1713
1714
1715static inline void skb_queue_splice(const struct sk_buff_head *list,
1716 struct sk_buff_head *head)
1717{
1718 if (!skb_queue_empty(list)) {
1719 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
1720 head->qlen += list->qlen;
1721 }
1722}
1723
1724
1725
1726
1727
1728
1729
1730
1731static inline void skb_queue_splice_init(struct sk_buff_head *list,
1732 struct sk_buff_head *head)
1733{
1734 if (!skb_queue_empty(list)) {
1735 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
1736 head->qlen += list->qlen;
1737 __skb_queue_head_init(list);
1738 }
1739}
1740
1741
1742
1743
1744
1745
1746static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
1747 struct sk_buff_head *head)
1748{
1749 if (!skb_queue_empty(list)) {
1750 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
1751 head->qlen += list->qlen;
1752 }
1753}
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
1764 struct sk_buff_head *head)
1765{
1766 if (!skb_queue_empty(list)) {
1767 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
1768 head->qlen += list->qlen;
1769 __skb_queue_head_init(list);
1770 }
1771}
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784static inline void __skb_queue_after(struct sk_buff_head *list,
1785 struct sk_buff *prev,
1786 struct sk_buff *newsk)
1787{
1788 __skb_insert(newsk, prev, prev->next, list);
1789}
1790
1791void skb_append(struct sk_buff *old, struct sk_buff *newsk,
1792 struct sk_buff_head *list);
1793
1794static inline void __skb_queue_before(struct sk_buff_head *list,
1795 struct sk_buff *next,
1796 struct sk_buff *newsk)
1797{
1798 __skb_insert(newsk, next->prev, next, list);
1799}
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
1812static inline void __skb_queue_head(struct sk_buff_head *list,
1813 struct sk_buff *newsk)
1814{
1815 __skb_queue_after(list, (struct sk_buff *)list, newsk);
1816}
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
1829static inline void __skb_queue_tail(struct sk_buff_head *list,
1830 struct sk_buff *newsk)
1831{
1832 __skb_queue_before(list, (struct sk_buff *)list, newsk);
1833}
1834
1835
1836
1837
1838
1839void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
1840static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
1841{
1842 struct sk_buff *next, *prev;
1843
1844 list->qlen--;
1845 next = skb->next;
1846 prev = skb->prev;
1847 skb->next = skb->prev = NULL;
1848 next->prev = prev;
1849 prev->next = next;
1850}
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860struct sk_buff *skb_dequeue(struct sk_buff_head *list);
1861static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
1862{
1863 struct sk_buff *skb = skb_peek(list);
1864 if (skb)
1865 __skb_unlink(skb, list);
1866 return skb;
1867}
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
1878static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
1879{
1880 struct sk_buff *skb = skb_peek_tail(list);
1881 if (skb)
1882 __skb_unlink(skb, list);
1883 return skb;
1884}
1885
1886
1887static inline bool skb_is_nonlinear(const struct sk_buff *skb)
1888{
1889 return skb->data_len;
1890}
1891
1892static inline unsigned int skb_headlen(const struct sk_buff *skb)
1893{
1894 return skb->len - skb->data_len;
1895}
1896
1897static inline unsigned int __skb_pagelen(const struct sk_buff *skb)
1898{
1899 unsigned int i, len = 0;
1900
1901 for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
1902 len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
1903 return len;
1904}
1905
1906static inline unsigned int skb_pagelen(const struct sk_buff *skb)
1907{
1908 return skb_headlen(skb) + __skb_pagelen(skb);
1909}
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
1925 struct page *page, int off, int size)
1926{
1927 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1928
1929
1930
1931
1932
1933
1934 frag->page.p = page;
1935 frag->page_offset = off;
1936 skb_frag_size_set(frag, size);
1937
1938 page = compound_head(page);
1939 if (page_is_pfmemalloc(page))
1940 skb->pfmemalloc = true;
1941}
1942
1943
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
1958 struct page *page, int off, int size)
1959{
1960 __skb_fill_page_desc(skb, i, page, off, size);
1961 skb_shinfo(skb)->nr_frags = i + 1;
1962}
1963
1964void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
1965 int size, unsigned int truesize);
1966
1967void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
1968 unsigned int truesize);
1969
1970#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags)
1971#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frag_list(skb))
1972#define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
1973
1974#ifdef NET_SKBUFF_DATA_USES_OFFSET
1975static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
1976{
1977 return skb->head + skb->tail;
1978}
1979
1980static inline void skb_reset_tail_pointer(struct sk_buff *skb)
1981{
1982 skb->tail = skb->data - skb->head;
1983}
1984
1985static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
1986{
1987 skb_reset_tail_pointer(skb);
1988 skb->tail += offset;
1989}
1990
1991#else
1992static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
1993{
1994 return skb->tail;
1995}
1996
1997static inline void skb_reset_tail_pointer(struct sk_buff *skb)
1998{
1999 skb->tail = skb->data;
2000}
2001
2002static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
2003{
2004 skb->tail = skb->data + offset;
2005}
2006
2007#endif
2008
2009
2010
2011
2012void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
2013void *skb_put(struct sk_buff *skb, unsigned int len);
2014static inline void *__skb_put(struct sk_buff *skb, unsigned int len)
2015{
2016 void *tmp = skb_tail_pointer(skb);
2017 SKB_LINEAR_ASSERT(skb);
2018 skb->tail += len;
2019 skb->len += len;
2020 return tmp;
2021}
2022
2023static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len)
2024{
2025 void *tmp = __skb_put(skb, len);
2026
2027 memset(tmp, 0, len);
2028 return tmp;
2029}
2030
2031static inline void *__skb_put_data(struct sk_buff *skb, const void *data,
2032 unsigned int len)
2033{
2034 void *tmp = __skb_put(skb, len);
2035
2036 memcpy(tmp, data, len);
2037 return tmp;
2038}
2039
2040static inline void __skb_put_u8(struct sk_buff *skb, u8 val)
2041{
2042 *(u8 *)__skb_put(skb, 1) = val;
2043}
2044
2045static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
2046{
2047 void *tmp = skb_put(skb, len);
2048
2049 memset(tmp, 0, len);
2050
2051 return tmp;
2052}
2053
2054static inline void *skb_put_data(struct sk_buff *skb, const void *data,
2055 unsigned int len)
2056{
2057 void *tmp = skb_put(skb, len);
2058
2059 memcpy(tmp, data, len);
2060
2061 return tmp;
2062}
2063
2064static inline void skb_put_u8(struct sk_buff *skb, u8 val)
2065{
2066 *(u8 *)skb_put(skb, 1) = val;
2067}
2068
2069void *skb_push(struct sk_buff *skb, unsigned int len);
2070static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
2071{
2072 skb->data -= len;
2073 skb->len += len;
2074 return skb->data;
2075}
2076
2077void *skb_pull(struct sk_buff *skb, unsigned int len);
2078static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
2079{
2080 skb->len -= len;
2081 BUG_ON(skb->len < skb->data_len);
2082 return skb->data += len;
2083}
2084
2085static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len)
2086{
2087 return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
2088}
2089
2090void *__pskb_pull_tail(struct sk_buff *skb, int delta);
2091
2092static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len)
2093{
2094 if (len > skb_headlen(skb) &&
2095 !__pskb_pull_tail(skb, len - skb_headlen(skb)))
2096 return NULL;
2097 skb->len -= len;
2098 return skb->data += len;
2099}
2100
2101static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
2102{
2103 return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
2104}
2105
2106static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
2107{
2108 if (likely(len <= skb_headlen(skb)))
2109 return 1;
2110 if (unlikely(len > skb->len))
2111 return 0;
2112 return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
2113}
2114
2115void skb_condense(struct sk_buff *skb);
2116
2117
2118
2119
2120
2121
2122
2123static inline unsigned int skb_headroom(const struct sk_buff *skb)
2124{
2125 return skb->data - skb->head;
2126}
2127
2128
2129
2130
2131
2132
2133
2134static inline int skb_tailroom(const struct sk_buff *skb)
2135{
2136 return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
2137}
2138
2139
2140
2141
2142
2143
2144
2145
2146static inline int skb_availroom(const struct sk_buff *skb)
2147{
2148 if (skb_is_nonlinear(skb))
2149 return 0;
2150
2151 return skb->end - skb->tail - skb->reserved_tailroom;
2152}
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162static inline void skb_reserve(struct sk_buff *skb, int len)
2163{
2164 skb->data += len;
2165 skb->tail += len;
2166}
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
2181 unsigned int needed_tailroom)
2182{
2183 SKB_LINEAR_ASSERT(skb);
2184 if (mtu < skb_tailroom(skb) - needed_tailroom)
2185
2186 skb->reserved_tailroom = skb_tailroom(skb) - mtu;
2187 else
2188
2189 skb->reserved_tailroom = needed_tailroom;
2190}
2191
2192#define ENCAP_TYPE_ETHER 0
2193#define ENCAP_TYPE_IPPROTO 1
2194
2195static inline void skb_set_inner_protocol(struct sk_buff *skb,
2196 __be16 protocol)
2197{
2198 skb->inner_protocol = protocol;
2199 skb->inner_protocol_type = ENCAP_TYPE_ETHER;
2200}
2201
2202static inline void skb_set_inner_ipproto(struct sk_buff *skb,
2203 __u8 ipproto)
2204{
2205 skb->inner_ipproto = ipproto;
2206 skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
2207}
2208
2209static inline void skb_reset_inner_headers(struct sk_buff *skb)
2210{
2211 skb->inner_mac_header = skb->mac_header;
2212 skb->inner_network_header = skb->network_header;
2213 skb->inner_transport_header = skb->transport_header;
2214}
2215
2216static inline void skb_reset_mac_len(struct sk_buff *skb)
2217{
2218 skb->mac_len = skb->network_header - skb->mac_header;
2219}
2220
2221static inline unsigned char *skb_inner_transport_header(const struct sk_buff
2222 *skb)
2223{
2224 return skb->head + skb->inner_transport_header;
2225}
2226
2227static inline int skb_inner_transport_offset(const struct sk_buff *skb)
2228{
2229 return skb_inner_transport_header(skb) - skb->data;
2230}
2231
2232static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
2233{
2234 skb->inner_transport_header = skb->data - skb->head;
2235}
2236
2237static inline void skb_set_inner_transport_header(struct sk_buff *skb,
2238 const int offset)
2239{
2240 skb_reset_inner_transport_header(skb);
2241 skb->inner_transport_header += offset;
2242}
2243
2244static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
2245{
2246 return skb->head + skb->inner_network_header;
2247}
2248
2249static inline void skb_reset_inner_network_header(struct sk_buff *skb)
2250{
2251 skb->inner_network_header = skb->data - skb->head;
2252}
2253
2254static inline void skb_set_inner_network_header(struct sk_buff *skb,
2255 const int offset)
2256{
2257 skb_reset_inner_network_header(skb);
2258 skb->inner_network_header += offset;
2259}
2260
2261static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
2262{
2263 return skb->head + skb->inner_mac_header;
2264}
2265
2266static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
2267{
2268 skb->inner_mac_header = skb->data - skb->head;
2269}
2270
2271static inline void skb_set_inner_mac_header(struct sk_buff *skb,
2272 const int offset)
2273{
2274 skb_reset_inner_mac_header(skb);
2275 skb->inner_mac_header += offset;
2276}
2277static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
2278{
2279 return skb->transport_header != (typeof(skb->transport_header))~0U;
2280}
2281
2282static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
2283{
2284 return skb->head + skb->transport_header;
2285}
2286
2287static inline void skb_reset_transport_header(struct sk_buff *skb)
2288{
2289 skb->transport_header = skb->data - skb->head;
2290}
2291
2292static inline void skb_set_transport_header(struct sk_buff *skb,
2293 const int offset)
2294{
2295 skb_reset_transport_header(skb);
2296 skb->transport_header += offset;
2297}
2298
2299static inline unsigned char *skb_network_header(const struct sk_buff *skb)
2300{
2301 return skb->head + skb->network_header;
2302}
2303
2304static inline void skb_reset_network_header(struct sk_buff *skb)
2305{
2306 skb->network_header = skb->data - skb->head;
2307}
2308
2309static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
2310{
2311 skb_reset_network_header(skb);
2312 skb->network_header += offset;
2313}
2314
2315static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
2316{
2317 return skb->head + skb->mac_header;
2318}
2319
2320static inline int skb_mac_offset(const struct sk_buff *skb)
2321{
2322 return skb_mac_header(skb) - skb->data;
2323}
2324
2325static inline u32 skb_mac_header_len(const struct sk_buff *skb)
2326{
2327 return skb->network_header - skb->mac_header;
2328}
2329
2330static inline int skb_mac_header_was_set(const struct sk_buff *skb)
2331{
2332 return skb->mac_header != (typeof(skb->mac_header))~0U;
2333}
2334
2335static inline void skb_reset_mac_header(struct sk_buff *skb)
2336{
2337 skb->mac_header = skb->data - skb->head;
2338}
2339
2340static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
2341{
2342 skb_reset_mac_header(skb);
2343 skb->mac_header += offset;
2344}
2345
2346static inline void skb_pop_mac_header(struct sk_buff *skb)
2347{
2348 skb->mac_header = skb->network_header;
2349}
2350
2351static inline void skb_probe_transport_header(struct sk_buff *skb,
2352 const int offset_hint)
2353{
2354 struct flow_keys keys;
2355
2356 if (skb_transport_header_was_set(skb))
2357 return;
2358 else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
2359 skb_set_transport_header(skb, keys.control.thoff);
2360 else
2361 skb_set_transport_header(skb, offset_hint);
2362}
2363
2364static inline void skb_mac_header_rebuild(struct sk_buff *skb)
2365{
2366 if (skb_mac_header_was_set(skb)) {
2367 const unsigned char *old_mac = skb_mac_header(skb);
2368
2369 skb_set_mac_header(skb, -skb->mac_len);
2370 memmove(skb_mac_header(skb), old_mac, skb->mac_len);
2371 }
2372}
2373
2374static inline int skb_checksum_start_offset(const struct sk_buff *skb)
2375{
2376 return skb->csum_start - skb_headroom(skb);
2377}
2378
2379static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
2380{
2381 return skb->head + skb->csum_start;
2382}
2383
2384static inline int skb_transport_offset(const struct sk_buff *skb)
2385{
2386 return skb_transport_header(skb) - skb->data;
2387}
2388
2389static inline u32 skb_network_header_len(const struct sk_buff *skb)
2390{
2391 return skb->transport_header - skb->network_header;
2392}
2393
2394static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
2395{
2396 return skb->inner_transport_header - skb->inner_network_header;
2397}
2398
2399static inline int skb_network_offset(const struct sk_buff *skb)
2400{
2401 return skb_network_header(skb) - skb->data;
2402}
2403
2404static inline int skb_inner_network_offset(const struct sk_buff *skb)
2405{
2406 return skb_inner_network_header(skb) - skb->data;
2407}
2408
2409static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
2410{
2411 return pskb_may_pull(skb, skb_network_offset(skb) + len);
2412}
2413
2414
2415
2416
2417
2418
2419
2420
2421
2422
2423
2424
2425
2426
2427
2428
2429
2430
2431
2432
2433
2434#ifndef NET_IP_ALIGN
2435#define NET_IP_ALIGN 2
2436#endif
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458#ifndef NET_SKB_PAD
2459#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
2460#endif
2461
2462int ___pskb_trim(struct sk_buff *skb, unsigned int len);
2463
2464static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
2465{
2466 if (unlikely(skb_is_nonlinear(skb))) {
2467 WARN_ON(1);
2468 return;
2469 }
2470 skb->len = len;
2471 skb_set_tail_pointer(skb, len);
2472}
2473
2474static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
2475{
2476 __skb_set_length(skb, len);
2477}
2478
2479void skb_trim(struct sk_buff *skb, unsigned int len);
2480
2481static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
2482{
2483 if (skb->data_len)
2484 return ___pskb_trim(skb, len);
2485 __skb_trim(skb, len);
2486 return 0;
2487}
2488
2489static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
2490{
2491 return (len < skb->len) ? __pskb_trim(skb, len) : 0;
2492}
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
2504{
2505 int err = pskb_trim(skb, len);
2506 BUG_ON(err);
2507}
2508
2509static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
2510{
2511 unsigned int diff = len - skb->len;
2512
2513 if (skb_tailroom(skb) < diff) {
2514 int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
2515 GFP_ATOMIC);
2516 if (ret)
2517 return ret;
2518 }
2519 __skb_set_length(skb, len);
2520 return 0;
2521}
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531static inline void skb_orphan(struct sk_buff *skb)
2532{
2533 if (skb->destructor) {
2534 skb->destructor(skb);
2535 skb->destructor = NULL;
2536 skb->sk = NULL;
2537 } else {
2538 BUG_ON(skb->sk);
2539 }
2540}
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
2552{
2553 if (likely(!skb_zcopy(skb)))
2554 return 0;
2555 if (skb_uarg(skb)->callback == sock_zerocopy_callback)
2556 return 0;
2557 return skb_copy_ubufs(skb, gfp_mask);
2558}
2559
2560
2561static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
2562{
2563 if (likely(!skb_zcopy(skb)))
2564 return 0;
2565 return skb_copy_ubufs(skb, gfp_mask);
2566}
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576void skb_queue_purge(struct sk_buff_head *list);
2577static inline void __skb_queue_purge(struct sk_buff_head *list)
2578{
2579 struct sk_buff *skb;
2580 while ((skb = __skb_dequeue(list)) != NULL)
2581 kfree_skb(skb);
2582}
2583
2584void skb_rbtree_purge(struct rb_root *root);
2585
2586void *netdev_alloc_frag(unsigned int fragsz);
2587
2588struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
2589 gfp_t gfp_mask);
2590
2591
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
2605 unsigned int length)
2606{
2607 return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
2608}
2609
2610
2611static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
2612 gfp_t gfp_mask)
2613{
2614 return __netdev_alloc_skb(NULL, length, gfp_mask);
2615}
2616
2617
2618static inline struct sk_buff *dev_alloc_skb(unsigned int length)
2619{
2620 return netdev_alloc_skb(NULL, length);
2621}
2622
2623
2624static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
2625 unsigned int length, gfp_t gfp)
2626{
2627 struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
2628
2629 if (NET_IP_ALIGN && skb)
2630 skb_reserve(skb, NET_IP_ALIGN);
2631 return skb;
2632}
2633
2634static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
2635 unsigned int length)
2636{
2637 return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
2638}
2639
2640static inline void skb_free_frag(void *addr)
2641{
2642 page_frag_free(addr);
2643}
2644
2645void *napi_alloc_frag(unsigned int fragsz);
2646struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
2647 unsigned int length, gfp_t gfp_mask);
2648static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
2649 unsigned int length)
2650{
2651 return __napi_alloc_skb(napi, length, GFP_ATOMIC);
2652}
2653void napi_consume_skb(struct sk_buff *skb, int budget);
2654
2655void __kfree_skb_flush(void);
2656void __kfree_skb_defer(struct sk_buff *skb);
2657
2658
2659
2660
2661
2662
2663
2664
2665
2666
2667static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
2668 unsigned int order)
2669{
2670
2671
2672
2673
2674
2675
2676
2677
2678 gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC;
2679
2680 return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
2681}
2682
2683static inline struct page *dev_alloc_pages(unsigned int order)
2684{
2685 return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
2686}
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
2697{
2698 return __dev_alloc_pages(gfp_mask, 0);
2699}
2700
2701static inline struct page *dev_alloc_page(void)
2702{
2703 return dev_alloc_pages(0);
2704}
2705
2706
2707
2708
2709
2710
2711static inline void skb_propagate_pfmemalloc(struct page *page,
2712 struct sk_buff *skb)
2713{
2714 if (page_is_pfmemalloc(page))
2715 skb->pfmemalloc = true;
2716}
2717
2718
2719
2720
2721
2722
2723
2724static inline struct page *skb_frag_page(const skb_frag_t *frag)
2725{
2726 return frag->page.p;
2727}
2728
2729
2730
2731
2732
2733
2734
2735static inline void __skb_frag_ref(skb_frag_t *frag)
2736{
2737 get_page(skb_frag_page(frag));
2738}
2739
2740
2741
2742
2743
2744
2745
2746
2747static inline void skb_frag_ref(struct sk_buff *skb, int f)
2748{
2749 __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
2750}
2751
2752
2753
2754
2755
2756
2757
2758static inline void __skb_frag_unref(skb_frag_t *frag)
2759{
2760 put_page(skb_frag_page(frag));
2761}
2762
2763
2764
2765
2766
2767
2768
2769
2770static inline void skb_frag_unref(struct sk_buff *skb, int f)
2771{
2772 __skb_frag_unref(&skb_shinfo(skb)->frags[f]);
2773}
2774
2775
2776
2777
2778
2779
2780
2781
2782static inline void *skb_frag_address(const skb_frag_t *frag)
2783{
2784 return page_address(skb_frag_page(frag)) + frag->page_offset;
2785}
2786
2787
2788
2789
2790
2791
2792
2793
2794static inline void *skb_frag_address_safe(const skb_frag_t *frag)
2795{
2796 void *ptr = page_address(skb_frag_page(frag));
2797 if (unlikely(!ptr))
2798 return NULL;
2799
2800 return ptr + frag->page_offset;
2801}
2802
2803
2804
2805
2806
2807
2808
2809
2810static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
2811{
2812 frag->page.p = page;
2813}
2814
2815
2816
2817
2818
2819
2820
2821
2822
2823static inline void skb_frag_set_page(struct sk_buff *skb, int f,
2824 struct page *page)
2825{
2826 __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
2827}
2828
2829bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
2830
2831
2832
2833
2834
2835
2836
2837
2838
2839
2840
2841
2842static inline dma_addr_t skb_frag_dma_map(struct device *dev,
2843 const skb_frag_t *frag,
2844 size_t offset, size_t size,
2845 enum dma_data_direction dir)
2846{
2847 return dma_map_page(dev, skb_frag_page(frag),
2848 frag->page_offset + offset, size, dir);
2849}
2850
2851static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
2852 gfp_t gfp_mask)
2853{
2854 return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
2855}
2856
2857
2858static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
2859 gfp_t gfp_mask)
2860{
2861 return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
2862}
2863
2864
2865
2866
2867
2868
2869
2870
2871
2872
2873static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
2874{
2875 return !skb_header_cloned(skb) &&
2876 skb_headroom(skb) + len <= skb->hdr_len;
2877}
2878
2879static inline int skb_try_make_writable(struct sk_buff *skb,
2880 unsigned int write_len)
2881{
2882 return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
2883 pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2884}
2885
2886static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
2887 int cloned)
2888{
2889 int delta = 0;
2890
2891 if (headroom > skb_headroom(skb))
2892 delta = headroom - skb_headroom(skb);
2893
2894 if (delta || cloned)
2895 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
2896 GFP_ATOMIC);
2897 return 0;
2898}
2899
2900
2901
2902
2903
2904
2905
2906
2907
2908
2909
2910
2911
2912static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
2913{
2914 return __skb_cow(skb, headroom, skb_cloned(skb));
2915}
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
2928{
2929 return __skb_cow(skb, headroom, skb_header_cloned(skb));
2930}
2931
2932
2933
2934
2935
2936
2937
2938
2939
2940
2941
2942static inline int skb_padto(struct sk_buff *skb, unsigned int len)
2943{
2944 unsigned int size = skb->len;
2945 if (likely(size >= len))
2946 return 0;
2947 return skb_pad(skb, len - size);
2948}
2949
2950
2951
2952
2953
2954
2955
2956
2957
2958
2959
2960
2961static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
2962 bool free_on_error)
2963{
2964 unsigned int size = skb->len;
2965
2966 if (unlikely(size < len)) {
2967 len -= size;
2968 if (__skb_pad(skb, len, free_on_error))
2969 return -ENOMEM;
2970 __skb_put(skb, len);
2971 }
2972 return 0;
2973}
2974
2975
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
2986{
2987 return __skb_put_padto(skb, len, true);
2988}
2989
2990static inline int skb_add_data(struct sk_buff *skb,
2991 struct iov_iter *from, int copy)
2992{
2993 const int off = skb->len;
2994
2995 if (skb->ip_summed == CHECKSUM_NONE) {
2996 __wsum csum = 0;
2997 if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
2998 &csum, from)) {
2999 skb->csum = csum_block_add(skb->csum, csum, off);
3000 return 0;
3001 }
3002 } else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
3003 return 0;
3004
3005 __skb_trim(skb, off);
3006 return -EFAULT;
3007}
3008
3009static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
3010 const struct page *page, int off)
3011{
3012 if (skb_zcopy(skb))
3013 return false;
3014 if (i) {
3015 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
3016
3017 return page == skb_frag_page(frag) &&
3018 off == frag->page_offset + skb_frag_size(frag);
3019 }
3020 return false;
3021}
3022
3023static inline int __skb_linearize(struct sk_buff *skb)
3024{
3025 return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
3026}
3027
3028
3029
3030
3031
3032
3033
3034
3035static inline int skb_linearize(struct sk_buff *skb)
3036{
3037 return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
3038}
3039
3040
3041
3042
3043
3044
3045
3046
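/**
 * skb_has_shared_frag - can any frag be overwritten
 * @skb: buffer to test
 *
 * Return true if the skb has at least one frag that might be modified
 * by an external entity (as in vmsplice()/sendfile()).
 */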
3047static inline bool skb_has_shared_frag(const struct sk_buff *skb)
3048{
3049 return skb_is_nonlinear(skb) &&
3050 skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
3051}
3052
3053
3054
3055
3056
3057
3058
3059
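/**
 * skb_linearize_cow - make sure skb is linear and writable
 * @skb: buffer to process
 *
 * If there is no free memory -ENOMEM is returned, otherwise zero
 * is returned and the old skb data released.
 */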
3060static inline int skb_linearize_cow(struct sk_buff *skb)
3061{
3062 return skb_is_nonlinear(skb) || skb_cloned(skb) ?
3063 __skb_linearize(skb) : 0;
3064}
3065
3066static __always_inline void
3067__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
3068 unsigned int off)
3069{
3070 if (skb->ip_summed == CHECKSUM_COMPLETE)
3071 skb->csum = csum_block_sub(skb->csum,
3072 csum_partial(start, len, 0), off);
3073 else if (skb->ip_summed == CHECKSUM_PARTIAL &&
3074 skb_checksum_start_offset(skb) < 0)
3075 skb->ip_summed = CHECKSUM_NONE;
3076}
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
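/**
 * skb_postpull_rcsum - update checksum for received skb after pull
 * @skb: buffer to update
 * @start: start of data before pull
 * @len: length of data pulled
 *
 * After doing a pull on a received packet, you need to call this to
 * update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 * CHECKSUM_NONE so that it can be recomputed from scratch.
 */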
3088static inline void skb_postpull_rcsum(struct sk_buff *skb,
3089 const void *start, unsigned int len)
3090{
3091 __skb_postpull_rcsum(skb, start, len, 0);
3092}
3093
3094static __always_inline void
3095__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
3096 unsigned int off)
3097{
3098 if (skb->ip_summed == CHECKSUM_COMPLETE)
3099 skb->csum = csum_block_add(skb->csum,
3100 csum_partial(start, len, 0), off);
3101}
3102
3103
3104
3105
3106
3107
3108
3109
3110
3111
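/**
 * skb_postpush_rcsum - update checksum for received skb after push
 * @skb: buffer to update
 * @start: start of data after push
 * @len: length of data pushed
 *
 * After doing a push on a received packet, you need to call this to
 * update the CHECKSUM_COMPLETE checksum.
 */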
3112static inline void skb_postpush_rcsum(struct sk_buff *skb,
3113 const void *start, unsigned int len)
3114{
3115 __skb_postpush_rcsum(skb, start, len, 0);
3116}
3117
3118void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
3119
3120
3121
3122
3123
3124
3125
3126
3127
3128
3129
3130
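/**
 * skb_push_rcsum - push skb and update receive checksum
 * @skb: buffer to update
 * @len: length of data pushed
 *
 * This function performs an skb_push on the packet and updates
 * the CHECKSUM_COMPLETE checksum.  It should be used on
 * receive path processing instead of skb_push unless you know
 * that the checksum difference is zero (e.g., a valid IP header)
 * or you are setting ip_summed to CHECKSUM_NONE.
 */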
3131static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
3132{
3133 skb_push(skb, len);
3134 skb_postpush_rcsum(skb, skb->data, len);
3135 return skb->data;
3136}
3137
3138
3139
3140
3141
3142
3143
3144
3145
3146
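/**
 * pskb_trim_rcsum - trim received skb and update checksum
 * @skb: buffer to trim
 * @len: new length
 *
 * This is exactly the same as pskb_trim except that it ensures the
 * checksum of received packets is still valid after the operation.
 */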
3147static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
3148{
3149 if (likely(len >= skb->len))
3150 return 0;
3151 if (skb->ip_summed == CHECKSUM_COMPLETE)
3152 skb->ip_summed = CHECKSUM_NONE;
3153 return __pskb_trim(skb, len);
3154}
3155
3156static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
3157{
3158 if (skb->ip_summed == CHECKSUM_COMPLETE)
3159 skb->ip_summed = CHECKSUM_NONE;
3160 __skb_trim(skb, len);
3161 return 0;
3162}
3163
3164static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
3165{
3166 if (skb->ip_summed == CHECKSUM_COMPLETE)
3167 skb->ip_summed = CHECKSUM_NONE;
3168 return __skb_grow(skb, len);
3169}
3170
3171#define skb_queue_walk(queue, skb) \
3172 for (skb = (queue)->next; \
3173 skb != (struct sk_buff *)(queue); \
3174 skb = skb->next)
3175
3176#define skb_queue_walk_safe(queue, skb, tmp) \
3177 for (skb = (queue)->next, tmp = skb->next; \
3178 skb != (struct sk_buff *)(queue); \
3179 skb = tmp, tmp = skb->next)
3180
3181#define skb_queue_walk_from(queue, skb) \
3182 for (; skb != (struct sk_buff *)(queue); \
3183 skb = skb->next)
3184
3185#define skb_queue_walk_from_safe(queue, skb, tmp) \
3186 for (tmp = skb->next; \
3187 skb != (struct sk_buff *)(queue); \
3188 skb = tmp, tmp = skb->next)
3189
3190#define skb_queue_reverse_walk(queue, skb) \
3191 for (skb = (queue)->prev; \
3192 skb != (struct sk_buff *)(queue); \
3193 skb = skb->prev)
3194
3195#define skb_queue_reverse_walk_safe(queue, skb, tmp) \
3196 for (skb = (queue)->prev, tmp = skb->prev; \
3197 skb != (struct sk_buff *)(queue); \
3198 skb = tmp, tmp = skb->prev)
3199
3200#define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \
3201 for (tmp = skb->prev; \
3202 skb != (struct sk_buff *)(queue); \
3203 skb = tmp, tmp = skb->prev)
3204
3205static inline bool skb_has_frag_list(const struct sk_buff *skb)
3206{
3207 return skb_shinfo(skb)->frag_list != NULL;
3208}
3209
3210static inline void skb_frag_list_init(struct sk_buff *skb)
3211{
3212 skb_shinfo(skb)->frag_list = NULL;
3213}
3214
3215#define skb_walk_frags(skb, iter) \
3216 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
3217
3218
3219int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
3220 const struct sk_buff *skb);
3221struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
3222 struct sk_buff_head *queue,
3223 unsigned int flags,
3224 void (*destructor)(struct sock *sk,
3225 struct sk_buff *skb),
3226 int *peeked, int *off, int *err,
3227 struct sk_buff **last);
3228struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
3229 void (*destructor)(struct sock *sk,
3230 struct sk_buff *skb),
3231 int *peeked, int *off, int *err,
3232 struct sk_buff **last);
3233struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
3234 void (*destructor)(struct sock *sk,
3235 struct sk_buff *skb),
3236 int *peeked, int *off, int *err);
3237struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
3238 int *err);
3239unsigned int datagram_poll(struct file *file, struct socket *sock,
3240 struct poll_table_struct *wait);
3241int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
3242 struct iov_iter *to, int size);
3243static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
3244 struct msghdr *msg, int size)
3245{
3246 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
3247}
3248int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
3249 struct msghdr *msg);
3250int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
3251 struct iov_iter *from, int len);
3252int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
3253void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
3254void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
3255static inline void skb_free_datagram_locked(struct sock *sk,
3256 struct sk_buff *skb)
3257{
3258 __skb_free_datagram_locked(sk, skb, 0);
3259}
3260int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
3261int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
3262int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
3263__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
3264 int len, __wsum csum);
3265int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
3266 struct pipe_inode_info *pipe, unsigned int len,
3267 unsigned int flags);
3268int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
3269 int len);
3270int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len);
3271void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
3272unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
3273int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
3274 int len, int hlen);
3275void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
3276int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
3277void skb_scrub_packet(struct sk_buff *skb, bool xnet);
3278unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
3279bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
3280struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
3281struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
3282int skb_ensure_writable(struct sk_buff *skb, int write_len);
3283int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
3284int skb_vlan_pop(struct sk_buff *skb);
3285int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
3286struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
3287 gfp_t gfp);
3288
3289static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
3290{
3291 return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT;
3292}
3293
3294static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
3295{
3296 return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
3297}
3298
3299struct skb_checksum_ops {
3300 __wsum (*update)(const void *mem, int len, __wsum wsum);
3301 __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
3302};
3303
3304extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly;
3305
3306__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
3307 __wsum csum, const struct skb_checksum_ops *ops);
3308__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
3309 __wsum csum);
3310
3311static inline void * __must_check
3312__skb_header_pointer(const struct sk_buff *skb, int offset,
3313 int len, void *data, int hlen, void *buffer)
3314{
3315 if (hlen - offset >= len)
3316 return data + offset;
3317
3318 if (!skb ||
3319 skb_copy_bits(skb, offset, buffer, len) < 0)
3320 return NULL;
3321
3322 return buffer;
3323}
3324
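/* skb_header_pointer - return a pointer to @len bytes at @offset in @skb.
 *
 * If the requested range lies within the linear data area, a pointer into
 * the skb is returned directly; otherwise the bytes are copied into
 * @buffer and a pointer to @buffer is returned.  Returns NULL if the
 * packet is too short.
 *
 * Typical (illustrative) usage when parsing a transport header; thoff is
 * just a placeholder for the transport header offset:
 *
 *	struct tcphdr _tcph;
 *	const struct tcphdr *th;
 *
 *	th = skb_header_pointer(skb, thoff, sizeof(_tcph), &_tcph);
 *	if (!th)
 *		return -EINVAL;
 */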
3325static inline void * __must_check
3326skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
3327{
3328 return __skb_header_pointer(skb, offset, len, skb->data,
3329 skb_headlen(skb), buffer);
3330}
3331
3332
3333
3334
3335
3336
3337
3338
3339
3340
3341
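/**
 * skb_needs_linearize - check if we need to linearize a given skb
 *			 depending on the given device features.
 * @skb: socket buffer to check
 * @features: net device features
 *
 * Returns true if either:
 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
 * 2. skb is fragmented and the device does not support SG.
 */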
3342static inline bool skb_needs_linearize(struct sk_buff *skb,
3343 netdev_features_t features)
3344{
3345 return skb_is_nonlinear(skb) &&
3346 ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
3347 (skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
3348}
3349
3350static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
3351 void *to,
3352 const unsigned int len)
3353{
3354 memcpy(to, skb->data, len);
3355}
3356
3357static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
3358 const int offset, void *to,
3359 const unsigned int len)
3360{
3361 memcpy(to, skb->data + offset, len);
3362}
3363
3364static inline void skb_copy_to_linear_data(struct sk_buff *skb,
3365 const void *from,
3366 const unsigned int len)
3367{
3368 memcpy(skb->data, from, len);
3369}
3370
3371static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
3372 const int offset,
3373 const void *from,
3374 const unsigned int len)
3375{
3376 memcpy(skb->data + offset, from, len);
3377}
3378
3379void skb_init(void);
3380
3381static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
3382{
3383 return skb->tstamp;
3384}
3385
3386
3387
3388
3389
3390
3391
3392
3393
3394
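/**
 * skb_get_timestamp - get timestamp from a skb
 * @skb: skb to get stamp from
 * @stamp: pointer to struct timeval to store stamp in
 *
 * The skb timestamp is stored as a ktime_t.  This function converts it
 * to a struct timeval and stores it in @stamp.
 */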
3395static inline void skb_get_timestamp(const struct sk_buff *skb,
3396 struct timeval *stamp)
3397{
3398 *stamp = ktime_to_timeval(skb->tstamp);
3399}
3400
3401static inline void skb_get_timestampns(const struct sk_buff *skb,
3402 struct timespec *stamp)
3403{
3404 *stamp = ktime_to_timespec(skb->tstamp);
3405}
3406
3407static inline void __net_timestamp(struct sk_buff *skb)
3408{
3409 skb->tstamp = ktime_get_real();
3410}
3411
3412static inline ktime_t net_timedelta(ktime_t t)
3413{
3414 return ktime_sub(ktime_get_real(), t);
3415}
3416
3417static inline ktime_t net_invalid_timestamp(void)
3418{
3419 return 0;
3420}
3421
3422struct sk_buff *skb_clone_sk(struct sk_buff *skb);
3423
3424#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
3425
3426void skb_clone_tx_timestamp(struct sk_buff *skb);
3427bool skb_defer_rx_timestamp(struct sk_buff *skb);
3428
3429#else
3430
3431static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
3432{
3433}
3434
3435static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
3436{
3437 return false;
3438}
3439
3440#endif
3441
3442
3443
3444
3445
3446
3447
3448
3449
3450
3451
3452
3453
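/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb back to the stack with a
 * timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps
 */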
3454void skb_complete_tx_timestamp(struct sk_buff *skb,
3455 struct skb_shared_hwtstamps *hwtstamps);
3456
3457void __skb_tstamp_tx(struct sk_buff *orig_skb,
3458 struct skb_shared_hwtstamps *hwtstamps,
3459 struct sock *sk, int tstype);
3460
3461
3462
3463
3464
3465
3466
3467
3468
3469
3470
3471
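/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb:	the original outgoing packet
 * @hwtstamps:	hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket.  Errors are silently ignored.
 */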
3472void skb_tstamp_tx(struct sk_buff *orig_skb,
3473 struct skb_shared_hwtstamps *hwtstamps);
3474
3475
3476
3477
3478
3479
3480
3481
3482
3483
3484
3485
3486
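/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC Drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * Specifically, one should make absolutely sure that this function is
 * called before TX completion of this packet can trigger.  Otherwise
 * the packet could potentially already be freed.
 *
 * @skb: A socket buffer.
 */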
3487static inline void skb_tx_timestamp(struct sk_buff *skb)
3488{
3489 skb_clone_tx_timestamp(skb);
3490 if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
3491 skb_tstamp_tx(skb, NULL);
3492}
3493
3494
3495
3496
3497
3498
3499
3500
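/**
 * skb_complete_wifi_ack - deliver skb with wifi status
 *
 * @skb: the original outgoing packet
 * @acked: ack status
 */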
3501void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
3502
3503__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
3504__sum16 __skb_checksum_complete(struct sk_buff *skb);
3505
3506static inline int skb_csum_unnecessary(const struct sk_buff *skb)
3507{
3508 return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
3509 skb->csum_valid ||
3510 (skb->ip_summed == CHECKSUM_PARTIAL &&
3511 skb_checksum_start_offset(skb) >= 0));
3512}
3513
3514
3515
3516
3517
3518
3519
3520
3521
3522
3523
3524
3525
3526
3527
3528
3529
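/**
 * skb_checksum_complete - Calculate checksum of an entire packet
 * @skb: packet to process
 *
 * This function calculates the checksum over the entire packet plus
 * the value of skb->csum.  The latter can be used to supply the
 * checksum of a pseudo header as used by TCP/UDP.
 * It returns the checksum.
 *
 * For protocols that contain complete checksums such as ICMP/TCP/UDP,
 * this function can be used to verify that checksum on received
 * packets.  In that case the function should return zero if the
 * checksum is unnecessary or ~0 if the checksum is necessary.
 *
 * If the ip_summed flag is CHECKSUM_UNNECESSARY, the hardware has
 * already verified the checksum.
 */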
3530static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
3531{
3532 return skb_csum_unnecessary(skb) ?
3533 0 : __skb_checksum_complete(skb);
3534}
3535
3536static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
3537{
3538 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3539 if (skb->csum_level == 0)
3540 skb->ip_summed = CHECKSUM_NONE;
3541 else
3542 skb->csum_level--;
3543 }
3544}
3545
3546static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
3547{
3548 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3549 if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
3550 skb->csum_level++;
3551 } else if (skb->ip_summed == CHECKSUM_NONE) {
3552 skb->ip_summed = CHECKSUM_UNNECESSARY;
3553 skb->csum_level = 0;
3554 }
3555}
3556
3557
3558
3559
3560
3561
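/* Check if we need to perform checksum complete validation.
 *
 * Returns true if checksum complete is needed, false otherwise
 * (either checksum is unnecessary or zero checksum is allowed).
 */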
3562static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
3563 bool zero_okay,
3564 __sum16 check)
3565{
3566 if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
3567 skb->csum_valid = 1;
3568 __skb_decr_checksum_unnecessary(skb);
3569 return false;
3570 }
3571
3572 return true;
3573}
3574
3575
3576
3577
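/* For small packets <= CHECKSUM_BREAK, perform checksum complete directly
 * in checksum_init.
 */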
3578#define CHECKSUM_BREAK 76
3579
3580
3581
3582
3583
3584
3585
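/* Unset checksum-complete
 *
 * Unset checksum complete can be done when a packet is being modified
 * (uncompressed for instance) and the checksum-complete value is
 * therefore invalidated.
 */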
3586static inline void skb_checksum_complete_unset(struct sk_buff *skb)
3587{
3588 if (skb->ip_summed == CHECKSUM_COMPLETE)
3589 skb->ip_summed = CHECKSUM_NONE;
3590}
3591
3592
3593
3594
3595
3596
3597
3598
3599
3600
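/* Validate (init) checksum based on checksum complete.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete. In the latter
 *      case the ip_summed will not be CHECKSUM_UNNECESSARY and the pseudo
 *      checksum is stored in skb->csum for use in __skb_checksum_complete.
 *   non-zero: value of invalid checksum
 */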
3601static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
3602 bool complete,
3603 __wsum psum)
3604{
3605 if (skb->ip_summed == CHECKSUM_COMPLETE) {
3606 if (!csum_fold(csum_add(psum, skb->csum))) {
3607 skb->csum_valid = 1;
3608 return 0;
3609 }
3610 }
3611
3612 skb->csum = psum;
3613
3614 if (complete || skb->len <= CHECKSUM_BREAK) {
3615 __sum16 csum;
3616
3617 csum = __skb_checksum_complete(skb);
3618 skb->csum_valid = !csum;
3619 return csum;
3620 }
3621
3622 return 0;
3623}
3624
3625static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
3626{
3627 return 0;
3628}
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639
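/* Perform checksum validate (init). Note that this is a macro since we only
 * want to calculate the pseudo header which is an input function if necessary.
 * First we try to validate without any computation (checksum unnecessary) and
 * then calculate based on checksum complete calling the function to compute
 * the pseudo header.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete
 *   non-zero: value of invalid checksum
 */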
3640#define __skb_checksum_validate(skb, proto, complete, \
3641 zero_okay, check, compute_pseudo) \
3642({ \
3643 __sum16 __ret = 0; \
3644 skb->csum_valid = 0; \
3645 if (__skb_checksum_validate_needed(skb, zero_okay, check)) \
3646 __ret = __skb_checksum_validate_complete(skb, \
3647 complete, compute_pseudo(skb, proto)); \
3648 __ret; \
3649})
3650
3651#define skb_checksum_init(skb, proto, compute_pseudo) \
3652 __skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)
3653
3654#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \
3655 __skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)
3656
3657#define skb_checksum_validate(skb, proto, compute_pseudo) \
3658 __skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)
3659
3660#define skb_checksum_validate_zero_check(skb, proto, check, \
3661 compute_pseudo) \
3662 __skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)
3663
3664#define skb_checksum_simple_validate(skb) \
3665 __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
3666
3667static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
3668{
3669 return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid);
3670}
3671
3672static inline void __skb_checksum_convert(struct sk_buff *skb,
3673 __sum16 check, __wsum pseudo)
3674{
3675 skb->csum = ~pseudo;
3676 skb->ip_summed = CHECKSUM_COMPLETE;
3677}
3678
3679#define skb_checksum_try_convert(skb, proto, check, compute_pseudo) \
3680do { \
3681 if (__skb_checksum_convert_check(skb)) \
3682 __skb_checksum_convert(skb, check, \
3683 compute_pseudo(skb, proto)); \
3684} while (0)
3685
3686static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
3687 u16 start, u16 offset)
3688{
3689 skb->ip_summed = CHECKSUM_PARTIAL;
3690 skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
3691 skb->csum_offset = offset - start;
3692}
3693
3694
3695
3696
3697
3698
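/* Update the skb and packet to reflect the remote checksum offload operation.
 * When called, ptr indicates the starting point for skb->csum when
 * ip_summed is CHECKSUM_COMPLETE. If we need to create checksum complete
 * here, skb_postpull_rcsum is done so that skb->csum starts at ptr.
 */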
3699static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
3700 int start, int offset, bool nopartial)
3701{
3702 __wsum delta;
3703
3704 if (!nopartial) {
3705 skb_remcsum_adjust_partial(skb, ptr, start, offset);
3706 return;
3707 }
3708
3709 if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
3710 __skb_checksum_complete(skb);
3711 skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
3712 }
3713
3714 delta = remcsum_adjust(ptr, skb->csum, start, offset);
3715
3716
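	/* Adjust skb->csum since we changed the packet */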
3717 skb->csum = csum_add(skb->csum, delta);
3718}
3719
3720static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
3721{
3722#if IS_ENABLED(CONFIG_NF_CONNTRACK)
3723 return (void *)(skb->_nfct & SKB_NFCT_PTRMASK);
3724#else
3725 return NULL;
3726#endif
3727}
3728
3729#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
3730void nf_conntrack_destroy(struct nf_conntrack *nfct);
3731static inline void nf_conntrack_put(struct nf_conntrack *nfct)
3732{
3733 if (nfct && atomic_dec_and_test(&nfct->use))
3734 nf_conntrack_destroy(nfct);
3735}
3736static inline void nf_conntrack_get(struct nf_conntrack *nfct)
3737{
3738 if (nfct)
3739 atomic_inc(&nfct->use);
3740}
3741#endif
3742#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
3743static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
3744{
3745 if (nf_bridge && refcount_dec_and_test(&nf_bridge->use))
3746 kfree(nf_bridge);
3747}
3748static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
3749{
3750 if (nf_bridge)
3751 refcount_inc(&nf_bridge->use);
3752}
3753#endif
3754static inline void nf_reset(struct sk_buff *skb)
3755{
3756#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
3757 nf_conntrack_put(skb_nfct(skb));
3758 skb->_nfct = 0;
3759#endif
3760#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
3761 nf_bridge_put(skb->nf_bridge);
3762 skb->nf_bridge = NULL;
3763#endif
3764}
3765
3766static inline void nf_reset_trace(struct sk_buff *skb)
3767{
3768#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
3769 skb->nf_trace = 0;
3770#endif
3771}
3772
3773static inline void ipvs_reset(struct sk_buff *skb)
3774{
3775#if IS_ENABLED(CONFIG_IP_VS)
3776 skb->ipvs_property = 0;
3777#endif
3778}
3779
3780
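/* Note: This doesn't put any conntrack and bridge info in dst. */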
3781static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
3782 bool copy)
3783{
3784#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
3785 dst->_nfct = src->_nfct;
3786 nf_conntrack_get(skb_nfct(src));
3787#endif
3788#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
3789 dst->nf_bridge = src->nf_bridge;
3790 nf_bridge_get(src->nf_bridge);
3791#endif
3792#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
3793 if (copy)
3794 dst->nf_trace = src->nf_trace;
3795#endif
3796}
3797
3798static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
3799{
3800#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
3801 nf_conntrack_put(skb_nfct(dst));
3802#endif
3803#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
3804 nf_bridge_put(dst->nf_bridge);
3805#endif
3806 __nf_copy(dst, src, true);
3807}
3808
3809#ifdef CONFIG_NETWORK_SECMARK
3810static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
3811{
3812 to->secmark = from->secmark;
3813}
3814
3815static inline void skb_init_secmark(struct sk_buff *skb)
3816{
3817 skb->secmark = 0;
3818}
3819#else
3820static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
3821{ }
3822
3823static inline void skb_init_secmark(struct sk_buff *skb)
3824{ }
3825#endif
3826
3827static inline bool skb_irq_freeable(const struct sk_buff *skb)
3828{
3829 return !skb->destructor &&
3830#if IS_ENABLED(CONFIG_XFRM)
3831 !skb->sp &&
3832#endif
3833 !skb_nfct(skb) &&
3834 !skb->_skb_refdst &&
3835 !skb_has_frag_list(skb);
3836}
3837
3838static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
3839{
3840 skb->queue_mapping = queue_mapping;
3841}
3842
3843static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
3844{
3845 return skb->queue_mapping;
3846}
3847
3848static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
3849{
3850 to->queue_mapping = from->queue_mapping;
3851}
3852
3853static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
3854{
3855 skb->queue_mapping = rx_queue + 1;
3856}
3857
3858static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
3859{
3860 return skb->queue_mapping - 1;
3861}
3862
3863static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
3864{
3865 return skb->queue_mapping != 0;
3866}
3867
3868static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val)
3869{
3870 skb->dst_pending_confirm = val;
3871}
3872
3873static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
3874{
3875 return skb->dst_pending_confirm != 0;
3876}
3877
3878static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
3879{
3880#ifdef CONFIG_XFRM
3881 return skb->sp;
3882#else
3883 return NULL;
3884#endif
3885}
3886
3887
3888
3889
3890
3891
3892
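/* Keeps track of the mac header offset relative to skb->head.
 * It is useful for TSO of tunneling protocols, e.g. GRE.
 * For a non-tunnel skb it points to skb_mac_header() and for a
 * tunnel skb it points to the outer mac header.
 * encap_level keeps track of the level of encapsulation of network headers.
 */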
3893struct skb_gso_cb {
3894 union {
3895 int mac_offset;
3896 int data_offset;
3897 };
3898 int encap_level;
3899 __wsum csum;
3900 __u16 csum_start;
3901};
3902#define SKB_SGO_CB_OFFSET 32
3903#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))
3904
3905static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
3906{
3907 return (skb_mac_header(inner_skb) - inner_skb->head) -
3908 SKB_GSO_CB(inner_skb)->mac_offset;
3909}
3910
3911static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
3912{
3913 int new_headroom, headroom;
3914 int ret;
3915
3916 headroom = skb_headroom(skb);
3917 ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
3918 if (ret)
3919 return ret;
3920
3921 new_headroom = skb_headroom(skb);
3922 SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
3923 return 0;
3924}
3925
3926static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
3927{
3928
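	/* Do not update partial checksums if remote checksum is enabled. */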
3929 if (skb->remcsum_offload)
3930 return;
3931
3932 SKB_GSO_CB(skb)->csum = res;
3933 SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
3934}
3935
3936
3937
3938
3939
3940
3941
3942
3943
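/* Compute the checksum for a gso segment. First compute the checksum value
 * from the start of the transport header to SKB_GSO_CB(skb)->csum_start, and
 * then add in the checksum saved in SKB_GSO_CB(skb)->csum (checksum from
 * csum_start to the end of the packet). SKB_GSO_CB(skb)->csum and csum_start
 * are then updated to reflect the checksum of the resultant packet starting
 * from the transport header -- the resultant checksum adjustment is in the
 * res argument (i.e. RFC 1624). This function is expected to be called from
 * the gso_* code.
 */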
3944static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
3945{
3946 unsigned char *csum_start = skb_transport_header(skb);
3947 int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
3948 __wsum partial = SKB_GSO_CB(skb)->csum;
3949
3950 SKB_GSO_CB(skb)->csum = res;
3951 SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;
3952
3953 return csum_fold(csum_partial(csum_start, plen, partial));
3954}
3955
3956static inline bool skb_is_gso(const struct sk_buff *skb)
3957{
3958 return skb_shinfo(skb)->gso_size;
3959}
3960
3961
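/* Note: Should be called only if skb_is_gso(skb) is true */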
3962static inline bool skb_is_gso_v6(const struct sk_buff *skb)
3963{
3964 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
3965}
3966
3967static inline void skb_gso_reset(struct sk_buff *skb)
3968{
3969 skb_shinfo(skb)->gso_size = 0;
3970 skb_shinfo(skb)->gso_segs = 0;
3971 skb_shinfo(skb)->gso_type = 0;
3972}
3973
3974void __skb_warn_lro_forwarding(const struct sk_buff *skb);
3975
3976static inline bool skb_warn_if_lro(const struct sk_buff *skb)
3977{
3978
3979
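	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set.
	 */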
3980 const struct skb_shared_info *shinfo = skb_shinfo(skb);
3981
3982 if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
3983 unlikely(shinfo->gso_type == 0)) {
3984 __skb_warn_lro_forwarding(skb);
3985 return true;
3986 }
3987 return false;
3988}
3989
3990static inline void skb_forward_csum(struct sk_buff *skb)
3991{
3992
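	/* CHECKSUM_COMPLETE is not supported on the forwarding path;
	 * fall back to CHECKSUM_NONE.
	 */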
3993 if (skb->ip_summed == CHECKSUM_COMPLETE)
3994 skb->ip_summed = CHECKSUM_NONE;
3995}
3996
3997
3998
3999
4000
4001
4002
4003
4004
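/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * Fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper, to document places where we make this assertion.
 */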
4005static inline void skb_checksum_none_assert(const struct sk_buff *skb)
4006{
4007#ifdef DEBUG
4008 BUG_ON(skb->ip_summed != CHECKSUM_NONE);
4009#endif
4010}
4011
4012bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
4013
4014int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
4015struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
4016 unsigned int transport_len,
4017 __sum16(*skb_chkf)(struct sk_buff *skb));
4018
4019
4020
4021
4022
4023
4024
4025
4026
4027
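/**
 * skb_head_is_locked - Determine if the skb->head is locked down
 * @skb: skb to check
 *
 * The head on skbs built around a head frag can be removed if they are
 * not cloned.  This function returns true if the skb head is locked down
 * due to either being allocated via kmalloc, or by being a clone with
 * multiple references to the head.
 */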
4028static inline bool skb_head_is_locked(const struct sk_buff *skb)
4029{
4030 return !skb->head_frag || skb_cloned(skb);
4031}
4032
4033
4034
4035
4036
4037
4038
4039
4040
4041
4042
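/**
 * skb_gso_network_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_network_seglen is used to determine the real size of the
 * individual segments, including Layer 3 (IP, IPv6) and L4 headers (TCP/UDP).
 *
 * The MAC/L2 header is not accounted for.
 */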
4043static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
4044{
4045 unsigned int hdr_len = skb_transport_header(skb) -
4046 skb_network_header(skb);
4047 return hdr_len + skb_gso_transport_seglen(skb);
4048}
4049
4050
4051
4052
4053
4054
4055
4056
4057
4058
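/* Local Checksum Offload.
 * Compute the outer checksum based on the assumption that the
 * inner checksum will be offloaded later.
 * See Documentation/networking/checksum-offloads.txt for an
 * explanation of how this works.
 * Fill in the outer checksum adjustment (e.g. with the sum of the outer
 * pseudo-header) before calling.
 * Returns the outer checksum value.
 */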
4059static inline __wsum lco_csum(struct sk_buff *skb)
4060{
4061 unsigned char *csum_start = skb_checksum_start(skb);
4062 unsigned char *l4_hdr = skb_transport_header(skb);
4063 __wsum partial;
4064
4065
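	/* Start with complement of inner checksum adjustment */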
4066 partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
4067 skb->csum_offset));
4068
4069
4070
4071
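	/* Add in checksum of our headers (incl. outer checksum
	 * adjustment filled in by caller) and return result.
	 */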
4072 return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
4073}
4074
4075#endif
4076#endif
4077