/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 */
#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/rbtree.h>
#include <linux/socket.h>
#include <linux/refcount.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <net/flow_dissector.h>
#include <linux/splice.h>
#include <linux/in6.h>
#include <linux/if_packet.h>
#include <net/flow.h>
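
/*
 * Checksumming notes, condensed from the upstream discussion for this
 * header (see the full kernel documentation for the authoritative text):
 *
 * CHECKSUM_NONE	- the device provided no checksum help; skb->csum is
 *			  undefined and the stack must verify everything.
 * CHECKSUM_UNNECESSARY	- the device already verified the checksum(s);
 *			  csum_level records how many consecutive checksums
 *			  were verified, up to SKB_MAX_CSUM_LEVEL.
 * CHECKSUM_COMPLETE	- the device supplied in skb->csum the checksum of
 *			  the complete packet; the stack folds in the
 *			  pseudo-header itself.
 * CHECKSUM_PARTIAL	- mainly on transmit: the checksum from csum_start/
 *			  csum_offset onward still has to be filled in, by
 *			  hardware offload or by the stack.
 */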
#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

#define SKB_MAX_CSUM_LEVEL	3

#define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

struct net_device;
struct scatterlist;
struct pipe_inode_info;
struct iov_iter;
struct napi_struct;
struct bpf_prog;
union bpf_attr;
struct skb_ext;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
	enum {
		BRNF_PROTO_UNCHANGED,
		BRNF_PROTO_8021Q,
		BRNF_PROTO_PPPOE
	} orig_proto:8;
	u8			pkt_otherhost:1;
	u8			in_prerouting:1;
	u8			bridged_dnat:1;
	__u16			frag_max_size;
	struct net_device	*physindev;

	struct net_device	*physoutdev;
	union {
		__be32		ipv4_daddr;
		struct in6_addr	ipv6_daddr;

		char		neigh_header[8];
	};
};
#endif

struct sk_buff_head {
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif
extern int sysctl_max_skb_frags;

#define GSO_BY_FRAGS	0xFFFF

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct {
		struct page *p;
	} page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
	__u32 size;
#else
	__u16 page_offset;
	__u16 size;
#endif
};

static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->size;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->size = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->size += delta;
}

static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->size -= delta;
}

static inline bool skb_frag_must_loop(struct page *p)
{
#if defined(CONFIG_HIGHMEM)
	if (PageHighMem(p))
		return true;
#endif
	return false;
}

#define skb_frag_foreach_page(f, f_off, f_len, p, p_off, p_len, copied)	\
	for (p = skb_frag_page(f) + ((f_off) >> PAGE_SHIFT),		\
	     p_off = (f_off) & (PAGE_SIZE - 1),				\
	     p_len = skb_frag_must_loop(p) ?				\
	     min_t(u32, f_len, PAGE_SIZE - p_off) : f_len,		\
	     copied = 0;						\
	     copied < f_len;						\
	     copied += p_len, p++, p_off = 0,				\
	     p_len = min_t(u32, f_len - copied, PAGE_SIZE))
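
/*
 * Illustrative use of skb_frag_foreach_page() (a sketch, not lifted from a
 * real driver): walk one fragment page by page so that HIGHMEM pages can
 * be mapped one at a time.
 *
 *	struct page *p;
 *	u32 p_off, p_len, copied;
 *
 *	skb_frag_foreach_page(frag, frag->page_offset, skb_frag_size(frag),
 *			      p, p_off, p_len, copied) {
 *		void *vaddr = kmap_atomic(p);
 *
 *		// consume p_len bytes at vaddr + p_off
 *		kunmap_atomic(vaddr);
 *	}
 */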
#define HAVE_HW_TIME_STAMP

struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
};

enum {
	SKBTX_HW_TSTAMP = 1 << 0,
	SKBTX_SW_TSTAMP = 1 << 1,
	SKBTX_IN_PROGRESS = 1 << 2,
	SKBTX_DEV_ZEROCOPY = 1 << 3,
	SKBTX_WIFI_STATUS = 1 << 4,
	SKBTX_SHARED_FRAG = 1 << 5,
	SKBTX_SCHED_TSTAMP = 1 << 6,
};

#define SKBTX_ZEROCOPY_FRAG	(SKBTX_DEV_ZEROCOPY | SKBTX_SHARED_FRAG)
#define SKBTX_ANY_SW_TSTAMP	(SKBTX_SW_TSTAMP | \
				 SKBTX_SCHED_TSTAMP)
#define SKBTX_ANY_TSTAMP	(SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)

struct ubuf_info {
	void (*callback)(struct ubuf_info *, bool zerocopy_success);
	union {
		struct {
			unsigned long desc;
			void *ctx;
		};
		struct {
			u32 id;
			u16 len;
			u16 zerocopy:1;
			u32 bytelen;
		};
	};
	refcount_t refcnt;

	struct mmpin {
		struct user_struct *user;
		unsigned int num_pg;
	} mmp;
};

#define skb_uarg(SKB)	((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))

int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
void mm_unaccount_pinned_pages(struct mmpin *mmp);

struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size);
struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
					struct ubuf_info *uarg);

static inline void sock_zerocopy_get(struct ubuf_info *uarg)
{
	refcount_inc(&uarg->refcnt);
}

void sock_zerocopy_put(struct ubuf_info *uarg);
void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);

void sock_zerocopy_callback(struct ubuf_info *uarg, bool success);

int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len);
int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
			     struct msghdr *msg, int len,
			     struct ubuf_info *uarg);

struct skb_shared_info {
	__u8		__unused;
	__u8		meta_len;
	__u8		nr_frags;
	__u8		tx_flags;
	unsigned short	gso_size;

	unsigned short	gso_segs;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	unsigned int	gso_type;
	u32		tskey;

	atomic_t	dataref;

	void		*destructor_arg;

	skb_frag_t	frags[MAX_SKB_FRAGS];
};

#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)

enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_DODGY = 1 << 1,
	SKB_GSO_TCP_ECN = 1 << 2,
	SKB_GSO_TCP_FIXEDID = 1 << 3,
	SKB_GSO_TCPV6 = 1 << 4,
	SKB_GSO_FCOE = 1 << 5,
	SKB_GSO_GRE = 1 << 6,
	SKB_GSO_GRE_CSUM = 1 << 7,
	SKB_GSO_IPXIP4 = 1 << 8,
	SKB_GSO_IPXIP6 = 1 << 9,
	SKB_GSO_UDP_TUNNEL = 1 << 10,
	SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
	SKB_GSO_PARTIAL = 1 << 12,
	SKB_GSO_TUNNEL_REMCSUM = 1 << 13,
	SKB_GSO_SCTP = 1 << 14,
	SKB_GSO_ESP = 1 << 15,
	SKB_GSO_UDP = 1 << 16,
	SKB_GSO_UDP_L4 = 1 << 17,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif
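
/*
 *	struct sk_buff - socket buffer (condensed summary; the upstream
 *	header documents every field)
 *
 *	The core packet metadata structure. The packet bytes live in the
 *	buffer delimited by @head/@data/@tail/@end; paged fragments and
 *	other shared state live in the struct skb_shared_info placed at
 *	skb_end_pointer().
 */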
struct sk_buff {
	union {
		struct {
			struct sk_buff		*next;
			struct sk_buff		*prev;

			union {
				struct net_device	*dev;
				unsigned long		dev_scratch;
			};
		};
		struct rb_node		rbnode;
		struct list_head	list;
	};

	union {
		struct sock		*sk;
		int			ip_defrag_offset;
	};

	union {
		ktime_t		tstamp;
		u64		skb_mstamp_ns;
	};

	char			cb[48] __aligned(8);

	union {
		struct {
			unsigned long	_skb_refdst;
			void		(*destructor)(struct sk_buff *skb);
		};
		struct list_head	tcp_tsorted_anchor;
	};

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	unsigned long		_nfct;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;

	__u16			queue_mapping;

#ifdef __BIG_ENDIAN_BITFIELD
#define CLONED_MASK	(1 << 7)
#else
#define CLONED_MASK	1
#endif
#define CLONED_OFFSET()		offsetof(struct sk_buff, __cloned_offset)

	__u8			__cloned_offset[0];
	__u8			cloned:1,
				nohdr:1,
				fclone:2,
				peeked:1,
				head_frag:1,
				pfmemalloc:1;
#ifdef CONFIG_SKB_EXTENSIONS
	__u8			active_extensions;
#endif

	__u32			headers_start[0];

#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_TYPE_MAX	(7 << 5)
#else
#define PKT_TYPE_MAX	7
#endif
#define PKT_TYPE_OFFSET()	offsetof(struct sk_buff, __pkt_type_offset)

	__u8			__pkt_type_offset[0];
	__u8			pkt_type:3;
	__u8			ignore_df:1;
	__u8			nf_trace:1;
	__u8			ip_summed:2;
	__u8			ooo_okay:1;

	__u8			l4_hash:1;
	__u8			sw_hash:1;
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;
	__u8			no_fcs:1;

	__u8			encapsulation:1;
	__u8			encap_hdr_csum:1;
	__u8			csum_valid:1;

#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_VLAN_PRESENT_BIT	7
#else
#define PKT_VLAN_PRESENT_BIT	0
#endif
#define PKT_VLAN_PRESENT_OFFSET()	offsetof(struct sk_buff, __pkt_vlan_present_offset)
	__u8			__pkt_vlan_present_offset[0];
	__u8			vlan_present:1;
	__u8			csum_complete_sw:1;
	__u8			csum_level:2;
	__u8			csum_not_inet:1;
	__u8			dst_pending_confirm:1;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif

	__u8			ipvs_property:1;
	__u8			inner_protocol_type:1;
	__u8			remcsum_offload:1;
#ifdef CONFIG_NET_SWITCHDEV
	__u8			offload_fwd_mark:1;
	__u8			offload_l3_fwd_mark:1;
#endif
#ifdef CONFIG_NET_CLS_ACT
	__u8			tc_skip_classify:1;
	__u8			tc_at_ingress:1;
	__u8			tc_redirected:1;
	__u8			tc_from_ingress:1;
#endif
#ifdef CONFIG_TLS_DEVICE
	__u8			decrypted:1;
#endif

#ifdef CONFIG_NET_SCHED
	__u16			tc_index;
#endif

	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	int			skb_iif;
	__u32			hash;
	__be16			vlan_proto;
	__u16			vlan_tci;
#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
	union {
		unsigned int	napi_id;
		unsigned int	sender_cpu;
	};
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif

	union {
		__u32		mark;
		__u32		reserved_tailroom;
	};

	union {
		__be16		inner_protocol;
		__u8		inner_ipproto;
	};

	__u16			inner_transport_header;
	__u16			inner_network_header;
	__u16			inner_mac_header;

	__be16			protocol;
	__u16			transport_header;
	__u16			network_header;
	__u16			mac_header;

	__u32			headers_end[0];

	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	refcount_t		users;

#ifdef CONFIG_SKB_EXTENSIONS
	struct skb_ext		*extensions;
#endif
};

#ifdef __KERNEL__

#define SKB_ALLOC_FCLONE	0x01
#define SKB_ALLOC_RX		0x02
#define SKB_ALLOC_NAPI		0x04

static inline bool skb_pfmemalloc(const struct sk_buff *skb)
{
	return unlikely(skb->pfmemalloc);
}

#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

#define SKB_NFCT_PTRMASK	~(7UL)

static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}
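
/*
 * Sketch of the locking rule skb_dst() checks: a noref dst may only be
 * dereferenced inside an RCU read-side section.
 *
 *	rcu_read_lock();
 *	dst = skb_dst(skb);	// no warning even for a noref dst
 *	if (dst)
 *		use(dst);	// hypothetical consumer
 *	rcu_read_unlock();
 */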

static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}

static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
}

static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

static inline bool skb_pkt_type_ok(u32 ptype)
{
	return ptype <= PACKET_OTHERHOST;
}

static inline unsigned int skb_napi_id(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	return skb->napi_id;
#else
	return 0;
#endif
}

static inline bool skb_unref(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return false;
	if (likely(refcount_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!refcount_dec_and_test(&skb->users)))
		return false;

	return true;
}

void skb_release_head_state(struct sk_buff *skb);
void kfree_skb(struct sk_buff *skb);
void kfree_skb_list(struct sk_buff *segs);
void skb_tx_error(struct sk_buff *skb);
void consume_skb(struct sk_buff *skb);
void __consume_stateless_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;

void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
		      bool *fragstolen, int *delta_truesize);

struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
			    int node);
struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size);

static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}
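
/*
 * Minimal allocation sketch (illustrative; "payload" and "len" are
 * assumed): allocate, reserve headroom for protocol headers, then append
 * the payload.
 *
 *	struct sk_buff *skb = alloc_skb(NET_SKB_PAD + len, GFP_KERNEL);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, NET_SKB_PAD);
 *	skb_put_data(skb, payload, len);
 */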

struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
				     unsigned long data_len,
				     int max_page_order,
				     int *errcode,
				     gfp_t gfp_mask);

struct sk_buff_fclones {
	struct sk_buff	skb1;

	struct sk_buff	skb2;

	refcount_t	fclone_ref;
};

static inline bool skb_fclone_busy(const struct sock *sk,
				   const struct sk_buff *skb)
{
	const struct sk_buff_fclones *fclones;

	fclones = container_of(skb, struct sk_buff_fclones, skb1);

	return skb->fclone == SKB_FCLONE_ORIG &&
	       refcount_read(&fclones->fclone_ref) > 1 &&
	       fclones->skb2.sk == sk;
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
}

struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
void skb_headers_offset_update(struct sk_buff *skb, int off);
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
void skb_copy_header(struct sk_buff *new, const struct sk_buff *old);
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone);
static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
					  gfp_t gfp_mask)
{
	return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
}

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
				     unsigned int headroom);
struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
				int newtailroom, gfp_t priority);
int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
				     int offset, int len);
int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
			      int offset, int len);
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);

static inline int skb_pad(struct sk_buff *skb, int pad)
{
	return __skb_pad(skb, pad, true);
}
#define dev_kfree_skb(a)	consume_skb(a)

int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
			 int offset, size_t size);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st);
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st);
void skb_abort_seq_read(struct skb_seq_state *st);

unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config);
enum pkt_hash_types {
	PKT_HASH_TYPE_NONE,
	PKT_HASH_TYPE_L2,
	PKT_HASH_TYPE_L3,
	PKT_HASH_TYPE_L4,
};

static inline void skb_clear_hash(struct sk_buff *skb)
{
	skb->hash = 0;
	skb->sw_hash = 0;
	skb->l4_hash = 0;
}

static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
{
	if (!skb->l4_hash)
		skb_clear_hash(skb);
}

static inline void
__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
{
	skb->l4_hash = is_l4;
	skb->sw_hash = is_sw;
	skb->hash = hash;
}

static inline void
skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
{
	__skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
}

static inline void
__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
{
	__skb_set_hash(skb, hash, true, is_l4);
}

void __skb_get_hash(struct sk_buff *skb);
u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
u32 skb_get_poff(const struct sk_buff *skb);
u32 __skb_get_poff(const struct sk_buff *skb, void *data,
		   const struct flow_keys_basic *keys, int hlen);
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
			    void *data, int hlen_proto);

static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
					int thoff, u8 ip_proto)
{
	return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
}

void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count);

#ifdef CONFIG_NET
int skb_flow_dissector_prog_query(const union bpf_attr *attr,
				  union bpf_attr __user *uattr);
int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
				       struct bpf_prog *prog);

int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr);
#else
static inline int skb_flow_dissector_prog_query(const union bpf_attr *attr,
						union bpf_attr __user *uattr)
{
	return -EOPNOTSUPP;
}

static inline int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
						     struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}

static inline int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}
#endif

struct bpf_flow_dissector;
bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
		      __be16 proto, int nhoff, int hlen);

bool __skb_flow_dissect(const struct net *net,
			const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container,
			void *data, __be16 proto, int nhoff, int hlen,
			unsigned int flags);

static inline bool skb_flow_dissect(const struct sk_buff *skb,
				    struct flow_dissector *flow_dissector,
				    void *target_container, unsigned int flags)
{
	return __skb_flow_dissect(NULL, skb, flow_dissector,
				  target_container, NULL, 0, 0, 0, flags);
}

static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
					      struct flow_keys *flow,
					      unsigned int flags)
{
	memset(flow, 0, sizeof(*flow));
	return __skb_flow_dissect(NULL, skb, &flow_keys_dissector,
				  flow, NULL, 0, 0, 0, flags);
}

static inline bool
skb_flow_dissect_flow_keys_basic(const struct net *net,
				 const struct sk_buff *skb,
				 struct flow_keys_basic *flow, void *data,
				 __be16 proto, int nhoff, int hlen,
				 unsigned int flags)
{
	memset(flow, 0, sizeof(*flow));
	return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow,
				  data, proto, nhoff, hlen, flags);
}

void
skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
			     struct flow_dissector *flow_dissector,
			     void *target_container);

static inline __u32 skb_get_hash(struct sk_buff *skb)
{
	if (!skb->l4_hash && !skb->sw_hash)
		__skb_get_hash(skb);

	return skb->hash;
}

static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
{
	if (!skb->l4_hash && !skb->sw_hash) {
		struct flow_keys keys;
		__u32 hash = __get_hash_from_flowi6(fl6, &keys);

		__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
	}

	return skb->hash;
}

__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);

static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
	return skb->hash;
}

static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
{
	to->hash = from->hash;
	to->sw_hash = from->sw_hash;
	to->l4_hash = from->l4_hash;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end - skb->head;
}
#endif

#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
{
	bool is_zcopy = skb && skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY;

	return is_zcopy ? skb_uarg(skb) : NULL;
}

static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
				 bool *have_ref)
{
	if (skb && uarg && !skb_zcopy(skb)) {
		if (unlikely(have_ref && *have_ref))
			*have_ref = false;
		else
			sock_zerocopy_get(uarg);
		skb_shinfo(skb)->destructor_arg = uarg;
		skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
	}
}

static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
{
	skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
	skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
}

static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
{
	return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL;
}

static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
{
	return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
}

static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
{
	struct ubuf_info *uarg = skb_zcopy(skb);

	if (uarg) {
		if (skb_zcopy_is_nouarg(skb)) {
			/* no notification callback to invoke */
		} else if (uarg->callback == sock_zerocopy_callback) {
			uarg->zerocopy = uarg->zerocopy && zerocopy;
			sock_zerocopy_put(uarg);
		} else {
			uarg->callback(uarg, zerocopy);
		}

		skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
	}
}

static inline void skb_zcopy_abort(struct sk_buff *skb)
{
	struct ubuf_info *uarg = skb_zcopy(skb);

	if (uarg) {
		sock_zerocopy_put_abort(uarg, false);
		skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
	}
}

static inline void skb_mark_not_on_list(struct sk_buff *skb)
{
	skb->next = NULL;
}

static inline void skb_list_del_init(struct sk_buff *skb)
{
	__list_del_entry(&skb->list);
	skb_mark_not_on_list(skb);
}

static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (const struct sk_buff *) list;
}

static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (const struct sk_buff *) list;
}

static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (const struct sk_buff *) list;
}

static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}

static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	refcount_inc(&skb->users);
	return skb;
}

static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_header_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

static inline void __skb_header_release(struct sk_buff *skb)
{
	skb->nohdr = 1;
	atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
}

static inline int skb_shared(const struct sk_buff *skb)
{
	return refcount_read(&skb->users) != 1;
}

static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);

		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
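
/*
 * Typical pattern around skb_share_check() (a sketch): make sure we own
 * the skb before modifying it.
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return NET_RX_DROP;	// original freed on clone failure
 */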

static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);

		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->next;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_)
{
	return list_->next;
}

static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
					    const struct sk_buff_head *list_)
{
	struct sk_buff *next = skb->next;

	if (next == (struct sk_buff *)list_)
		next = NULL;
	return next;
}

static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->prev;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}
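
/*
 * Illustrative producer/consumer use of an sk_buff_head; the non-"__"
 * queue helpers take list->lock themselves.
 *
 *	struct sk_buff_head q;
 *
 *	skb_queue_head_init(&q);
 *	skb_queue_tail(&q, skb);	// producer
 *	skb = skb_dequeue(&q);		// consumer, NULL when empty
 */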

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
					     struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev = prev->next = newsk;
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);

static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);

void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
struct sk_buff *skb_dequeue(struct sk_buff_head *list);

static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);

static inline bool skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline unsigned int __skb_pagelen(const struct sk_buff *skb)
{
	unsigned int i, len = 0;

	for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
	return len;
}

static inline unsigned int skb_pagelen(const struct sk_buff *skb)
{
	return skb_headlen(skb) + __skb_pagelen(skb);
}

static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page.p	  = page;
	frag->page_offset = off;
	skb_frag_size_set(frag, size);

	/* Propagate pfmemalloc state from the page to the skb. */
	page = compound_head(page);
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc	= true;
}

static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	__skb_fill_page_desc(skb, i, page, off, size);
	skb_shinfo(skb)->nr_frags = i + 1;
}

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize);

#define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}

#else
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif

/*
 *	Add data to an sk_buff
 */
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
void *skb_put(struct sk_buff *skb, unsigned int len);
static inline void *__skb_put(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = __skb_put(skb, len);

	memset(tmp, 0, len);
	return tmp;
}

static inline void *__skb_put_data(struct sk_buff *skb, const void *data,
				   unsigned int len)
{
	void *tmp = __skb_put(skb, len);

	memcpy(tmp, data, len);
	return tmp;
}

static inline void __skb_put_u8(struct sk_buff *skb, u8 val)
{
	*(u8 *)__skb_put(skb, 1) = val;
}

static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memset(tmp, 0, len);

	return tmp;
}

static inline void *skb_put_data(struct sk_buff *skb, const void *data,
				 unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memcpy(tmp, data, len);

	return tmp;
}

static inline void skb_put_u8(struct sk_buff *skb, u8 val)
{
	*(u8 *)skb_put(skb, 1) = val;
}
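
/*
 * Building a frame with the put/push family (illustrative; "hdr" and
 * "payload" are assumed and the skb must have enough head- and tailroom):
 *
 *	skb_reserve(skb, sizeof(*hdr));			// headroom for push
 *	skb_put_data(skb, payload, payload_len);	// append payload
 *	memcpy(skb_push(skb, sizeof(*hdr)), hdr, sizeof(*hdr));
 */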

void *skb_push(struct sk_buff *skb, unsigned int len);
static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

void *skb_pull(struct sk_buff *skb, unsigned int len);
static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

void *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
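
/*
 * Canonical header-parsing guard (a sketch): make sure the bytes about to
 * be read sit in the linear area before dereferencing them.
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;		// runt packet or pull failure
 *	iph = ip_hdr(skb);		// now safe to read
 */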

void skb_condense(struct sk_buff *skb);

static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

static inline int skb_availroom(const struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb))
		return 0;

	return skb->end - skb->tail - skb->reserved_tailroom;
}

static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
					unsigned int needed_tailroom)
{
	SKB_LINEAR_ASSERT(skb);
	if (mtu < skb_tailroom(skb) - needed_tailroom)
		/* use at most mtu */
		skb->reserved_tailroom = skb_tailroom(skb) - mtu;
	else
		/* use up to all available space */
		skb->reserved_tailroom = needed_tailroom;
}

#define ENCAP_TYPE_ETHER	0
#define ENCAP_TYPE_IPPROTO	1

static inline void skb_set_inner_protocol(struct sk_buff *skb,
					  __be16 protocol)
{
	skb->inner_protocol = protocol;
	skb->inner_protocol_type = ENCAP_TYPE_ETHER;
}

static inline void skb_set_inner_ipproto(struct sk_buff *skb,
					 __u8 ipproto)
{
	skb->inner_ipproto = ipproto;
	skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
}

static inline void skb_reset_inner_headers(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->mac_header;
	skb->inner_network_header = skb->network_header;
	skb->inner_transport_header = skb->transport_header;
}

static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}

static inline unsigned char *skb_inner_transport_header(const struct sk_buff
							*skb)
{
	return skb->head + skb->inner_transport_header;
}

static inline int skb_inner_transport_offset(const struct sk_buff *skb)
{
	return skb_inner_transport_header(skb) - skb->data;
}

static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
	skb->inner_transport_header = skb->data - skb->head;
}

static inline void skb_set_inner_transport_header(struct sk_buff *skb,
						  const int offset)
{
	skb_reset_inner_transport_header(skb);
	skb->inner_transport_header += offset;
}

static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_network_header;
}

static inline void skb_reset_inner_network_header(struct sk_buff *skb)
{
	skb->inner_network_header = skb->data - skb->head;
}

static inline void skb_set_inner_network_header(struct sk_buff *skb,
						const int offset)
{
	skb_reset_inner_network_header(skb);
	skb->inner_network_header += offset;
}

static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_mac_header;
}

static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->data - skb->head;
}

static inline void skb_set_inner_mac_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_inner_mac_header(skb);
	skb->inner_mac_header += offset;
}

static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
{
	return skb->transport_header != (typeof(skb->transport_header))~0U;
}

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_offset(const struct sk_buff *skb)
{
	return skb_mac_header(skb) - skb->data;
}

static inline u32 skb_mac_header_len(const struct sk_buff *skb)
{
	return skb->network_header - skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != (typeof(skb->mac_header))~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

static inline void skb_pop_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->network_header;
}

static inline void skb_probe_transport_header(struct sk_buff *skb)
{
	struct flow_keys_basic keys;

	if (skb_transport_header_was_set(skb))
		return;

	if (skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
					     NULL, 0, 0, 0, 0))
		skb_set_transport_header(skb, keys.control.thoff);
}

static inline void skb_mac_header_rebuild(struct sk_buff *skb)
{
	if (skb_mac_header_was_set(skb)) {
		const unsigned char *old_mac = skb_mac_header(skb);

		skb_set_mac_header(skb, -skb->mac_len);
		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
	}
}

static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}

static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
{
	return skb->head + skb->csum_start;
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
{
	return skb->inner_transport_header - skb->inner_network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline int skb_inner_network_offset(const struct sk_buff *skb)
{
	return skb_inner_network_header(skb) - skb->data;
}

static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
	return pskb_may_pull(skb, skb_network_offset(skb) + len);
}

#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif

int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
{
	if (WARN_ON(skb_is_nonlinear(skb)))
		return;
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	__skb_set_length(skb, len);
}

void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);

	BUG_ON(err);
}

static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
{
	unsigned int diff = len - skb->len;

	if (skb_tailroom(skb) < diff) {
		int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
					   GFP_ATOMIC);
		if (ret)
			return ret;
	}
	__skb_set_length(skb, len);
	return 0;
}

static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor) {
		skb->destructor(skb);
		skb->destructor = NULL;
		skb->sk		= NULL;
	} else {
		BUG_ON(skb->sk);
	}
}

static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (likely(!skb_zcopy(skb)))
		return 0;
	if (!skb_zcopy_is_nouarg(skb) &&
	    skb_uarg(skb)->callback == sock_zerocopy_callback)
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

/* Frags must be orphaned, even if refcounted, if skb might loop to rx path */
static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (likely(!skb_zcopy(skb)))
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
void skb_queue_purge(struct sk_buff_head *list);

unsigned int skb_rbtree_purge(struct rb_root *root);
void *netdev_alloc_frag(unsigned int fragsz);

struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
				   gfp_t gfp_mask);

static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
					       unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

/* legacy helper around __netdev_alloc_skb() */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	return __netdev_alloc_skb(NULL, length, gfp_mask);
}

/* legacy helper around netdev_alloc_skb() */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return netdev_alloc_skb(NULL, length);
}

static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
							  unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
							unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}
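
/*
 * RX allocation sketch: a NET_IP_ALIGN-ed buffer so the IP header lands
 * on a naturally aligned address after the 14-byte Ethernet header.
 *
 *	skb = netdev_alloc_skb_ip_align(dev, pkt_len);
 *	if (unlikely(!skb))
 *		return NULL;		// caller drops the packet
 */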

static inline void skb_free_frag(void *addr)
{
	page_frag_free(addr);
}

void *napi_alloc_frag(unsigned int fragsz);
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
				 unsigned int length, gfp_t gfp_mask);
static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
					     unsigned int length)
{
	return __napi_alloc_skb(napi, length, GFP_ATOMIC);
}
void napi_consume_skb(struct sk_buff *skb, int budget);

void __kfree_skb_flush(void);
void __kfree_skb_defer(struct sk_buff *skb);

static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
					     unsigned int order)
{
	/* Make the allocation compound and allow emergency reserves;
	 * a caller that receives a pfmemalloc page must propagate that
	 * state via skb_propagate_pfmemalloc().
	 */
	gfp_mask |= __GFP_COMP | __GFP_MEMALLOC;

	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
}

static inline struct page *dev_alloc_pages(unsigned int order)
{
	return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
}

static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
{
	return __dev_alloc_pages(gfp_mask, 0);
}

static inline struct page *dev_alloc_page(void)
{
	return dev_alloc_pages(0);
}

static inline void skb_propagate_pfmemalloc(struct page *page,
					    struct sk_buff *skb)
{
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc = true;
}

static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
	return frag->page.p;
}

static inline void __skb_frag_ref(skb_frag_t *frag)
{
	get_page(skb_frag_page(frag));
}

static inline void skb_frag_ref(struct sk_buff *skb, int f)
{
	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}

static inline void __skb_frag_unref(skb_frag_t *frag)
{
	put_page(skb_frag_page(frag));
}

static inline void skb_frag_unref(struct sk_buff *skb, int f)
{
	__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
}

static inline void *skb_frag_address(const skb_frag_t *frag)
{
	return page_address(skb_frag_page(frag)) + frag->page_offset;
}

static inline void *skb_frag_address_safe(const skb_frag_t *frag)
{
	void *ptr = page_address(skb_frag_page(frag));
	if (unlikely(!ptr))
		return NULL;

	return ptr + frag->page_offset;
}

static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
	frag->page.p = page;
}

static inline void skb_frag_set_page(struct sk_buff *skb, int f,
				     struct page *page)
{
	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
}

bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);

static inline dma_addr_t skb_frag_dma_map(struct device *dev,
					  const skb_frag_t *frag,
					  size_t offset, size_t size,
					  enum dma_data_direction dir)
{
	return dma_map_page(dev, skb_frag_page(frag),
			    frag->page_offset + offset, size, dir);
}
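
/*
 * TX mapping sketch (ring bookkeeping and unmap-on-error elided): map each
 * paged fragment for device DMA.
 *
 *	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 *		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 *		dma_addr_t dma = skb_frag_dma_map(dev, frag, 0,
 *						  skb_frag_size(frag),
 *						  DMA_TO_DEVICE);
 *
 *		if (dma_mapping_error(dev, dma))
 *			goto unmap;
 *	}
 */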

static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
					gfp_t gfp_mask)
{
	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
}

static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
						  gfp_t gfp_mask)
{
	return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
}

/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not require the data to be copied.
 */
static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int skb_try_make_writable(struct sk_buff *skb,
					unsigned int write_len)
{
	return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
	       pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and original skb is not changed.
 *
 *	The result is skb with writable area suitable for destructive actions
 *	and lockless copy of ethernet header in the head of buffer area.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 *	skb_cow_head - skb_cow but only making the head writable
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	This function is identical to skb_cow except that we replace the
 *	skb_cloned check by skb_header_cloned.  It should be used when
 *	you only need to push on some header and do not need to modify
 *	the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}
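
/* Usage sketch (illustrative): before pushing an encapsulation header,
 * make sure the header area is private and large enough ("drop" is a
 * hypothetical error label):
 *
 *	if (skb_cow_head(skb, VLAN_HLEN))
 *		goto drop;
 *	__skb_push(skb, VLAN_HLEN);
 */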

/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

/**
 *	__skb_put_padto - increase size and pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *	@free_on_error: free buffer on error
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error if @free_on_error is true.
 */
static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
				  bool free_on_error)
{
	unsigned int size = skb->len;

	if (unlikely(size < len)) {
		len -= size;
		if (__skb_pad(skb, len, free_on_error))
			return -ENOMEM;
		__skb_put(skb, len);
	}
	return 0;
}

/**
 *	skb_put_padto - increase size and pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
{
	return __skb_put_padto(skb, len, true);
}
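
/* Usage sketch (illustrative): drivers commonly pad short frames to the
 * minimum Ethernet length before handing them to hardware; note the skb
 * has already been freed when an error is returned:
 *
 *	if (skb_put_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;
 */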

static inline int skb_add_data(struct sk_buff *skb,
			       struct iov_iter *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		__wsum csum = 0;
		if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
						 &csum, from)) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
				    const struct page *page, int off)
{
	if (skb_zcopy(skb))
		return false;
	if (i) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == skb_frag_page(frag) &&
		       off == frag->page_offset + skb_frag_size(frag);
	}
	return false;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 * skb_has_shared_frag - can any frag be overwritten
 * @skb: buffer to test
 *
 * Return true if the skb has at least one frag that might be modified
 * by an external entity (as in vmsplice()/sendfile())
 */
static inline bool skb_has_shared_frag(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) &&
	       skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

static __always_inline void
__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
		     unsigned int off)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_block_sub(skb->csum,
					   csum_partial(start, len, 0), off);
	else if (skb->ip_summed == CHECKSUM_PARTIAL &&
		 skb_checksum_start_offset(skb) < 0)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */
static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	__skb_postpull_rcsum(skb, start, len, 0);
}
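
/* Usage sketch (illustrative): when pulling a header off a received packet
 * by hand, update the checksum while skb->data still points at the bytes
 * being pulled ("hdr_len" is a hypothetical header length):
 *
 *	skb_postpull_rcsum(skb, skb->data, hdr_len);
 *	__skb_pull(skb, hdr_len);
 *
 * skb_pull_rcsum(), declared below, combines both steps.
 */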

static __always_inline void
__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
		     unsigned int off)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_block_add(skb->csum,
					   csum_partial(start, len, 0), off);
}

/**
 *	skb_postpush_rcsum - update checksum for received skb after push
 *	@skb: buffer to update
 *	@start: start of data after push
 *	@len: length of data pushed
 *
 *	After doing a push on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum.
 */
static inline void skb_postpush_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	__skb_postpush_rcsum(skb, start, len, 0);
}

void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *	skb_push_rcsum - push skb and update receive checksum
 *	@skb: buffer to update
 *	@len: length of data pulled
 *
 *	This function performs an skb_push on the packet and updates
 *	the CHECKSUM_COMPLETE checksum.  It should be used on
 *	receive path processing instead of skb_push unless you know
 *	that the checksum difference is zero (e.g., a valid IP header)
 *	or you are setting ip_summed to CHECKSUM_NONE.
 */
static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
{
	skb_push(skb, len);
	skb_postpush_rcsum(skb, skb->data, len);
	return skb->data;
}

int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets is still valid after the operation.
 *	It can change skb pointers.
 */
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	return pskb_trim_rcsum_slow(skb, len);
}

static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	__skb_trim(skb, len);
	return 0;
}

static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __skb_grow(skb, len);
}

#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
#define skb_rb_first(root) rb_to_skb(rb_first(root))
#define skb_rb_last(root)  rb_to_skb(rb_last(root))
#define skb_rb_next(skb)   rb_to_skb(rb_next(&(skb)->rbnode))
#define skb_rb_prev(skb)   rb_to_skb(rb_prev(&(skb)->rbnode))

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;				\
		     skb != (struct sk_buff *)(queue);			\
		     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)				\
		for (skb = (queue)->next, tmp = skb->next;		\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->next)

#define skb_queue_walk_from(queue, skb)					\
		for (; skb != (struct sk_buff *)(queue);		\
		     skb = skb->next)

#define skb_rbtree_walk(skb, root)					\
		for (skb = skb_rb_first(root); skb != NULL;		\
		     skb = skb_rb_next(skb))

#define skb_rbtree_walk_from(skb)					\
		for (; skb != NULL;					\
		     skb = skb_rb_next(skb))

#define skb_rbtree_walk_from_safe(skb, tmp)				\
		for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \
		     skb = tmp)

#define skb_queue_walk_from_safe(queue, skb, tmp)			\
		for (tmp = skb->next;					\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;				\
		     skb != (struct sk_buff *)(queue);			\
		     skb = skb->prev)

#define skb_queue_reverse_walk_safe(queue, skb, tmp)			\
		for (skb = (queue)->prev, tmp = skb->prev;		\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->prev)

#define skb_queue_reverse_walk_from_safe(queue, skb, tmp)		\
		for (tmp = skb->prev;					\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->prev)
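
/* Usage sketch (illustrative): draining a queue while freeing entries needs
 * the _safe variant because the current skb is unlinked inside the loop:
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_queue_walk_safe(queue, skb, tmp) {
 *		__skb_unlink(skb, queue);
 *		kfree_skb(skb);
 *	}
 */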

static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

#define skb_walk_frags(skb, iter)	\
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)

int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
				const struct sk_buff *skb);
struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
					  struct sk_buff_head *queue,
					  unsigned int flags,
					  void (*destructor)(struct sock *sk,
							     struct sk_buff *skb),
					  int *off, int *err,
					  struct sk_buff **last);
struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
					void (*destructor)(struct sock *sk,
							   struct sk_buff *skb),
					int *off, int *err,
					struct sk_buff **last);
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
				    void (*destructor)(struct sock *sk,
						       struct sk_buff *skb),
				    int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
				  int *err);
__poll_t datagram_poll(struct file *file, struct socket *sock,
		       struct poll_table_struct *wait);
int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
			   struct iov_iter *to, int size);
static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
					struct msghdr *msg, int size)
{
	return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
}
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
				   struct msghdr *msg);
int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
				    struct iov_iter *to, int len,
				    struct ahash_request *hash);
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
				struct iov_iter *from, int len);
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
static inline void skb_free_datagram_locked(struct sock *sk,
					    struct sk_buff *skb)
{
	__skb_free_datagram_locked(sk, skb, 0);
}
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
			      int len, __wsum csum);
int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int len,
		    unsigned int flags);
int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
			 int len);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
		 int len, int hlen);
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
void skb_scrub_packet(struct sk_buff *skb, bool xnet);
bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, int write_len);
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
			     gfp_t gfp);

static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
{
	return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT;
}

static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
{
	return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
}

struct skb_checksum_ops {
	__wsum (*update)(const void *mem, int len, __wsum wsum);
	__wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
};

extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly;

__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
		      __wsum csum, const struct skb_checksum_ops *ops);
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
		    __wsum csum);

static inline void * __must_check
__skb_header_pointer(const struct sk_buff *skb, int offset,
		     int len, void *data, int hlen, void *buffer)
{
	if (hlen - offset >= len)
		return data + offset;

	if (!skb ||
	    skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}

static inline void * __must_check
skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
{
	return __skb_header_pointer(skb, offset, len, skb->data,
				    skb_headlen(skb), buffer);
}
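
/* Usage sketch (illustrative): copy a protocol header out of a possibly
 * non-linear skb into a stack buffer; "thoff" is a hypothetical transport
 * header offset:
 *
 *	struct tcphdr _tcph;
 *	const struct tcphdr *th;
 *
 *	th = skb_header_pointer(skb, thoff, sizeof(_tcph), &_tcph);
 *	if (!th)
 *		return;
 *
 * A NULL return means the requested bytes were not present in the packet.
 */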

/**
 * skb_needs_linearize - check if we need to linearize a given skb
 *			 depending on the given device features.
 * @skb: socket buffer to check
 * @features: net device features
 *
 * Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAG_LIST, or
 *	2. skb is fragmented and the device does not support SG.
 */
static inline bool skb_needs_linearize(struct sk_buff *skb,
				       netdev_features_t features)
{
	return skb_is_nonlinear(skb) &&
	       ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
		(skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
}
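
/* Usage sketch (illustrative): a transmit path can fall back to a linear
 * copy when the device lacks scatter-gather or frag-list support ("drop"
 * is a hypothetical error label):
 *
 *	if (skb_needs_linearize(skb, dev->features) &&
 *	    __skb_linearize(skb))
 *		goto drop;
 */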

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}

void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct __kernel_old_timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and stores
 *	it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct __kernel_old_timeval *stamp)
{
	*stamp = ns_to_kernel_old_timeval(skb->tstamp);
}

static inline void skb_get_new_timestamp(const struct sk_buff *skb,
					 struct __kernel_sock_timeval *stamp)
{
	struct timespec64 ts = ktime_to_timespec64(skb->tstamp);

	stamp->tv_sec = ts.tv_sec;
	stamp->tv_usec = ts.tv_nsec / 1000;
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
				       struct timespec *stamp)
{
	*stamp = ktime_to_timespec(skb->tstamp);
}

static inline void skb_get_new_timestampns(const struct sk_buff *skb,
					   struct __kernel_timespec *stamp)
{
	struct timespec64 ts = ktime_to_timespec64(skb->tstamp);

	stamp->tv_sec = ts.tv_sec;
	stamp->tv_nsec = ts.tv_nsec;
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return 0;
}

static inline u8 skb_metadata_len(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->meta_len;
}

static inline void *skb_metadata_end(const struct sk_buff *skb)
{
	return skb_mac_header(skb);
}

static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
					  const struct sk_buff *skb_b,
					  u8 meta_len)
{
	const void *a = skb_metadata_end(skb_a);
	const void *b = skb_metadata_end(skb_b);

	/* Using more efficient variant than plain call to memcmp(). */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	u64 diffs = 0;

	switch (meta_len) {
#define __it(x, op) (x -= sizeof(u##op))
#define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
	case 32: diffs |= __it_diff(a, b, 64);
		 /* fall through */
	case 24: diffs |= __it_diff(a, b, 64);
		 /* fall through */
	case 16: diffs |= __it_diff(a, b, 64);
		 /* fall through */
	case  8: diffs |= __it_diff(a, b, 64);
		break;
	case 28: diffs |= __it_diff(a, b, 64);
		 /* fall through */
	case 20: diffs |= __it_diff(a, b, 64);
		 /* fall through */
	case 12: diffs |= __it_diff(a, b, 64);
		 /* fall through */
	case  4: diffs |= __it_diff(a, b, 32);
		break;
	}
	return diffs;
#else
	return memcmp(a - meta_len, b - meta_len, meta_len);
#endif
}

static inline bool skb_metadata_differs(const struct sk_buff *skb_a,
					const struct sk_buff *skb_b)
{
	u8 len_a = skb_metadata_len(skb_a);
	u8 len_b = skb_metadata_len(skb_b);

	if (!(len_a | len_b))
		return false;

	return len_a != len_b ?
	       true : __skb_metadata_differs(skb_a, skb_b, len_a);
}

static inline void skb_metadata_set(struct sk_buff *skb, u8 meta_len)
{
	skb_shinfo(skb)->meta_len = meta_len;
}

static inline void skb_metadata_clear(struct sk_buff *skb)
{
	skb_metadata_set(skb, 0);
}

struct sk_buff *skb_clone_sk(struct sk_buff *skb);

#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING

void skb_clone_tx_timestamp(struct sk_buff *skb);
bool skb_defer_rx_timestamp(struct sk_buff *skb);

#else

static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	return false;
}

#endif /* CONFIG_NETWORK_PHY_TIMESTAMPING */

/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb back to the stack with a
 * timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps
 *
 */
void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps);

void __skb_tstamp_tx(struct sk_buff *orig_skb,
		     struct skb_shared_hwtstamps *hwtstamps,
		     struct sock *sk, int tstype);

/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb:	the original outgoing packet
 * @hwtstamps:	hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket.  Errors are silently ignored.
 */
void skb_tstamp_tx(struct sk_buff *orig_skb,
		   struct skb_shared_hwtstamps *hwtstamps);

/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC Drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * Specifically, one should make absolutely sure that this function is
 * called before TX completion of this packet can trigger.  Otherwise
 * the packet could potentially already be freed.
 *
 * @skb: A socket buffer.
 */
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	skb_clone_tx_timestamp(skb);
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
		skb_tstamp_tx(skb, NULL);
}

/**
 * skb_complete_wifi_ack - deliver skb with wifi status
 *
 * @skb: the original outgoing packet
 * @acked: ack status
 *
 */
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
__sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
		skb->csum_valid ||
		(skb->ip_summed == CHECKSUM_PARTIAL &&
		 skb_checksum_start_offset(skb) >= 0));
}

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the checksum stored in the hardware provided checksum field.  This
 *	function does not need to be called on packets that do not need
 *	checksumming.
 *
 *	Returns 0 if the checksum is already known to be valid or did not
 *	need checking, otherwise the result of checksum-complete validation
 *	(non-zero means the checksum is invalid).
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}

static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level == 0)
			skb->ip_summed = CHECKSUM_NONE;
		else
			skb->csum_level--;
	}
}

static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
			skb->csum_level++;
	} else if (skb->ip_summed == CHECKSUM_NONE) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = 0;
	}
}

/* Check if we need to perform checksum complete validation.
 *
 * Returns true if checksum complete is needed, false otherwise
 * (either checksum is unnecessary or zero checksum is allowed).
 */
static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
						  bool zero_okay,
						  __sum16 check)
{
	if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
		skb->csum_valid = 1;
		__skb_decr_checksum_unnecessary(skb);
		return false;
	}

	return true;
}

/* For small packets <= CHECKSUM_BREAK perform checksum complete directly
 * in checksum_init.
 */
#define CHECKSUM_BREAK 76

/* Unset checksum-complete
 *
 * Unset checksum complete can be done when packet is being modified
 * (uncompressed for instance) and checksum-complete value is
 * invalidated.
 */
static inline void skb_checksum_complete_unset(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/* Validate (init) checksum based on checksum complete.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete. In the latter
 *	case the ip_summed will not be CHECKSUM_UNNECESSARY and the pseudo
 *	checksum is stored in skb->csum for use in __skb_checksum_complete
 *   non-zero: value of invalid checksum
 *
 */
static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
						       bool complete,
						       __wsum psum)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!csum_fold(csum_add(psum, skb->csum))) {
			skb->csum_valid = 1;
			return 0;
		}
	}

	skb->csum = psum;

	if (complete || skb->len <= CHECKSUM_BREAK) {
		__sum16 csum;

		csum = __skb_checksum_complete(skb);
		skb->csum_valid = !csum;
		return csum;
	}

	return 0;
}

static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
{
	return 0;
}

/* Perform checksum validate (init). Note that this is a macro since we only
 * want to calculate the pseudo header which is an input function if necessary.
 * First we try to validate without any computation (checksum unnecessary) and
 * then calculate based on checksum complete calling the function to compute
 * pseudo header.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete
 *   non-zero: value of invalid checksum
 */
#define __skb_checksum_validate(skb, proto, complete,			\
				zero_okay, check, compute_pseudo)	\
({									\
	__sum16 __ret = 0;						\
	skb->csum_valid = 0;						\
	if (__skb_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_checksum_validate_complete(skb,		\
				complete, compute_pseudo(skb, proto));	\
	__ret;								\
})

#define skb_checksum_init(skb, proto, compute_pseudo)			\
	__skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)

#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \
	__skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)

#define skb_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)

#define skb_checksum_validate_zero_check(skb, proto, check,		\
					 compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)

#define skb_checksum_simple_validate(skb)				\
	__skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
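
/* Usage sketch (illustrative): a protocol's checksum-init routine supplies
 * its pseudo-header helper; "uh" is a hypothetical UDP header pointer and
 * "csum_error" a hypothetical error label, in the style of udp4_csum_init():
 *
 *	if (skb_checksum_init_zero_check(skb, IPPROTO_UDP, uh->check,
 *					 inet_compute_pseudo))
 *		goto csum_error;
 */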

static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
{
	return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid);
}

static inline void __skb_checksum_convert(struct sk_buff *skb,
					  __sum16 check, __wsum pseudo)
{
	skb->csum = ~pseudo;
	skb->ip_summed = CHECKSUM_COMPLETE;
}

#define skb_checksum_try_convert(skb, proto, check, compute_pseudo)	\
do {									\
	if (__skb_checksum_convert_check(skb))				\
		__skb_checksum_convert(skb, check,			\
				       compute_pseudo(skb, proto));	\
} while (0)

static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
					      u16 start, u16 offset)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
	skb->csum_offset = offset - start;
}

/* Update skb and packet to reflect the remote checksum offload operation.
 * When called, ptr indicates the starting point for skb->csum when
 * ip_summed is CHECKSUM_COMPLETE. If we need to create checksum complete
 * here, skb_postpull_rcsum is done so skb->csum start is ptr.
 */
static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
				       int start, int offset, bool nopartial)
{
	__wsum delta;

	if (!nopartial) {
		skb_remcsum_adjust_partial(skb, ptr, start, offset);
		return;
	}

	if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
		__skb_checksum_complete(skb);
		skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
	}

	delta = remcsum_adjust(ptr, skb->csum, start, offset);

	/* Adjust skb->csum since we changed the packet */
	skb->csum = csum_add(skb->csum, delta);
}

static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	return (void *)(skb->_nfct & SKB_NFCT_PTRMASK);
#else
	return NULL;
#endif
}

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#endif

#ifdef CONFIG_SKB_EXTENSIONS
enum skb_ext_id {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	SKB_EXT_BRIDGE_NF,
#endif
#ifdef CONFIG_XFRM
	SKB_EXT_SEC_PATH,
#endif
	SKB_EXT_NUM, /* must be last */
};

/**
 *	struct skb_ext - sk_buff extensions
 *	@refcnt: 1 on allocation, deallocated on 0
 *	@offset: offset to add to @data to obtain extension address
 *	@chunks: size currently allocated, stored in 8 byte units
 *	@data: start of extension data, variable sized
 *
 *	Note: offsets/chunks are stored in 8 byte units, matching the
 *	      << 3 shift used by skb_ext_find().
 */
struct skb_ext {
	refcount_t refcnt;
	u8 offset[SKB_EXT_NUM];
	u8 chunks;
	char data[0] __aligned(8);
};

void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
void __skb_ext_put(struct skb_ext *ext);

static inline void skb_ext_put(struct sk_buff *skb)
{
	if (skb->active_extensions)
		__skb_ext_put(skb->extensions);
}

static inline void __skb_ext_copy(struct sk_buff *dst,
				  const struct sk_buff *src)
{
	dst->active_extensions = src->active_extensions;

	if (src->active_extensions) {
		struct skb_ext *ext = src->extensions;

		refcount_inc(&ext->refcnt);
		dst->extensions = ext;
	}
}

static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src)
{
	skb_ext_put(dst);
	__skb_ext_copy(dst, src);
}

static inline bool __skb_ext_exist(const struct skb_ext *ext, enum skb_ext_id i)
{
	return !!ext->offset[i];
}

static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id)
{
	return skb->active_extensions & (1 << id);
}

static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
{
	if (skb_ext_exist(skb, id))
		__skb_ext_del(skb, id);
}

static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id)
{
	if (skb_ext_exist(skb, id)) {
		struct skb_ext *ext = skb->extensions;

		return (void *)ext + (ext->offset[id] << 3);
	}

	return NULL;
}
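
/* Usage sketch (illustrative): attaching and looking up an extension,
 * mirroring how the xfrm secpath is handled when CONFIG_XFRM is set:
 *
 *	struct sec_path *sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);
 *
 *	if (!sp)
 *		return -ENOMEM;
 *	...
 *	sp = skb_ext_find(skb, SKB_EXT_SEC_PATH);
 */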
#else
static inline void skb_ext_put(struct sk_buff *skb) {}
static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
#endif /* CONFIG_SKB_EXTENSIONS */

static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb_nfct(skb));
	skb->_nfct = 0;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	skb_ext_del(skb, SKB_EXT_BRIDGE_NF);
#endif
}

static inline void nf_reset_trace(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
	skb->nf_trace = 0;
#endif
}

static inline void ipvs_reset(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IP_VS)
	skb->ipvs_property = 0;
#endif
}

/* Note: this does not drop any conntrack reference that @dst may already
 * hold; nf_copy() below does that before delegating here.
 */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
			     bool copy)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->_nfct = src->_nfct;
	nf_conntrack_get(skb_nfct(src));
#endif
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
	if (copy)
		dst->nf_trace = src->nf_trace;
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb_nfct(dst));
#endif
	__nf_copy(dst, src, true);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline int secpath_exists(const struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
	return skb_ext_exist(skb, SKB_EXT_SEC_PATH);
#else
	return 0;
#endif
}

static inline bool skb_irq_freeable(const struct sk_buff *skb)
{
	return !skb->destructor &&
	       !secpath_exists(skb) &&
	       !skb_nfct(skb) &&
	       !skb->_skb_refdst &&
	       !skb_has_frag_list(skb);
}

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}
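
/* Usage sketch (illustrative): queue_mapping stores rx_queue + 1 so that
 * zero means "not recorded"; "ring" is a hypothetical driver structure:
 *
 *	skb_record_rx_queue(skb, ring->index);
 *	...
 *	if (skb_rx_queue_recorded(skb))
 *		queue = skb_get_rx_queue(skb);
 */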

static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val)
{
	skb->dst_pending_confirm = val;
}

static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
{
	return skb->dst_pending_confirm != 0;
}

static inline struct sec_path *skb_sec_path(const struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
	return skb_ext_find(skb, SKB_EXT_SEC_PATH);
#else
	return NULL;
#endif
}

/* Keeps track of mac header offset relative to skb->head.
 * It is useful for TSO of Tunneling protocol. e.g. GRE.
 * For non-tunnel skb it points to skb_mac_header() and for
 * tunnel skb it points to outer mac header.
 * Keeps track of level of encapsulation of network headers.
 */
struct skb_gso_cb {
	union {
		int mac_offset;
		int data_offset;
	};
	int encap_level;
	__wsum csum;
	__u16 csum_start;
};
#define SKB_SGO_CB_OFFSET	32
#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))

static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
{
	return (skb_mac_header(inner_skb) - inner_skb->head) -
		SKB_GSO_CB(inner_skb)->mac_offset;
}

static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
{
	int new_headroom, headroom;
	int ret;

	headroom = skb_headroom(skb);
	ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
	if (ret)
		return ret;

	new_headroom = skb_headroom(skb);
	SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
	return 0;
}

static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
{
	/* Do not update partial checksums if remote checksum is enabled. */
	if (skb->remcsum_offload)
		return;

	SKB_GSO_CB(skb)->csum = res;
	SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
}

/* Compute the checksum for a gso segment. First compute the checksum value
 * from the start of the transport header to SKB_GSO_CB(skb)->csum_start, then
 * fold in the partial checksum stored in SKB_GSO_CB(skb)->csum (i.e. the
 * checksum from csum_start to the end of the packet, per RFC 1624).
 * The GSO cb is then updated so that @res becomes the stored partial
 * checksum, with csum_start pointing at the transport header.
 */
static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
{
	unsigned char *csum_start = skb_transport_header(skb);
	int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
	__wsum partial = SKB_GSO_CB(skb)->csum;

	SKB_GSO_CB(skb)->csum = res;
	SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;

	return csum_fold(csum_partial(csum_start, plen, partial));
}

static inline bool skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}
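
/* Usage sketch (illustrative): callers test skb_is_gso() before touching
 * other gso fields or segmenting ("drop" is a hypothetical error label):
 *
 *	if (skb_is_gso(skb)) {
 *		struct sk_buff *segs = skb_segment(skb, features);
 *
 *		if (IS_ERR(segs))
 *			goto drop;
 *	}
 */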

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
}

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
}

static inline void skb_gso_reset(struct sk_buff *skb)
{
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_segs = 0;
	skb_shinfo(skb)->gso_type = 0;
}

static inline void skb_increase_gso_size(struct skb_shared_info *shinfo,
					 u16 increment)
{
	if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
		return;
	shinfo->gso_size += increment;
}

static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo,
					 u16 decrement)
{
	if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
		return;
	shinfo->gso_size -= decrement;
}

void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set.
	 */
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one.  Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper, to document places where we make this assertion.
 */
static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
				     unsigned int transport_len,
				     __sum16(*skb_chkf)(struct sk_buff *skb));

/**
 * skb_head_is_locked - Determine if the skb->head is locked down
 * @skb: skb to check
 *
 * The head on skbs build around a head frag can be removed if they are
 * not cloned. This function returns true if the skb head is locked down
 * due to either being allocated via kmalloc, or by being a clone with
 * multiple references to the head.
 */
static inline bool skb_head_is_locked(const struct sk_buff *skb)
{
	return !skb->head_frag || skb_cloned(skb);
}

/* Local Checksum Offload.
 * Compute outer checksum based on the assumption that the
 * inner checksum will be offloaded later.
 * See Documentation/networking/checksum-offloads.rst for
 * explanation of how this works.
 * Fill in outer checksum adjustment (e.g. with sum of outer
 * pseudo-header) before calling.
 */
static inline __wsum lco_csum(struct sk_buff *skb)
{
	unsigned char *csum_start = skb_checksum_start(skb);
	unsigned char *l4_hdr = skb_transport_header(skb);
	__wsum partial;

	/* Start with complement of inner checksum adjustment */
	partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
						    skb->csum_offset));

	/* Add in checksum of our headers (incl. outer checksum
	 * adjustment filled in by caller) and return result.
	 */
	return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
}

#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */