#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/rbtree.h>
#include <linux/socket.h>
#include <linux/refcount.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <net/flow_dissector.h>
#include <linux/splice.h>
#include <linux/in6.h>
#include <linux/if_packet.h>
#include <net/flow.h>

#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

#define SKB_MAX_CSUM_LEVEL	3

#define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
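
/*
 * Example (added for illustration, not part of the original header): a
 * minimal sketch of acting on skb->ip_summed in a receive path.
 * rx_csum_ok() is a hypothetical helper; real protocol code uses the
 * skb_checksum_validate() family instead.
 *
 *	static bool rx_csum_ok(struct sk_buff *skb, __wsum pseudo_csum)
 *	{
 *		switch (skb->ip_summed) {
 *		case CHECKSUM_UNNECESSARY:
 *			return true;	// device already validated it
 *		case CHECKSUM_COMPLETE:
 *			// device supplied the csum of the whole packet
 *			return !csum_fold(csum_add(pseudo_csum, skb->csum));
 *		default:
 *			return false;	// CHECKSUM_NONE: verify in software
 *		}
 *	}
 */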

struct net_device;
struct scatterlist;
struct pipe_inode_info;
struct iov_iter;
struct napi_struct;
struct bpf_prog;
union bpf_attr;
struct skb_ext;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
	enum {
		BRNF_PROTO_UNCHANGED,
		BRNF_PROTO_8021Q,
		BRNF_PROTO_PPPOE
	} orig_proto:8;
	u8			pkt_otherhost:1;
	u8			in_prerouting:1;
	u8			bridged_dnat:1;
	__u16			frag_max_size;
	struct net_device	*physindev;

	struct net_device	*physoutdev;
	union {
		__be32		ipv4_daddr;
		struct in6_addr	ipv6_daddr;

		char		neigh_header[8];
	};
};
#endif

struct sk_buff_head {
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif
extern int sysctl_max_skb_frags;

#define GSO_BY_FRAGS	0xFFFF

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct {
		struct page *p;
	} page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
	__u32 size;
#else
	__u16 page_offset;
	__u16 size;
#endif
};

static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->size;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->size = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->size += delta;
}

static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->size -= delta;
}

static inline bool skb_frag_must_loop(struct page *p)
{
#if defined(CONFIG_HIGHMEM)
	if (PageHighMem(p))
		return true;
#endif
	return false;
}

#define skb_frag_foreach_page(f, f_off, f_len, p, p_off, p_len, copied)	\
	for (p = skb_frag_page(f) + ((f_off) >> PAGE_SHIFT),		\
	     p_off = (f_off) & (PAGE_SIZE - 1),				\
	     p_len = skb_frag_must_loop(p) ?				\
	     min_t(u32, f_len, PAGE_SIZE - p_off) : f_len,		\
	     copied = 0;						\
	     copied < f_len;						\
	     copied += p_len, p++, p_off = 0,				\
	     p_len = min_t(u32, f_len - copied, PAGE_SIZE))		\

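/*
 * Example (added for illustration, not part of the original header): copying
 * a fragment's bytes with skb_frag_foreach_page(). On CONFIG_HIGHMEM kernels
 * a fragment may span pages that each need a temporary mapping, which is the
 * case skb_frag_must_loop() detects. frag_copy_to_buf() is hypothetical.
 *
 *	static void frag_copy_to_buf(void *dst, const skb_frag_t *frag)
 *	{
 *		struct page *p;
 *		u32 p_off, p_len, copied;
 *
 *		skb_frag_foreach_page(frag, frag->page_offset,
 *				      skb_frag_size(frag),
 *				      p, p_off, p_len, copied) {
 *			void *vaddr = kmap_atomic(p);
 *
 *			memcpy(dst + copied, vaddr + p_off, p_len);
 *			kunmap_atomic(vaddr);
 *		}
 *	}
 */
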
#define HAVE_HW_TIME_STAMP

struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
};

enum {
	SKBTX_HW_TSTAMP = 1 << 0,
	SKBTX_SW_TSTAMP = 1 << 1,
	SKBTX_IN_PROGRESS = 1 << 2,
	SKBTX_DEV_ZEROCOPY = 1 << 3,
	SKBTX_WIFI_STATUS = 1 << 4,
	SKBTX_SHARED_FRAG = 1 << 5,
	SKBTX_SCHED_TSTAMP = 1 << 6,
};

#define SKBTX_ZEROCOPY_FRAG	(SKBTX_DEV_ZEROCOPY | SKBTX_SHARED_FRAG)
#define SKBTX_ANY_SW_TSTAMP	(SKBTX_SW_TSTAMP    | \
				 SKBTX_SCHED_TSTAMP)
#define SKBTX_ANY_TSTAMP	(SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)

struct ubuf_info {
	void (*callback)(struct ubuf_info *, bool zerocopy_success);
	union {
		struct {
			unsigned long desc;
			void *ctx;
		};
		struct {
			u32 id;
			u16 len;
			u16 zerocopy:1;
			u32 bytelen;
		};
	};
	refcount_t refcnt;

	struct mmpin {
		struct user_struct *user;
		unsigned int num_pg;
	} mmp;
};

#define skb_uarg(SKB)	((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))

int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
void mm_unaccount_pinned_pages(struct mmpin *mmp);

struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size);
struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
					struct ubuf_info *uarg);

static inline void sock_zerocopy_get(struct ubuf_info *uarg)
{
	refcount_inc(&uarg->refcnt);
}

void sock_zerocopy_put(struct ubuf_info *uarg);
void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);

void sock_zerocopy_callback(struct ubuf_info *uarg, bool success);

int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len);
int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
			     struct msghdr *msg, int len,
			     struct ubuf_info *uarg);

struct skb_shared_info {
	__u8		__unused;
	__u8		meta_len;
	__u8		nr_frags;
	__u8		tx_flags;
	unsigned short	gso_size;
	unsigned short	gso_segs;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	unsigned int	gso_type;
	u32		tskey;

	atomic_t	dataref;

	void		*destructor_arg;

	skb_frag_t	frags[MAX_SKB_FRAGS];
};

#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)

enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_DODGY = 1 << 1,
	SKB_GSO_TCP_ECN = 1 << 2,
	SKB_GSO_TCP_FIXEDID = 1 << 3,
	SKB_GSO_TCPV6 = 1 << 4,
	SKB_GSO_FCOE = 1 << 5,
	SKB_GSO_GRE = 1 << 6,
	SKB_GSO_GRE_CSUM = 1 << 7,
	SKB_GSO_IPXIP4 = 1 << 8,
	SKB_GSO_IPXIP6 = 1 << 9,
	SKB_GSO_UDP_TUNNEL = 1 << 10,
	SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
	SKB_GSO_PARTIAL = 1 << 12,
	SKB_GSO_TUNNEL_REMCSUM = 1 << 13,
	SKB_GSO_SCTP = 1 << 14,
	SKB_GSO_ESP = 1 << 15,
	SKB_GSO_UDP = 1 << 16,
	SKB_GSO_UDP_L4 = 1 << 17,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

struct sk_buff {
	union {
		struct {
			struct sk_buff		*next;
			struct sk_buff		*prev;

			union {
				struct net_device	*dev;
				unsigned long		dev_scratch;
			};
		};
		struct rb_node		rbnode;
		struct list_head	list;
	};

	union {
		struct sock		*sk;
		int			ip_defrag_offset;
	};

	union {
		ktime_t		tstamp;
		u64		skb_mstamp_ns;
	};

	char			cb[48] __aligned(8);

	union {
		struct {
			unsigned long	_skb_refdst;
			void		(*destructor)(struct sk_buff *skb);
		};
		struct list_head	tcp_tsorted_anchor;
	};

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	unsigned long		_nfct;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;

	__u16			queue_mapping;

#ifdef __BIG_ENDIAN_BITFIELD
#define CLONED_MASK	(1 << 7)
#else
#define CLONED_MASK	1
#endif
#define CLONED_OFFSET()		offsetof(struct sk_buff, __cloned_offset)

	__u8			__cloned_offset[0];
	__u8			cloned:1,
				nohdr:1,
				fclone:2,
				peeked:1,
				head_frag:1,
				pfmemalloc:1;
#ifdef CONFIG_SKB_EXTENSIONS
	__u8			active_extensions;
#endif

	__u32			headers_start[0];

#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_TYPE_MAX	(7 << 5)
#else
#define PKT_TYPE_MAX	7
#endif
#define PKT_TYPE_OFFSET()	offsetof(struct sk_buff, __pkt_type_offset)

	__u8			__pkt_type_offset[0];
	__u8			pkt_type:3;
	__u8			ignore_df:1;
	__u8			nf_trace:1;
	__u8			ip_summed:2;
	__u8			ooo_okay:1;

	__u8			l4_hash:1;
	__u8			sw_hash:1;
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;
	__u8			no_fcs:1;

	__u8			encapsulation:1;
	__u8			encap_hdr_csum:1;
	__u8			csum_valid:1;

#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_VLAN_PRESENT_BIT	7
#else
#define PKT_VLAN_PRESENT_BIT	0
#endif
#define PKT_VLAN_PRESENT_OFFSET()	offsetof(struct sk_buff, __pkt_vlan_present_offset)
	__u8			__pkt_vlan_present_offset[0];
	__u8			vlan_present:1;
	__u8			csum_complete_sw:1;
	__u8			csum_level:2;
	__u8			csum_not_inet:1;
	__u8			dst_pending_confirm:1;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif

	__u8			ipvs_property:1;
	__u8			inner_protocol_type:1;
	__u8			remcsum_offload:1;
#ifdef CONFIG_NET_SWITCHDEV
	__u8			offload_fwd_mark:1;
	__u8			offload_l3_fwd_mark:1;
#endif
#ifdef CONFIG_NET_CLS_ACT
	__u8			tc_skip_classify:1;
	__u8			tc_at_ingress:1;
	__u8			tc_redirected:1;
	__u8			tc_from_ingress:1;
#endif
#ifdef CONFIG_TLS_DEVICE
	__u8			decrypted:1;
#endif

#ifdef CONFIG_NET_SCHED
	__u16			tc_index;
#endif

	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	int			skb_iif;
	__u32			hash;
	__be16			vlan_proto;
	__u16			vlan_tci;
#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
	union {
		unsigned int	napi_id;
		unsigned int	sender_cpu;
	};
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif

	union {
		__u32		mark;
		__u32		reserved_tailroom;
	};

	union {
		__be16		inner_protocol;
		__u8		inner_ipproto;
	};

	__u16			inner_transport_header;
	__u16			inner_network_header;
	__u16			inner_mac_header;

	__be16			protocol;
	__u16			transport_header;
	__u16			network_header;
	__u16			mac_header;

	__u32			headers_end[0];

	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	refcount_t		users;

#ifdef CONFIG_SKB_EXTENSIONS
	struct skb_ext		*extensions;
#endif
};

#ifdef __KERNEL__

#define SKB_ALLOC_FCLONE	0x01
#define SKB_ALLOC_RX		0x02
#define SKB_ALLOC_NAPI		0x04

static inline bool skb_pfmemalloc(const struct sk_buff *skb)
{
	return unlikely(skb->pfmemalloc);
}

#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

#define SKB_NFCT_PTRMASK	~(7UL)

static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}

static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
}

static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

static inline bool skb_pkt_type_ok(u32 ptype)
{
	return ptype <= PACKET_OTHERHOST;
}

static inline unsigned int skb_napi_id(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	return skb->napi_id;
#else
	return 0;
#endif
}

static inline bool skb_unref(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return false;
	if (likely(refcount_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!refcount_dec_and_test(&skb->users)))
		return false;

	return true;
}
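
/*
 * Example (added for illustration): skb_unref() is the shared prologue of
 * the freeing paths declared below; kfree_skb() in net/core/skbuff.c is
 * roughly:
 *
 *	if (!skb_unref(skb))
 *		return;
 *	__kfree_skb(skb);
 */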

void skb_release_head_state(struct sk_buff *skb);
void kfree_skb(struct sk_buff *skb);
void kfree_skb_list(struct sk_buff *segs);
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
void skb_tx_error(struct sk_buff *skb);
void consume_skb(struct sk_buff *skb);
void __consume_stateless_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;

void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
		      bool *fragstolen, int *delta_truesize);

struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
			    int node);
struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size);

static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}
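
/*
 * Example (added for illustration): the canonical allocate-and-fill pattern.
 * HDR_LEN, PAYLOAD_LEN and payload are placeholders.
 *
 *	struct sk_buff *skb = alloc_skb(HDR_LEN + PAYLOAD_LEN, GFP_KERNEL);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, HDR_LEN);		// headroom for skb_push() later
 *	skb_put_data(skb, payload, PAYLOAD_LEN);	// append payload
 */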

struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
				     unsigned long data_len,
				     int max_page_order,
				     int *errcode,
				     gfp_t gfp_mask);
struct sk_buff *alloc_skb_for_msg(struct sk_buff *first);

struct sk_buff_fclones {
	struct sk_buff	skb1;

	struct sk_buff	skb2;

	refcount_t	fclone_ref;
};

static inline bool skb_fclone_busy(const struct sock *sk,
				   const struct sk_buff *skb)
{
	const struct sk_buff_fclones *fclones;

	fclones = container_of(skb, struct sk_buff_fclones, skb1);

	return skb->fclone == SKB_FCLONE_ORIG &&
	       refcount_read(&fclones->fclone_ref) > 1 &&
	       fclones->skb2.sk == sk;
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
}

struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
void skb_headers_offset_update(struct sk_buff *skb, int off);
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
void skb_copy_header(struct sk_buff *new, const struct sk_buff *old);
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone);
static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
					  gfp_t gfp_mask)
{
	return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
}

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
				     unsigned int headroom);
struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
				int newtailroom, gfp_t priority);
int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
				     int offset, int len);
int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
			      int offset, int len);
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);

static inline int skb_pad(struct sk_buff *skb, int pad)
{
	return __skb_pad(skb, pad, true);
}
#define dev_kfree_skb(a)	consume_skb(a)

int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
			 int offset, size_t size);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st);
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st);
void skb_abort_seq_read(struct skb_seq_state *st);

unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config);

enum pkt_hash_types {
	PKT_HASH_TYPE_NONE,
	PKT_HASH_TYPE_L2,
	PKT_HASH_TYPE_L3,
	PKT_HASH_TYPE_L4,
};

static inline void skb_clear_hash(struct sk_buff *skb)
{
	skb->hash = 0;
	skb->sw_hash = 0;
	skb->l4_hash = 0;
}

static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
{
	if (!skb->l4_hash)
		skb_clear_hash(skb);
}

static inline void
__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
{
	skb->l4_hash = is_l4;
	skb->sw_hash = is_sw;
	skb->hash = hash;
}

static inline void
skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
{
	__skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
}

static inline void
__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
{
	__skb_set_hash(skb, hash, true, is_l4);
}

void __skb_get_hash(struct sk_buff *skb);
u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
u32 skb_get_poff(const struct sk_buff *skb);
u32 __skb_get_poff(const struct sk_buff *skb, void *data,
		   const struct flow_keys_basic *keys, int hlen);
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
			    void *data, int hlen_proto);

static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
					int thoff, u8 ip_proto)
{
	return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
}

void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count);

#ifdef CONFIG_NET
int skb_flow_dissector_prog_query(const union bpf_attr *attr,
				  union bpf_attr __user *uattr);
int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
				       struct bpf_prog *prog);

int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr);
#else
static inline int skb_flow_dissector_prog_query(const union bpf_attr *attr,
						union bpf_attr __user *uattr)
{
	return -EOPNOTSUPP;
}

static inline int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
						     struct bpf_prog *prog)
{
	return -EOPNOTSUPP;
}

static inline int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
{
	return -EOPNOTSUPP;
}
#endif

struct bpf_flow_dissector;
bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
		      __be16 proto, int nhoff, int hlen);

bool __skb_flow_dissect(const struct net *net,
			const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container,
			void *data, __be16 proto, int nhoff, int hlen,
			unsigned int flags);

static inline bool skb_flow_dissect(const struct sk_buff *skb,
				    struct flow_dissector *flow_dissector,
				    void *target_container, unsigned int flags)
{
	return __skb_flow_dissect(NULL, skb, flow_dissector,
				  target_container, NULL, 0, 0, 0, flags);
}

static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
					      struct flow_keys *flow,
					      unsigned int flags)
{
	memset(flow, 0, sizeof(*flow));
	return __skb_flow_dissect(NULL, skb, &flow_keys_dissector,
				  flow, NULL, 0, 0, 0, flags);
}

static inline bool
skb_flow_dissect_flow_keys_basic(const struct net *net,
				 const struct sk_buff *skb,
				 struct flow_keys_basic *flow, void *data,
				 __be16 proto, int nhoff, int hlen,
				 unsigned int flags)
{
	memset(flow, 0, sizeof(*flow));
	return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow,
				  data, proto, nhoff, hlen, flags);
}

void skb_flow_dissect_meta(const struct sk_buff *skb,
			   struct flow_dissector *flow_dissector,
			   void *target_container);

void
skb_flow_dissect_ct(const struct sk_buff *skb,
		    struct flow_dissector *flow_dissector,
		    void *target_container,
		    u16 *ctinfo_map,
		    size_t mapsize);
void
skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
			     struct flow_dissector *flow_dissector,
			     void *target_container);

static inline __u32 skb_get_hash(struct sk_buff *skb)
{
	if (!skb->l4_hash && !skb->sw_hash)
		__skb_get_hash(skb);

	return skb->hash;
}
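
/*
 * Example (added for illustration): skb_get_hash() caches the flow hash in
 * skb->hash, so repeated calls are cheap. A common use is spreading flows
 * across queues, e.g. (modulo used for brevity):
 *
 *	u16 queue = (u16)(skb_get_hash(skb) % dev->real_num_tx_queues);
 */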

static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
{
	if (!skb->l4_hash && !skb->sw_hash) {
		struct flow_keys keys;
		__u32 hash = __get_hash_from_flowi6(fl6, &keys);

		__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
	}

	return skb->hash;
}

__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);

static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
	return skb->hash;
}

static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
{
	to->hash = from->hash;
	to->sw_hash = from->sw_hash;
	to->l4_hash = from->l4_hash;
}

static inline void skb_copy_decrypted(struct sk_buff *to,
				      const struct sk_buff *from)
{
#ifdef CONFIG_TLS_DEVICE
	to->decrypted = from->decrypted;
#endif
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end - skb->head;
}
#endif

#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
{
	bool is_zcopy = skb && skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY;

	return is_zcopy ? skb_uarg(skb) : NULL;
}

static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
				 bool *have_ref)
{
	if (skb && uarg && !skb_zcopy(skb)) {
		if (unlikely(have_ref && *have_ref))
			*have_ref = false;
		else
			sock_zerocopy_get(uarg);
		skb_shinfo(skb)->destructor_arg = uarg;
		skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
	}
}

static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
{
	skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
	skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
}

static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
{
	return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL;
}

static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
{
	return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
}

static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
{
	struct ubuf_info *uarg = skb_zcopy(skb);

	if (uarg) {
		if (skb_zcopy_is_nouarg(skb)) {
			/* no notification callback to invoke */
		} else if (uarg->callback == sock_zerocopy_callback) {
			uarg->zerocopy = uarg->zerocopy && zerocopy;
			sock_zerocopy_put(uarg);
		} else {
			uarg->callback(uarg, zerocopy);
		}

		skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
	}
}

static inline void skb_zcopy_abort(struct sk_buff *skb)
{
	struct ubuf_info *uarg = skb_zcopy(skb);

	if (uarg) {
		sock_zerocopy_put_abort(uarg, false);
		skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
	}
}

static inline void skb_mark_not_on_list(struct sk_buff *skb)
{
	skb->next = NULL;
}

static inline void skb_list_del_init(struct sk_buff *skb)
{
	__list_del_entry(&skb->list);
	skb_mark_not_on_list(skb);
}

static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (const struct sk_buff *) list;
}

static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (const struct sk_buff *) list;
}

static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (const struct sk_buff *) list;
}

static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}

static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	refcount_inc(&skb->users);
	return skb;
}

static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_header_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}
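
/*
 * Example (added for illustration): dataref packs two counters, which
 * skb_header_cloned() above decodes. The low SKB_DATAREF_SHIFT bits count
 * all references to the data; the high bits count references that no longer
 * need the header (taken via __skb_header_release()):
 *
 *	int dataref = atomic_read(&skb_shinfo(skb)->dataref);
 *	int total_refs   = dataref & SKB_DATAREF_MASK;
 *	int payload_only = dataref >> SKB_DATAREF_SHIFT;
 *
 * The header is effectively unshared when total_refs - payload_only == 1.
 */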

static inline void __skb_header_release(struct sk_buff *skb)
{
	skb->nohdr = 1;
	atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
}

static inline int skb_shared(const struct sk_buff *skb)
{
	return refcount_read(&skb->users) != 1;
}

static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);

		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
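
/*
 * Example (added for illustration): typical use in a receive handler that
 * may modify the skb. On clone failure the skb has already been freed:
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return NET_RX_DROP;
 *	// users == 1 now, though the data may still be cloned
 */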

static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);

		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->next;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_)
{
	return list_->next;
}

static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
					    const struct sk_buff_head *list_)
{
	struct sk_buff *next = skb->next;

	if (next == (struct sk_buff *)list_)
		next = NULL;
	return next;
}

static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->prev;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
					     struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev = prev->next = newsk;
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);

static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);

void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next = skb->next;
	prev = skb->prev;
	skb->next = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
struct sk_buff *skb_dequeue(struct sk_buff_head *list);

static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
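
/*
 * Example (added for illustration): basic producer/consumer use of an skb
 * queue. The locked variants take list->lock internally; the __-prefixed
 * ones assume the caller serializes access. process() is hypothetical.
 *
 *	static struct sk_buff_head rxq;
 *
 *	skb_queue_head_init(&rxq);
 *
 *	skb_queue_tail(&rxq, skb);			// producer
 *
 *	while ((skb = skb_dequeue(&rxq)) != NULL)	// consumer
 *		process(skb);
 */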

static inline bool skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline unsigned int __skb_pagelen(const struct sk_buff *skb)
{
	unsigned int i, len = 0;

	for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
	return len;
}

static inline unsigned int skb_pagelen(const struct sk_buff *skb)
{
	return skb_headlen(skb) + __skb_pagelen(skb);
}

static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page.p = page;
	frag->page_offset = off;
	skb_frag_size_set(frag, size);

	page = compound_head(page);
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc = true;
}

static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	__skb_fill_page_desc(skb, i, page, off, size);
	skb_shinfo(skb)->nr_frags = i + 1;
}

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize);

#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}

#else
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif

void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
void *skb_put(struct sk_buff *skb, unsigned int len);
static inline void *__skb_put(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len += len;
	return tmp;
}

static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = __skb_put(skb, len);

	memset(tmp, 0, len);
	return tmp;
}

static inline void *__skb_put_data(struct sk_buff *skb, const void *data,
				   unsigned int len)
{
	void *tmp = __skb_put(skb, len);

	memcpy(tmp, data, len);
	return tmp;
}

static inline void __skb_put_u8(struct sk_buff *skb, u8 val)
{
	*(u8 *)__skb_put(skb, 1) = val;
}

static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memset(tmp, 0, len);

	return tmp;
}

static inline void *skb_put_data(struct sk_buff *skb, const void *data,
				 unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memcpy(tmp, data, len);

	return tmp;
}

static inline void skb_put_u8(struct sk_buff *skb, u8 val)
{
	*(u8 *)skb_put(skb, 1) = val;
}

void *skb_push(struct sk_buff *skb, unsigned int len);
static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len += len;
	return skb->data;
}

void *skb_pull(struct sk_buff *skb, unsigned int len);
static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}
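
/*
 * Example (added for illustration): skb_push()/skb_pull() move skb->data
 * across a header while keeping skb->len consistent. struct my_hdr is a
 * placeholder; skb_push() requires sufficient headroom.
 *
 *	struct my_hdr *h = skb_push(skb, sizeof(*h));	// prepend on xmit
 *	h->type = MY_TYPE;
 *
 *	// ... and on receive, once the header has been parsed:
 *	skb_pull(skb, sizeof(struct my_hdr));		// strip it
 */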

void *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
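
/*
 * Example (added for illustration): the standard guard before reading a
 * header that may be paged. ip_hdr() is from <linux/ip.h>.
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;			// short packet or pull failed
 *	iph = ip_hdr(skb);			// header bytes are now linear
 */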

void skb_condense(struct sk_buff *skb);

static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

static inline int skb_availroom(const struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb))
		return 0;

	return skb->end - skb->tail - skb->reserved_tailroom;
}

static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
					unsigned int needed_tailroom)
{
	SKB_LINEAR_ASSERT(skb);
	if (mtu < skb_tailroom(skb) - needed_tailroom)
		skb->reserved_tailroom = skb_tailroom(skb) - mtu;
	else
		skb->reserved_tailroom = needed_tailroom;
}

#define ENCAP_TYPE_ETHER	0
#define ENCAP_TYPE_IPPROTO	1

static inline void skb_set_inner_protocol(struct sk_buff *skb,
					  __be16 protocol)
{
	skb->inner_protocol = protocol;
	skb->inner_protocol_type = ENCAP_TYPE_ETHER;
}

static inline void skb_set_inner_ipproto(struct sk_buff *skb,
					 __u8 ipproto)
{
	skb->inner_ipproto = ipproto;
	skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
}

static inline void skb_reset_inner_headers(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->mac_header;
	skb->inner_network_header = skb->network_header;
	skb->inner_transport_header = skb->transport_header;
}

static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}

static inline unsigned char *skb_inner_transport_header(const struct sk_buff
							*skb)
{
	return skb->head + skb->inner_transport_header;
}

static inline int skb_inner_transport_offset(const struct sk_buff *skb)
{
	return skb_inner_transport_header(skb) - skb->data;
}

static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
	skb->inner_transport_header = skb->data - skb->head;
}

static inline void skb_set_inner_transport_header(struct sk_buff *skb,
						  const int offset)
{
	skb_reset_inner_transport_header(skb);
	skb->inner_transport_header += offset;
}

static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_network_header;
}

static inline void skb_reset_inner_network_header(struct sk_buff *skb)
{
	skb->inner_network_header = skb->data - skb->head;
}

static inline void skb_set_inner_network_header(struct sk_buff *skb,
						const int offset)
{
	skb_reset_inner_network_header(skb);
	skb->inner_network_header += offset;
}

static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_mac_header;
}

static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->data - skb->head;
}

static inline void skb_set_inner_mac_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_inner_mac_header(skb);
	skb->inner_mac_header += offset;
}

static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
{
	return skb->transport_header != (typeof(skb->transport_header))~0U;
}

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_offset(const struct sk_buff *skb)
{
	return skb_mac_header(skb) - skb->data;
}

static inline u32 skb_mac_header_len(const struct sk_buff *skb)
{
	return skb->network_header - skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != (typeof(skb->mac_header))~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

static inline void skb_pop_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->network_header;
}

static inline void skb_probe_transport_header(struct sk_buff *skb)
{
	struct flow_keys_basic keys;

	if (skb_transport_header_was_set(skb))
		return;

	if (skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
					     NULL, 0, 0, 0, 0))
		skb_set_transport_header(skb, keys.control.thoff);
}

static inline void skb_mac_header_rebuild(struct sk_buff *skb)
{
	if (skb_mac_header_was_set(skb)) {
		const unsigned char *old_mac = skb_mac_header(skb);

		skb_set_mac_header(skb, -skb->mac_len);
		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
	}
}

static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}

static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
{
	return skb->head + skb->csum_start;
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
{
	return skb->inner_transport_header - skb->inner_network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline int skb_inner_network_offset(const struct sk_buff *skb)
{
	return skb_inner_network_header(skb) - skb->data;
}

static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
	return pskb_may_pull(skb, skb_network_offset(skb) + len);
}

#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif

int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
{
	if (WARN_ON(skb_is_nonlinear(skb)))
		return;
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	__skb_set_length(skb, len);
}

void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
{
	unsigned int diff = len - skb->len;

	if (skb_tailroom(skb) < diff) {
		int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
					   GFP_ATOMIC);
		if (ret)
			return ret;
	}
	__skb_set_length(skb, len);
	return 0;
}

static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor) {
		skb->destructor(skb);
		skb->destructor = NULL;
		skb->sk = NULL;
	} else {
		BUG_ON(skb->sk);
	}
}

static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (likely(!skb_zcopy(skb)))
		return 0;
	if (!skb_zcopy_is_nouarg(skb) &&
	    skb_uarg(skb)->callback == sock_zerocopy_callback)
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (likely(!skb_zcopy(skb)))
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
void skb_queue_purge(struct sk_buff_head *list);

unsigned int skb_rbtree_purge(struct rb_root *root);

void *netdev_alloc_frag(unsigned int fragsz);

struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
				   gfp_t gfp_mask);

static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
					       unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	return __netdev_alloc_skb(NULL, length, gfp_mask);
}

static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return netdev_alloc_skb(NULL, length);
}

static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
							  unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
							unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}
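
/*
 * Example (added for illustration): a copy-based receive path in a driver.
 * hw_buf and pkt_len come from device-specific code; eth_type_trans() is
 * from <linux/etherdevice.h>.
 *
 *	skb = netdev_alloc_skb_ip_align(dev, pkt_len);
 *	if (!skb)
 *		return;				// drop: out of memory
 *	skb_put_data(skb, hw_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */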

static inline void skb_free_frag(void *addr)
{
	page_frag_free(addr);
}

void *napi_alloc_frag(unsigned int fragsz);
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
				 unsigned int length, gfp_t gfp_mask);
static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
					     unsigned int length)
{
	return __napi_alloc_skb(napi, length, GFP_ATOMIC);
}
void napi_consume_skb(struct sk_buff *skb, int budget);

void __kfree_skb_flush(void);
void __kfree_skb_defer(struct sk_buff *skb);

static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
					     unsigned int order)
{
	gfp_mask |= __GFP_COMP | __GFP_MEMALLOC;

	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
}

static inline struct page *dev_alloc_pages(unsigned int order)
{
	return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
}

static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
{
	return __dev_alloc_pages(gfp_mask, 0);
}

static inline struct page *dev_alloc_page(void)
{
	return dev_alloc_pages(0);
}

static inline void skb_propagate_pfmemalloc(struct page *page,
					    struct sk_buff *skb)
{
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc = true;
}
2879
2880
2881
2882
2883
2884
2885
2886static inline struct page *skb_frag_page(const skb_frag_t *frag)
2887{
2888 return frag->page.p;
2889}
2890
2891
2892
2893
2894
2895
2896
2897static inline void __skb_frag_ref(skb_frag_t *frag)
2898{
2899 get_page(skb_frag_page(frag));
2900}
2901
2902
2903
2904
2905
2906
2907
2908
2909static inline void skb_frag_ref(struct sk_buff *skb, int f)
2910{
2911 __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
2912}
2913
2914
2915
2916
2917
2918
2919
2920static inline void __skb_frag_unref(skb_frag_t *frag)
2921{
2922 put_page(skb_frag_page(frag));
2923}
2924
2925
2926
2927
2928
2929
2930
2931
2932static inline void skb_frag_unref(struct sk_buff *skb, int f)
2933{
2934 __skb_frag_unref(&skb_shinfo(skb)->frags[f]);
2935}
2936
2937
2938
2939
2940
2941
2942
2943
2944static inline void *skb_frag_address(const skb_frag_t *frag)
2945{
2946 return page_address(skb_frag_page(frag)) + frag->page_offset;
2947}
2948
2949
2950
2951
2952
2953
2954
2955
2956static inline void *skb_frag_address_safe(const skb_frag_t *frag)
2957{
2958 void *ptr = page_address(skb_frag_page(frag));
2959 if (unlikely(!ptr))
2960 return NULL;
2961
2962 return ptr + frag->page_offset;
2963}
2964
2965
2966
2967
2968
2969
2970
2971
2972static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
2973{
2974 frag->page.p = page;
2975}
2976
2977
2978
2979
2980
2981
2982
2983
2984
2985static inline void skb_frag_set_page(struct sk_buff *skb, int f,
2986 struct page *page)
2987{
2988 __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
2989}
2990
2991bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
2992
2993
2994
2995
2996
2997
2998
2999
3000
3001
3002
3003
3004static inline dma_addr_t skb_frag_dma_map(struct device *dev,
3005 const skb_frag_t *frag,
3006 size_t offset, size_t size,
3007 enum dma_data_direction dir)
3008{
3009 return dma_map_page(dev, skb_frag_page(frag),
3010 frag->page_offset + offset, size, dir);
3011}
3012
3013static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
3014 gfp_t gfp_mask)
3015{
3016 return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
3017}
3018
3019
3020static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
3021 gfp_t gfp_mask)
3022{
3023 return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
3024}
3025
3026
3027
3028
3029
3030
3031
3032
3033
3034
3035static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
3036{
3037 return !skb_header_cloned(skb) &&
3038 skb_headroom(skb) + len <= skb->hdr_len;
3039}
3040
3041static inline int skb_try_make_writable(struct sk_buff *skb,
3042 unsigned int write_len)
3043{
3044 return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
3045 pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3046}
3047
3048static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
3049 int cloned)
3050{
3051 int delta = 0;
3052
3053 if (headroom > skb_headroom(skb))
3054 delta = headroom - skb_headroom(skb);
3055
3056 if (delta || cloned)
3057 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
3058 GFP_ATOMIC);
3059 return 0;
3060}
3061
3062
3063
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
3075{
3076 return __skb_cow(skb, headroom, skb_cloned(skb));
3077}
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
3089static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
3090{
3091 return __skb_cow(skb, headroom, skb_header_cloned(skb));
3092}
3093
3094
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104static inline int skb_padto(struct sk_buff *skb, unsigned int len)
3105{
3106 unsigned int size = skb->len;
3107 if (likely(size >= len))
3108 return 0;
3109 return skb_pad(skb, len - size);
3110}
3111
3112
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
3124 bool free_on_error)
3125{
3126 unsigned int size = skb->len;
3127
3128 if (unlikely(size < len)) {
3129 len -= size;
3130 if (__skb_pad(skb, len, free_on_error))
3131 return -ENOMEM;
3132 __skb_put(skb, len);
3133 }
3134 return 0;
3135}
3136
3137
3138
3139
3140
3141
3142
3143
3144
3145
3146
3147static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
3148{
3149 return __skb_put_padto(skb, len, true);
3150}
3151
3152static inline int skb_add_data(struct sk_buff *skb,
3153 struct iov_iter *from, int copy)
3154{
3155 const int off = skb->len;
3156
3157 if (skb->ip_summed == CHECKSUM_NONE) {
3158 __wsum csum = 0;
3159 if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
3160 &csum, from)) {
3161 skb->csum = csum_block_add(skb->csum, csum, off);
3162 return 0;
3163 }
3164 } else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
3165 return 0;
3166
3167 __skb_trim(skb, off);
3168 return -EFAULT;
3169}
3170
3171static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
3172 const struct page *page, int off)
3173{
3174 if (skb_zcopy(skb))
3175 return false;
3176 if (i) {
3177 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
3178
3179 return page == skb_frag_page(frag) &&
3180 off == frag->page_offset + skb_frag_size(frag);
3181 }
3182 return false;
3183}
3184
3185static inline int __skb_linearize(struct sk_buff *skb)
3186{
3187 return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
3188}
3189
3190
3191
3192
3193
3194
3195
3196
3197static inline int skb_linearize(struct sk_buff *skb)
3198{
3199 return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
3200}
3201
/**
 * skb_has_shared_frag - can any frag be overwritten
 * @skb: buffer to test
 *
 * Return true if the skb has at least one frag that might be modified
 * by an external entity (as in vmsplice()/sendfile())
 */
3209static inline bool skb_has_shared_frag(const struct sk_buff *skb)
3210{
3211 return skb_is_nonlinear(skb) &&
3212 skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
3213}
3214
3215
3216
3217
3218
3219
3220
3221
3222static inline int skb_linearize_cow(struct sk_buff *skb)
3223{
3224 return skb_is_nonlinear(skb) || skb_cloned(skb) ?
3225 __skb_linearize(skb) : 0;
3226}
3227
3228static __always_inline void
3229__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
3230 unsigned int off)
3231{
3232 if (skb->ip_summed == CHECKSUM_COMPLETE)
3233 skb->csum = csum_block_sub(skb->csum,
3234 csum_partial(start, len, 0), off);
3235 else if (skb->ip_summed == CHECKSUM_PARTIAL &&
3236 skb_checksum_start_offset(skb) < 0)
3237 skb->ip_summed = CHECKSUM_NONE;
3238}
3239
3240
3241
3242
3243
3244
3245
3246
3247
3248
3249
3250static inline void skb_postpull_rcsum(struct sk_buff *skb,
3251 const void *start, unsigned int len)
3252{
3253 __skb_postpull_rcsum(skb, start, len, 0);
3254}
3255
3256static __always_inline void
3257__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
3258 unsigned int off)
3259{
3260 if (skb->ip_summed == CHECKSUM_COMPLETE)
3261 skb->csum = csum_block_add(skb->csum,
3262 csum_partial(start, len, 0), off);
3263}
3264
3265
3266
3267
3268
3269
3270
3271
3272
3273
3274static inline void skb_postpush_rcsum(struct sk_buff *skb,
3275 const void *start, unsigned int len)
3276{
3277 __skb_postpush_rcsum(skb, start, len, 0);
3278}
3279
void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *	skb_push_rcsum - push skb and update receive checksum
 *	@skb: buffer to update
 *	@len: length of data pulled
 *
 *	This function performs an skb_push on the packet and updates
 *	the CHECKSUM_COMPLETE checksum.  It should be used on
 *	receive path processing instead of skb_push unless you know
 *	that the checksum difference is zero (e.g., a valid IP header)
 *	or you are setting ip_summed to CHECKSUM_NONE.
 */
3293static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
3294{
3295 skb_push(skb, len);
3296 skb_postpush_rcsum(skb, skb->data, len);
3297 return skb->data;
3298}
3299
int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets are still valid after the operation.
 *	It can change skb pointers.
 */
3311static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
3312{
3313 if (likely(len >= skb->len))
3314 return 0;
3315 return pskb_trim_rcsum_slow(skb, len);
3316}
3317
3318static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
3319{
3320 if (skb->ip_summed == CHECKSUM_COMPLETE)
3321 skb->ip_summed = CHECKSUM_NONE;
3322 __skb_trim(skb, len);
3323 return 0;
3324}
3325
3326static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
3327{
3328 if (skb->ip_summed == CHECKSUM_COMPLETE)
3329 skb->ip_summed = CHECKSUM_NONE;
3330 return __skb_grow(skb, len);
3331}
3332
3333#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
3334#define skb_rb_first(root) rb_to_skb(rb_first(root))
3335#define skb_rb_last(root) rb_to_skb(rb_last(root))
3336#define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode))
3337#define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode))
3338
3339#define skb_queue_walk(queue, skb) \
3340 for (skb = (queue)->next; \
3341 skb != (struct sk_buff *)(queue); \
3342 skb = skb->next)
3343
3344#define skb_queue_walk_safe(queue, skb, tmp) \
3345 for (skb = (queue)->next, tmp = skb->next; \
3346 skb != (struct sk_buff *)(queue); \
3347 skb = tmp, tmp = skb->next)
3348
3349#define skb_queue_walk_from(queue, skb) \
3350 for (; skb != (struct sk_buff *)(queue); \
3351 skb = skb->next)
3352
3353#define skb_rbtree_walk(skb, root) \
3354 for (skb = skb_rb_first(root); skb != NULL; \
3355 skb = skb_rb_next(skb))
3356
3357#define skb_rbtree_walk_from(skb) \
3358 for (; skb != NULL; \
3359 skb = skb_rb_next(skb))
3360
3361#define skb_rbtree_walk_from_safe(skb, tmp) \
3362 for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \
3363 skb = tmp)
3364
3365#define skb_queue_walk_from_safe(queue, skb, tmp) \
3366 for (tmp = skb->next; \
3367 skb != (struct sk_buff *)(queue); \
3368 skb = tmp, tmp = skb->next)
3369
3370#define skb_queue_reverse_walk(queue, skb) \
3371 for (skb = (queue)->prev; \
3372 skb != (struct sk_buff *)(queue); \
3373 skb = skb->prev)
3374
3375#define skb_queue_reverse_walk_safe(queue, skb, tmp) \
3376 for (skb = (queue)->prev, tmp = skb->prev; \
3377 skb != (struct sk_buff *)(queue); \
3378 skb = tmp, tmp = skb->prev)
3379
3380#define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \
3381 for (tmp = skb->prev; \
3382 skb != (struct sk_buff *)(queue); \
3383 skb = tmp, tmp = skb->prev)
3384
3385static inline bool skb_has_frag_list(const struct sk_buff *skb)
3386{
3387 return skb_shinfo(skb)->frag_list != NULL;
3388}
3389
3390static inline void skb_frag_list_init(struct sk_buff *skb)
3391{
3392 skb_shinfo(skb)->frag_list = NULL;
3393}
3394
3395#define skb_walk_frags(skb, iter) \
3396 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
3397
3398
3399int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
3400 const struct sk_buff *skb);
3401struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
3402 struct sk_buff_head *queue,
3403 unsigned int flags,
3404 void (*destructor)(struct sock *sk,
3405 struct sk_buff *skb),
3406 int *off, int *err,
3407 struct sk_buff **last);
3408struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
3409 void (*destructor)(struct sock *sk,
3410 struct sk_buff *skb),
3411 int *off, int *err,
3412 struct sk_buff **last);
3413struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
3414 void (*destructor)(struct sock *sk,
3415 struct sk_buff *skb),
3416 int *off, int *err);
3417struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
3418 int *err);
3419__poll_t datagram_poll(struct file *file, struct socket *sock,
3420 struct poll_table_struct *wait);
3421int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
3422 struct iov_iter *to, int size);
3423static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
3424 struct msghdr *msg, int size)
3425{
3426 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
3427}
3428int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
3429 struct msghdr *msg);
3430int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
3431 struct iov_iter *to, int len,
3432 struct ahash_request *hash);
3433int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
3434 struct iov_iter *from, int len);
3435int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
3436void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
3437void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
3438static inline void skb_free_datagram_locked(struct sock *sk,
3439 struct sk_buff *skb)
3440{
3441 __skb_free_datagram_locked(sk, skb, 0);
3442}
3443int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
3444int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
3445int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
3446__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
3447 int len, __wsum csum);
3448int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
3449 struct pipe_inode_info *pipe, unsigned int len,
3450 unsigned int flags);
3451int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
3452 int len);
3453void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
3454unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
3455int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
3456 int len, int hlen);
3457void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
3458int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
3459void skb_scrub_packet(struct sk_buff *skb, bool xnet);
3460bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
3461bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
3462struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
3463struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
3464int skb_ensure_writable(struct sk_buff *skb, int write_len);
3465int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
3466int skb_vlan_pop(struct sk_buff *skb);
3467int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
3468int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto);
3469int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto);
3470int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
3471int skb_mpls_dec_ttl(struct sk_buff *skb);
3472struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
3473 gfp_t gfp);
3474
3475static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
3476{
3477 return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT;
3478}
3479
3480static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
3481{
3482 return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
3483}
3484
3485struct skb_checksum_ops {
3486 __wsum (*update)(const void *mem, int len, __wsum wsum);
3487 __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
3488};
3489
3490extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly;
3491
3492__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
3493 __wsum csum, const struct skb_checksum_ops *ops);
3494__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
3495 __wsum csum);
3496
3497static inline void * __must_check
3498__skb_header_pointer(const struct sk_buff *skb, int offset,
3499 int len, void *data, int hlen, void *buffer)
3500{
3501 if (hlen - offset >= len)
3502 return data + offset;
3503
3504 if (!skb ||
3505 skb_copy_bits(skb, offset, buffer, len) < 0)
3506 return NULL;
3507
3508 return buffer;
3509}
3510
3511static inline void * __must_check
3512skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
3513{
3514 return __skb_header_pointer(skb, offset, len, skb->data,
3515 skb_headlen(skb), buffer);
3516}
3517
/**
 *	skb_needs_linearize - check if we need to linearize a given skb
 *			      depending on the given device features.
 *	@skb: socket buffer to check
 *	@features: net device features
 *
 *	Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG.
 */
3528static inline bool skb_needs_linearize(struct sk_buff *skb,
3529 netdev_features_t features)
3530{
3531 return skb_is_nonlinear(skb) &&
3532 ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
3533 (skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
3534}
3535
3536static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
3537 void *to,
3538 const unsigned int len)
3539{
3540 memcpy(to, skb->data, len);
3541}
3542
3543static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
3544 const int offset, void *to,
3545 const unsigned int len)
3546{
3547 memcpy(to, skb->data + offset, len);
3548}
3549
3550static inline void skb_copy_to_linear_data(struct sk_buff *skb,
3551 const void *from,
3552 const unsigned int len)
3553{
3554 memcpy(skb->data, from, len);
3555}
3556
3557static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
3558 const int offset,
3559 const void *from,
3560 const unsigned int len)
3561{
3562 memcpy(skb->data + offset, from, len);
3563}
3564
3565void skb_init(void);
3566
3567static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
3568{
3569 return skb->tstamp;
3570}
3571
3572
3573
3574
3575
3576
3577
3578
3579
3580
3581static inline void skb_get_timestamp(const struct sk_buff *skb,
3582 struct __kernel_old_timeval *stamp)
3583{
3584 *stamp = ns_to_kernel_old_timeval(skb->tstamp);
3585}
3586
3587static inline void skb_get_new_timestamp(const struct sk_buff *skb,
3588 struct __kernel_sock_timeval *stamp)
3589{
3590 struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
3591
3592 stamp->tv_sec = ts.tv_sec;
3593 stamp->tv_usec = ts.tv_nsec / 1000;
3594}
3595
3596static inline void skb_get_timestampns(const struct sk_buff *skb,
3597 struct timespec *stamp)
3598{
3599 *stamp = ktime_to_timespec(skb->tstamp);
3600}
3601
3602static inline void skb_get_new_timestampns(const struct sk_buff *skb,
3603 struct __kernel_timespec *stamp)
3604{
3605 struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
3606
3607 stamp->tv_sec = ts.tv_sec;
3608 stamp->tv_nsec = ts.tv_nsec;
3609}
3610
3611static inline void __net_timestamp(struct sk_buff *skb)
3612{
3613 skb->tstamp = ktime_get_real();
3614}
3615
3616static inline ktime_t net_timedelta(ktime_t t)
3617{
3618 return ktime_sub(ktime_get_real(), t);
3619}
3620
3621static inline ktime_t net_invalid_timestamp(void)
3622{
3623 return 0;
3624}
3625
3626static inline u8 skb_metadata_len(const struct sk_buff *skb)
3627{
3628 return skb_shinfo(skb)->meta_len;
3629}
3630
3631static inline void *skb_metadata_end(const struct sk_buff *skb)
3632{
3633 return skb_mac_header(skb);
3634}
3635
3636static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
3637 const struct sk_buff *skb_b,
3638 u8 meta_len)
3639{
3640 const void *a = skb_metadata_end(skb_a);
3641 const void *b = skb_metadata_end(skb_b);
3642
3643#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
3644 u64 diffs = 0;
3645
3646 switch (meta_len) {
3647#define __it(x, op) (x -= sizeof(u##op))
3648#define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
	case 32: diffs |= __it_diff(a, b, 64);
		 /* fall through */
	case 24: diffs |= __it_diff(a, b, 64);
		 /* fall through */
	case 16: diffs |= __it_diff(a, b, 64);
		 /* fall through */
	case 8: diffs |= __it_diff(a, b, 64);
		break;
	case 28: diffs |= __it_diff(a, b, 64);
		 /* fall through */
	case 20: diffs |= __it_diff(a, b, 64);
		 /* fall through */
	case 12: diffs |= __it_diff(a, b, 64);
		 /* fall through */
	case 4: diffs |= __it_diff(a, b, 32);
		break;
3665 }
3666 return diffs;
3667#else
3668 return memcmp(a - meta_len, b - meta_len, meta_len);
3669#endif
3670}
3671
3672static inline bool skb_metadata_differs(const struct sk_buff *skb_a,
3673 const struct sk_buff *skb_b)
3674{
3675 u8 len_a = skb_metadata_len(skb_a);
3676 u8 len_b = skb_metadata_len(skb_b);
3677
3678 if (!(len_a | len_b))
3679 return false;
3680
3681 return len_a != len_b ?
3682 true : __skb_metadata_differs(skb_a, skb_b, len_a);
3683}
3684
3685static inline void skb_metadata_set(struct sk_buff *skb, u8 meta_len)
3686{
3687 skb_shinfo(skb)->meta_len = meta_len;
3688}
3689
3690static inline void skb_metadata_clear(struct sk_buff *skb)
3691{
3692 skb_metadata_set(skb, 0);
3693}
3694
3695struct sk_buff *skb_clone_sk(struct sk_buff *skb);
3696
3697#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
3698
3699void skb_clone_tx_timestamp(struct sk_buff *skb);
3700bool skb_defer_rx_timestamp(struct sk_buff *skb);
3701
3702#else
3703
3704static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
3705{
3706}
3707
3708static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
3709{
3710 return false;
3711}
3712
#endif

/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. The skb
 * must call this function to return the skb back to the stack with a
 * timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps
 *
 */
3727void skb_complete_tx_timestamp(struct sk_buff *skb,
3728 struct skb_shared_hwtstamps *hwtstamps);
3729
3730void __skb_tstamp_tx(struct sk_buff *orig_skb,
3731 struct skb_shared_hwtstamps *hwtstamps,
		     struct sock *sk, int tstype);

/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb:	the original outgoing packet
 * @hwtstamps:	hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket.  Errors are silently ignored.
 */
3745void skb_tstamp_tx(struct sk_buff *orig_skb,
		   struct skb_shared_hwtstamps *hwtstamps);

/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC Drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * Specifically, one should make absolutely sure that this function is
 * called before TX completion of this packet can trigger.  Otherwise
 * the packet could potentially already be freed.
 *
 * @skb: A socket buffer.
 */
3760static inline void skb_tx_timestamp(struct sk_buff *skb)
3761{
3762 skb_clone_tx_timestamp(skb);
3763 if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
3764 skb_tstamp_tx(skb, NULL);
3765}
3766
3767
3768
3769
3770
3771
3772
3773
3774void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
3775
3776__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
3777__sum16 __skb_checksum_complete(struct sk_buff *skb);
3778
3779static inline int skb_csum_unnecessary(const struct sk_buff *skb)
3780{
3781 return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
3782 skb->csum_valid ||
3783 (skb->ip_summed == CHECKSUM_PARTIAL &&
3784 skb_checksum_start_offset(skb) >= 0));
3785}
3786
3787
3788
3789
3790
3791
3792
3793
3794
3795
3796
3797
3798
3799
3800
3801
3802
3803static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
3804{
3805 return skb_csum_unnecessary(skb) ?
3806 0 : __skb_checksum_complete(skb);
3807}
3808
3809static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
3810{
3811 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3812 if (skb->csum_level == 0)
3813 skb->ip_summed = CHECKSUM_NONE;
3814 else
3815 skb->csum_level--;
3816 }
3817}
3818
3819static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
3820{
3821 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3822 if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
3823 skb->csum_level++;
3824 } else if (skb->ip_summed == CHECKSUM_NONE) {
3825 skb->ip_summed = CHECKSUM_UNNECESSARY;
3826 skb->csum_level = 0;
3827 }
3828}
3829
3830
3831
3832
3833
3834
3835static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
3836 bool zero_okay,
3837 __sum16 check)
3838{
3839 if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
3840 skb->csum_valid = 1;
3841 __skb_decr_checksum_unnecessary(skb);
3842 return false;
3843 }
3844
3845 return true;
3846}
3847
3848
3849
3850
#define CHECKSUM_BREAK 76

/* Unset checksum-complete
 *
 * Unset checksum complete can be done when packet is being modified
 * (uncompressed for instance) and checksum-complete value is
 * invalidated.
 */
3859static inline void skb_checksum_complete_unset(struct sk_buff *skb)
3860{
3861 if (skb->ip_summed == CHECKSUM_COMPLETE)
3862 skb->ip_summed = CHECKSUM_NONE;
3863}
3864
3865
3866
3867
3868
3869
3870
3871
3872
3873
3874static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
3875 bool complete,
3876 __wsum psum)
3877{
3878 if (skb->ip_summed == CHECKSUM_COMPLETE) {
3879 if (!csum_fold(csum_add(psum, skb->csum))) {
3880 skb->csum_valid = 1;
3881 return 0;
3882 }
3883 }
3884
3885 skb->csum = psum;
3886
3887 if (complete || skb->len <= CHECKSUM_BREAK) {
3888 __sum16 csum;
3889
3890 csum = __skb_checksum_complete(skb);
3891 skb->csum_valid = !csum;
3892 return csum;
3893 }
3894
3895 return 0;
3896}
3897
3898static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
3899{
3900 return 0;
3901}
3902
3903
3904
3905
3906
3907
3908
3909
3910
3911
3912
3913#define __skb_checksum_validate(skb, proto, complete, \
3914 zero_okay, check, compute_pseudo) \
3915({ \
3916 __sum16 __ret = 0; \
3917 skb->csum_valid = 0; \
3918 if (__skb_checksum_validate_needed(skb, zero_okay, check)) \
3919 __ret = __skb_checksum_validate_complete(skb, \
3920 complete, compute_pseudo(skb, proto)); \
3921 __ret; \
3922})
3923
3924#define skb_checksum_init(skb, proto, compute_pseudo) \
3925 __skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)
3926
3927#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \
3928 __skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)
3929
3930#define skb_checksum_validate(skb, proto, compute_pseudo) \
3931 __skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)
3932
3933#define skb_checksum_validate_zero_check(skb, proto, check, \
3934 compute_pseudo) \
3935 __skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)
3936
3937#define skb_checksum_simple_validate(skb) \
3938 __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
3939
3940static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
3941{
3942 return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid);
3943}
3944
3945static inline void __skb_checksum_convert(struct sk_buff *skb, __wsum pseudo)
3946{
3947 skb->csum = ~pseudo;
3948 skb->ip_summed = CHECKSUM_COMPLETE;
3949}
3950
3951#define skb_checksum_try_convert(skb, proto, compute_pseudo) \
3952do { \
3953 if (__skb_checksum_convert_check(skb)) \
3954 __skb_checksum_convert(skb, compute_pseudo(skb, proto)); \
3955} while (0)
3956
3957static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
3958 u16 start, u16 offset)
3959{
3960 skb->ip_summed = CHECKSUM_PARTIAL;
3961 skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
3962 skb->csum_offset = offset - start;
3963}
3964
3965
3966
3967
3968
3969
3970static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
3971 int start, int offset, bool nopartial)
3972{
3973 __wsum delta;
3974
3975 if (!nopartial) {
3976 skb_remcsum_adjust_partial(skb, ptr, start, offset);
3977 return;
3978 }
3979
3980 if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
3981 __skb_checksum_complete(skb);
3982 skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
3983 }
3984
	delta = remcsum_adjust(ptr, skb->csum, start, offset);

	/* Adjust skb->csum since we changed the packet */
	skb->csum = csum_add(skb->csum, delta);
3989}
3990
3991static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
3992{
3993#if IS_ENABLED(CONFIG_NF_CONNTRACK)
3994 return (void *)(skb->_nfct & SKB_NFCT_PTRMASK);
3995#else
3996 return NULL;
3997#endif
3998}
3999
4000#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
4001void nf_conntrack_destroy(struct nf_conntrack *nfct);
4002static inline void nf_conntrack_put(struct nf_conntrack *nfct)
4003{
4004 if (nfct && atomic_dec_and_test(&nfct->use))
4005 nf_conntrack_destroy(nfct);
4006}
4007static inline void nf_conntrack_get(struct nf_conntrack *nfct)
4008{
4009 if (nfct)
4010 atomic_inc(&nfct->use);
4011}
4012#endif
4013
4014#ifdef CONFIG_SKB_EXTENSIONS
4015enum skb_ext_id {
4016#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
4017 SKB_EXT_BRIDGE_NF,
4018#endif
4019#ifdef CONFIG_XFRM
4020 SKB_EXT_SEC_PATH,
4021#endif
4022 SKB_EXT_NUM,
4023};
4024
4025
4026
4027
4028
4029
4030
4031
4032
4033
4034
4035struct skb_ext {
4036 refcount_t refcnt;
4037 u8 offset[SKB_EXT_NUM];
4038 u8 chunks;
4039 char data[0] __aligned(8);
4040};
4041
4042void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
4043void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
4044void __skb_ext_put(struct skb_ext *ext);
4045
4046static inline void skb_ext_put(struct sk_buff *skb)
4047{
4048 if (skb->active_extensions)
4049 __skb_ext_put(skb->extensions);
4050}
4051
4052static inline void __skb_ext_copy(struct sk_buff *dst,
4053 const struct sk_buff *src)
4054{
4055 dst->active_extensions = src->active_extensions;
4056
4057 if (src->active_extensions) {
4058 struct skb_ext *ext = src->extensions;
4059
4060 refcount_inc(&ext->refcnt);
4061 dst->extensions = ext;
4062 }
4063}
4064
4065static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src)
4066{
4067 skb_ext_put(dst);
4068 __skb_ext_copy(dst, src);
4069}
4070
4071static inline bool __skb_ext_exist(const struct skb_ext *ext, enum skb_ext_id i)
4072{
4073 return !!ext->offset[i];
4074}
4075
4076static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id)
4077{
4078 return skb->active_extensions & (1 << id);
4079}
4080
4081static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
4082{
4083 if (skb_ext_exist(skb, id))
4084 __skb_ext_del(skb, id);
4085}
4086
4087static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id)
4088{
4089 if (skb_ext_exist(skb, id)) {
4090 struct skb_ext *ext = skb->extensions;
4091
4092 return (void *)ext + (ext->offset[id] << 3);
4093 }
4094
4095 return NULL;
4096}
4097#else
4098static inline void skb_ext_put(struct sk_buff *skb) {}
4099static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
4100static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
4101static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
4102#endif
4103
4104static inline void nf_reset(struct sk_buff *skb)
4105{
4106#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
4107 nf_conntrack_put(skb_nfct(skb));
4108 skb->_nfct = 0;
4109#endif
4110#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
4111 skb_ext_del(skb, SKB_EXT_BRIDGE_NF);
4112#endif
4113}
4114
4115static inline void nf_reset_trace(struct sk_buff *skb)
4116{
4117#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
4118 skb->nf_trace = 0;
4119#endif
4120}
4121
4122static inline void ipvs_reset(struct sk_buff *skb)
4123{
4124#if IS_ENABLED(CONFIG_IP_VS)
4125 skb->ipvs_property = 0;
4126#endif
4127}
4128
4129
4130static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
4131 bool copy)
4132{
4133#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
4134 dst->_nfct = src->_nfct;
4135 nf_conntrack_get(skb_nfct(src));
4136#endif
4137#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
4138 if (copy)
4139 dst->nf_trace = src->nf_trace;
4140#endif
4141}
4142
4143static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
4144{
4145#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
4146 nf_conntrack_put(skb_nfct(dst));
4147#endif
4148 __nf_copy(dst, src, true);
4149}
4150
4151#ifdef CONFIG_NETWORK_SECMARK
4152static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
4153{
4154 to->secmark = from->secmark;
4155}
4156
4157static inline void skb_init_secmark(struct sk_buff *skb)
4158{
4159 skb->secmark = 0;
4160}
4161#else
4162static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
4163{ }
4164
4165static inline void skb_init_secmark(struct sk_buff *skb)
4166{ }
4167#endif
4168
4169static inline int secpath_exists(const struct sk_buff *skb)
4170{
4171#ifdef CONFIG_XFRM
4172 return skb_ext_exist(skb, SKB_EXT_SEC_PATH);
4173#else
4174 return 0;
4175#endif
4176}
4177
4178static inline bool skb_irq_freeable(const struct sk_buff *skb)
4179{
4180 return !skb->destructor &&
4181 !secpath_exists(skb) &&
4182 !skb_nfct(skb) &&
4183 !skb->_skb_refdst &&
4184 !skb_has_frag_list(skb);
4185}
4186
4187static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
4188{
4189 skb->queue_mapping = queue_mapping;
4190}
4191
4192static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
4193{
4194 return skb->queue_mapping;
4195}
4196
4197static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
4198{
4199 to->queue_mapping = from->queue_mapping;
4200}
4201
4202static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
4203{
4204 skb->queue_mapping = rx_queue + 1;
4205}
4206
4207static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
4208{
4209 return skb->queue_mapping - 1;
4210}
4211
4212static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
4213{
4214 return skb->queue_mapping != 0;
4215}
4216
4217static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val)
4218{
4219 skb->dst_pending_confirm = val;
4220}
4221
4222static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
4223{
4224 return skb->dst_pending_confirm != 0;
4225}
4226
4227static inline struct sec_path *skb_sec_path(const struct sk_buff *skb)
4228{
4229#ifdef CONFIG_XFRM
4230 return skb_ext_find(skb, SKB_EXT_SEC_PATH);
4231#else
4232 return NULL;
4233#endif
4234}
4235
4236
4237
4238
4239
4240
4241
4242struct skb_gso_cb {
4243 union {
4244 int mac_offset;
4245 int data_offset;
4246 };
4247 int encap_level;
4248 __wsum csum;
4249 __u16 csum_start;
4250};
#define SKB_GSO_CB_OFFSET 32
#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET))
4253
4254static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
4255{
4256 return (skb_mac_header(inner_skb) - inner_skb->head) -
4257 SKB_GSO_CB(inner_skb)->mac_offset;
4258}
4259
4260static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
4261{
4262 int new_headroom, headroom;
4263 int ret;
4264
4265 headroom = skb_headroom(skb);
4266 ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
4267 if (ret)
4268 return ret;
4269
4270 new_headroom = skb_headroom(skb);
4271 SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
4272 return 0;
4273}
4274
4275static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
4276{
4277
4278 if (skb->remcsum_offload)
4279 return;
4280
4281 SKB_GSO_CB(skb)->csum = res;
4282 SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
4283}
4284
4285
4286
4287
4288
4289
4290
4291
4292
4293static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
4294{
4295 unsigned char *csum_start = skb_transport_header(skb);
4296 int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
4297 __wsum partial = SKB_GSO_CB(skb)->csum;
4298
4299 SKB_GSO_CB(skb)->csum = res;
4300 SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;
4301
4302 return csum_fold(csum_partial(csum_start, plen, partial));
4303}
4304
4305static inline bool skb_is_gso(const struct sk_buff *skb)
4306{
4307 return skb_shinfo(skb)->gso_size;
4308}
4309
4310
4311static inline bool skb_is_gso_v6(const struct sk_buff *skb)
4312{
4313 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
4314}
4315
4316
4317static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
4318{
4319 return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
4320}
4321
4322
4323static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
4324{
4325 return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
4326}
4327
4328static inline void skb_gso_reset(struct sk_buff *skb)
4329{
4330 skb_shinfo(skb)->gso_size = 0;
4331 skb_shinfo(skb)->gso_segs = 0;
4332 skb_shinfo(skb)->gso_type = 0;
4333}
4334
4335static inline void skb_increase_gso_size(struct skb_shared_info *shinfo,
4336 u16 increment)
4337{
4338 if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
4339 return;
4340 shinfo->gso_size += increment;
4341}
4342
4343static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo,
4344 u16 decrement)
4345{
4346 if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
4347 return;
4348 shinfo->gso_size -= decrement;
4349}
4350
4351void __skb_warn_lro_forwarding(const struct sk_buff *skb);
4352
4353static inline bool skb_warn_if_lro(const struct sk_buff *skb)
4354{
4355
4356
4357 const struct skb_shared_info *shinfo = skb_shinfo(skb);
4358
4359 if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
4360 unlikely(shinfo->gso_type == 0)) {
4361 __skb_warn_lro_forwarding(skb);
4362 return true;
4363 }
4364 return false;
4365}
4366
4367static inline void skb_forward_csum(struct sk_buff *skb)
4368{
4369
4370 if (skb->ip_summed == CHECKSUM_COMPLETE)
4371 skb->ip_summed = CHECKSUM_NONE;
4372}
4373
4374
4375
4376
4377
4378
4379
4380
4381
4382static inline void skb_checksum_none_assert(const struct sk_buff *skb)
4383{
4384#ifdef DEBUG
4385 BUG_ON(skb->ip_summed != CHECKSUM_NONE);
4386#endif
4387}
4388
4389bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
4390
4391int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
4392struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
4393 unsigned int transport_len,
				     __sum16(*skb_chkf)(struct sk_buff *skb));

/**
 * skb_head_is_locked - Determine if the skb->head is locked down
 * @skb: skb to check
 *
 * The head on skbs built around a head frag can be removed if they are
 * not cloned.  This function returns true if the skb head is locked down
 * due to either being allocated via kmalloc, or by being a clone with
 * multiple references to the head.
 */
4405static inline bool skb_head_is_locked(const struct sk_buff *skb)
4406{
4407 return !skb->head_frag || skb_cloned(skb);
4408}
4409
4410
4411
4412
4413
4414
4415
4416
4417
4418
4419static inline __wsum lco_csum(struct sk_buff *skb)
4420{
4421 unsigned char *csum_start = skb_checksum_start(skb);
4422 unsigned char *l4_hdr = skb_transport_header(skb);
	__wsum partial;

	/* Start with complement of inner checksum adjustment */
	partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
						    skb->csum_offset));

	/* Add in checksum of our headers (incl. outer checksum
	 * adjustment filled in by caller) and return result.
	 */
	return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
4433}
4434
#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */
4437