/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/rbtree.h>
#include <linux/socket.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <net/flow_dissector.h>
#include <linux/splice.h>
#include <linux/in6.h>
#include <linux/if_packet.h>
#include <net/flow.h>

/* The interface for checksum offload between the stack and networking
 * drivers is as follows:
 *
 * A. Checksumming of received packets by device.
 *
 * CHECKSUM_NONE:
 *
 *   Device did not checksum this packet e.g. due to lack of capabilities.
 *   The packet contains full (though not verified) checksum in packet but
 *   not in skb->csum. Thus, skb->csum is undefined in this case.
 *
 * CHECKSUM_UNNECESSARY:
 *
 *   The hardware you're dealing with doesn't calculate the full checksum
 *   (as in CHECKSUM_COMPLETE), but it does parse headers and verify
 *   checksums for specific packets. skb->csum_level indicates how many
 *   consecutive checksums were verified; up to SKB_MAX_CSUM_LEVEL are
 *   reportable.
 *
 * CHECKSUM_COMPLETE:
 *
 *   This is the most generic way. The device supplied checksum of the
 *   _whole_ packet as seen by netif_rx() and fills in skb->csum. This
 *   means the hardware doesn't need to parse L3/L4 headers to implement
 *   this.
 *
 * CHECKSUM_PARTIAL:
 *
 *   A checksum is set up to be offloaded to a device as described in the
 *   output description below. This may occur on a packet received
 *   directly from another Linux OS, e.g. a virtualized Linux kernel on
 *   the same host, or on a loopbacked packet.
 *
 * B. Checksumming on output.
 *
 * CHECKSUM_NONE:
 *
 *   The skb was already checksummed by the protocol, or a checksum is not
 *   required.
 *
 * CHECKSUM_PARTIAL:
 *
 *   The device is required to checksum the packet as seen by
 *   hard_start_xmit() from skb->csum_start up to the end, and to record
 *   or write the checksum at offset skb->csum_start + skb->csum_offset.
 *
 * CHECKSUM_UNNECESSARY and CHECKSUM_COMPLETE are meaningless as
 * output values.
 */

#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

/* Maximum value in skb->csum_level */
#define SKB_MAX_CSUM_LEVEL	3

#define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
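
/* Worked example (illustrative): SKB_WITH_OVERHEAD() answers "how much
 * payload fits in a buffer of X bytes once the trailing struct
 * skb_shared_info is accounted for", e.g.
 *
 *	unsigned int payload = SKB_WITH_OVERHEAD(PAGE_SIZE);
 *
 * while SKB_TRUESIZE() goes the other way: given X payload bytes it
 * returns the minimum memory the skb really consumes (payload plus the
 * aligned sk_buff and skb_shared_info structs), which is what the socket
 * layer charges against receive/send buffer limits via skb->truesize.
 */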

struct net_device;
struct scatterlist;
struct pipe_inode_info;
struct iov_iter;
struct napi_struct;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
	refcount_t		use;
	enum {
		BRNF_PROTO_UNCHANGED,
		BRNF_PROTO_8021Q,
		BRNF_PROTO_PPPOE
	} orig_proto:8;
	u8			pkt_otherhost:1;
	u8			in_prerouting:1;
	u8			bridged_dnat:1;
	__u16			frag_max_size;
	struct net_device	*physindev;

	/* always valid & non-NULL from FORWARD on, for physdev match */
	struct net_device	*physoutdev;
	union {
		/* prerouting: detect dnat in orig/reply direction */
		__be32		ipv4_daddr;
		struct in6_addr	ipv6_daddr;

		/* after prerouting + nat detected: store original source
		 * mac since neigh resolution overwrites it, only used while
		 * skb is out in neigh layer.
		 */
		char		neigh_header[8];
	};
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list we
 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
 * buffers which do not start on a page boundary.
 *
 * Since GRO uses frags we allocate at least 16 regardless of page
 * size.
 */
#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif
extern int sysctl_max_skb_frags;

/* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
 * segment using its current segmentation instead.
 */
#define GSO_BY_FRAGS	0xFFFF

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct {
		struct page *p;
	} page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
	__u32 size;
#else
	__u16 page_offset;
	__u16 size;
#endif
};

static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->size;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->size = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->size += delta;
}

static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->size -= delta;
}
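
/* Usage sketch (illustrative only): drivers typically size a fragment
 * when attaching a page and grow it later when coalescing adjacent data:
 *
 *	skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
 *
 *	skb_frag_size_set(frag, len);		// initial fragment length
 *	skb_frag_size_add(frag, more);		// coalesce adjacent bytes
 *	total += skb_frag_size(frag);		// read it back
 *
 * Note the skb's own skb->len/skb->data_len still have to be updated
 * separately (see skb_add_rx_frag(), which handles both).
 */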

#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since arbitrary point in time
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
};

/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp when queueing packet to NIC */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* device driver supports TX zero-copy buffers */
	SKBTX_DEV_ZEROCOPY = 1 << 3,

	/* generate wifi status information (where possible) */
	SKBTX_WIFI_STATUS = 1 << 4,

	/* This indicates at least one fragment might be overwritten
	 * (as in vmsplice(), sendfile() ...).
	 * If we need to compute a TX checksum, we'll need to copy
	 * all frags to avoid possible bad checksum.
	 */
	SKBTX_SHARED_FRAG = 1 << 5,

	/* generate software time stamp when entering packet scheduling */
	SKBTX_SCHED_TSTAMP = 1 << 6,
};

#define SKBTX_ANY_SW_TSTAMP	(SKBTX_SW_TSTAMP    | \
				 SKBTX_SCHED_TSTAMP)
#define SKBTX_ANY_TSTAMP	(SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)

/*
 * The callback notifies userspace to release buffers when skb DMA is done
 * in lower device; the skb last reference should be 0 when calling this.
 * The zerocopy_success argument is true if zero copy transmit occurred,
 * false on data copy or out of memory error caused by data copy attempt.
 * The ctx field is used to track device context.
 * The desc field is used to track userspace buffer index.
 */
struct ubuf_info {
	void (*callback)(struct ubuf_info *, bool zerocopy_success);
	void *ctx;
	unsigned long desc;
};

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	unsigned short	_unused;
	unsigned char	nr_frags;
	__u8		tx_flags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	unsigned int	gso_type;
	u32		tskey;
	__be32		ip6_frag_id;

	/*
	 * Warning : all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void		*destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)


enum {
	SKB_FCLONE_UNAVAILABLE,	/* skb has no fclone (from head_cache) */
	SKB_FCLONE_ORIG,	/* orig skb (from fclone_cache) */
	SKB_FCLONE_CLONE,	/* companion fclone skb (from fclone_cache) */
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCP_FIXEDID = 1 << 4,

	SKB_GSO_TCPV6 = 1 << 5,

	SKB_GSO_FCOE = 1 << 6,

	SKB_GSO_GRE = 1 << 7,

	SKB_GSO_GRE_CSUM = 1 << 8,

	SKB_GSO_IPXIP4 = 1 << 9,

	SKB_GSO_IPXIP6 = 1 << 10,

	SKB_GSO_UDP_TUNNEL = 1 << 11,

	SKB_GSO_UDP_TUNNEL_CSUM = 1 << 12,

	SKB_GSO_PARTIAL = 1 << 13,

	SKB_GSO_TUNNEL_REMCSUM = 1 << 14,

	SKB_GSO_SCTP = 1 << 15,

	SKB_GSO_ESP = 1 << 16,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@rbnode: RB tree node, alternative to next/prev for netem/tcp stack
 *	@tstamp: Time we arrived/left
 *	@sk: Socket we are owned by
 *	@dev: Device we arrived on/are leaving by
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@_skb_refdst: destination entry (with norefcount bit)
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@hdr_len: writable header length of cloned skb
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
 *	@priority: Packet queueing priority
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@ip_summed: Driver fed us an IP checksum
 *	@nohdr: Payload reference only, must not modify header
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
 *	@skb_iif: ifindex of device we arrived on
 *	@queue_mapping: Queue mapping for multiqueue devices
 *	@hash: the packet hash
 *	@protocol: Packet protocol from driver
 *	@destructor: Destruct function
 *	@truesize: Buffer size
 *	@users: User count - see {datagram,tcp}.c
 */

struct sk_buff {
	union {
		struct {
			/* These two members must be first. */
			struct sk_buff		*next;
			struct sk_buff		*prev;

			union {
				ktime_t		tstamp;
				u64		skb_mstamp;
			};
		};
		struct rb_node	rbnode; /* used in netem & tcp stack */
	};
	struct sock		*sk;

	union {
		struct net_device	*dev;
		/* Some protocols might use this space to store information,
		 * while device pointer would be NULL.
		 * UDP receive path is one user.
		 */
		unsigned long		dev_scratch;
	};

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	unsigned long		_skb_refdst;
	void			(*destructor)(struct sk_buff *skb);
#ifdef CONFIG_XFRM
	struct sec_path		*sp;
#endif
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	unsigned long		_nfct;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	struct nf_bridge_info	*nf_bridge;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;

	/* Following fields are _not_ copied in __copy_skb_header().
	 * Note that queue_mapping is here mostly to fill a hole.
	 */
	kmemcheck_bitfield_begin(flags1);
	__u16			queue_mapping;

/* if you move cloned around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define CLONED_MASK	(1 << 7)
#else
#define CLONED_MASK	1
#endif
#define CLONED_OFFSET()		offsetof(struct sk_buff, __cloned_offset)

	__u8			__cloned_offset[0];
	__u8			cloned:1,
				nohdr:1,
				fclone:2,
				peeked:1,
				head_frag:1,
				xmit_more:1,
				__unused:1; /* one bit hole */
	kmemcheck_bitfield_end(flags1);

	/* fields enclosed in headers_start/headers_end are copied
	 * using a single memcpy() in __copy_skb_header()
	 */
	/* private: */
	__u32			headers_start[0];
	/* public: */

/* if you move pkt_type around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_TYPE_MAX	(7 << 5)
#else
#define PKT_TYPE_MAX	7
#endif
#define PKT_TYPE_OFFSET()	offsetof(struct sk_buff, __pkt_type_offset)

	__u8			__pkt_type_offset[0];
	__u8			pkt_type:3;
	__u8			pfmemalloc:1;
	__u8			ignore_df:1;

	__u8			nf_trace:1;
	__u8			ip_summed:2;
	__u8			ooo_okay:1;
	__u8			l4_hash:1;
	__u8			sw_hash:1;
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;

	__u8			no_fcs:1;
	/* Indicates the inner headers are valid in the skbuff. */
	__u8			encapsulation:1;
	__u8			encap_hdr_csum:1;
	__u8			csum_valid:1;
	__u8			csum_complete_sw:1;
	__u8			csum_level:2;
	__u8			csum_not_inet:1;

	__u8			dst_pending_confirm:1;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif
	__u8			ipvs_property:1;
	__u8			inner_protocol_type:1;
	__u8			remcsum_offload:1;
#ifdef CONFIG_NET_SWITCHDEV
	__u8			offload_fwd_mark:1;
#endif
#ifdef CONFIG_NET_CLS_ACT
	__u8			tc_skip_classify:1;
	__u8			tc_at_ingress:1;
	__u8			tc_redirected:1;
	__u8			tc_from_ingress:1;
#endif

#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#endif

	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	int			skb_iif;
	__u32			hash;
	__be16			vlan_proto;
	__u16			vlan_tci;
#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
	union {
		unsigned int	napi_id;
		unsigned int	sender_cpu;
	};
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif

	union {
		__u32		mark;
		__u32		reserved_tailroom;
	};

	union {
		__be16		inner_protocol;
		__u8		inner_ipproto;
	};

	__u16			inner_transport_header;
	__u16			inner_network_header;
	__u16			inner_mac_header;

	__be16			protocol;
	__u16			transport_header;
	__u16			network_header;
	__u16			mac_header;

	/* private: */
	__u32			headers_end[0];
	/* public: */

	/* These elements must be at the end, see alloc_skb() for details. */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	refcount_t		users;
};

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>


#define SKB_ALLOC_FCLONE	0x01
#define SKB_ALLOC_RX		0x02
#define SKB_ALLOC_NAPI		0x04

/* Returns true if the skb was allocated from PFMEMALLOC reserves */
static inline bool skb_pfmemalloc(const struct sk_buff *skb)
{
	return unlikely(skb->pfmemalloc);
}

/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if refcount was _not_ taken
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

#define SKB_NFCT_PTRMASK	~(7UL)

/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of reference taken or not.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check we still are in a
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}

/**
 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * If dst entry is cached, we do not take reference and dst_release
 * will be avoided by refdst_drop. If dst entry is not cached, we take
 * reference, so that last dst_release can destroy the dst immediately.
 */
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
}

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

/* For mangling skb->pkt_type from user space side from applications
 * such as nft, tc, etc, we only allow a conservative subset of
 * possible pkt_types to be set.
 */
static inline bool skb_pkt_type_ok(u32 ptype)
{
	return ptype <= PACKET_OTHERHOST;
}

static inline unsigned int skb_napi_id(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	return skb->napi_id;
#else
	return 0;
#endif
}

/* decrement the reference count and return true if we can free the skb */
static inline bool skb_unref(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return false;
	if (likely(refcount_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!refcount_dec_and_test(&skb->users)))
		return false;

	return true;
}

void skb_release_head_state(struct sk_buff *skb);
void kfree_skb(struct sk_buff *skb);
void kfree_skb_list(struct sk_buff *segs);
void skb_tx_error(struct sk_buff *skb);
void consume_skb(struct sk_buff *skb);
void consume_stateless_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;

void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
		      bool *fragstolen, int *delta_truesize);

struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
			    int node);
struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}
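
/* Usage sketch (illustrative only; my_hdr, header_len and payload* are
 * hypothetical): the canonical transmit-side pattern is allocate, reserve
 * headroom, append the payload, then prepend headers:
 *
 *	struct sk_buff *skb;
 *
 *	skb = alloc_skb(header_len + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, header_len);		 // leave room for headers
 *	skb_put_data(skb, payload, payload_len); // append payload at tail
 *	skb_push(skb, sizeof(struct my_hdr));	 // step back over the header
 *
 * skb_push() only works because skb_reserve() left exactly the headroom
 * the headers need; pushing past skb->head panics.
 */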

struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
				     unsigned long data_len,
				     int max_page_order,
				     int *errcode,
				     gfp_t gfp_mask);

/* Layout of fast clones : [skb1][skb2][fclone_ref] */
struct sk_buff_fclones {
	struct sk_buff	skb1;

	struct sk_buff	skb2;

	refcount_t	fclone_ref;
};

/**
 *	skb_fclone_busy - check if fclone is busy
 *	@sk: socket
 *	@skb: buffer
 *
 * Returns true if skb is a fast clone, and its clone is not freed.
 * Some drivers call skb_orphan() in their ndo_start_xmit(),
 * so we also check that this didn't happen.
 */
static inline bool skb_fclone_busy(const struct sock *sk,
				   const struct sk_buff *skb)
{
	const struct sk_buff_fclones *fclones;

	fclones = container_of(skb, struct sk_buff_fclones, skb1);

	return skb->fclone == SKB_FCLONE_ORIG &&
	       refcount_read(&fclones->fclone_ref) > 1 &&
	       fclones->skb2.sk == sk;
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
}

struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
static inline struct sk_buff *alloc_skb_head(gfp_t priority)
{
	return __alloc_skb_head(priority, -1);
}

struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone);
static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
					  gfp_t gfp_mask)
{
	return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
}

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
				     unsigned int headroom);
struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
				int newtailroom, gfp_t priority);
int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
				     int offset, int len);
int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
			      int offset, int len);
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);

/**
 *	skb_pad	-		zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error.
 */
static inline int skb_pad(struct sk_buff *skb, int pad)
{
	return __skb_pad(skb, pad, true);
}
#define dev_kfree_skb(a)	consume_skb(a)

int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length);

int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
			 int offset, size_t size);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st);
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st);
void skb_abort_seq_read(struct skb_seq_state *st);

unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config);

/*
 * Packet hash types specify the type of hash in skb_set_hash.
 *
 * Hash types refer to the protocol layer addresses which are used to
 * construct a packet's hash. The hashes are used to differentiate or
 * identify flows of the protocol layer for the hash.
 *
 * Properties of hashes:
 *
 * 1) Two packets in different flows have different hash values
 * 2) Two packets in the same flow should have the same hash value
 *
 * A hash at a higher layer is considered to be more specific. A driver
 * should set the most specific hash it possibly can.
 *
 * A driver cannot indicate a more specific hash than the layer at which
 * a hash was computed. For instance an L3 hash cannot be set as an L4
 * hash.
 *
 * A driver may indicate a hash level which is less specific than the
 * actual layer the hash was computed on. This should only be done if the
 * driver can't unambiguously determine that the hardware computed the
 * hash at the higher layer.
 */
enum pkt_hash_types {
	PKT_HASH_TYPE_NONE,	/* Undefined type */
	PKT_HASH_TYPE_L2,	/* Input: src_MAC, dest_MAC */
	PKT_HASH_TYPE_L3,	/* Input: src_IP, dst_IP */
	PKT_HASH_TYPE_L4,	/* Input: src_IP, dst_IP, src_port, dst_port */
};

static inline void skb_clear_hash(struct sk_buff *skb)
{
	skb->hash = 0;
	skb->sw_hash = 0;
	skb->l4_hash = 0;
}

static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
{
	if (!skb->l4_hash)
		skb_clear_hash(skb);
}

static inline void
__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
{
	skb->l4_hash = is_l4;
	skb->sw_hash = is_sw;
	skb->hash = hash;
}

static inline void
skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
{
	/* Used by drivers to set hash from HW */
	__skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
}

static inline void
__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
{
	__skb_set_hash(skb, hash, true, is_l4);
}

void __skb_get_hash(struct sk_buff *skb);
u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
u32 skb_get_poff(const struct sk_buff *skb);
u32 __skb_get_poff(const struct sk_buff *skb, void *data,
		   const struct flow_keys *keys, int hlen);
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
			    void *data, int hlen_proto);

static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
					int thoff, u8 ip_proto)
{
	return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
}

void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count);

bool __skb_flow_dissect(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container,
			void *data, __be16 proto, int nhoff, int hlen,
			unsigned int flags);

static inline bool skb_flow_dissect(const struct sk_buff *skb,
				    struct flow_dissector *flow_dissector,
				    void *target_container, unsigned int flags)
{
	return __skb_flow_dissect(skb, flow_dissector, target_container,
				  NULL, 0, 0, 0, flags);
}

static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
					      struct flow_keys *flow,
					      unsigned int flags)
{
	memset(flow, 0, sizeof(*flow));
	return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
				  NULL, 0, 0, 0, flags);
}

static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
						  void *data, __be16 proto,
						  int nhoff, int hlen,
						  unsigned int flags)
{
	memset(flow, 0, sizeof(*flow));
	return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
				  data, proto, nhoff, hlen, flags);
}

static inline __u32 skb_get_hash(struct sk_buff *skb)
{
	if (!skb->l4_hash && !skb->sw_hash)
		__skb_get_hash(skb);

	return skb->hash;
}
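
/* Usage sketch (illustrative only): skb_get_hash() is cheap on repeat
 * calls because the result is cached in skb->hash until skb_clear_hash()
 * (or a header-rewriting path) invalidates it:
 *
 *	u32 h = skb_get_hash(skb);	// dissects the flow on first call
 *	queue = h % num_queues;		// e.g. pick an RX/TX queue
 *
 * Drivers that receive a hardware-computed hash should instead call
 * skb_set_hash(skb, hw_hash, PKT_HASH_TYPE_L4) so the stack can reuse it
 * without re-dissecting the packet.
 */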

__u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6);

static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
{
	if (!skb->l4_hash && !skb->sw_hash) {
		struct flow_keys keys;
		__u32 hash = __get_hash_from_flowi6(fl6, &keys);

		__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
	}

	return skb->hash;
}

__u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl);

static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4)
{
	if (!skb->l4_hash && !skb->sw_hash) {
		struct flow_keys keys;
		__u32 hash = __get_hash_from_flowi4(fl4, &keys);

		__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
	}

	return skb->hash;
}

__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);

static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
	return skb->hash;
}

static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
{
	to->hash = from->hash;
	to->sw_hash = from->sw_hash;
	to->l4_hash = from->l4_hash;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end - skb->head;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (const struct sk_buff *) list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (const struct sk_buff *) list;
}

/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (const struct sk_buff *) list;
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb.  It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb.  It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	refcount_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant atomic
 * changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_header_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this.
 *	Note : Check if you can use __skb_header_release() instead.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	__skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Variant of skb_header_release() assuming skb is private to caller.
 *	We can avoid one atomic operation.
 */
static inline void __skb_header_release(struct sk_buff *skb)
{
	skb->nohdr = 1;
	atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return refcount_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt status or with spinlocks held pri must
 *	be GFP_ATOMIC.
 *
 *	NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);

		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
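
/* Usage sketch (illustrative only; my_rcv is a hypothetical handler): a
 * receive path that may modify the skb first makes sure it holds the only
 * reference to the descriptor:
 *
 *	static int my_rcv(struct sk_buff *skb)
 *	{
 *		skb = skb_share_check(skb, GFP_ATOMIC);
 *		if (!skb)
 *			return NET_RX_DROP;	// clone failed, skb freed
 *		...				// safe to modify metadata
 *	}
 *
 * This only de-shares the sk_buff descriptor; to write the packet data
 * itself a possibly-cloned skb still needs skb_unshare() or skb_cow().
 */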

/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);

		/* Free our shared copy */
		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->next;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	skb_peek_next - peek skb following the given one from a queue
 *	@skb: skb to start from
 *	@list_: list to peek at
 *
 *	Returns %NULL when the end of the list is met or a pointer to the
 *	next element. The reference count is not incremented and the
 *	reference is therefore volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
		const struct sk_buff_head *list_)
{
	struct sk_buff *next = skb->next;

	if (next == (struct sk_buff *)list_)
		next = NULL;
	return next;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->prev;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object.  This allows to initialize the list
 *	aspects of an sk_buff_head without reinitializing things like
 *	the spinlock.  It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known to not be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers should need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
		struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}
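
/* Usage sketch (illustrative only; txq and my_transmit are hypothetical):
 * a driver-private FIFO built on sk_buff_head. The locked
 * skb_queue_tail()/skb_dequeue() pair is safe across contexts; the
 * __skb_* variants require the caller to provide its own locking:
 *
 *	struct sk_buff_head txq;
 *	struct sk_buff *skb;
 *
 *	skb_queue_head_init(&txq);		// once, at setup
 *
 *	skb_queue_tail(&txq, skb);		// producer side
 *
 *	while ((skb = skb_dequeue(&txq)) != NULL)	// consumer side
 *		my_transmit(skb);
 */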

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
		struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev = prev->next = newsk;
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	__skb_queue_after - queue a buffer at the list head
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}


static inline bool skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline unsigned int skb_pagelen(const struct sk_buff *skb)
{
	unsigned int i, len = 0;

	for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
	return len + skb_headlen(skb);
}

/**
 * __skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data with @page
 * @size: the length of the data
 *
 * Initialises the @i'th fragment of @skb to point to &size bytes at
 * offset @off within @page.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	/*
	 * Propagate page pfmemalloc to the skb if we can. The problem is
	 * that not all callers have unique ownership of the page but rely
	 * on page_is_pfmemalloc working only on the head page.
	 */
	frag->page.p	  = page;
	frag->page_offset = off;
	skb_frag_size_set(frag, size);

	page = compound_head(page);
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc	= true;
}

/**
 * skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data with @page
 * @size: the length of the data
 *
 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
 * @skb to point to @size bytes at offset @off within @page. In
 * addition updates @skb such that @i is the last fragment.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	__skb_fill_page_desc(skb, i, page, off, size);
	skb_shinfo(skb)->nr_frags = i + 1;
}
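
/* Usage sketch (illustrative only): attaching a caller-owned page as the
 * next paged fragment of an skb. No page reference is taken here, and the
 * skb's own length accounting must be updated by hand:
 *
 *	int i = skb_shinfo(skb)->nr_frags;
 *
 *	skb_fill_page_desc(skb, i, page, 0, len);
 *	skb->len      += len;		// paged data still counts in len
 *	skb->data_len += len;		// and in data_len (non-linear part)
 *	skb->truesize += PAGE_SIZE;	// charge the real memory cost
 *
 * skb_add_rx_frag() below bundles this descriptor fill plus length and
 * truesize bookkeeping into one call.
 */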

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize);

#define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_has_frag_list(skb))
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
void *skb_put(struct sk_buff *skb, unsigned int len);
static inline void *__skb_put(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = __skb_put(skb, len);

	memset(tmp, 0, len);
	return tmp;
}

static inline void *__skb_put_data(struct sk_buff *skb, const void *data,
				   unsigned int len)
{
	void *tmp = __skb_put(skb, len);

	memcpy(tmp, data, len);
	return tmp;
}

static inline void __skb_put_u8(struct sk_buff *skb, u8 val)
{
	*(u8 *)__skb_put(skb, 1) = val;
}

static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memset(tmp, 0, len);

	return tmp;
}

static inline void *skb_put_data(struct sk_buff *skb, const void *data,
				 unsigned int len)
{
	void *tmp = skb_put(skb, len);

	memcpy(tmp, data, len);

	return tmp;
}

static inline void skb_put_u8(struct sk_buff *skb, u8 val)
{
	*(u8 *)skb_put(skb, 1) = val;
}

void *skb_push(struct sk_buff *skb, unsigned int len);
static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

void *skb_pull(struct sk_buff *skb, unsigned int len);
static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

void *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}

void skb_condense(struct sk_buff *skb);

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_availroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 *	allocated by sk_stream_alloc()
 */
static inline int skb_availroom(const struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb))
		return 0;

	return skb->end - skb->tail - skb->reserved_tailroom;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}
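
/* Layout recap (illustrative): the linear area is bounded by four
 * pointers/offsets; put/push/pull/reserve only move data and tail:
 *
 *	head ..... data ........ tail ........ end
 *	      |          |              |
 *	   headroom   payload       tailroom    (then struct skb_shared_info)
 *
 *	skb_reserve(skb, n)	data += n, tail += n  (empty skb only)
 *	skb_push(skb, n)	data -= n, len += n   (grow at the front)
 *	skb_put(skb, n)		tail += n, len += n   (grow at the end)
 *	skb_pull(skb, n)	data += n, len -= n   (trim the front)
 */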

/**
 *	skb_tailroom_reserve - adjust reserved_tailroom
 *	@skb: buffer to alter
 *	@mtu: maximum amount of headlen permitted
 *	@needed_tailroom: minimum amount of reserved_tailroom
 *
 *	Set reserved_tailroom so that headlen can be as large as possible but
 *	not larger than mtu and tailroom cannot be smaller than
 *	needed_tailroom.
 *	The required headroom should already have been reserved before using
 *	this function.
 */
static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
					unsigned int needed_tailroom)
{
	SKB_LINEAR_ASSERT(skb);
	if (mtu < skb_tailroom(skb) - needed_tailroom)
		/* use at most mtu */
		skb->reserved_tailroom = skb_tailroom(skb) - mtu;
	else
		/* use up to all available space */
		skb->reserved_tailroom = needed_tailroom;
}

#define ENCAP_TYPE_ETHER	0
#define ENCAP_TYPE_IPPROTO	1

static inline void skb_set_inner_protocol(struct sk_buff *skb,
					  __be16 protocol)
{
	skb->inner_protocol = protocol;
	skb->inner_protocol_type = ENCAP_TYPE_ETHER;
}

static inline void skb_set_inner_ipproto(struct sk_buff *skb,
					 __u8 ipproto)
{
	skb->inner_ipproto = ipproto;
	skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
}

static inline void skb_reset_inner_headers(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->mac_header;
	skb->inner_network_header = skb->network_header;
	skb->inner_transport_header = skb->transport_header;
}

static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}

static inline unsigned char *skb_inner_transport_header(const struct sk_buff
							*skb)
{
	return skb->head + skb->inner_transport_header;
}

static inline int skb_inner_transport_offset(const struct sk_buff *skb)
{
	return skb_inner_transport_header(skb) - skb->data;
}

static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
	skb->inner_transport_header = skb->data - skb->head;
}

static inline void skb_set_inner_transport_header(struct sk_buff *skb,
						  const int offset)
{
	skb_reset_inner_transport_header(skb);
	skb->inner_transport_header += offset;
}

static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_network_header;
}

static inline void skb_reset_inner_network_header(struct sk_buff *skb)
{
	skb->inner_network_header = skb->data - skb->head;
}

static inline void skb_set_inner_network_header(struct sk_buff *skb,
						const int offset)
{
	skb_reset_inner_network_header(skb);
	skb->inner_network_header += offset;
}

static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_mac_header;
}

static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->data - skb->head;
}

static inline void skb_set_inner_mac_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_inner_mac_header(skb);
	skb->inner_mac_header += offset;
}

static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
{
	return skb->transport_header != (typeof(skb->transport_header))~0U;
}

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_offset(const struct sk_buff *skb)
{
	return skb_mac_header(skb) - skb->data;
}

static inline u32 skb_mac_header_len(const struct sk_buff *skb)
{
	return skb->network_header - skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != (typeof(skb->mac_header))~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}
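
/* Usage sketch (illustrative only; ihl_bytes is a hypothetical variable):
 * receive paths stamp the header offsets as they parse inward, so later
 * layers can use the cheap accessors instead of re-parsing:
 *
 *	skb_reset_mac_header(skb);		// skb->data is at L2
 *	skb_pull(skb, ETH_HLEN);
 *	skb_reset_network_header(skb);		// now at L3
 *	skb_set_transport_header(skb, ihl_bytes);	// L4 at known offset
 *
 *	struct iphdr *iph = (struct iphdr *)skb_network_header(skb);
 *
 * All offsets are stored relative to skb->head, so they stay valid when
 * skb->data later moves via push/pull.
 */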

static inline void skb_pop_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->network_header;
}

static inline void skb_probe_transport_header(struct sk_buff *skb,
					      const int offset_hint)
{
	struct flow_keys keys;

	if (skb_transport_header_was_set(skb))
		return;
	else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
		skb_set_transport_header(skb, keys.control.thoff);
	else
		skb_set_transport_header(skb, offset_hint);
}

static inline void skb_mac_header_rebuild(struct sk_buff *skb)
{
	if (skb_mac_header_was_set(skb)) {
		const unsigned char *old_mac = skb_mac_header(skb);

		skb_set_mac_header(skb, -skb->mac_len);
		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
	}
}

static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}

static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
{
	return skb->head + skb->csum_start;
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
{
	return skb->inner_transport_header - skb->inner_network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline int skb_inner_network_offset(const struct sk_buff *skb)
{
	return skb_inner_network_header(skb) - skb->data;
}

static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
	return pskb_may_pull(skb, skb_network_offset(skb) + len);
}

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 32 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps with a smaller headroom to help small sized packets.
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif

int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb_is_nonlinear(skb))) {
		WARN_ON(1);
		return;
	}
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	__skb_set_length(skb, len);
}

void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
{
	unsigned int diff = len - skb->len;

	if (skb_tailroom(skb) < diff) {
		int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
					   GFP_ATOMIC);
		if (ret)
			return ret;
	}
	__skb_set_length(skb, len);
	return 0;
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor) {
		skb->destructor(skb);
		skb->destructor = NULL;
		skb->sk		= NULL;
	} else {
		BUG_ON(skb->sk);
	}
}

/**
 *	skb_orphan_frags - orphan the frags contained in a buffer
 *	@skb: buffer to orphan frags from
 *	@gfp_mask: allocation mask for replacement pages
 *
 *	For each frag in the SKB which needs a destructor (i.e. has an
 *	owner) create a copy of that frag and release the original
 *	page by calling the destructor.
 */
static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

void skb_rbtree_purge(struct rb_root *root);

void *netdev_alloc_frag(unsigned int fragsz);

struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
				   gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
					       unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

/* legacy helper around __netdev_alloc_skb() */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	return __netdev_alloc_skb(NULL, length, gfp_mask);
}

/* legacy helper around netdev_alloc_skb() */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return netdev_alloc_skb(NULL, length);
}


static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}

static inline void skb_free_frag(void *addr)
{
	page_frag_free(addr);
}

void *napi_alloc_frag(unsigned int fragsz);
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
				 unsigned int length, gfp_t gfp_mask);
static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
					     unsigned int length)
{
	return __napi_alloc_skb(napi, length, GFP_ATOMIC);
}
void napi_consume_skb(struct sk_buff *skb, int budget);

void __kfree_skb_flush(void);
void __kfree_skb_defer(struct sk_buff *skb);

/**
 * __dev_alloc_pages - allocate page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 * @order: size of the allocation
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
					     unsigned int order)
{
	/* This piece of code contains several assumptions.
	 * 1.  This is for device Rx, therefore a cold page is preferred.
	 * 2.  The expectation is the user wants a compound page.
	 * 3.  If requesting a order 0 page it will not be compound
	 *     due to the check to see if order has a value in prep_new_page
	 * 4.  __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
	 *     code in gfp_to_alloc_flags that should be enforcing this.
	 */
	gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC;

	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
}

static inline struct page *dev_alloc_pages(unsigned int order)
{
	return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
}

/**
 * __dev_alloc_page - allocate a page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
{
	return __dev_alloc_pages(gfp_mask, 0);
}

static inline struct page *dev_alloc_page(void)
{
	return dev_alloc_pages(0);
}

/**
 *	skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
 *	@page: The page that was allocated from skb_alloc_page
 *	@skb: The skb that may need pfmemalloc set
 */
static inline void skb_propagate_pfmemalloc(struct page *page,
					    struct sk_buff *skb)
{
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc = true;
}

/**
 * skb_frag_page - retrieve the page referred to by a paged fragment
 * @frag: the paged fragment
 *
 * Returns the &struct page associated with @frag.
 */
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
	return frag->page.p;
}

/**
 * __skb_frag_ref - take an additional reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Takes an additional reference on the paged fragment @frag.
 */
static inline void __skb_frag_ref(skb_frag_t *frag)
{
	get_page(skb_frag_page(frag));
}

/**
 * skb_frag_ref - take an additional reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset.
 *
 * Takes an additional reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_ref(struct sk_buff *skb, int f)
{
	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}

/**
 * __skb_frag_unref - release a reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Releases a reference on the paged fragment @frag.
 */
static inline void __skb_frag_unref(skb_frag_t *frag)
{
	put_page(skb_frag_page(frag));
}

/**
 * skb_frag_unref - release a reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset
 *
 * Releases a reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_unref(struct sk_buff *skb, int f)
{
	__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
}

/**
 * skb_frag_address - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. The page must already
 * be mapped.
 */
static inline void *skb_frag_address(const skb_frag_t *frag)
{
	return page_address(skb_frag_page(frag)) + frag->page_offset;
}

/**
 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. Checks that the page
 * is mapped and returns %NULL otherwise.
 */
static inline void *skb_frag_address_safe(const skb_frag_t *frag)
{
	void *ptr = page_address(skb_frag_page(frag));
	if (unlikely(!ptr))
		return NULL;

	return ptr + frag->page_offset;
}

/**
 * __skb_frag_set_page - sets the page contained in a paged fragment
 * @frag: the paged fragment
 * @page: the page to set
 *
 * Sets the fragment @frag to contain @page.
 */
static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
	frag->page.p = page;
}

/**
 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
 * @skb: the buffer
 * @f: the fragment offset
 * @page: the page to set
 *
 * Sets the @f'th fragment of @skb to contain @page.
 */
static inline void skb_frag_set_page(struct sk_buff *skb, int f,
				     struct page *page)
{
	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
}

bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);

/**
 * skb_frag_dma_map - maps a paged fragment via the DMA API
 * @dev: the device to map the fragment to
 * @frag: the paged fragment to map
 * @offset: the offset within the fragment (starting at the
 *          fragment's own offset)
 * @size: the number of bytes to map
 * @dir: the direction of the mapping (``PCI_DMA_*``)
 *
 * Maps the page associated with @frag to @device.
 */
static inline dma_addr_t skb_frag_dma_map(struct device *dev,
					  const skb_frag_t *frag,
					  size_t offset, size_t size,
					  enum dma_data_direction dir)
{
	return dma_map_page(dev, skb_frag_page(frag),
			    frag->page_offset + offset, size, dir);
}

static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
					gfp_t gfp_mask)
{
	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
}


static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
						  gfp_t gfp_mask)
{
	return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
}

/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not require the data to be copied.
 */
static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int skb_try_make_writable(struct sk_buff *skb,
					unsigned int write_len)
{
	return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
	       pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and original skb is not changed.
 *
 *	The result is skb with writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 *	skb_cow_head - skb_cow but only making the head writable
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	This function is identical to skb_cow except that we replace the
 *	skb_cloned check by skb_header_cloned.  It should be used when
 *	you only need to push on some header and do not need to modify
 *	the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}

/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

/**
 *	__skb_put_padto - increase size and pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *	@free_on_error: free buffer on error
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error if @free_on_error is true.
 */
static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
				  bool free_on_error)
{
	unsigned int size = skb->len;

	if (unlikely(size < len)) {
		len -= size;
		if (__skb_pad(skb, len, free_on_error))
			return -ENOMEM;
		__skb_put(skb, len);
	}
	return 0;
}

/**
 *	skb_put_padto - increase size and pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
{
	return __skb_put_padto(skb, len, true);
}

static inline int skb_add_data(struct sk_buff *skb,
			       struct iov_iter *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		__wsum csum = 0;
		if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
						 &csum, from)) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
				    const struct page *page, int off)
{
	if (i) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == skb_frag_page(frag) &&
		       off == frag->page_offset + skb_frag_size(frag);
	}
	return false;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 * skb_has_shared_frag - can any frag be overwritten
 * @skb: buffer to test
 *
 * Return true if the skb has at least one frag that might be modified
 * by an external entity (as in vmsplice()/sendfile())
 */
static inline bool skb_has_shared_frag(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) &&
	       skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

static __always_inline void
__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
		     unsigned int off)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_block_sub(skb->csum,
					   csum_partial(start, len, 0), off);
	else if (skb->ip_summed == CHECKSUM_PARTIAL &&
		 skb_checksum_start_offset(skb) < 0)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */
static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	__skb_postpull_rcsum(skb, start, len, 0);
}

static __always_inline void
__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
		     unsigned int off)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_block_add(skb->csum,
					   csum_partial(start, len, 0), off);
}

/**
 *	skb_postpush_rcsum - update checksum for received skb after push
 *	@skb: buffer to update
 *	@start: start of data after push
 *	@len: length of data pushed
 *
 *	After doing a push on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum.
 */
static inline void skb_postpush_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	__skb_postpush_rcsum(skb, start, len, 0);
}

void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *	skb_push_rcsum - push skb and update receive checksum
 *	@skb: buffer to update
 *	@len: length of data pulled
 *
 *	This function performs an skb_push on the packet and updates
 *	the CHECKSUM_COMPLETE checksum.  It should be used on
 *	receive path processing instead of skb_push unless you know
 *	that the checksum difference is zero (e.g., a valid IP header)
 *	or you are setting ip_summed to CHECKSUM_NONE.
 */
static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
{
	skb_push(skb, len);
	skb_postpush_rcsum(skb, skb->data, len);
	return skb->data;
}

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets is still valid after the operation.
 */
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	__skb_trim(skb, len);
	return 0;
}

static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __skb_grow(skb, len);
}
3058
3059#define skb_queue_walk(queue, skb) \
3060 for (skb = (queue)->next; \
3061 skb != (struct sk_buff *)(queue); \
3062 skb = skb->next)
3063
3064#define skb_queue_walk_safe(queue, skb, tmp) \
3065 for (skb = (queue)->next, tmp = skb->next; \
3066 skb != (struct sk_buff *)(queue); \
3067 skb = tmp, tmp = skb->next)
3068
3069#define skb_queue_walk_from(queue, skb) \
3070 for (; skb != (struct sk_buff *)(queue); \
3071 skb = skb->next)
3072
3073#define skb_queue_walk_from_safe(queue, skb, tmp) \
3074 for (tmp = skb->next; \
3075 skb != (struct sk_buff *)(queue); \
3076 skb = tmp, tmp = skb->next)
3077
3078#define skb_queue_reverse_walk(queue, skb) \
3079 for (skb = (queue)->prev; \
3080 skb != (struct sk_buff *)(queue); \
3081 skb = skb->prev)
3082
3083#define skb_queue_reverse_walk_safe(queue, skb, tmp) \
3084 for (skb = (queue)->prev, tmp = skb->prev; \
3085 skb != (struct sk_buff *)(queue); \
3086 skb = tmp, tmp = skb->prev)
3087
3088#define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \
3089 for (tmp = skb->prev; \
3090 skb != (struct sk_buff *)(queue); \
3091 skb = tmp, tmp = skb->prev)
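
/*
 * Illustrative sketch: the walkers above iterate a queue without
 * dequeueing anything.  The _safe variants keep a lookahead pointer so
 * the current skb may be unlinked mid-walk.  A hypothetical purge of
 * one flow (caller assumed to hold the queue lock, and matches_flow()
 * is a made-up predicate) might look like:
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_queue_walk_safe(&sk->sk_receive_queue, skb, tmp) {
 *		if (matches_flow(skb)) {
 *			__skb_unlink(skb, &sk->sk_receive_queue);
 *			kfree_skb(skb);
 *		}
 *	}
 */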

static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

#define skb_walk_frags(skb, iter)	\
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
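
/*
 * Illustrative sketch: counting the skbs hanging off a frag_list with
 * the iterator above (read-only; nothing here modifies the list).
 *
 *	struct sk_buff *iter;
 *	unsigned int n = 1;
 *
 *	skb_walk_frags(skb, iter)
 *		n++;
 */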

int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
				const struct sk_buff *skb);
struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
					  struct sk_buff_head *queue,
					  unsigned int flags,
					  void (*destructor)(struct sock *sk,
							     struct sk_buff *skb),
					  int *peeked, int *off, int *err,
					  struct sk_buff **last);
struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
					void (*destructor)(struct sock *sk,
							   struct sk_buff *skb),
					int *peeked, int *off, int *err,
					struct sk_buff **last);
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
				    void (*destructor)(struct sock *sk,
						       struct sk_buff *skb),
				    int *peeked, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
				  int *err);
unsigned int datagram_poll(struct file *file, struct socket *sock,
			   struct poll_table_struct *wait);
int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
			   struct iov_iter *to, int size);
static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
					struct msghdr *msg, int size)
{
	return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
}
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
				   struct msghdr *msg);
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
				struct iov_iter *from, int len);
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
static inline void skb_free_datagram_locked(struct sock *sk,
					    struct sk_buff *skb)
{
	__skb_free_datagram_locked(sk, skb, 0);
}
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
			      int len, __wsum csum);
int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int len,
		    unsigned int flags);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
		 int len, int hlen);
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
void skb_scrub_packet(struct sk_buff *skb, bool xnet);
unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, int write_len);
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
			     gfp_t gfp);

static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
{
	return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT;
}

static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
{
	return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
}
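
/*
 * Illustrative sketch (assumed usage, typical of protocol sendmsg
 * implementations): copy "len" bytes of the user payload out of the
 * msghdr iterator into a locally built buffer.  "buf" is hypothetical.
 *
 *	if (memcpy_from_msg(buf, msg, len))
 *		return -EFAULT;
 */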

struct skb_checksum_ops {
	__wsum (*update)(const void *mem, int len, __wsum wsum);
	__wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
};

extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly;

__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
		      __wsum csum, const struct skb_checksum_ops *ops);
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
		    __wsum csum);

static inline void * __must_check
__skb_header_pointer(const struct sk_buff *skb, int offset,
		     int len, void *data, int hlen, void *buffer)
{
	if (hlen - offset >= len)
		return data + offset;

	if (!skb ||
	    skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}

static inline void * __must_check
skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
{
	return __skb_header_pointer(skb, offset, len, skb->data,
				    skb_headlen(skb), buffer);
}
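
/*
 * Illustrative sketch (assumed usage, in the style of netfilter match
 * code): fetch a TCP header that may or may not be linear.  If the
 * bytes live in the linear area the returned pointer aliases skb->data;
 * otherwise they are copied into the caller's buffer.  "thoff" is
 * hypothetical.
 *
 *	struct tcphdr _tcph;
 *	const struct tcphdr *th;
 *
 *	th = skb_header_pointer(skb, thoff, sizeof(_tcph), &_tcph);
 *	if (!th)
 *		return false;	// truncated packet
 */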

/**
 *	skb_needs_linearize - check if we need to linearize a given skb
 *			      depending on the given device features.
 *	@skb: socket buffer to check
 *	@features: net device features
 *
 *	Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG.
 */
static inline bool skb_needs_linearize(struct sk_buff *skb,
				       netdev_features_t features)
{
	return skb_is_nonlinear(skb) &&
	       ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
		(skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
}

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}
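
/*
 * Illustrative sketch: the helpers above are thin memcpy() wrappers
 * that only touch the linear area; callers must first ensure the bytes
 * are actually linear, e.g. via pskb_may_pull().  "hdr" is a
 * hypothetical local header struct.
 *
 *	if (!pskb_may_pull(skb, sizeof(hdr)))
 *		goto drop;
 *	skb_copy_from_linear_data(skb, &hdr, sizeof(hdr));
 */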

void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and stores
 *	it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct timeval *stamp)
{
	*stamp = ktime_to_timeval(skb->tstamp);
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
				       struct timespec *stamp)
{
	*stamp = ktime_to_timespec(skb->tstamp);
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return 0;
}

struct sk_buff *skb_clone_sk(struct sk_buff *skb);

#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING

void skb_clone_tx_timestamp(struct sk_buff *skb);
bool skb_defer_rx_timestamp(struct sk_buff *skb);

#else

static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	return false;
}

#endif

/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb back to the stack with a
 * timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps
 *
 */
void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps);

void __skb_tstamp_tx(struct sk_buff *orig_skb,
		     struct skb_shared_hwtstamps *hwtstamps,
		     struct sock *sk, int tstype);

/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb:	the original outgoing packet
 * @hwtstamps:	hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket.  Errors are silently ignored.
 */
void skb_tstamp_tx(struct sk_buff *orig_skb,
		   struct skb_shared_hwtstamps *hwtstamps);

/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC Drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * Specifically, one should make absolutely sure that this function is
 * called before TX completion of this packet can trigger.  Otherwise
 * the packet could potentially already be freed.
 *
 * @skb: A socket buffer.
 */
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	skb_clone_tx_timestamp(skb);
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
		skb_tstamp_tx(skb, NULL);
}
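
/*
 * Illustrative sketch (hypothetical MAC driver): skb_tx_timestamp()
 * belongs immediately before the buffer is handed to hardware, so no
 * TX completion can free the skb underneath the timestamping code.
 * my_xmit() and my_hw_queue() are made-up names.
 *
 *	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		...
 *		skb_tx_timestamp(skb);
 *		my_hw_queue(dev, skb);
 *		return NETDEV_TX_OK;
 *	}
 */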

/**
 * skb_complete_wifi_ack - deliver skb with wifi status
 *
 * @skb: the original outgoing packet
 * @acked: ack status
 *
 */
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
__sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
		skb->csum_valid ||
		(skb->ip_summed == CHECKSUM_PARTIAL &&
		 skb_checksum_start_offset(skb) >= 0));
}

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify that checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is okay, with the pseudo header checksum supplied in
 *	skb->csum beforehand.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}

static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level == 0)
			skb->ip_summed = CHECKSUM_NONE;
		else
			skb->csum_level--;
	}
}

static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
			skb->csum_level++;
	} else if (skb->ip_summed == CHECKSUM_NONE) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = 0;
	}
}
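
/*
 * Illustrative sketch (assumed usage): csum_level counts how many
 * nested headers a CHECKSUM_UNNECESSARY verdict covers.  A
 * decapsulating receive path consumes one level per popped header,
 * while a path that has independently verified an inner checksum may
 * add one back:
 *
 *	__skb_incr_checksum_unnecessary(skb);	// inner csum verified
 */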

/* Check if we need to perform checksum complete validation.
 *
 * Returns true if checksum complete is needed, false otherwise
 * (either checksum is unnecessary or zero checksum is allowed).
 */
static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
						  bool zero_okay,
						  __sum16 check)
{
	if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
		skb->csum_valid = 1;
		__skb_decr_checksum_unnecessary(skb);
		return false;
	}

	return true;
}

/* For small packets <= CHECKSUM_BREAK, perform checksum complete directly
 * in checksum_init.
 */
#define CHECKSUM_BREAK 76

/* Unset checksum-complete
 *
 * Unset checksum complete can be done when packet is being modified
 * (uncompressed for instance) and checksum-complete value is
 * invalidated.
 */
static inline void skb_checksum_complete_unset(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/* Validate (init) checksum based on checksum complete.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete. In the latter
 *	case the ip_summed will not be CHECKSUM_UNNECESSARY and the pseudo
 *	checksum is stored in skb->csum for use in __skb_checksum_complete
 *   non-zero: value of invalid checksum
 */
static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
						       bool complete,
						       __wsum psum)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!csum_fold(csum_add(psum, skb->csum))) {
			skb->csum_valid = 1;
			return 0;
		}
	}

	skb->csum = psum;

	if (complete || skb->len <= CHECKSUM_BREAK) {
		__sum16 csum;

		csum = __skb_checksum_complete(skb);
		skb->csum_valid = !csum;
		return csum;
	}

	return 0;
}

static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
{
	return 0;
}

/* Perform checksum validate (init). Note that this is a macro since we only
 * want to calculate the pseudo header which is an input function if necessary.
 * First we try to validate without any computation (checksum unnecessary) and
 * then calculate based on checksum complete calling the function to compute
 * pseudo header.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete
 *   non-zero: value of invalid checksum
 */
#define __skb_checksum_validate(skb, proto, complete,			\
				zero_okay, check, compute_pseudo)	\
({									\
	__sum16 __ret = 0;						\
	skb->csum_valid = 0;						\
	if (__skb_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_checksum_validate_complete(skb,		\
				complete, compute_pseudo(skb, proto));	\
	__ret;								\
})

#define skb_checksum_init(skb, proto, compute_pseudo)			\
	__skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)

#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo)	\
	__skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)

#define skb_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)

#define skb_checksum_validate_zero_check(skb, proto, check,		\
					 compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)

#define skb_checksum_simple_validate(skb)				\
	__skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
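
/*
 * Illustrative sketch (assumed usage, in the style of L4 receive
 * code): initialize checksum state with the pseudo-header supplied by
 * a protocol-specific helper; a zero result means the checksum was
 * either verified or deferred to skb_checksum_complete().  The
 * inet_compute_pseudo() helper name is assumed here.
 *
 *	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
 *		goto csum_error;
 */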

static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
{
	return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid);
}

static inline void __skb_checksum_convert(struct sk_buff *skb,
					  __sum16 check, __wsum pseudo)
{
	skb->csum = ~pseudo;
	skb->ip_summed = CHECKSUM_COMPLETE;
}

#define skb_checksum_try_convert(skb, proto, check, compute_pseudo)	\
do {									\
	if (__skb_checksum_convert_check(skb))				\
		__skb_checksum_convert(skb, check,			\
				       compute_pseudo(skb, proto));	\
} while (0)

static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
					      u16 start, u16 offset)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
	skb->csum_offset = offset - start;
}

/* Update skbuf and packet to reflect the remote checksum offload operation.
 * When called, ptr indicates the starting point for skb->csum when
 * ip_summed is CHECKSUM_COMPLETE. If we need to create checksum complete
 * here, skb_postpull_rcsum is done on the csum in the skb.
 */
static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
				       int start, int offset, bool nopartial)
{
	__wsum delta;

	if (!nopartial) {
		skb_remcsum_adjust_partial(skb, ptr, start, offset);
		return;
	}

	if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
		__skb_checksum_complete(skb);
		skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
	}

	delta = remcsum_adjust(ptr, skb->csum, start, offset);

	/* Adjust skb->csum since we changed the packet */
	skb->csum = csum_add(skb->csum, delta);
}

static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	return (void *)(skb->_nfct & SKB_NFCT_PTRMASK);
#else
	return NULL;
#endif
}

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && refcount_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		refcount_inc(&nf_bridge->use);
}
#endif
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb_nfct(skb));
	skb->_nfct = 0;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

static inline void nf_reset_trace(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
	skb->nf_trace = 0;
#endif
}

/* Note: This doesn't put any conntrack and bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
			     bool copy)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->_nfct = src->_nfct;
	nf_conntrack_get(skb_nfct(src));
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	dst->nf_bridge = src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
	if (copy)
		dst->nf_trace = src->nf_trace;
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb_nfct(dst));
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src, true);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline bool skb_irq_freeable(const struct sk_buff *skb)
{
	return !skb->destructor &&
#if IS_ENABLED(CONFIG_XFRM)
	       !skb->sp &&
#endif
	       !skb_nfct(skb) &&
	       !skb->_skb_refdst &&
	       !skb_has_frag_list(skb);
}

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}

static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val)
{
	skb->dst_pending_confirm = val;
}

static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
{
	return skb->dst_pending_confirm != 0;
}

static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
	return skb->sp;
#else
	return NULL;
#endif
}

/* Keeps track of mac header offset relative to skb->head.
 * It is useful for TSO of Tunneling protocol. E.g. GRE.
 * For non-tunnel skb it points to skb_mac_header() and for
 * tunnel skb it points to outer mac header.
 * Keeps track also of the inner mac header offset.
 */
struct skb_gso_cb {
	union {
		int	mac_offset;
		int	data_offset;
	};
	int	encap_level;
	__wsum	csum;
	__u16	csum_start;
};
#define SKB_SGO_CB_OFFSET	32	/* sic: "SGO" appears to be a historical misspelling of "GSO" */
#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))

static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
{
	return (skb_mac_header(inner_skb) - inner_skb->head) -
	       SKB_GSO_CB(inner_skb)->mac_offset;
}

static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
{
	int new_headroom, headroom;
	int ret;

	headroom = skb_headroom(skb);
	ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
	if (ret)
		return ret;

	new_headroom = skb_headroom(skb);
	SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
	return 0;
}

static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
{
	/* Do not update partial checksums if remote checksum is enabled. */
	if (skb->remcsum_offload)
		return;

	SKB_GSO_CB(skb)->csum = res;
	SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
}

/* Compute the checksum for a gso segment. First compute the checksum value
 * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
 * then add in skb->csum (checksum from csum_start to end of packet).
 * skb->csum and csum_start are then updated to reflect the checksum of the
 * resultant packet starting from the transport header -- the resultant
 * checksum is in the res argument (i.e. normally zero or ~ of checksum of a
 * pseudo header).
 */
static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
{
	unsigned char *csum_start = skb_transport_header(skb);
	int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
	__wsum partial = SKB_GSO_CB(skb)->csum;

	SKB_GSO_CB(skb)->csum = res;
	SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;

	return csum_fold(csum_partial(csum_start, plen, partial));
}

static inline bool skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

static inline void skb_gso_reset(struct sk_buff *skb)
{
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_segs = 0;
	skb_shinfo(skb)->gso_type = 0;
}

void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set.
	 */
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one.  Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper, to document places where we make this assertion.
 */
static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
				     unsigned int transport_len,
				     __sum16(*skb_chkf)(struct sk_buff *skb));

/**
 * skb_head_is_locked - Determine if the skb->head is locked down
 * @skb: skb to check
 *
 * The head on skbs built around a head frag can be removed if they are
 * not cloned.  This function returns true if the skb head is locked down
 * due to either being allocated via kmalloc, or by being a clone with
 * multiple references to the head.
 */
static inline bool skb_head_is_locked(const struct sk_buff *skb)
{
	return !skb->head_frag || skb_cloned(skb);
}

/**
 * skb_gso_network_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_network_seglen is used to determine the real size of the
 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
 *
 * The MAC/L2 header is not accounted for.
 */
static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) -
			       skb_network_header(skb);
	return hdr_len + skb_gso_transport_seglen(skb);
}

/* Local Checksum Offload.
 * Compute outer checksum based on the assumption that the
 * inner checksum will be offloaded later.
 * See Documentation/networking/checksum-offloads.txt for
 * explanation of how this works.
 * Fill in outer checksum adjustment (e.g. with sum of outer
 * pseudo-header) before calling.
 * Also ensure that inner checksum is in linear data area.
 */
static inline __wsum lco_csum(struct sk_buff *skb)
{
	unsigned char *csum_start = skb_checksum_start(skb);
	unsigned char *l4_hdr = skb_transport_header(skb);
	__wsum partial;

	/* Start with complement of inner checksum adjustment */
	partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
						    skb->csum_offset));

	/* Add in checksum of our headers (incl. outer checksum
	 * adjustment filled in by caller) and return result.
	 */
	return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
}

#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */