/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/rbtree.h>
#include <linux/socket.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/sched.h>
#include <net/flow_dissector.h>
#include <linux/splice.h>
#include <linux/in6.h>
#include <linux/if_packet.h>
#include <net/flow.h>

/* Checksum handling, recorded in skb->ip_summed:
 *
 * CHECKSUM_NONE:
 *
 *   The device did not checksum this packet, e.g. due to lack of
 *   capabilities.  The packet contains a full (though not verified)
 *   checksum in the packet itself but not in skb->csum; skb->csum is
 *   undefined in this case.
 *
 * CHECKSUM_UNNECESSARY:
 *
 *   The hardware did not compute the full checksum but parsed headers and
 *   verified the checksum(s) of specific protocols.  skb->csum is still
 *   undefined.  A driver or device must never modify the checksum field
 *   in the packet even after verifying it.  skb->csum_level holds the
 *   number of consecutive verified checksums minus one, so a csum_level
 *   of zero means one checksum was verified.
 *
 * CHECKSUM_COMPLETE:
 *
 *   The most generic mode.  The device supplied the checksum of the whole
 *   packet as seen by netif_rx() in skb->csum; the hardware does not need
 *   to parse L3/L4 headers to implement this.
 *
 * CHECKSUM_PARTIAL:
 *
 *   Used on output when checksumming is deferred to the device (or to a
 *   software fallback).  The stack sets up skb->csum_start and
 *   skb->csum_offset: the device must compute the checksum over the area
 *   starting at skb->head + skb->csum_start and store the result at
 *   offset skb->csum_offset from that start.
 */

#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

/* Maximum value in skb->csum_level */
#define SKB_MAX_CSUM_LEVEL	3
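
/*
 * Illustrative sketch (not part of this header): how a hypothetical tunnel
 * driver might report that it verified both the outer and the inner
 * transport checksum of a received packet:
 *
 *	skb->ip_summed = CHECKSUM_UNNECESSARY;
 *	skb->csum_level = 1;	// 0-based: two consecutive checksums verified
 */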

#define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

struct net_device;
struct scatterlist;
struct pipe_inode_info;
struct iov_iter;
struct napi_struct;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
	atomic_t		use;
	enum {
		BRNF_PROTO_UNCHANGED,
		BRNF_PROTO_8021Q,
		BRNF_PROTO_PPPOE
	} orig_proto:8;
	u8			pkt_otherhost:1;
	u8			in_prerouting:1;
	u8			bridged_dnat:1;
	__u16			frag_max_size;
	struct net_device	*physindev;

	/* always valid & non-NULL from FORWARD on, for physdev match */
	struct net_device	*physoutdev;
	union {
		/* prerouting: detect dnat in orig/reply direction */
		__be32          ipv4_daddr;
		struct in6_addr ipv6_daddr;

		/* after prerouting + nat detected: store original source
		 * mac since neigh resolution overwrites it, only used while
		 * skb is out in neigh layer.
		 */
		char neigh_header[8];
	};
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list we
 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
 * buffers which do not start on a page boundary.
 *
 * Since GRO uses frags we allocate at least 16 regardless of page
 * size.
 */
#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif
extern int sysctl_max_skb_frags;

/* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to
 * segment using its current segmentation instead.
 */
#define GSO_BY_FRAGS	0xFFFF

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct {
		struct page *p;
	} page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
	__u32 size;
#else
	__u16 page_offset;
	__u16 size;
#endif
};

static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->size;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->size = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->size += delta;
}

static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->size -= delta;
}
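
/*
 * Illustrative sketch (not part of this header): summing the paged
 * fragments of an skb with the accessor above; for an skb without a
 * frag_list this total equals skb->data_len:
 *
 *	unsigned int i, paged = 0;
 *
 *	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 *		paged += skb_frag_size(&skb_shinfo(skb)->frags[i]);
 */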

#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since arbitrary point in time
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
};

/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp when queueing packet to NIC */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* device driver supports TX zero-copy buffers */
	SKBTX_DEV_ZEROCOPY = 1 << 3,

	/* generate wifi status information (where possible) */
	SKBTX_WIFI_STATUS = 1 << 4,

	/* This indicates at least one fragment might be overwritten
	 * (as in vmsplice(), sendfile() ...).
	 * If we need to compute a TX checksum, we'll need to copy
	 * all frags to avoid possible bad checksum.
	 */
	SKBTX_SHARED_FRAG = 1 << 5,

	/* generate software time stamp when entering packet scheduling */
	SKBTX_SCHED_TSTAMP = 1 << 6,
};

#define SKBTX_ANY_SW_TSTAMP	(SKBTX_SW_TSTAMP    | \
				 SKBTX_SCHED_TSTAMP)
#define SKBTX_ANY_TSTAMP	(SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)

/*
 * The callback notifies userspace to release buffers when skb DMA is done in
 * lower device, the skb last reference should be 0 when calling this.
 * The zerocopy_success argument is true if zero copy transmit occurred,
 * false on data copy or out of memory error caused by data copy attempt.
 * The ctx field is used to track device context.
 * The desc field is used to track userspace buffer index.
 */
struct ubuf_info {
	void (*callback)(struct ubuf_info *, bool zerocopy_success);
	void *ctx;
	unsigned long desc;
};

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	unsigned char	nr_frags;
	__u8		tx_flags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short  gso_type;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	u32		tskey;
	__be32          ip6_frag_id;

	/*
	 * Warning : all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void *		destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)


enum {
	SKB_FCLONE_UNAVAILABLE,	/* skb has no fclone (from head_cache) */
	SKB_FCLONE_ORIG,	/* orig skb (from fclone_cache) */
	SKB_FCLONE_CLONE,	/* companion fclone skb (from fclone_cache) */
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCP_FIXEDID = 1 << 4,

	SKB_GSO_TCPV6 = 1 << 5,

	SKB_GSO_FCOE = 1 << 6,

	SKB_GSO_GRE = 1 << 7,

	SKB_GSO_GRE_CSUM = 1 << 8,

	SKB_GSO_IPXIP4 = 1 << 9,

	SKB_GSO_IPXIP6 = 1 << 10,

	SKB_GSO_UDP_TUNNEL = 1 << 11,

	SKB_GSO_UDP_TUNNEL_CSUM = 1 << 12,

	SKB_GSO_PARTIAL = 1 << 13,

	SKB_GSO_TUNNEL_REMCSUM = 1 << 14,

	SKB_GSO_SCTP = 1 << 15,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

/**
 * struct skb_mstamp - multi resolution time stamps
 * @stamp_us: timestamp in us resolution
 * @stamp_jiffies: timestamp in jiffies
 */
struct skb_mstamp {
	union {
		u64		v64;
		struct {
			u32	stamp_us;
			u32	stamp_jiffies;
		};
	};
};

/**
 * skb_mstamp_get - get current timestamp
 * @cl: place to store timestamps
 */
static inline void skb_mstamp_get(struct skb_mstamp *cl)
{
	u64 val = local_clock();

	do_div(val, NSEC_PER_USEC);
	cl->stamp_us = (u32)val;
	cl->stamp_jiffies = (u32)jiffies;
}

/**
 * skb_mstamp_us_delta - compute the difference in usec between two skb_mstamp
 * @t1: pointer to newest sample
 * @t0: pointer to oldest sample
 */
static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
				      const struct skb_mstamp *t0)
{
	s32 delta_us = t1->stamp_us - t0->stamp_us;
	u32 delta_jiffies = t1->stamp_jiffies - t0->stamp_jiffies;

	/* If delta_us is negative, this might be because interval is too big,
	 * or local_clock() drift is too big : fallback using jiffies.
	 */
	if (delta_us <= 0 ||
	    delta_jiffies >= (INT_MAX / (USEC_PER_SEC / HZ)))
		delta_us = jiffies_to_usecs(delta_jiffies);

	return delta_us;
}

static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
				    const struct skb_mstamp *t0)
{
	s32 diff = t1->stamp_jiffies - t0->stamp_jiffies;

	if (!diff)
		diff = t1->stamp_us - t0->stamp_us;
	return diff > 0;
}
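
/*
 * Illustrative sketch (not part of this header): taking an elapsed-time
 * sample in microseconds with the helpers above, e.g. for an RTT
 * measurement:
 *
 *	struct skb_mstamp sent, now;
 *	u32 rtt_us;
 *
 *	skb_mstamp_get(&sent);		// at transmit time
 *	...
 *	skb_mstamp_get(&now);		// when the ACK arrives
 *	rtt_us = skb_mstamp_us_delta(&now, &sent);
 */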

/**
 * struct sk_buff - socket buffer
 * @next: Next buffer in list
 * @prev: Previous buffer in list
 * @rbnode: RB tree node, alternative to next/prev for netem/tcp stack
 * @tstamp: Time we arrived/left
 * @sk: Socket we are owned by
 * @dev: Device we arrived on/are leaving by
 * @cb: Control buffer. Free for use by every layer. Put private vars here
 * @_skb_refdst: destination entry (with norefcount bit)
 * @len: Length of actual data
 * @data_len: Data length held in paged fragments and frag_list
 * @mac_len: Length of link layer header
 * @hdr_len: writable header length of cloned skb
 * @csum: Checksum (must include start/offset pair)
 * @csum_start: Offset from skb->head where checksumming should start
 * @csum_offset: Offset from csum_start where checksum should be stored
 * @priority: Packet queueing priority
 * @cloned: Head may be cloned (check refcnt to be sure)
 * @ip_summed: Driver fed us an IP checksum
 * @nohdr: Payload reference only, must not modify header
 * @pkt_type: Packet class
 * @fclone: skbuff clone status
 * @protocol: Packet protocol from driver
 * @truesize: Buffer size
 * @users: User count - see {datagram,tcp}.c
 */
struct sk_buff {
	union {
		struct {
			/* These two members must be first. */
			struct sk_buff		*next;
			struct sk_buff		*prev;

			union {
				ktime_t		tstamp;
				struct skb_mstamp skb_mstamp;
			};
		};
		struct rb_node	rbnode; /* used in netem & tcp stack */
	};
	struct sock		*sk;
	struct net_device	*dev;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	unsigned long		_skb_refdst;
	void			(*destructor)(struct sk_buff *skb);
#ifdef CONFIG_XFRM
	struct	sec_path	*sp;
#endif
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack	*nfct;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	struct nf_bridge_info	*nf_bridge;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;

	/* Following fields are _not_ copied in __copy_skb_header()
	 * Note that queue_mapping is here mostly to fill a hole.
	 */
	kmemcheck_bitfield_begin(flags1);
	__u16			queue_mapping;

/* if you move cloned around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define CLONED_MASK	(1 << 7)
#else
#define CLONED_MASK	1
#endif
#define CLONED_OFFSET()		offsetof(struct sk_buff, __cloned_offset)

	__u8			__cloned_offset[0];
	__u8			cloned:1,
				nohdr:1,
				fclone:2,
				peeked:1,
				head_frag:1,
				xmit_more:1,
				__unused:1; /* one bit hole */
	kmemcheck_bitfield_end(flags1);

	/* fields enclosed in headers_start/headers_end are copied
	 * using a single memcpy() in __copy_skb_header()
	 */
	__u32			headers_start[0];

/* if you move pkt_type around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_TYPE_MAX	(7 << 5)
#else
#define PKT_TYPE_MAX	7
#endif
#define PKT_TYPE_OFFSET()	offsetof(struct sk_buff, __pkt_type_offset)

	__u8			__pkt_type_offset[0];
	__u8			pkt_type:3;
	__u8			pfmemalloc:1;
	__u8			ignore_df:1;
	__u8			nfctinfo:3;

	__u8			nf_trace:1;
	__u8			ip_summed:2;
	__u8			ooo_okay:1;
	__u8			l4_hash:1;
	__u8			sw_hash:1;
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;

	__u8			no_fcs:1;
	/* Indicates the inner headers are valid in the skbuff. */
	__u8			encapsulation:1;
	__u8			encap_hdr_csum:1;
	__u8			csum_valid:1;
	__u8			csum_complete_sw:1;
	__u8			csum_level:2;
	__u8			csum_bad:1;

#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif
	__u8			ipvs_property:1;
	__u8			inner_protocol_type:1;
	__u8			remcsum_offload:1;
#ifdef CONFIG_NET_SWITCHDEV
	__u8			offload_fwd_mark:1;
#endif

#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif

	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	int			skb_iif;
	__u32			hash;
	__be16			vlan_proto;
	__u16			vlan_tci;
#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
	union {
		unsigned int	napi_id;
		unsigned int	sender_cpu;
	};
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif

	union {
		__u32		mark;
		__u32		reserved_tailroom;
	};

	union {
		__be16		inner_protocol;
		__u8		inner_ipproto;
	};

	__u16			inner_transport_header;
	__u16			inner_network_header;
	__u16			inner_mac_header;

	__be16			protocol;
	__u16			transport_header;
	__u16			network_header;
	__u16			mac_header;

	__u32			headers_end[0];

	/* These elements must be at the end, see alloc_skb() for details.  */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	atomic_t		users;
};

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>


#define SKB_ALLOC_FCLONE	0x01
#define SKB_ALLOC_RX		0x02
#define SKB_ALLOC_NAPI		0x04

/* Returns true if the skb was allocated from PFMEMALLOC reserves */
static inline bool skb_pfmemalloc(const struct sk_buff *skb)
{
	return unlikely(skb->pfmemalloc);
}

/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if refcount was _not_ taken
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of reference taken or not.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check we still are in a
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}

/**
 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * No reference is taken and no dst_release will be called. While for
 * cached dsts deferred reclaim is a basic feature, for entries that are
 * not cached it is caller's job to guarantee that last dst_release for
 * provided dst happens when nobody uses it, eg. after a RCU grace period.
 */
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
}

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

/* For mangling skb->pkt_type from user space side from applications
 * such as nft, tc, etc, it is only allowed to assign one of the
 * supported values.
 */
static inline bool skb_pkt_type_ok(u32 ptype)
{
	return ptype <= PACKET_OTHERHOST;
}

void kfree_skb(struct sk_buff *skb);
void kfree_skb_list(struct sk_buff *segs);
void skb_tx_error(struct sk_buff *skb);
void consume_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;

void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
		      bool *fragstolen, int *delta_truesize);

struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
			    int node);
struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}
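
/*
 * Illustrative sketch (not part of this header): the common allocate/use/
 * free pattern.  kfree_skb() is for error and drop paths, consume_skb()
 * for a normal end of life:
 *
 *	struct sk_buff *skb = alloc_skb(size, GFP_KERNEL);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	...
 *	consume_skb(skb);
 */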

struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
				     unsigned long data_len,
				     int max_page_order,
				     int *errcode,
				     gfp_t gfp_mask);

/* Layout of fast clones : [skb1][skb2][fclone_ref] */
struct sk_buff_fclones {
	struct sk_buff	skb1;

	struct sk_buff	skb2;

	atomic_t	fclone_ref;
};

/**
 *	skb_fclone_busy - check if fclone is busy
 *	@sk: socket
 *	@skb: buffer
 *
 * Returns true if skb is a fast clone, and its clone is not freed.
 * Some drivers call skb_orphan() in their ndo_start_xmit(),
 * so we also check that this didn't happen.
 */
static inline bool skb_fclone_busy(const struct sock *sk,
				   const struct sk_buff *skb)
{
	const struct sk_buff_fclones *fclones;

	fclones = container_of(skb, struct sk_buff_fclones, skb1);

	return skb->fclone == SKB_FCLONE_ORIG &&
	       atomic_read(&fclones->fclone_ref) > 1 &&
	       fclones->skb2.sk == sk;
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
}

struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
static inline struct sk_buff *alloc_skb_head(gfp_t priority)
{
	return __alloc_skb_head(priority, -1);
}

struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone);
static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
					  gfp_t gfp_mask)
{
	return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
}

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
				     unsigned int headroom);
struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
				int newtailroom, gfp_t priority);
int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
			int offset, int len);
int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
		 int len);
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
int skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	consume_skb(a)

int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length);

int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
			 int offset, size_t size);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st);
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st);
void skb_abort_seq_read(struct skb_seq_state *st);

unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config);

/*
 * Packet hash types specify the type of hash in skb_set_hash.
 *
 * Hash types refer to the protocol layer addresses which are used to
 * construct a packet's hash. The hashes are used to differentiate or identify
 * flows of the protocol layer for the hash.
 *
 * Properties of hashes:
 *
 * 1) Two packets in different flows have different hash values
 * 2) Two packets in the same flow should have the same hash value
 *
 * A hash at a higher layer is considered to be more specific. A driver should
 * set the most specific hash type that is possible.
 */
enum pkt_hash_types {
	PKT_HASH_TYPE_NONE,	/* Undefined type */
	PKT_HASH_TYPE_L2,	/* Input: src_MAC, dest_MAC */
	PKT_HASH_TYPE_L3,	/* Input: src_IP, dst_IP */
	PKT_HASH_TYPE_L4,	/* Input: src_IP, dst_IP, src_port, dst_port */
};

static inline void skb_clear_hash(struct sk_buff *skb)
{
	skb->hash = 0;
	skb->sw_hash = 0;
	skb->l4_hash = 0;
}

static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
{
	if (!skb->l4_hash)
		skb_clear_hash(skb);
}

static inline void
__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
{
	skb->l4_hash = is_l4;
	skb->sw_hash = is_sw;
	skb->hash = hash;
}

static inline void
skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
{
	/* Used by drivers to set hash from HW */
	__skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
}

static inline void
__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
{
	__skb_set_hash(skb, hash, true, is_l4);
}

void __skb_get_hash(struct sk_buff *skb);
u32 __skb_get_hash_symmetric(struct sk_buff *skb);
u32 skb_get_poff(const struct sk_buff *skb);
u32 __skb_get_poff(const struct sk_buff *skb, void *data,
		   const struct flow_keys *keys, int hlen);
__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
			    void *data, int hlen_proto);

static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
					int thoff, u8 ip_proto)
{
	return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
}

void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
			     const struct flow_dissector_key *key,
			     unsigned int key_count);

bool __skb_flow_dissect(const struct sk_buff *skb,
			struct flow_dissector *flow_dissector,
			void *target_container,
			void *data, __be16 proto, int nhoff, int hlen,
			unsigned int flags);

static inline bool skb_flow_dissect(const struct sk_buff *skb,
				    struct flow_dissector *flow_dissector,
				    void *target_container, unsigned int flags)
{
	return __skb_flow_dissect(skb, flow_dissector, target_container,
				  NULL, 0, 0, 0, flags);
}

static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
					      struct flow_keys *flow,
					      unsigned int flags)
{
	memset(flow, 0, sizeof(*flow));
	return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
				  NULL, 0, 0, 0, flags);
}

static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
						  void *data, __be16 proto,
						  int nhoff, int hlen,
						  unsigned int flags)
{
	memset(flow, 0, sizeof(*flow));
	return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
				  data, proto, nhoff, hlen, flags);
}

static inline __u32 skb_get_hash(struct sk_buff *skb)
{
	if (!skb->l4_hash && !skb->sw_hash)
		__skb_get_hash(skb);

	return skb->hash;
}
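
/*
 * Illustrative sketch (not part of this header): spreading flows across
 * transmit queues with the flow hash; priv->num_queues is a hypothetical
 * driver field.  skb_get_hash() computes a software flow hash on demand
 * when the device did not already provide one via skb_set_hash():
 *
 *	u16 queue = skb_get_hash(skb) % priv->num_queues;
 */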

__u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6);

static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
{
	if (!skb->l4_hash && !skb->sw_hash) {
		struct flow_keys keys;
		__u32 hash = __get_hash_from_flowi6(fl6, &keys);

		__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
	}

	return skb->hash;
}

__u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl);

static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4)
{
	if (!skb->l4_hash && !skb->sw_hash) {
		struct flow_keys keys;
		__u32 hash = __get_hash_from_flowi4(fl4, &keys);

		__skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
	}

	return skb->hash;
}

__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);

static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
	return skb->hash;
}

static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
{
	to->hash = from->hash;
	to->sw_hash = from->sw_hash;
	to->l4_hash = from->l4_hash;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end - skb->head;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (const struct sk_buff *) list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (const struct sk_buff *) list;
}

/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (const struct sk_buff *) list;
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb.  It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb.  It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant atomic changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_header_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this.
 *	Note : Check if you can use __skb_header_release() instead.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	__skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Variant of skb_header_release() assuming skb is private to caller.
 *	We can avoid one atomic operation.
 */
static inline void __skb_header_release(struct sk_buff *skb)
{
	skb->nohdr = 1;
	atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt status or with spinlocks held pri must
 *	be %GFP_ATOMIC.
 *
 *	NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);

		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
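
/*
 * Illustrative sketch (not part of this header): a receive handler making
 * sure it owns the skb before modifying it:
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return NET_RX_DROP;	// original was freed on failure
 */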

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);

		/* Free our shared copy */
		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->next;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	skb_peek_next - peek skb following the given one from a queue
 *	@skb: skb to start from
 *	@list_: list to peek at
 *
 *	Returns %NULL when the end of the list is met or a pointer to the
 *	next element. The reference count is not incremented and the
 *	reference is therefore volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
					    const struct sk_buff_head *list_)
{
	struct sk_buff *next = skb->next;

	if (next == (struct sk_buff *)list_)
		next = NULL;
	return next;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->prev;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;

}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object.  This allows to initialize the list
 *	aspects of an sk_buff_head without reinitializing things like
 *	the spinlock.  It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known to not be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only).
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
		struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
		struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	__skb_queue_after - queue a buffer at the list head
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				   struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
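
/*
 * Illustrative sketch (not part of this header): draining a queue.  The
 * locked skb_dequeue() is safe against concurrent users; the __ variant
 * requires the caller to serialize access itself:
 *
 *	struct sk_buff *skb;
 *
 *	while ((skb = skb_dequeue(&queue)) != NULL)
 *		process(skb);		// process() is a hypothetical consumer
 */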

static inline bool skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
	return len + skb_headlen(skb);
}

/**
 * __skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data within @page
 * @size: the length of the data
 *
 * Initialises the @i'th fragment of @skb to point to &size bytes at
 * offset @off within @page.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	/*
	 * Propagate page pfmemalloc to the skb if we can. The problem is
	 * that not all callers have unique ownership of the page but rely
	 * on page_is_pfmemalloc doing the right thing(tm).
	 */
	frag->page.p		  = page;
	frag->page_offset	  = off;
	skb_frag_size_set(frag, size);

	page = compound_head(page);
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc	= true;
}

/**
 * skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data within @page
 * @size: the length of the data
 *
 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
 * @skb to point to @size bytes at offset @off within @page. In
 * addition updates @skb such that @i is the last fragment.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	__skb_fill_page_desc(skb, i, page, off, size);
	skb_shinfo(skb)->nr_frags = i + 1;
}
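
/*
 * Illustrative sketch (not part of this header): attaching one page of
 * payload as fragment 0 of a private, freshly built skb.  The fill
 * helpers above do not account the bytes, so the caller must do it (or
 * use skb_add_rx_frag(), declared below, which bundles the accounting):
 *
 *	get_page(page);			// the frag keeps its own reference
 *	skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
 *	skb->len      += PAGE_SIZE;
 *	skb->data_len += PAGE_SIZE;
 *	skb->truesize += PAGE_SIZE;
 */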

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize);

#define SKB_PAGE_ASSERT(skb) 	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb) 	BUG_ON(skb_has_frag_list(skb))
#define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_availroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 *	allocated by sk_stream_alloc()
 */
static inline int skb_availroom(const struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb))
		return 0;

	return skb->end - skb->tail - skb->reserved_tailroom;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}
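
/*
 * Illustrative sketch (not part of this header): the canonical way to
 * build an outgoing packet with room for lower-layer headers:
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));	// headroom only
 *	skb_put(skb, payload_len);			// then the data
 */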

/**
 *	skb_tailroom_reserve - adjust reserved_tailroom
 *	@skb: buffer to alter
 *	@mtu: maximum amount of headlen permitted
 *	@needed_tailroom: minimum amount of reserved_tailroom
 *
 *	Set reserved_tailroom so that headlen can be as large as possible but
 *	not larger than mtu and tailroom cannot be smaller than
 *	needed_tailroom.
 *	The required headroom should already have been reserved before using
 *	this function.
 */
static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
					unsigned int needed_tailroom)
{
	SKB_LINEAR_ASSERT(skb);
	if (mtu < skb_tailroom(skb) - needed_tailroom)
		/* use at most mtu */
		skb->reserved_tailroom = skb_tailroom(skb) - mtu;
	else
		/* use up to all available space */
		skb->reserved_tailroom = needed_tailroom;
}

#define ENCAP_TYPE_ETHER	0
#define ENCAP_TYPE_IPPROTO	1

static inline void skb_set_inner_protocol(struct sk_buff *skb,
					  __be16 protocol)
{
	skb->inner_protocol = protocol;
	skb->inner_protocol_type = ENCAP_TYPE_ETHER;
}

static inline void skb_set_inner_ipproto(struct sk_buff *skb,
					 __u8 ipproto)
{
	skb->inner_ipproto = ipproto;
	skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
}

static inline void skb_reset_inner_headers(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->mac_header;
	skb->inner_network_header = skb->network_header;
	skb->inner_transport_header = skb->transport_header;
}

static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}

static inline unsigned char *skb_inner_transport_header(const struct sk_buff
							*skb)
{
	return skb->head + skb->inner_transport_header;
}

static inline int skb_inner_transport_offset(const struct sk_buff *skb)
{
	return skb_inner_transport_header(skb) - skb->data;
}

static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
	skb->inner_transport_header = skb->data - skb->head;
}

static inline void skb_set_inner_transport_header(struct sk_buff *skb,
						  const int offset)
{
	skb_reset_inner_transport_header(skb);
	skb->inner_transport_header += offset;
}

static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_network_header;
}

static inline void skb_reset_inner_network_header(struct sk_buff *skb)
{
	skb->inner_network_header = skb->data - skb->head;
}

static inline void skb_set_inner_network_header(struct sk_buff *skb,
						const int offset)
{
	skb_reset_inner_network_header(skb);
	skb->inner_network_header += offset;
}

static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_mac_header;
}

static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->data - skb->head;
}

static inline void skb_set_inner_mac_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_inner_mac_header(skb);
	skb->inner_mac_header += offset;
}

static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
{
	return skb->transport_header != (typeof(skb->transport_header))~0U;
}

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != (typeof(skb->mac_header))~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

static inline void skb_pop_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->network_header;
}

static inline void skb_probe_transport_header(struct sk_buff *skb,
					      const int offset_hint)
{
	struct flow_keys keys;

	if (skb_transport_header_was_set(skb))
		return;
	else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
		skb_set_transport_header(skb, keys.control.thoff);
	else
		skb_set_transport_header(skb, offset_hint);
}

static inline void skb_mac_header_rebuild(struct sk_buff *skb)
{
	if (skb_mac_header_was_set(skb)) {
		const unsigned char *old_mac = skb_mac_header(skb);

		skb_set_mac_header(skb, -skb->mac_len);
		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
	}
}

static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}

static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
{
	return skb->head + skb->csum_start;
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
{
	return skb->inner_transport_header - skb->inner_network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline int skb_inner_network_offset(const struct sk_buff *skb)
{
	return skb_inner_network_header(skb) - skb->data;
}

static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
	return pskb_may_pull(skb, skb_network_offset(skb) + len);
}
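
/*
 * Illustrative sketch (not part of this header): ensuring a header is in
 * the linear area before dereferencing it:
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;		// too short, or pull failed
 *	iph = ip_hdr(skb);		// now safe to read the IPv4 header
 */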

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The alignment of the IP header on most systems is improved
 * by shifting the start of the packet by 2 bytes, so that the IP header
 * sits on a naturally aligned boundary after the 14-byte Ethernet header.
 * Drivers should use NET_IP_ALIGN when reserving that headroom; some
 * architectures define it as 0 because unaligned accesses are cheap for
 * them while the extra shift would make DMA more expensive.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 32 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom, you should not reduce this.
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif

int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb_is_nonlinear(skb))) {
		WARN_ON(1);
		return;
	}
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	__skb_set_length(skb, len);
}

void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
{
	unsigned int diff = len - skb->len;

	if (skb_tailroom(skb) < diff) {
		int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
					   GFP_ATOMIC);
		if (ret)
			return ret;
	}
	__skb_set_length(skb, len);
	return 0;
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor) {
		skb->destructor(skb);
		skb->destructor = NULL;
		skb->sk		= NULL;
	} else {
		BUG_ON(skb->sk);
	}
}

/**
 *	skb_orphan_frags - orphan the frags contained in a buffer
 *	@skb: buffer to orphan frags from
 *	@gfp_mask: allocation mask for replacement pages
 *
 *	For each frag in the SKB which needs a destructor (i.e. has an
 *	owner) create a copy of that frag and release the original
 *	page by calling the destructor.
 */
static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

void skb_rbtree_purge(struct rb_root *root);

void *netdev_alloc_frag(unsigned int fragsz);

struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
				   gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
					       unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

/* legacy helper around __netdev_alloc_skb() */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	return __netdev_alloc_skb(NULL, length, gfp_mask);
}

/* legacy helper around netdev_alloc_skb() */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return netdev_alloc_skb(NULL, length);
}


static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}
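
/*
 * Illustrative sketch (not part of this header): an rx path allocating a
 * buffer whose IP header will land on an aligned boundary:
 *
 *	skb = netdev_alloc_skb_ip_align(dev, frame_len);
 *	if (unlikely(!skb)) {
 *		dev->stats.rx_dropped++;
 *		return;
 *	}
 */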

static inline void skb_free_frag(void *addr)
{
	__free_page_frag(addr);
}

void *napi_alloc_frag(unsigned int fragsz);
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
				 unsigned int length, gfp_t gfp_mask);
static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
					     unsigned int length)
{
	return __napi_alloc_skb(napi, length, GFP_ATOMIC);
}
void napi_consume_skb(struct sk_buff *skb, int budget);

void __kfree_skb_flush(void);
void __kfree_skb_defer(struct sk_buff *skb);

/**
 * __dev_alloc_pages - allocate page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 * @order: size of the allocation
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
					     unsigned int order)
{
	/* This piece of code contains several assumptions.
	 * 1.  This is for device Rx, therefore a cold page is preferred.
	 * 2.  The expectation is the user wants a compound page.
	 * 3.  If requesting a order 0 page it will not be compound
	 *     due to the check to see if order has a value in prep_new_page
	 * 4.  __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
	 *     code in gfp_to_alloc_flags that should be enforcing this.
	 */
	gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC;

	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
}

static inline struct page *dev_alloc_pages(unsigned int order)
{
	return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
}

/**
 * __dev_alloc_page - allocate a page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
{
	return __dev_alloc_pages(gfp_mask, 0);
}

static inline struct page *dev_alloc_page(void)
{
	return dev_alloc_pages(0);
}

/**
 *	skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
 *	@page: The page that was allocated from skb_alloc_page
 *	@skb: The skb that may need pfmemalloc set
 */
static inline void skb_propagate_pfmemalloc(struct page *page,
					    struct sk_buff *skb)
{
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc = true;
}

/**
 * skb_frag_page - retrieve the page referred to by a paged fragment
 * @frag: the paged fragment
 *
 * Returns the &struct page associated with @frag.
 */
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
	return frag->page.p;
}

/**
 * __skb_frag_ref - take an additional reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Takes an additional reference on the paged fragment @frag.
 */
static inline void __skb_frag_ref(skb_frag_t *frag)
{
	get_page(skb_frag_page(frag));
}

/**
 * skb_frag_ref - take an additional reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset.
 *
 * Takes an additional reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_ref(struct sk_buff *skb, int f)
{
	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}

/**
 * __skb_frag_unref - release a reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Releases a reference on the paged fragment @frag.
 */
static inline void __skb_frag_unref(skb_frag_t *frag)
{
	put_page(skb_frag_page(frag));
}

/**
 * skb_frag_unref - release a reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset
 *
 * Releases a reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_unref(struct sk_buff *skb, int f)
{
	__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
}

/**
 * skb_frag_address - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. The page must already
 * be mapped.
 */
static inline void *skb_frag_address(const skb_frag_t *frag)
{
	return page_address(skb_frag_page(frag)) + frag->page_offset;
}

/**
 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. Checks that the page
 * is mapped and returns %NULL otherwise.
 */
static inline void *skb_frag_address_safe(const skb_frag_t *frag)
{
	void *ptr = page_address(skb_frag_page(frag));
	if (unlikely(!ptr))
		return NULL;

	return ptr + frag->page_offset;
}

/**
 * __skb_frag_set_page - sets the page contained in a paged fragment
 * @frag: the paged fragment
 * @page: the page to set
 *
 * Sets the fragment @frag to contain @page.
 */
static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
	frag->page.p = page;
}

/**
 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
 * @skb: the buffer
 * @f: the fragment offset
 * @page: the page to set
 *
 * Sets the @f'th fragment of @skb to contain @page.
 */
static inline void skb_frag_set_page(struct sk_buff *skb, int f,
				     struct page *page)
{
	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
}

bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);

/**
 * skb_frag_dma_map - maps a paged fragment via the DMA API
 * @dev: the device to map the fragment to
 * @frag: the paged fragment to map
 * @offset: the offset within the fragment (starting at the
 *          fragment's own offset)
 * @size: the number of bytes to map
 * @dir: the direction of the mapping
 *
 * Maps the page associated with @frag to @dev.
 */
static inline dma_addr_t skb_frag_dma_map(struct device *dev,
					  const skb_frag_t *frag,
					  size_t offset, size_t size,
					  enum dma_data_direction dir)
{
	return dma_map_page(dev, skb_frag_page(frag),
			    frag->page_offset + offset, size, dir);
}

static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
					gfp_t gfp_mask)
{
	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
}


static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
						  gfp_t gfp_mask)
{
	return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
}


/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not require the data to be copied.
 */
static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int skb_try_make_writable(struct sk_buff *skb,
					unsigned int write_len)
{
	return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
	       pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and original skb is not changed.
 *
 *	The result is skb with writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 *	skb_cow_head - skb_cow but only making the head writable
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	This function is identical to skb_cow except that we replace the
 *	skb_cloned check by skb_header_cloned.  It should be used when
 *	you only need to push on some header and do not need to modify
 *	the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}

/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

/**
 *	skb_put_padto - increase size and pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;

	if (unlikely(size < len)) {
		len -= size;
		if (skb_pad(skb, len))
			return -ENOMEM;
		__skb_put(skb, len);
	}
	return 0;
}
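
/*
 * Illustrative sketch (not part of this header): padding a short Ethernet
 * frame to the 60-byte minimum before handing it to hardware that does
 * not pad by itself:
 *
 *	if (skb_put_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;	// skb was freed on failure
 */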

static inline int skb_add_data(struct sk_buff *skb,
			       struct iov_iter *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		__wsum csum = 0;
		if (csum_and_copy_from_iter(skb_put(skb, copy), copy,
					    &csum, from) == copy) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (copy_from_iter(skb_put(skb, copy), copy, from) == copy)
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
				    const struct page *page, int off)
{
	if (i) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == skb_frag_page(frag) &&
		       off == frag->page_offset + skb_frag_size(frag);
	}
	return false;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 * skb_has_shared_frag - can any frag be overwritten
 * @skb: buffer to test
 *
 * Return true if the skb has at least one frag that might be modified
 * by an external entity (as in vmsplice()/sendfile())
 */
static inline bool skb_has_shared_frag(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) &&
	       skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

static __always_inline void
__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
		     unsigned int off)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_block_sub(skb->csum,
					   csum_partial(start, len, 0), off);
	else if (skb->ip_summed == CHECKSUM_PARTIAL &&
		 skb_checksum_start_offset(skb) < 0)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */
static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	__skb_postpull_rcsum(skb, start, len, 0);
}

static __always_inline void
__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
		     unsigned int off)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_block_add(skb->csum,
					   csum_partial(start, len, 0), off);
}

/**
 *	skb_postpush_rcsum - update checksum for received skb after push
 *	@skb: buffer to update
 *	@start: start of data after push
 *	@len: length of data pushed
 *
 *	After doing a push on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum.
 */
static inline void skb_postpush_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	__skb_postpush_rcsum(skb, start, len, 0);
}

unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *	skb_push_rcsum - push skb and update receive checksum
 *	@skb: buffer to update
 *	@len: length of data pushed
 *
 *	This function performs an skb_push on the packet and updates
 *	the CHECKSUM_COMPLETE checksum.  It should be used on
 *	receive path processing instead of skb_push unless you know
 *	that the checksum difference is zero (e.g., a valid IP header)
 *	or you are setting ip_summed to CHECKSUM_NONE.
 */
static inline unsigned char *skb_push_rcsum(struct sk_buff *skb,
					    unsigned int len)
{
	skb_push(skb, len);
	skb_postpush_rcsum(skb, skb->data, len);
	return skb->data;
}

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets is still valid after the operation.
 */
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	__skb_trim(skb, len);
	return 0;
}

static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __skb_grow(skb, len);
}

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;				\
		     skb != (struct sk_buff *)(queue);			\
		     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)				\
		for (skb = (queue)->next, tmp = skb->next;		\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->next)

#define skb_queue_walk_from(queue, skb)					\
		for (; skb != (struct sk_buff *)(queue);		\
		     skb = skb->next)

#define skb_queue_walk_from_safe(queue, skb, tmp)			\
		for (tmp = skb->next;					\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;				\
		     skb != (struct sk_buff *)(queue);			\
		     skb = skb->prev)

#define skb_queue_reverse_walk_safe(queue, skb, tmp)			\
		for (skb = (queue)->prev, tmp = skb->prev;		\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->prev)

#define skb_queue_reverse_walk_from_safe(queue, skb, tmp)		\
		for (tmp = skb->prev;					\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->prev)
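
/*
 * Illustrative sketch (not part of this header): the _safe walker must be
 * used when entries may be unlinked during the walk:
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_queue_walk_safe(&queue, skb, tmp) {
 *		if (expired(skb)) {		// hypothetical predicate
 *			__skb_unlink(skb, &queue);
 *			kfree_skb(skb);
 *		}
 *	}
 */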

static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

#define skb_walk_frags(skb, iter)	\
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
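/*
 * Illustrative sketch, not part of this header: summing the lengths of the
 * skbs chained on the frag_list. The "example_" name is hypothetical.
 *
 *	static unsigned int example_frag_list_len(struct sk_buff *skb)
 *	{
 *		struct sk_buff *iter;
 *		unsigned int len = 0;
 *
 *		skb_walk_frags(skb, iter)
 *			len += iter->len;
 *		return len;
 *	}
 */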

int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
				const struct sk_buff *skb);
struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
					int *peeked, int *off, int *err,
					struct sk_buff **last);
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
				    int *peeked, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
				  int *err);
unsigned int datagram_poll(struct file *file, struct socket *sock,
			   struct poll_table_struct *wait);
int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
			   struct iov_iter *to, int size);
static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
					struct msghdr *msg, int size)
{
	return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
}
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
				   struct msghdr *msg);
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
				struct iov_iter *from, int len);
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
static inline void skb_free_datagram_locked(struct sock *sk,
					    struct sk_buff *skb)
{
	__skb_free_datagram_locked(sk, skb, 0);
}
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
			      int len, __wsum csum);
int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int len,
		    unsigned int flags);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
		 int len, int hlen);
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
void skb_scrub_packet(struct sk_buff *skb, bool xnet);
unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, int write_len);
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
			     gfp_t gfp);

static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
{
	return copy_from_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
}

static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
{
	return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
}
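/*
 * Illustrative sketch, not part of this header: a sendmsg() handler copying
 * a fixed-size request header out of the user iov. The "example_" names are
 * hypothetical.
 *
 *	static int example_sendmsg(struct msghdr *msg, size_t len)
 *	{
 *		struct example_req req;
 *
 *		if (len < sizeof(req))
 *			return -EINVAL;
 *		return memcpy_from_msg(&req, msg, sizeof(req));
 *	}
 */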

struct skb_checksum_ops {
	__wsum (*update)(const void *mem, int len, __wsum wsum);
	__wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
};

__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
		      __wsum csum, const struct skb_checksum_ops *ops);
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
		    __wsum csum);

static inline void * __must_check
__skb_header_pointer(const struct sk_buff *skb, int offset,
		     int len, void *data, int hlen, void *buffer)
{
	if (hlen - offset >= len)
		return data + offset;

	if (!skb ||
	    skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}

static inline void * __must_check
skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
{
	return __skb_header_pointer(skb, offset, len, skb->data,
				    skb_headlen(skb), buffer);
}
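/*
 * Illustrative sketch, not part of this header: parsing a transport header
 * that may sit in non-linear data. The stack buffer is only filled (via
 * skb_copy_bits()) when the header is not directly reachable in the linear
 * area. The "example_" name is hypothetical; struct udphdr is from
 * linux/udp.h.
 *
 *	static int example_check_ports(const struct sk_buff *skb, int thoff)
 *	{
 *		struct udphdr _uh;
 *		const struct udphdr *uh;
 *
 *		uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
 *		if (!uh)
 *			return -EINVAL;
 *		return uh->dest == uh->source;
 *	}
 */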

/**
 *	skb_needs_linearize - check if we need to linearize a given skb
 *			      depending on the given device features.
 *	@skb: socket buffer to check
 *	@features: net device features
 *
 *	Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG.
 */
static inline bool skb_needs_linearize(struct sk_buff *skb,
				       netdev_features_t features)
{
	return skb_is_nonlinear(skb) &&
	       ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
		(skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
}
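/*
 * Illustrative sketch, not part of this header: a transmit path flattening
 * an skb its hardware cannot scatter-gather. The "example_" name is
 * hypothetical.
 *
 *	static int example_xmit_prep(struct sk_buff *skb,
 *				     netdev_features_t features)
 *	{
 *		if (skb_needs_linearize(skb, features) &&
 *		    __skb_linearize(skb))
 *			return -ENOMEM;
 *		return 0;
 *	}
 */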

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}
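/*
 * Illustrative sketch, not part of this header: filling freshly pushed
 * headroom from a prebuilt template. The "example_" name is hypothetical.
 *
 *	static void example_write_hdr(struct sk_buff *skb, const void *hdr,
 *				      unsigned int hdrlen)
 *	{
 *		__skb_push(skb, hdrlen);
 *		skb_copy_to_linear_data(skb, hdr, hdrlen);
 *	}
 */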

void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and stores
 *	it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct timeval *stamp)
{
	*stamp = ktime_to_timeval(skb->tstamp);
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
				       struct timespec *stamp)
{
	*stamp = ktime_to_timespec(skb->tstamp);
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return ktime_set(0, 0);
}

struct sk_buff *skb_clone_sk(struct sk_buff *skb);

#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING

void skb_clone_tx_timestamp(struct sk_buff *skb);
bool skb_defer_rx_timestamp(struct sk_buff *skb);

#else

static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	return false;
}

#endif

/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb back to the stack with a
 * timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps
 */
void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps);

void __skb_tstamp_tx(struct sk_buff *orig_skb,
		     struct skb_shared_hwtstamps *hwtstamps,
		     struct sock *sk, int tstype);

/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb:	the original outgoing packet
 * @hwtstamps:	hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket.  Errors are silently ignored.
 */
void skb_tstamp_tx(struct sk_buff *orig_skb,
		   struct skb_shared_hwtstamps *hwtstamps);

static inline void sw_tx_timestamp(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
	    !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		skb_tstamp_tx(skb, NULL);
}

/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC Drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * Specifically, one should make absolutely sure that this function is
 * called before TX completion of this packet can trigger.  Otherwise
 * the packet could potentially already be freed.
 *
 * @skb: A socket buffer.
 */
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	skb_clone_tx_timestamp(skb);
	sw_tx_timestamp(skb);
}
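/*
 * Illustrative sketch, not part of this header: where the hook above sits in
 * a driver's ndo_start_xmit, before the hardware can complete (and free) the
 * packet. The "example_" names are hypothetical.
 *
 *	static netdev_tx_t example_start_xmit(struct sk_buff *skb,
 *					      struct net_device *dev)
 *	{
 *		skb_tx_timestamp(skb);
 *		example_hw_queue(dev, skb);
 *		return NETDEV_TX_OK;
 *	}
 */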

/**
 * skb_complete_wifi_ack - deliver skb with wifi status
 *
 * @skb: the original outgoing packet
 * @acked: ack status
 */
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
__sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
		skb->csum_valid ||
		(skb->ip_summed == CHECKSUM_PARTIAL &&
		 skb_checksum_start_offset(skb) >= 0));
}

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify that checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is unnecessary or if it was good.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}

static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level == 0)
			skb->ip_summed = CHECKSUM_NONE;
		else
			skb->csum_level--;
	}
}

static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
			skb->csum_level++;
	} else if (skb->ip_summed == CHECKSUM_NONE) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = 0;
	}
}
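/*
 * Illustrative sketch, not part of this header: csum_level bookkeeping when
 * a tunnel decap consumes one validated checksum. The inner protocol only
 * keeps seeing CHECKSUM_UNNECESSARY if a deeper validated level remains.
 * The "example_" name is hypothetical.
 *
 *	static void example_decap_csum(struct sk_buff *skb)
 *	{
 *		__skb_decr_checksum_unnecessary(skb);
 *	}
 */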

static inline void __skb_mark_checksum_bad(struct sk_buff *skb)
{
	/* Mark current checksum as bad (typically called from GRO
	 * path). In the case that ip_summed is CHECKSUM_NONE
	 * this must be the first checksum encountered in the packet.
	 * When ip_summed is CHECKSUM_UNNECESSARY, this is the first
	 * checksum after the last one validated. For UDP, a zero
	 * checksum can not be marked as bad.
	 */
	if (skb->ip_summed == CHECKSUM_NONE ||
	    skb->ip_summed == CHECKSUM_UNNECESSARY)
		skb->csum_bad = 1;
}

/* Check if we need to perform checksum complete validation.
 *
 * Returns true if checksum complete is needed, false otherwise
 * (either checksum is unnecessary or zero checksum is allowed).
 */
static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
						  bool zero_okay,
						  __sum16 check)
{
	if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
		skb->csum_valid = 1;
		__skb_decr_checksum_unnecessary(skb);
		return false;
	}

	return true;
}

/* For small packets <= CHECKSUM_BREAK perform checksum complete directly
 * in checksum_init.
 */
#define CHECKSUM_BREAK 76

/* Unset checksum-complete
 *
 * Unset checksum complete can be done when packet is being modified
 * (uncompressed for instance) and checksum-complete value is
 * invalidated.
 */
static inline void skb_checksum_complete_unset(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/* Validate (init) checksum based on checksum complete.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete. In the latter
 *      case the ip_summed will not be CHECKSUM_UNNECESSARY and the pseudo
 *      checksum is stored in skb->csum for use in __skb_checksum_complete
 *   non-zero: value of invalid checksum
 */
static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
						       bool complete,
						       __wsum psum)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!csum_fold(csum_add(psum, skb->csum))) {
			skb->csum_valid = 1;
			return 0;
		}
	} else if (skb->csum_bad) {
		/* ip_summed == CHECKSUM_NONE in this case */
		return (__force __sum16)1;
	}

	skb->csum = psum;

	if (complete || skb->len <= CHECKSUM_BREAK) {
		__sum16 csum;

		csum = __skb_checksum_complete(skb);
		skb->csum_valid = !csum;
		return csum;
	}

	return 0;
}

static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
{
	return 0;
}

/* Perform checksum validate (init). Note that this is a macro since we only
 * want to calculate the pseudo header which is an input function if necessary.
 * First we try to validate without any computation (checksum unnecessary) and
 * then calculate based on checksum complete calling the function to compute
 * pseudo header.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete
 *   non-zero: value of invalid checksum
 */
#define __skb_checksum_validate(skb, proto, complete,			\
				zero_okay, check, compute_pseudo)	\
({									\
	__sum16 __ret = 0;						\
	skb->csum_valid = 0;						\
	if (__skb_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_checksum_validate_complete(skb,		\
				complete, compute_pseudo(skb, proto));	\
	__ret;								\
})

#define skb_checksum_init(skb, proto, compute_pseudo)			\
	__skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)

#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo)	\
	__skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)

#define skb_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)

#define skb_checksum_validate_zero_check(skb, proto, check,		\
					 compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)

#define skb_checksum_simple_validate(skb)				\
	__skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
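/*
 * Illustrative sketch, not part of this header: roughly how an IPv4 L4
 * receive path initializes its checksum state with a protocol-specific
 * pseudo-header function (inet_compute_pseudo() is declared in net/ip.h).
 * The "example_" name is hypothetical.
 *
 *	static int example_tcp4_csum_init(struct sk_buff *skb)
 *	{
 *		return skb_checksum_init(skb, IPPROTO_TCP,
 *					 inet_compute_pseudo);
 *	}
 */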

static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
{
	return (skb->ip_summed == CHECKSUM_NONE &&
		skb->csum_valid && !skb->csum_bad);
}

static inline void __skb_checksum_convert(struct sk_buff *skb,
					  __sum16 check, __wsum pseudo)
{
	skb->csum = ~pseudo;
	skb->ip_summed = CHECKSUM_COMPLETE;
}

#define skb_checksum_try_convert(skb, proto, check, compute_pseudo)	\
do {									\
	if (__skb_checksum_convert_check(skb))				\
		__skb_checksum_convert(skb, check,			\
				       compute_pseudo(skb, proto));	\
} while (0)

static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
					      u16 start, u16 offset)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
	skb->csum_offset = offset - start;
}

/* Update skb and packet to reflect the remote checksum offload operation.
 * When called, ptr indicates the starting point for skb->csum when
 * ip_summed is CHECKSUM_COMPLETE. If we need to create checksum complete
 * here, skb_postpull_rcsum is done so skb->csum starts at ptr.
 */
static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
				       int start, int offset, bool nopartial)
{
	__wsum delta;

	if (!nopartial) {
		skb_remcsum_adjust_partial(skb, ptr, start, offset);
		return;
	}

	if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
		__skb_checksum_complete(skb);
		skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
	}

	delta = remcsum_adjust(ptr, skb->csum, start, offset);

	/* Adjust skb->csum since we changed the packet */
	skb->csum = csum_add(skb->csum, delta);
}

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

static inline void nf_reset_trace(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
	skb->nf_trace = 0;
#endif
}

/* Note: This doesn't put any conntrack and bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
			     bool copy)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->nfct = src->nfct;
	nf_conntrack_get(src->nfct);
	if (copy)
		dst->nfctinfo = src->nfctinfo;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	dst->nf_bridge = src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
	if (copy)
		dst->nf_trace = src->nf_trace;
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(dst->nfct);
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src, true);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline bool skb_irq_freeable(const struct sk_buff *skb)
{
	return !skb->destructor &&
#if IS_ENABLED(CONFIG_XFRM)
	       !skb->sp &&
#endif
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	       !skb->nfct &&
#endif
	       !skb->_skb_refdst &&
	       !skb_has_frag_list(skb);
}

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}
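/*
 * Illustrative sketch, not part of this header: queue_mapping stores the rx
 * queue biased by one so that zero can mean "not recorded". The "example_"
 * name is hypothetical.
 *
 *	static u16 example_pick_tx_queue(const struct sk_buff *skb, u16 nq)
 *	{
 *		if (skb_rx_queue_recorded(skb))
 *			return skb_get_rx_queue(skb) % nq;
 *		return 0;
 *	}
 */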

static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
	return skb->sp;
#else
	return NULL;
#endif
}

/* Keeps track of mac header offset relative to skb->head.
 * It is useful for TSO of Tunneling protocol. e.g. GRE.
 * For non-tunnel skb it points to skb_mac_header() and for
 * tunnel skb it points to outer mac header.
 * Keeps track of level of encapsulation of network headers.
 */
struct skb_gso_cb {
	union {
		int	mac_offset;
		int	data_offset;
	};
	int	encap_level;
	__wsum	csum;
	__u16	csum_start;
};
#define SKB_SGO_CB_OFFSET	32
#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))

static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
{
	return (skb_mac_header(inner_skb) - inner_skb->head) -
		SKB_GSO_CB(inner_skb)->mac_offset;
}

static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
{
	int new_headroom, headroom;
	int ret;

	headroom = skb_headroom(skb);
	ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
	if (ret)
		return ret;

	new_headroom = skb_headroom(skb);
	SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
	return 0;
}

static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
{
	/* Do not update partial checksums if remote checksum is enabled. */
	if (skb->remcsum_offload)
		return;

	SKB_GSO_CB(skb)->csum = res;
	SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
}

/* Compute the checksum for a gso segment. First compute the checksum value
 * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
 * then add in skb->csum (checksum from csum_start to end of packet).
 * skb->csum and csum_start are then updated to reflect the checksum of the
 * resultant packet starting from the transport header -- the resultant
 * checksum is in the res argument (i.e. the pseudo checksum).
 */
static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
{
	unsigned char *csum_start = skb_transport_header(skb);
	int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
	__wsum partial = SKB_GSO_CB(skb)->csum;

	SKB_GSO_CB(skb)->csum = res;
	SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;

	return csum_fold(csum_partial(csum_start, plen, partial));
}

static inline bool skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

static inline void skb_gso_reset(struct sk_buff *skb)
{
	skb_shinfo(skb)->gso_size = 0;
	skb_shinfo(skb)->gso_segs = 0;
	skb_shinfo(skb)->gso_type = 0;
}

void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set.
	 */
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one.  Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper, to document places where we make this assertion.
 */
static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
				     unsigned int transport_len,
				     __sum16(*skb_chkf)(struct sk_buff *skb));

/**
 * skb_head_is_locked - Determine if the skb->head is locked down
 * @skb: skb to check
 *
 * The head on skbs build around a head frag can be removed if they are
 * not cloned.  This function returns true if the skb head is locked down
 * due to either being allocated via kmalloc, or by being a clone with
 * multiple references to the head.
 */
static inline bool skb_head_is_locked(const struct sk_buff *skb)
{
	return !skb->head_frag || skb_cloned(skb);
}

/**
 * skb_gso_network_seglen - Return length of individual segments of a gso packet
 * @skb: GSO skb
 *
 * skb_gso_network_seglen is used to determine the real size of the
 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
 *
 * The MAC/L2 header is not accounted for.
 */
static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) -
			       skb_network_header(skb);
	return hdr_len + skb_gso_transport_seglen(skb);
}
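/*
 * Illustrative sketch, not part of this header: a forwarding-style check
 * that each segment of a GSO packet would fit a given MTU (compare
 * skb_gso_validate_mtu() declared above). The "example_" name is
 * hypothetical.
 *
 *	static bool example_segs_fit(const struct sk_buff *skb,
 *				     unsigned int mtu)
 *	{
 *		return skb_gso_network_seglen(skb) <= mtu;
 *	}
 */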

/* Local Checksum Offload.
 * Compute the outer checksum based on the assumption that the
 * inner checksum will be offloaded later. The caller must have set
 * up the inner checksum offload (ip_summed == CHECKSUM_PARTIAL) and
 * seeded the inner checksum field with the pseudo-header sum.
 * See Documentation/networking/checksum-offloads.txt for details.
 */
static inline __wsum lco_csum(struct sk_buff *skb)
{
	unsigned char *csum_start = skb_checksum_start(skb);
	unsigned char *l4_hdr = skb_transport_header(skb);
	__wsum partial;

	/* Start with complement of inner checksum adjustment */
	partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
						    skb->csum_offset));

	/* Add in checksum of our headers (incl. outer checksum
	 * adjustment filled in by caller) and return result.
	 */
	return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
}
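/*
 * Illustrative sketch, not part of this header: roughly how a UDP
 * encapsulation path derives the outer UDP checksum from the
 * not-yet-computed inner one (compare udp_set_csum() in net/ipv4/udp.c);
 * uh, saddr, daddr and len come from the caller's context.
 *
 *	uh->check = 0;
 *	uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
 *	if (uh->check == 0)
 *		uh->check = CSUM_MANGLED_0;
 */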

#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */