/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/rbtree.h>
#include <linux/socket.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/sched.h>
#include <net/flow_keys.h>

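/* Checksum offload semantics (skb->ip_summed):
 *
 * On receive, the device reports what it did:
 *
 *   CHECKSUM_NONE        - the device gave no checksum information; the
 *                          stack must verify any checksums in software.
 *   CHECKSUM_UNNECESSARY - the device already validated the checksum(s);
 *                          skb->csum_level says how many consecutive
 *                          checksums (up to SKB_MAX_CSUM_LEVEL) were
 *                          verified.
 *   CHECKSUM_COMPLETE    - the device supplied the one's complement sum
 *                          of the whole packet in skb->csum; the stack
 *                          folds in the pseudo header to validate.
 *
 * On transmit, CHECKSUM_PARTIAL asks the device (or GSO) to insert one
 * checksum computed from csum_start/csum_offset, while CHECKSUM_NONE
 * means the checksum is already complete and must be left alone.
 */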
#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

/* Maximum value in skb->csum_level */
#define SKB_MAX_CSUM_LEVEL	3

#define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

struct net_device;
struct scatterlist;
struct pipe_inode_info;
struct iov_iter;
struct napi_struct;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
	atomic_t		use;
	enum {
		BRNF_PROTO_UNCHANGED,
		BRNF_PROTO_8021Q,
		BRNF_PROTO_PPPOE
	} orig_proto;
	bool			pkt_otherhost;
	unsigned int		mask;
	struct net_device	*physindev;
	struct net_device	*physoutdev;
	char			neigh_header[8];
	__be32			ipv4_daddr;
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list we
 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
 * buffers which do not start on a page boundary.
 *
 * Since GRO uses frags we allocate at least 16 regardless of page
 * size.
 */
#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct {
		struct page *p;
	} page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
	__u32 size;
#else
	__u16 page_offset;
	__u16 size;
#endif
};

static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->size;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->size = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->size += delta;
}

static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->size -= delta;
}

#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since arbitrary point in time
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to &skb_shared_info.hwtstamps, which is
 * filled in by drivers or the network stack wishing to pass the time
 * stamp to userspace.
 */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
};

/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp when queueing packet to NIC */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* device driver supports TX zero-copy buffers */
	SKBTX_DEV_ZEROCOPY = 1 << 3,

	/* generate wifi status information (where possible) */
	SKBTX_WIFI_STATUS = 1 << 4,

	/* This indicates at least one fragment might be overwritten
	 * (as in vmsplice(), sendfile() ...)
	 * If we need to compute a TX checksum, we'll need to copy
	 * all frags to avoid possible bad checksum
	 */
	SKBTX_SHARED_FRAG = 1 << 5,

	/* generate software time stamp when entering packet scheduling */
	SKBTX_SCHED_TSTAMP = 1 << 6,

	/* generate software timestamp on peer data acknowledgment */
	SKBTX_ACK_TSTAMP = 1 << 7,
};

#define SKBTX_ANY_SW_TSTAMP	(SKBTX_SW_TSTAMP    | \
				 SKBTX_SCHED_TSTAMP | \
				 SKBTX_ACK_TSTAMP)
#define SKBTX_ANY_TSTAMP	(SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)

/*
 * The callback notifies userspace to release buffers when skb DMA is done in
 * lower device, the skb last reference should be 0 when calling this.
 * The zerocopy_success argument is true if zero copy transmit occurred,
 * false on data copy or out of memory error caused by data copy attempt.
 * The ctx field is used to track device context.
 * The desc field is used to track userspace buffer index.
 */
struct ubuf_info {
	void (*callback)(struct ubuf_info *, bool zerocopy_success);
	void *ctx;
	unsigned long desc;
};

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	unsigned char	nr_frags;
	__u8		tx_flags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short	gso_type;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	u32		tskey;
	__be32		ip6_frag_id;

	/*
	 * Warning : all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void *		destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)


enum {
	SKB_FCLONE_UNAVAILABLE,	/* skb has no fclone (from head_cache) */
	SKB_FCLONE_ORIG,	/* orig skb (from fclone_cache) */
	SKB_FCLONE_CLONE,	/* companion fclone skb (from fclone_cache) */
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,

	SKB_GSO_GRE = 1 << 6,

	SKB_GSO_GRE_CSUM = 1 << 7,

	SKB_GSO_IPIP = 1 << 8,

	SKB_GSO_SIT = 1 << 9,

	SKB_GSO_UDP_TUNNEL = 1 << 10,

	SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,

	SKB_GSO_TUNNEL_REMCSUM = 1 << 12,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

/**
 * struct skb_mstamp - multi resolution time stamps
 * @stamp_us: timestamp in us resolution
 * @stamp_jiffies: timestamp in jiffies
 */
struct skb_mstamp {
	union {
		u64		v64;
		struct {
			u32	stamp_us;
			u32	stamp_jiffies;
		};
	};
};

/**
 * skb_mstamp_get - get current timestamp
 * @cl: place to store timestamps
 */
static inline void skb_mstamp_get(struct skb_mstamp *cl)
{
	u64 val = local_clock();

	do_div(val, NSEC_PER_USEC);
	cl->stamp_us = (u32)val;
	cl->stamp_jiffies = (u32)jiffies;
}

/**
 * skb_mstamp_us_delta - compute the difference in usec between two skb_mstamp
 * @t1: pointer to newest sample
 * @t0: pointer to oldest sample
 */
static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
				      const struct skb_mstamp *t0)
{
	s32 delta_us = t1->stamp_us - t0->stamp_us;
	u32 delta_jiffies = t1->stamp_jiffies - t0->stamp_jiffies;

	/* If delta_us is negative, this might be because the interval is
	 * too big, or because of local_clock() drift : fall back to jiffies.
	 */
	if (delta_us <= 0 ||
	    delta_jiffies >= (INT_MAX / (USEC_PER_SEC / HZ)))
		delta_us = jiffies_to_usecs(delta_jiffies);

	return delta_us;
}

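/* struct sk_buff - socket buffer
 *
 * The central network packet structure.  @next/@prev chain buffers into
 * an &sk_buff_head list; @sk and @dev identify the owning socket and the
 * device the packet arrived on or leaves by.  @len counts all data while
 * @data_len counts only the paged/fragment part, so the linear head is
 * len - data_len bytes.  @head/@data/@tail/@end delimit the allocated
 * buffer, and everything between headers_start[] and headers_end[] is
 * copied as one block when the header is copied.
 */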
struct sk_buff {
	union {
		struct {
			/* These two members must be first. */
			struct sk_buff		*next;
			struct sk_buff		*prev;

			union {
				ktime_t		tstamp;
				struct skb_mstamp skb_mstamp;
			};
		};
		struct rb_node	rbnode; /* used in netem & tcp stack */
	};
	struct sock		*sk;
	struct net_device	*dev;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	unsigned long		_skb_refdst;
	void			(*destructor)(struct sk_buff *skb);
#ifdef CONFIG_XFRM
	struct	sec_path	*sp;
#endif
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack	*nfct;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	struct nf_bridge_info	*nf_bridge;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;

	/* Following fields are _not_ copied in __copy_skb_header()
	 * Note that queue_mapping is here mostly to fill a hole.
	 */
	kmemcheck_bitfield_begin(flags1);
	__u16			queue_mapping;
	__u8			cloned:1,
				nohdr:1,
				fclone:2,
				peeked:1,
				head_frag:1,
				xmit_more:1;
	/* one bit hole */
	kmemcheck_bitfield_end(flags1);

	/* fields enclosed in headers_start/headers_end are copied
	 * using a single memcpy() in __copy_skb_header()
	 */
	/* private: */
	__u32			headers_start[0];
	/* public: */

/* if you move pkt_type around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_TYPE_MAX	(7 << 5)
#else
#define PKT_TYPE_MAX	7
#endif
#define PKT_TYPE_OFFSET()	offsetof(struct sk_buff, __pkt_type_offset)

	/* private: */
	__u8			__pkt_type_offset[0];
	/* public: */
	__u8			pkt_type:3;
	__u8			pfmemalloc:1;
	__u8			ignore_df:1;
	__u8			nfctinfo:3;

	__u8			nf_trace:1;
	__u8			ip_summed:2;
	__u8			ooo_okay:1;
	__u8			l4_hash:1;
	__u8			sw_hash:1;
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;

	__u8			no_fcs:1;
	/* Indicates the inner headers are valid in the skbuff. */
	__u8			encapsulation:1;
	__u8			encap_hdr_csum:1;
	__u8			csum_valid:1;
	__u8			csum_complete_sw:1;
	__u8			csum_level:2;
	__u8			csum_bad:1;

#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif
	__u8			ipvs_property:1;
	__u8			inner_protocol_type:1;
	__u8			remcsum_offload:1;
	/* 3 or 5 bit hole */

#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif

	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	int			skb_iif;
	__u32			hash;
	__be16			vlan_proto;
	__u16			vlan_tci;
#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
	union {
		unsigned int	napi_id;
		unsigned int	sender_cpu;
	};
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif
	union {
		__u32		mark;
		__u32		reserved_tailroom;
	};

	union {
		__be16		inner_protocol;
		__u8		inner_ipproto;
	};

	__u16			inner_transport_header;
	__u16			inner_network_header;
	__u16			inner_mac_header;

	__be16			protocol;
	__u16			transport_header;
	__u16			network_header;
	__u16			mac_header;

	/* private: */
	__u32			headers_end[0];
	/* public: */

	/* These elements must be at the end, see alloc_skb() for details.  */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	atomic_t		users;
};

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>


#define SKB_ALLOC_FCLONE	0x01
#define SKB_ALLOC_RX		0x02
#define SKB_ALLOC_NAPI		0x04

/* Returns true if the skb was allocated from PFMEMALLOC reserves */
static inline bool skb_pfmemalloc(const struct sk_buff *skb)
{
	return unlikely(skb->pfmemalloc);
}

/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if refcount was _not_ taken
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of reference taken or not.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check we still are in a
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}

/**
 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * The caller must be inside an rcu_read_lock() (or rcu_read_lock_bh())
 * section so that the dst cannot go away under us.
 */
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
}

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

void kfree_skb(struct sk_buff *skb);
void kfree_skb_list(struct sk_buff *segs);
void skb_tx_error(struct sk_buff *skb);
void consume_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;

void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
		      bool *fragstolen, int *delta_truesize);

struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
			    int node);
struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}

struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
				     unsigned long data_len,
				     int max_page_order,
				     int *errcode,
				     gfp_t gfp_mask);

/* Layout of fast clones : [skb1][skb2][fclone_ref] */
struct sk_buff_fclones {
	struct sk_buff	skb1;

	struct sk_buff	skb2;

	atomic_t	fclone_ref;
};

/**
 *	skb_fclone_busy - check if fclone is busy
 *	@sk: socket
 *	@skb: buffer
 *
 * Returns true if skb is a fast clone, and its clone is not freed.
 * Some drivers call skb_orphan() in their ndo_start_xmit(),
 * so we also check that this didn't happen.
 */
static inline bool skb_fclone_busy(const struct sock *sk,
				   const struct sk_buff *skb)
{
	const struct sk_buff_fclones *fclones;

	fclones = container_of(skb, struct sk_buff_fclones, skb1);

	return skb->fclone == SKB_FCLONE_ORIG &&
	       atomic_read(&fclones->fclone_ref) > 1 &&
	       fclones->skb2.sk == sk;
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
}

struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
static inline struct sk_buff *alloc_skb_head(gfp_t priority)
{
	return __alloc_skb_head(priority, -1);
}

struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone);
static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
					  gfp_t gfp_mask)
{
	return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
}

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
				     unsigned int headroom);
struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
				int newtailroom, gfp_t priority);
int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
			int offset, int len);
int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
		 int len);
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
int skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	consume_skb(a)

int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st);
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st);
void skb_abort_seq_read(struct skb_seq_state *st);

unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config);

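/*
 * Packet hash types specify the type of hash in skb_set_hash.
 *
 * Hash types refer to the protocol layer addresses which are used to
 * construct a packet's hash.  The hashes are used to differentiate or
 * identify flows of the protocol layer for the hash:
 *
 * 1) Two packets in different flows have different hash values.
 * 2) Two packets in the same flow should have the same hash value.
 *
 * A hash at a higher layer is considered to be more specific.  A driver
 * should set the most specific hash it can, and must not report a more
 * specific type than the layer the hash was actually computed on (e.g.
 * an L3 hash must not be advertised as an L4 hash).
 */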
enum pkt_hash_types {
	PKT_HASH_TYPE_NONE,	/* Undefined type */
	PKT_HASH_TYPE_L2,	/* Input: src_MAC, dest_MAC */
	PKT_HASH_TYPE_L3,	/* Input: src_IP, dst_IP */
	PKT_HASH_TYPE_L4,	/* Input: src_IP, dst_IP, src_port, dst_port */
};

static inline void
skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
{
	skb->l4_hash = (type == PKT_HASH_TYPE_L4);
	skb->sw_hash = 0;
	skb->hash = hash;
}

void __skb_get_hash(struct sk_buff *skb);
static inline __u32 skb_get_hash(struct sk_buff *skb)
{
	if (!skb->l4_hash && !skb->sw_hash)
		__skb_get_hash(skb);

	return skb->hash;
}

static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
{
	return skb->hash;
}

static inline void skb_clear_hash(struct sk_buff *skb)
{
	skb->hash = 0;
	skb->sw_hash = 0;
	skb->l4_hash = 0;
}

static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
{
	if (!skb->l4_hash)
		skb_clear_hash(skb);
}

static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
{
	to->hash = from->hash;
	to->sw_hash = from->sw_hash;
	to->l4_hash = from->l4_hash;
}

static inline void skb_sender_cpu_clear(struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	skb->sender_cpu = 0;
#endif
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end - skb->head;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (const struct sk_buff *) list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (const struct sk_buff *) list;
}

/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (const struct sk_buff *) list;
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb.  It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage at the end of the list.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb.  It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage at the end of the list.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant
 * atomic changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);

	if (skb_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this.
 *	Note : Check if you can use __skb_header_release() instead.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	__skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Variant of skb_header_release() assuming skb is private to caller.
 *	We can avoid one atomic operation.
 */
static inline void __skb_header_release(struct sk_buff *skb)
{
	skb->nohdr = 1;
	atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt status or with spinlocks held pri must
 *	be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);

		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);

		/* Free our shared copy */
		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->next;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	skb_peek_next - peek skb following the given one from a queue
 *	@skb: skb to start from
 *	@list_: list to peek at
 *
 *	Returns %NULL when the end of the list is met or a pointer to the
 *	next element. The reference count is not incremented and the
 *	reference is therefore volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
		const struct sk_buff_head *list_)
{
	struct sk_buff *next = skb->next;

	if (next == (struct sk_buff *)list_)
		next = NULL;
	return next;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->prev;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object.  This allows to initialize the list
 *	aspects of an sk_buff_head without reinitializing things like
 *	the spinlock.  It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known to not be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers should need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
		struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
		struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	__skb_queue_after - queue a buffer at the list head
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

static inline bool skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
	return len + skb_headlen(skb);
}

/**
 * __skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data with @page
 * @size: the length of the data
 *
 * Initialises the @i'th fragment of @skb to point to &size bytes at
 * offset @off within @page.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	/*
	 * Propagate page->pfmemalloc to the skb if we can. The problem is
	 * that not all callers have unique ownership of the page. If
	 * pfmemalloc is set, we check the mapping as a mapping implies
	 * page->index is set (index and pfmemalloc share space).
	 * If it's a valid mapping, we cannot use page->pfmemalloc but we
	 * do not lose pfmemalloc information as the pages would not be
	 * allocated using __GFP_MEMALLOC.
	 */
	frag->page.p		  = page;
	frag->page_offset	  = off;
	skb_frag_size_set(frag, size);

	page = compound_head(page);
	if (page->pfmemalloc && !page->mapping)
		skb->pfmemalloc	= true;
}

/**
 * skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data with @page
 * @size: the length of the data
 *
 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
 * @skb to point to @size bytes at offset @off within @page. In
 * addition updates @skb such that @i is the last fragment.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	__skb_fill_page_desc(skb, i, page, off, size);
	skb_shinfo(skb)->nr_frags = i + 1;
}

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize);

#define SKB_PAGE_ASSERT(skb) 	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb) 	BUG_ON(skb_has_frag_list(skb))
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_availroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 *	allocated by sk_stream_alloc()
 */
static inline int skb_availroom(const struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb))
		return 0;

	return skb->end - skb->tail - skb->reserved_tailroom;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

#define ENCAP_TYPE_ETHER	0
#define ENCAP_TYPE_IPPROTO	1

static inline void skb_set_inner_protocol(struct sk_buff *skb,
					  __be16 protocol)
{
	skb->inner_protocol = protocol;
	skb->inner_protocol_type = ENCAP_TYPE_ETHER;
}

static inline void skb_set_inner_ipproto(struct sk_buff *skb,
					 __u8 ipproto)
{
	skb->inner_ipproto = ipproto;
	skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
}

static inline void skb_reset_inner_headers(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->mac_header;
	skb->inner_network_header = skb->network_header;
	skb->inner_transport_header = skb->transport_header;
}

static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}

static inline unsigned char *skb_inner_transport_header(const struct sk_buff
							*skb)
{
	return skb->head + skb->inner_transport_header;
}

static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
	skb->inner_transport_header = skb->data - skb->head;
}

static inline void skb_set_inner_transport_header(struct sk_buff *skb,
						  const int offset)
{
	skb_reset_inner_transport_header(skb);
	skb->inner_transport_header += offset;
}

static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_network_header;
}

static inline void skb_reset_inner_network_header(struct sk_buff *skb)
{
	skb->inner_network_header = skb->data - skb->head;
}

static inline void skb_set_inner_network_header(struct sk_buff *skb,
						const int offset)
{
	skb_reset_inner_network_header(skb);
	skb->inner_network_header += offset;
}

static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_mac_header;
}

static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->data - skb->head;
}

static inline void skb_set_inner_mac_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_inner_mac_header(skb);
	skb->inner_mac_header += offset;
}

static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
{
	return skb->transport_header != (typeof(skb->transport_header))~0U;
}

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != (typeof(skb->mac_header))~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

static inline void skb_pop_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->network_header;
}

static inline void skb_probe_transport_header(struct sk_buff *skb,
					      const int offset_hint)
{
	struct flow_keys keys;

	if (skb_transport_header_was_set(skb))
		return;
	else if (skb_flow_dissect(skb, &keys))
		skb_set_transport_header(skb, keys.thoff);
	else
		skb_set_transport_header(skb, offset_hint);
}

static inline void skb_mac_header_rebuild(struct sk_buff *skb)
{
	if (skb_mac_header_was_set(skb)) {
		const unsigned char *old_mac = skb_mac_header(skb);

		skb_set_mac_header(skb, -skb->mac_len);
		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
	}
}

static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
{
	return skb->inner_transport_header - skb->inner_network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline int skb_inner_network_offset(const struct sk_buff *skb)
{
	return skb_inner_network_header(skb) - skb->data;
}

static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
	return pskb_may_pull(skb, skb_network_offset(skb) + len);
}

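/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */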
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

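/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 32 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom, you should not reduce this.
 */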
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif

int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb_is_nonlinear(skb))) {
		WARN_ON(1);
		return;
	}
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor) {
		skb->destructor(skb);
		skb->destructor = NULL;
		skb->sk		= NULL;
	} else {
		BUG_ON(skb->sk);
	}
}

/**
 *	skb_orphan_frags - orphan the frags contained in a buffer
 *	@skb: buffer to orphan frags from
 *	@gfp_mask: allocation mask for replacement pages
 *
 *	For each frag in the SKB which needs a destructor (i.e. has an
 *	owner) create a copy of that frag and release the original
 *	page by calling the destructor.
 */
static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
#define NETDEV_FRAG_PAGE_MAX_SIZE  (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
#define NETDEV_PAGECNT_MAX_BIAS	   NETDEV_FRAG_PAGE_MAX_SIZE

void *netdev_alloc_frag(unsigned int fragsz);

struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
				   gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
					       unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

/* legacy helper around __netdev_alloc_skb() */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	return __netdev_alloc_skb(NULL, length, gfp_mask);
}

/* legacy helper around netdev_alloc_skb() */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return netdev_alloc_skb(NULL, length);
}


static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}

void *napi_alloc_frag(unsigned int fragsz);
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
				 unsigned int length, gfp_t gfp_mask);
static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
					     unsigned int length)
{
	return __napi_alloc_skb(napi, length, GFP_ATOMIC);
}

/**
 * __dev_alloc_pages - allocate page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 * @order: size of the allocation
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
*/
static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
					     unsigned int order)
{
	/* This piece of code contains several assumptions.
	 * 1.  This is for device Rx, therefor a cold page is preferred.
	 * 2.  The expectation is the user wants a compound page.
	 * 3.  If requesting a order 0 page it will not be compound
	 *     due to the check to see if order has a value in prep_new_page
	 * 4.  __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
	 *     code in gfp_to_alloc_flags that should be enforcing this.
	 */
	gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC;

	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
}

static inline struct page *dev_alloc_pages(unsigned int order)
{
	return __dev_alloc_pages(GFP_ATOMIC, order);
}

/**
 * __dev_alloc_page - allocate a page for network Rx
 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
 *
 * Allocate a new page.
 *
 * %NULL is returned if there is no free memory.
 */
static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
{
	return __dev_alloc_pages(gfp_mask, 0);
}

static inline struct page *dev_alloc_page(void)
{
	return __dev_alloc_page(GFP_ATOMIC);
}

/**
 *	skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
 *	@page: The page that was allocated from skb_alloc_page
 *	@skb: The skb that may need pfmemalloc set
 */
static inline void skb_propagate_pfmemalloc(struct page *page,
					    struct sk_buff *skb)
{
	if (page && page->pfmemalloc)
		skb->pfmemalloc = true;
}

/**
 * skb_frag_page - retrieve the page referred to by a paged fragment
 * @frag: the paged fragment
 *
 * Returns the &struct page associated with @frag.
 */
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
	return frag->page.p;
}

/**
 * __skb_frag_ref - take an addition reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Takes an additional reference on the paged fragment @frag.
 */
static inline void __skb_frag_ref(skb_frag_t *frag)
{
	get_page(skb_frag_page(frag));
}

/**
 * skb_frag_ref - take an addition reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset.
 *
 * Takes an additional reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_ref(struct sk_buff *skb, int f)
{
	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}

/**
 * __skb_frag_unref - release a reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Releases a reference on the paged fragment @frag.
 */
static inline void __skb_frag_unref(skb_frag_t *frag)
{
	put_page(skb_frag_page(frag));
}

/**
 * skb_frag_unref - release a reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset
 *
 * Releases a reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_unref(struct sk_buff *skb, int f)
{
	__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
}

/**
 * skb_frag_address - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. The page must already
 * be mapped.
 */
static inline void *skb_frag_address(const skb_frag_t *frag)
{
	return page_address(skb_frag_page(frag)) + frag->page_offset;
}

/**
 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. Checks that the page
 * is mapped and returns %NULL otherwise.
 */
static inline void *skb_frag_address_safe(const skb_frag_t *frag)
{
	void *ptr = page_address(skb_frag_page(frag));
	if (unlikely(!ptr))
		return NULL;

	return ptr + frag->page_offset;
}

/**
 * __skb_frag_set_page - sets the page contained in a paged fragment
 * @frag: the paged fragment
 * @page: the page to set
 *
 * Sets the fragment @frag to contain @page.
 */
static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
	frag->page.p = page;
}

/**
 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
 * @skb: the buffer
 * @f: the fragment offset
 * @page: the page to set
 *
 * Sets the @f'th fragment of @skb to contain @page.
 */
static inline void skb_frag_set_page(struct sk_buff *skb, int f,
				     struct page *page)
{
	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
}

bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);

/**
 * skb_frag_dma_map - maps a paged fragment via the DMA API
 * @dev: the device to map the fragment to
 * @frag: the paged fragment to map
 * @offset: the offset within the fragment (starting at the
 *          fragment's own offset)
 * @size: the number of bytes to map
 * @dir: the direction of the mapping
 *
 * Maps the page associated with @frag to @device.
 */
static inline dma_addr_t skb_frag_dma_map(struct device *dev,
					  const skb_frag_t *frag,
					  size_t offset, size_t size,
					  enum dma_data_direction dir)
{
	return dma_map_page(dev, skb_frag_page(frag),
			    frag->page_offset + offset, size, dir);
}

static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
					gfp_t gfp_mask)
{
	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
}


static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
						  gfp_t gfp_mask)
{
	return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
}

/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not require the data to be copied.
 */
static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and original skb is not changed.
 *
 *	The result is skb with writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 *	skb_cow_head - skb_cow but only making the head writable
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	This function is identical to skb_cow except that we replace the
 *	skb_cloned check by skb_header_cloned.  It should be used when
 *	you only need the header part.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}

/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

/**
 *	skb_put_padto - increase size and pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;

	if (unlikely(size < len)) {
		len -= size;
		if (skb_pad(skb, len))
			return -ENOMEM;
		__skb_put(skb, len);
	}
	return 0;
}

static inline int skb_add_data(struct sk_buff *skb,
			       struct iov_iter *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		__wsum csum = 0;
		if (csum_and_copy_from_iter(skb_put(skb, copy), copy,
					    &csum, from) == copy) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (copy_from_iter(skb_put(skb, copy), copy, from) == copy)
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
				    const struct page *page, int off)
{
	if (i) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == skb_frag_page(frag) &&
		       off == frag->page_offset + skb_frag_size(frag);
	}
	return false;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linarize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 * skb_has_shared_frag - can any frag be overwritten
 * @skb: buffer to test
 *
 * Return true if the skb has at least one frag that might be modified
 * by an external entity (as in vmsplice()/sendfile())
 */
static inline bool skb_has_shared_frag(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) &&
	       skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */
static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}

unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets is still valid after the operation.
 */
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;					\
		     skb != (struct sk_buff *)(queue);				\
		     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)					\
		for (skb = (queue)->next, tmp = skb->next;			\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

#define skb_queue_walk_from(queue, skb)						\
		for (; skb != (struct sk_buff *)(queue);			\
		     skb = skb->next)

#define skb_queue_walk_from_safe(queue, skb, tmp)				\
		for (tmp = skb->next;						\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;					\
		     skb != (struct sk_buff *)(queue);				\
		     skb = skb->prev)

#define skb_queue_reverse_walk_safe(queue, skb, tmp)				\
		for (skb = (queue)->prev, tmp = skb->prev;			\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->prev)

#define skb_queue_reverse_walk_from_safe(queue, skb, tmp)			\
		for (tmp = skb->prev;						\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->prev)

static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
{
	frag->next = skb_shinfo(skb)->frag_list;
	skb_shinfo(skb)->frag_list = frag;
}

#define skb_walk_frags(skb, iter)	\
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)

struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
				    int *peeked, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
				  int *err);
unsigned int datagram_poll(struct file *file, struct socket *sock,
			   struct poll_table_struct *wait);
int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
			   struct iov_iter *to, int size);
static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
					struct msghdr *msg, int size)
{
	return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
}
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
				   struct msghdr *msg);
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
				struct iov_iter *from, int len);
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb);
int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
			      int len, __wsum csum);
int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int len,
		    unsigned int flags);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
		 int len, int hlen);
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
void skb_scrub_packet(struct sk_buff *skb, bool xnet);
unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, int write_len);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);

static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
{
	return copy_from_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
}

static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
{
	return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
}

struct skb_checksum_ops {
	__wsum (*update)(const void *mem, int len, __wsum wsum);
	__wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
};

__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
		      __wsum csum, const struct skb_checksum_ops *ops);
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
		    __wsum csum);

static inline void *__skb_header_pointer(const struct sk_buff *skb, int offset,
					 int len, void *data, int hlen, void *buffer)
{
	if (hlen - offset >= len)
		return data + offset;

	if (!skb ||
	    skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
{
	return __skb_header_pointer(skb, offset, len, skb->data,
				    skb_headlen(skb), buffer);
}

/**
 *	skb_needs_linearize - check if we need to linearize a given skb
 *			      depending on the given device features.
 *	@skb: socket buffer to check
 *	@features: net device features
 *
 *	Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG.
 */
static inline bool skb_needs_linearize(struct sk_buff *skb,
				       netdev_features_t features)
{
	return skb_is_nonlinear(skb) &&
	       ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
		(skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
}

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}

void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and stores
 *	it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct timeval *stamp)
{
	*stamp = ktime_to_timeval(skb->tstamp);
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
				       struct timespec *stamp)
{
	*stamp = ktime_to_timespec(skb->tstamp);
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return ktime_set(0, 0);
}

struct sk_buff *skb_clone_sk(struct sk_buff *skb);

#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING

void skb_clone_tx_timestamp(struct sk_buff *skb);
bool skb_defer_rx_timestamp(struct sk_buff *skb);

#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */

static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	return false;
}

#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */

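/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb back to the stack with a
 * timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps
 *
 */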
void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps);

void __skb_tstamp_tx(struct sk_buff *orig_skb,
		     struct skb_shared_hwtstamps *hwtstamps,
		     struct sock *sk, int tstype);

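/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb:	the original outgoing packet
 * @hwtstamps:	hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket.  Errors are silently ignored.
 */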
void skb_tstamp_tx(struct sk_buff *orig_skb,
		   struct skb_shared_hwtstamps *hwtstamps);

static inline void sw_tx_timestamp(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
	    !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		skb_tstamp_tx(skb, NULL);
}

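/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC Drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * Specifically, one should make absolutely sure that this function is
 * called before TX completion of this packet can trigger.  Otherwise
 * the packet could potentially already be freed.
 *
 * @skb: A socket buffer.
 */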
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	skb_clone_tx_timestamp(skb);
	sw_tx_timestamp(skb);
}

/**
 * skb_complete_wifi_ack - deliver skb with wifi status
 *
 * @skb: the original outgoing packet
 * @acked: ack status
 *
 */
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
__sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
		skb->csum_valid ||
		(skb->ip_summed == CHECKSUM_PARTIAL &&
		 skb_checksum_start_offset(skb) >= 0));
}

static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}

static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level == 0)
			skb->ip_summed = CHECKSUM_NONE;
		else
			skb->csum_level--;
	}
}

static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
			skb->csum_level++;
	} else if (skb->ip_summed == CHECKSUM_NONE) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = 0;
	}
}

static inline void __skb_mark_checksum_bad(struct sk_buff *skb)
{
	/* Mark current checksum as bad (typically called from GRO
	 * path). In the case that ip_summed is CHECKSUM_NONE
	 * this must be the first checksum encountered in the packet.
	 * When ip_summed is CHECKSUM_UNNECESSARY, this is the first
	 * checksum after the last one validated. For UDP, a zero
	 * checksum can not be marked as bad.
	 */
	if (skb->ip_summed == CHECKSUM_NONE ||
	    skb->ip_summed == CHECKSUM_UNNECESSARY)
		skb->csum_bad = 1;
}

/* Check if we need to perform checksum complete validation.
 *
 * Returns true if checksum complete is needed, false otherwise
 * (either checksum is unnecessary or zero checksum is allowed).
 */
static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
						  bool zero_okay,
						  __sum16 check)
{
	if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
		skb->csum_valid = 1;
		__skb_decr_checksum_unnecessary(skb);
		return false;
	}

	return true;
}

/* For small packets <= CHECKSUM_BREAK perform checksum complete directly
 * in checksum_init.
 */
#define CHECKSUM_BREAK 76

/* Unset checksum-complete
 *
 * Unset checksum complete can be done when packet is being modified
 * (uncompressed for instance) and checksum-complete value is
 * invalidated.
 */
static inline void skb_checksum_complete_unset(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/* Validate (init) checksum based on checksum complete.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete. In the latter
 *	case the ip_summed will not be CHECKSUM_UNNECESSARY and the pseudo
 *	checksum is stored in skb->csum for use in __skb_checksum_complete
 *   non-zero: value of invalid checksum
 */
static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
						       bool complete,
						       __wsum psum)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!csum_fold(csum_add(psum, skb->csum))) {
			skb->csum_valid = 1;
			return 0;
		}
	} else if (skb->csum_bad) {
		/* ip_summed == CHECKSUM_NONE in this case */
		return 1;
	}

	skb->csum = psum;

	if (complete || skb->len <= CHECKSUM_BREAK) {
		__sum16 csum;

		csum = __skb_checksum_complete(skb);
		skb->csum_valid = !csum;
		return csum;
	}

	return 0;
}

static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
{
	return 0;
}

/* Perform checksum validate (init). Note that this is a macro since we only
 * want to calculate the pseudo header which is an input function if necessary.
 * First we try to validate without any computation (checksum unnecessary) and
 * then calculate based on checksum complete calling the function to compute
 * pseudo header.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete
 *   non-zero: value of invalid checksum
 */
#define __skb_checksum_validate(skb, proto, complete,			\
				zero_okay, check, compute_pseudo)	\
({									\
	__sum16 __ret = 0;						\
	skb->csum_valid = 0;						\
	if (__skb_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_checksum_validate_complete(skb,		\
				complete, compute_pseudo(skb, proto));	\
	__ret;								\
})

#define skb_checksum_init(skb, proto, compute_pseudo)			\
	__skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)

#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \
	__skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)

#define skb_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)

#define skb_checksum_validate_zero_check(skb, proto, check,		\
					 compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)

#define skb_checksum_simple_validate(skb)				\
	__skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
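
/* Illustrative sketch: a protocol receive path with a pseudo header would
 * typically validate like the following, where IPPROTO_FOO and
 * foo_compute_pseudo (a __wsum fn(struct sk_buff *, int)) are placeholders;
 * protocols that allow a zero checksum on the wire use the *_zero_check
 * variants and pass the checksum field as @check:
 *
 *	if (skb_checksum_validate(skb, IPPROTO_FOO, foo_compute_pseudo))
 *		goto csum_error;
 */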

static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
{
	return (skb->ip_summed == CHECKSUM_NONE &&
		skb->csum_valid && !skb->csum_bad);
}

static inline void __skb_checksum_convert(struct sk_buff *skb,
					  __sum16 check, __wsum pseudo)
{
	skb->csum = ~pseudo;
	skb->ip_summed = CHECKSUM_COMPLETE;
}

#define skb_checksum_try_convert(skb, proto, check, compute_pseudo)	\
do {									\
	if (__skb_checksum_convert_check(skb))				\
		__skb_checksum_convert(skb, check,			\
				       compute_pseudo(skb, proto));	\
} while (0)
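
/* Illustrative sketch: after a successful validation, a receive path can
 * opportunistically convert to CHECKSUM_COMPLETE so that checksums of
 * encapsulated packets come for free; fooh->check and foo_compute_pseudo
 * are placeholders:
 *
 *	skb_checksum_try_convert(skb, IPPROTO_FOO, fooh->check,
 *				 foo_compute_pseudo);
 */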

static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
					      u16 start, u16 offset)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
	skb->csum_offset = offset - start;
}

/* Update skb for the remote checksum offload operation. When called, ptr
 * indicates the starting point for skb->csum when ip_summed is
 * CHECKSUM_COMPLETE. If we need to create checksum complete here,
 * skb_postpull_rcsum is done so that skb->csum starts at ptr.
 */
static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
				       int start, int offset, bool nopartial)
{
	__wsum delta;

	if (!nopartial) {
		skb_remcsum_adjust_partial(skb, ptr, start, offset);
		return;
	}

	if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
		__skb_checksum_complete(skb);
		skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
	}

	delta = remcsum_adjust(ptr, skb->csum, start, offset);

	/* Adjust skb->csum since we changed the packet */
	skb->csum = csum_add(skb->csum, delta);
}

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

static inline void nf_reset_trace(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
	skb->nf_trace = 0;
#endif
}

/* Note: This doesn't put any conntrack and bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
			     bool copy)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->nfct = src->nfct;
	nf_conntrack_get(src->nfct);
	if (copy)
		dst->nfctinfo = src->nfctinfo;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	dst->nf_bridge = src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
	if (copy)
		dst->nf_trace = src->nf_trace;
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(dst->nfct);
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src, true);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline bool skb_irq_freeable(const struct sk_buff *skb)
{
	return !skb->destructor &&
#if IS_ENABLED(CONFIG_XFRM)
	       !skb->sp &&
#endif
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	       !skb->nfct &&
#endif
	       !skb->_skb_refdst &&
	       !skb_has_frag_list(skb);
}

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}

u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
		  unsigned int num_tx_queues);

static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
	return skb->sp;
#else
	return NULL;
#endif
}

/* Keeps track of mac header offset relative to skb->head.
 * It is useful for TSO of Tunneling protocol. e.g. GRE.
 * For non-tunnel skb it points to skb_mac_header() and for
 * tunnel skb it points to outer mac header.
 */
struct skb_gso_cb {
	int	mac_offset;
	int	encap_level;
	__u16	csum_start;
};
#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb)

static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
{
	return (skb_mac_header(inner_skb) - inner_skb->head) -
		SKB_GSO_CB(inner_skb)->mac_offset;
}

static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
{
	int new_headroom, headroom;
	int ret;

	headroom = skb_headroom(skb);
	ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
	if (ret)
		return ret;

	new_headroom = skb_headroom(skb);
	SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
	return 0;
}

/* Compute the checksum for a gso segment. First compute the checksum value
 * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
 * then add in skb->csum (checksum from csum_start to end of packet).
 * skb->csum and csum_start are then updated to reflect the checksum of the
 * resultant packet starting from the transport header-- the resultant
 * checksum is in the res argument (i.e. includes the pseudo header checksum).
 */
static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
{
	int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) -
		   skb_transport_offset(skb);
	__sum16 csum;

	csum = csum_fold(csum_partial(skb_transport_header(skb),
				      plen, skb->csum));
	skb->csum = res;
	SKB_GSO_CB(skb)->csum_start -= plen;

	return csum;
}

static inline bool skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set. */
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one.  Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper, to document places where we make this assertion.
 */
static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

int skb_checksum_setup(struct sk_buff *skb, bool recalculate);

u32 skb_get_poff(const struct sk_buff *skb);
u32 __skb_get_poff(const struct sk_buff *skb, void *data,
		   const struct flow_keys *keys, int hlen);

/**
 * skb_head_is_locked - Determine if the skb->head is locked down
 * @skb: skb to check
 *
 * The head on skbs build around a head frag can be removed if they are
 * not cloned.  This function returns true if the skb head is locked down
 * due to either being allocated via kmalloc, or by being a clone with
 * multiple references to the head.
 */
static inline bool skb_head_is_locked(const struct sk_buff *skb)
{
	return !skb->head_frag || skb_cloned(skb);
}

/**
 * skb_gso_network_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_network_seglen is used to determine the real size of the
 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
 *
 * The MAC/L2 header is not accounted for.
 */
static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) -
			       skb_network_header(skb);
	return hdr_len + skb_gso_transport_seglen(skb);
}
#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */