/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/rbtree.h>
#include <linux/socket.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/sched.h>
#include <net/flow_dissector.h>
#include <linux/splice.h>
#include <linux/in6.h>
#include <net/flow.h>

/* Checksum status of a packet (skb->ip_summed):
 *
 * CHECKSUM_NONE	- neither the device nor the stack verified or
 *			  computed a checksum; software must do it if needed.
 * CHECKSUM_UNNECESSARY	- the device verified the checksum(s); csum_level
 *			  says how many consecutive checksums were verified.
 * CHECKSUM_COMPLETE	- the device computed the complete checksum over the
 *			  packet and stored it in skb->csum.
 * CHECKSUM_PARTIAL	- used on output: the stack filled in a partial
 *			  checksum and csum_start/csum_offset tell the device
 *			  (or GSO) where to finish it.
 */
#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

/* Maximum value in skb->csum_level */
#define SKB_MAX_CSUM_LEVEL	3

#define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
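
/*
 * Illustrative note (not part of the original header): SKB_WITH_OVERHEAD()
 * answers "how much payload fits in an allocation of X bytes once the
 * shared info is accounted for", while SKB_TRUESIZE() goes the other way
 * and charges a payload size with its metadata, e.g.:
 *
 *	linear room available in one page:
 *		SKB_MAX_HEAD(NET_SKB_PAD)
 *			= PAGE_SIZE - NET_SKB_PAD
 *			  - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 *
 *	memory charged to a socket for a 2048-byte buffer:
 *		SKB_TRUESIZE(2048)
 */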

struct net_device;
struct scatterlist;
struct pipe_inode_info;
struct iov_iter;
struct napi_struct;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
	atomic_t		use;
	enum {
		BRNF_PROTO_UNCHANGED,
		BRNF_PROTO_8021Q,
		BRNF_PROTO_PPPOE
	} orig_proto:8;
	u8			pkt_otherhost:1;
	u8			in_prerouting:1;
	u8			bridged_dnat:1;
	__u16			frag_max_size;
	struct net_device	*physindev;

	/* always valid and non-NULL from FORWARD on, for physdev match */
	struct net_device	*physoutdev;
	union {
		/* prerouting: detect dnat in orig/reply direction */
		__be32		ipv4_daddr;
		struct in6_addr	ipv6_daddr;

		/* after prerouting + nat detected: store the original
		 * source mac, since neigh resolution overwrites it
		 */
		char		neigh_header[8];
	};
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list we
 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
 * buffers which do not start on a page boundary.
 *
 * Since GRO uses frags we allocate at least 16 regardless of page
 * size.
 */
#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct {
		struct page *p;
	} page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
	__u32 size;
#else
	__u16 page_offset;
	__u16 size;
#endif
};

static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->size;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->size = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->size += delta;
}

static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->size -= delta;
}

#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since arbitrary point in time
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp; hardware time stamps from the NIC are stored here.
 */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
};

/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp when queueing packet to NIC */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* device driver supports TX zero-copy buffers */
	SKBTX_DEV_ZEROCOPY = 1 << 3,

	/* generate wifi status information (where possible) */
	SKBTX_WIFI_STATUS = 1 << 4,

	/* This indicates at least one fragment might be overwritten
	 * (as in vmsplice(), sendfile() ...).
	 * If we need to compute a TX checksum, we'll need to copy
	 * all frags to avoid a possibly bad checksum.
	 */
	SKBTX_SHARED_FRAG = 1 << 5,

	/* generate software time stamp when entering packet scheduling */
	SKBTX_SCHED_TSTAMP = 1 << 6,

	/* generate software time stamp on peer data acknowledgment */
	SKBTX_ACK_TSTAMP = 1 << 7,
};

#define SKBTX_ANY_SW_TSTAMP	(SKBTX_SW_TSTAMP    | \
				 SKBTX_SCHED_TSTAMP | \
				 SKBTX_ACK_TSTAMP)
#define SKBTX_ANY_TSTAMP	(SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)

/*
 * The callback notifies userspace to release buffers when skb DMA is done
 * in the lower device; the skb's last reference should be 0 when calling
 * this.  The ctx field is used to track device context; the desc field is
 * used to track userspace buffer index.
 */
struct ubuf_info {
	void (*callback)(struct ubuf_info *, bool zerocopy_success);
	void *ctx;
	unsigned long desc;
};

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	unsigned char	nr_frags;
	__u8		tx_flags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short	gso_type;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	u32		tskey;
	__be32		ip6_frag_id;

	/*
	 * Warning : all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void		*destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)


enum {
	SKB_FCLONE_UNAVAILABLE,	/* skb has no fclone (from head_cache) */
	SKB_FCLONE_ORIG,	/* orig skb (from fclone_cache) */
	SKB_FCLONE_CLONE,	/* companion fclone skb (from fclone_cache) */
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,

	SKB_GSO_GRE = 1 << 6,

	SKB_GSO_GRE_CSUM = 1 << 7,

	SKB_GSO_IPIP = 1 << 8,

	SKB_GSO_SIT = 1 << 9,

	SKB_GSO_UDP_TUNNEL = 1 << 10,

	SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,

	SKB_GSO_TUNNEL_REMCSUM = 1 << 12,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

/**
 * struct skb_mstamp - multi resolution time stamps
 * @stamp_us: timestamp in us resolution
 * @stamp_jiffies: timestamp in jiffies
 */
struct skb_mstamp {
	union {
		u64		v64;
		struct {
			u32	stamp_us;
			u32	stamp_jiffies;
		};
	};
};

/**
 * skb_mstamp_get - get current timestamp
 * @cl: place to store timestamps
 */
static inline void skb_mstamp_get(struct skb_mstamp *cl)
{
	u64 val = local_clock();

	do_div(val, NSEC_PER_USEC);
	cl->stamp_us = (u32)val;
	cl->stamp_jiffies = (u32)jiffies;
}

/**
 * skb_mstamp_us_delta - compute the difference in usec between two skb_mstamp
 * @t1: pointer to newest sample
 * @t0: pointer to oldest sample
 */
static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
				      const struct skb_mstamp *t0)
{
	s32 delta_us = t1->stamp_us - t0->stamp_us;
	u32 delta_jiffies = t1->stamp_jiffies - t0->stamp_jiffies;

	/* If delta_us is negative, this might be because the interval is
	 * too big, or local_clock() drift is too big: fall back to jiffies.
	 */
	if (delta_us <= 0 ||
	    delta_jiffies >= (INT_MAX / (USEC_PER_SEC / HZ)))
		delta_us = jiffies_to_usecs(delta_jiffies);

	return delta_us;
}

static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
				    const struct skb_mstamp *t0)
{
	s32 diff = t1->stamp_jiffies - t0->stamp_jiffies;

	if (!diff)
		diff = t1->stamp_us - t0->stamp_us;
	return diff > 0;
}
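
/*
 * Illustrative sketch (not part of the original header): measuring an
 * elapsed time in microseconds with the helpers above.  The variables
 * are hypothetical.
 *
 *	struct skb_mstamp start, now;
 *	u32 elapsed_us;
 *
 *	skb_mstamp_get(&start);
 *	... transmit, wait for the acknowledgment ...
 *	skb_mstamp_get(&now);
 *	elapsed_us = skb_mstamp_us_delta(&now, &start);
 */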

/**
 *	struct sk_buff - socket buffer
 *
 *	One sk_buff describes a single network packet: its data buffer,
 *	the device and socket it belongs to, protocol header offsets and
 *	checksum state.
 */
struct sk_buff {
	union {
		struct {
			/* These two members must be first. */
			struct sk_buff		*next;
			struct sk_buff		*prev;

			union {
				ktime_t		tstamp;
				struct skb_mstamp skb_mstamp;
			};
		};
		struct rb_node	rbnode; /* used in netem & tcp stack */
	};
	struct sock		*sk;
	struct net_device	*dev;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	unsigned long		_skb_refdst;
	void			(*destructor)(struct sk_buff *skb);
#ifdef CONFIG_XFRM
	struct	sec_path	*sp;
#endif
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack	*nfct;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	struct nf_bridge_info	*nf_bridge;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;

	/* Following fields are _not_ copied in __copy_skb_header().
	 * Note that queue_mapping is here mostly to fill a hole.
	 */
	kmemcheck_bitfield_begin(flags1);
	__u16			queue_mapping;
	__u8			cloned:1,
				nohdr:1,
				fclone:2,
				peeked:1,
				head_frag:1,
				xmit_more:1;
	/* one bit hole */
	kmemcheck_bitfield_end(flags1);

	/* fields enclosed in headers_start/headers_end are copied
	 * using a single memcpy() in __copy_skb_header()
	 */
	__u32			headers_start[0];

/* if you move pkt_type around you also must adapt those constants */
#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_TYPE_MAX	(7 << 5)
#else
#define PKT_TYPE_MAX	7
#endif
#define PKT_TYPE_OFFSET()	offsetof(struct sk_buff, __pkt_type_offset)

	__u8			__pkt_type_offset[0];
	__u8			pkt_type:3;
	__u8			pfmemalloc:1;
	__u8			ignore_df:1;
	__u8			nfctinfo:3;

	__u8			nf_trace:1;
	__u8			ip_summed:2;
	__u8			ooo_okay:1;
	__u8			l4_hash:1;
	__u8			sw_hash:1;
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;

	__u8			no_fcs:1;
	/* Indicates the inner headers are valid in the skbuff. */
	__u8			encapsulation:1;
	__u8			encap_hdr_csum:1;
	__u8			csum_valid:1;
	__u8			csum_complete_sw:1;
	__u8			csum_level:2;
	__u8			csum_bad:1;

#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif
	__u8			ipvs_property:1;
	__u8			inner_protocol_type:1;
	__u8			remcsum_offload:1;

#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif

	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	int			skb_iif;
	__u32			hash;
	__be16			vlan_proto;
	__u16			vlan_tci;
#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
	union {
		unsigned int	napi_id;
		unsigned int	sender_cpu;
	};
#endif
	union {
#ifdef CONFIG_NETWORK_SECMARK
		__u32		secmark;
#endif
#ifdef CONFIG_NET_SWITCHDEV
		__u32		offload_fwd_mark;
#endif
	};

	union {
		__u32		mark;
		__u32		reserved_tailroom;
	};

	union {
		__be16		inner_protocol;
		__u8		inner_ipproto;
	};

	__u16			inner_transport_header;
	__u16			inner_network_header;
	__u16			inner_mac_header;

	__be16			protocol;
	__u16			transport_header;
	__u16			network_header;
	__u16			mac_header;

	__u32			headers_end[0];

	/* These elements must be at the end, see alloc_skb() for details. */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	atomic_t		users;
};
711
712#ifdef __KERNEL__
713
714
715
716#include <linux/slab.h>
717
718
719#define SKB_ALLOC_FCLONE 0x01
720#define SKB_ALLOC_RX 0x02
721#define SKB_ALLOC_NAPI 0x04
722
723
724static inline bool skb_pfmemalloc(const struct sk_buff *skb)
725{
726 return unlikely(skb->pfmemalloc);
727}
728
729
730
731
732
733#define SKB_DST_NOREF 1UL
734#define SKB_DST_PTRMASK ~(SKB_DST_NOREF)
735
736
737
738
739
740
741
742static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
743{
744
745
746
747 WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
748 !rcu_read_lock_held() &&
749 !rcu_read_lock_bh_held());
750 return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
751}
752
753
754
755
756
757
758
759
760
761static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
762{
763 skb->_skb_refdst = (unsigned long)dst;
764}
765
766
767
768
769
770
771
772
773
774
775
776static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
777{
778 WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
779 skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
780}
781
782
783
784
785
786static inline bool skb_dst_is_noref(const struct sk_buff *skb)
787{
788 return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
789}
790
791static inline struct rtable *skb_rtable(const struct sk_buff *skb)
792{
793 return (struct rtable *)skb_dst(skb);
794}
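
/*
 * Illustrative sketch (not part of the original header): a dst set with
 * skb_dst_set_noref() may only be dereferenced under RCU, e.g. in a
 * receive handler.  The "... use dst ..." step is hypothetical.
 *
 *	rcu_read_lock();
 *	dst = skb_dst(skb);
 *	if (dst)
 *		... use dst ...
 *	rcu_read_unlock();
 */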
795
796void kfree_skb(struct sk_buff *skb);
797void kfree_skb_list(struct sk_buff *segs);
798void skb_tx_error(struct sk_buff *skb);
799void consume_skb(struct sk_buff *skb);
800void __kfree_skb(struct sk_buff *skb);
801extern struct kmem_cache *skbuff_head_cache;
802
803void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
804bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
805 bool *fragstolen, int *delta_truesize);
806
807struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
808 int node);
809struct sk_buff *__build_skb(void *data, unsigned int frag_size);
810struct sk_buff *build_skb(void *data, unsigned int frag_size);
811static inline struct sk_buff *alloc_skb(unsigned int size,
812 gfp_t priority)
813{
814 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
815}
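
/*
 * Illustrative sketch (not part of the original header): building a small
 * packet from scratch.  hlen, dlen and payload are hypothetical.
 *
 *	struct sk_buff *skb = alloc_skb(hlen + dlen, GFP_KERNEL);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);			   leave room for headers
 *	memcpy(skb_put(skb, dlen), payload, dlen);
 *	... later push protocol headers with skb_push() and hand it on ...
 */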
816
817struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
818 unsigned long data_len,
819 int max_page_order,
820 int *errcode,
821 gfp_t gfp_mask);
822
823
824struct sk_buff_fclones {
825 struct sk_buff skb1;
826
827 struct sk_buff skb2;
828
829 atomic_t fclone_ref;
830};
831
832
833
834
835
836
837
838
839
840static inline bool skb_fclone_busy(const struct sock *sk,
841 const struct sk_buff *skb)
842{
843 const struct sk_buff_fclones *fclones;
844
845 fclones = container_of(skb, struct sk_buff_fclones, skb1);
846
847 return skb->fclone == SKB_FCLONE_ORIG &&
848 atomic_read(&fclones->fclone_ref) > 1 &&
849 fclones->skb2.sk == sk;
850}
851
852static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
853 gfp_t priority)
854{
855 return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
856}
857
858struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
859static inline struct sk_buff *alloc_skb_head(gfp_t priority)
860{
861 return __alloc_skb_head(priority, -1);
862}
863
864struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
865int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
866struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
867struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
868struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
869 gfp_t gfp_mask, bool fclone);
870static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
871 gfp_t gfp_mask)
872{
873 return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
874}
875
876int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
877struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
878 unsigned int headroom);
879struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
880 int newtailroom, gfp_t priority);
881int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
882 int offset, int len);
883int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
884 int len);
885int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
886int skb_pad(struct sk_buff *skb, int pad);
887#define dev_kfree_skb(a) consume_skb(a)
888
889int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
890 int getfrag(void *from, char *to, int offset,
891 int len, int odd, struct sk_buff *skb),
892 void *from, int length);
893
894int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
895 int offset, size_t size);
896
897struct skb_seq_state {
898 __u32 lower_offset;
899 __u32 upper_offset;
900 __u32 frag_idx;
901 __u32 stepped_offset;
902 struct sk_buff *root_skb;
903 struct sk_buff *cur_skb;
904 __u8 *frag_data;
905};
906
907void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
908 unsigned int to, struct skb_seq_state *st);
909unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
910 struct skb_seq_state *st);
911void skb_abort_seq_read(struct skb_seq_state *st);
912
913unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
914 unsigned int to, struct ts_config *config);

/*
 * Packet hash types specify the type of hash in skb_set_hash().
 *
 * Hash types refer to the protocol layer addresses which are used to
 * construct a packet's hash.  The hashes are used to differentiate or
 * identify flows of the protocol layer for the hash.
 *
 * Properties of hashes:
 *
 * 1) Two packets in the same flow have identical hashes
 * 2) Two packets in different flows have different hashes
 */
enum pkt_hash_types {
	PKT_HASH_TYPE_NONE,	/* Undefined type */
	PKT_HASH_TYPE_L2,	/* Input: src_MAC, dest_MAC */
	PKT_HASH_TYPE_L3,	/* Input: src_IP, dst_IP */
	PKT_HASH_TYPE_L4,	/* Input: src_IP, dst_IP, src_port, dst_port */
};
948
949static inline void skb_clear_hash(struct sk_buff *skb)
950{
951 skb->hash = 0;
952 skb->sw_hash = 0;
953 skb->l4_hash = 0;
954}
955
956static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
957{
958 if (!skb->l4_hash)
959 skb_clear_hash(skb);
960}
961
962static inline void
963__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
964{
965 skb->l4_hash = is_l4;
966 skb->sw_hash = is_sw;
967 skb->hash = hash;
968}
969
970static inline void
971skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
972{
973
974 __skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
975}
976
977static inline void
978__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
979{
980 __skb_set_hash(skb, hash, true, is_l4);
981}
982
983void __skb_get_hash(struct sk_buff *skb);
984u32 skb_get_poff(const struct sk_buff *skb);
985u32 __skb_get_poff(const struct sk_buff *skb, void *data,
986 const struct flow_keys *keys, int hlen);
987__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
988 void *data, int hlen_proto);
989
990static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
991 int thoff, u8 ip_proto)
992{
993 return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
994}
995
996void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
997 const struct flow_dissector_key *key,
998 unsigned int key_count);
999
1000bool __skb_flow_dissect(const struct sk_buff *skb,
1001 struct flow_dissector *flow_dissector,
1002 void *target_container,
1003 void *data, __be16 proto, int nhoff, int hlen,
1004 unsigned int flags);
1005
1006static inline bool skb_flow_dissect(const struct sk_buff *skb,
1007 struct flow_dissector *flow_dissector,
1008 void *target_container, unsigned int flags)
1009{
1010 return __skb_flow_dissect(skb, flow_dissector, target_container,
1011 NULL, 0, 0, 0, flags);
1012}
1013
1014static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
1015 struct flow_keys *flow,
1016 unsigned int flags)
1017{
1018 memset(flow, 0, sizeof(*flow));
1019 return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
1020 NULL, 0, 0, 0, flags);
1021}
1022
1023static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow,
1024 void *data, __be16 proto,
1025 int nhoff, int hlen,
1026 unsigned int flags)
1027{
1028 memset(flow, 0, sizeof(*flow));
1029 return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow,
1030 data, proto, nhoff, hlen, flags);
1031}
1032
1033static inline __u32 skb_get_hash(struct sk_buff *skb)
1034{
1035 if (!skb->l4_hash && !skb->sw_hash)
1036 __skb_get_hash(skb);
1037
1038 return skb->hash;
1039}
1040
1041__u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6);
1042
1043static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
1044{
1045 if (!skb->l4_hash && !skb->sw_hash) {
1046 struct flow_keys keys;
1047 __u32 hash = __get_hash_from_flowi6(fl6, &keys);
1048
1049 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
1050 }
1051
1052 return skb->hash;
1053}
1054
1055__u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl);
1056
1057static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4)
1058{
1059 if (!skb->l4_hash && !skb->sw_hash) {
1060 struct flow_keys keys;
1061 __u32 hash = __get_hash_from_flowi4(fl4, &keys);
1062
1063 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
1064 }
1065
1066 return skb->hash;
1067}
1068
1069__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
1070
1071static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
1072{
1073 return skb->hash;
1074}
1075
1076static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
1077{
1078 to->hash = from->hash;
1079 to->sw_hash = from->sw_hash;
1080 to->l4_hash = from->l4_hash;
1081};
1082
1083static inline void skb_sender_cpu_clear(struct sk_buff *skb)
1084{
1085#ifdef CONFIG_XPS
1086 skb->sender_cpu = 0;
1087#endif
1088}
1089
1090#ifdef NET_SKBUFF_DATA_USES_OFFSET
1091static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
1092{
1093 return skb->head + skb->end;
1094}
1095
1096static inline unsigned int skb_end_offset(const struct sk_buff *skb)
1097{
1098 return skb->end;
1099}
1100#else
1101static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
1102{
1103 return skb->end;
1104}
1105
1106static inline unsigned int skb_end_offset(const struct sk_buff *skb)
1107{
1108 return skb->end - skb->head;
1109}
1110#endif
1111
1112
1113#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
1114
1115static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
1116{
1117 return &skb_shinfo(skb)->hwtstamps;
1118}
1119
1120
1121
1122
1123
1124
1125
1126static inline int skb_queue_empty(const struct sk_buff_head *list)
1127{
1128 return list->next == (const struct sk_buff *) list;
1129}
1130
1131
1132
1133
1134
1135
1136
1137
1138static inline bool skb_queue_is_last(const struct sk_buff_head *list,
1139 const struct sk_buff *skb)
1140{
1141 return skb->next == (const struct sk_buff *) list;
1142}
1143
1144
1145
1146
1147
1148
1149
1150
1151static inline bool skb_queue_is_first(const struct sk_buff_head *list,
1152 const struct sk_buff *skb)
1153{
1154 return skb->prev == (const struct sk_buff *) list;
1155}
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
1166 const struct sk_buff *skb)
1167{
1168
1169
1170
1171 BUG_ON(skb_queue_is_last(list, skb));
1172 return skb->next;
1173}
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
1184 const struct sk_buff *skb)
1185{
1186
1187
1188
1189 BUG_ON(skb_queue_is_first(list, skb));
1190 return skb->prev;
1191}
1192
1193
1194
1195
1196
1197
1198
1199
1200static inline struct sk_buff *skb_get(struct sk_buff *skb)
1201{
1202 atomic_inc(&skb->users);
1203 return skb;
1204}
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219static inline int skb_cloned(const struct sk_buff *skb)
1220{
1221 return skb->cloned &&
1222 (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
1223}
1224
1225static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
1226{
1227 might_sleep_if(gfpflags_allow_blocking(pri));
1228
1229 if (skb_cloned(skb))
1230 return pskb_expand_head(skb, 0, 0, pri);
1231
1232 return 0;
1233}
1234
1235
1236
1237
1238
1239
1240
1241
1242static inline int skb_header_cloned(const struct sk_buff *skb)
1243{
1244 int dataref;
1245
1246 if (!skb->cloned)
1247 return 0;
1248
1249 dataref = atomic_read(&skb_shinfo(skb)->dataref);
1250 dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
1251 return dataref != 1;
1252}
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263static inline void skb_header_release(struct sk_buff *skb)
1264{
1265 BUG_ON(skb->nohdr);
1266 skb->nohdr = 1;
1267 atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
1268}
1269
1270
1271
1272
1273
1274
1275
1276
1277static inline void __skb_header_release(struct sk_buff *skb)
1278{
1279 skb->nohdr = 1;
1280 atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
1281}
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291static inline int skb_shared(const struct sk_buff *skb)
1292{
1293 return atomic_read(&skb->users) != 1;
1294}
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
1310{
1311 might_sleep_if(gfpflags_allow_blocking(pri));
1312 if (skb_shared(skb)) {
1313 struct sk_buff *nskb = skb_clone(skb, pri);
1314
1315 if (likely(nskb))
1316 consume_skb(skb);
1317 else
1318 kfree_skb(skb);
1319 skb = nskb;
1320 }
1321 return skb;
1322}
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
1345 gfp_t pri)
1346{
1347 might_sleep_if(gfpflags_allow_blocking(pri));
1348 if (skb_cloned(skb)) {
1349 struct sk_buff *nskb = skb_copy(skb, pri);
1350
1351
1352 if (likely(nskb))
1353 consume_skb(skb);
1354 else
1355 kfree_skb(skb);
1356 skb = nskb;
1357 }
1358 return skb;
1359}
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
1375{
1376 struct sk_buff *skb = list_->next;
1377
1378 if (skb == (struct sk_buff *)list_)
1379 skb = NULL;
1380 return skb;
1381}
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
1393 const struct sk_buff_head *list_)
1394{
1395 struct sk_buff *next = skb->next;
1396
1397 if (next == (struct sk_buff *)list_)
1398 next = NULL;
1399 return next;
1400}
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
1416{
1417 struct sk_buff *skb = list_->prev;
1418
1419 if (skb == (struct sk_buff *)list_)
1420 skb = NULL;
1421 return skb;
1422
1423}
1424
1425
1426
1427
1428
1429
1430
1431static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
1432{
1433 return list_->qlen;
1434}
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446static inline void __skb_queue_head_init(struct sk_buff_head *list)
1447{
1448 list->prev = list->next = (struct sk_buff *)list;
1449 list->qlen = 0;
1450}
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460static inline void skb_queue_head_init(struct sk_buff_head *list)
1461{
1462 spin_lock_init(&list->lock);
1463 __skb_queue_head_init(list);
1464}
1465
1466static inline void skb_queue_head_init_class(struct sk_buff_head *list,
1467 struct lock_class_key *class)
1468{
1469 skb_queue_head_init(list);
1470 lockdep_set_class(&list->lock, class);
1471}
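
/*
 * Illustrative sketch (not part of the original header): a simple FIFO
 * between a producer and a consumer.  The queue carries its own lock, so
 * the locked skb_queue_tail()/skb_dequeue() variants declared below are
 * used; process() is hypothetical.
 *
 *	static struct sk_buff_head rxq;
 *
 *	skb_queue_head_init(&rxq);
 *
 *	producer:	skb_queue_tail(&rxq, skb);
 *	consumer:	while ((skb = skb_dequeue(&rxq)) != NULL)
 *				process(skb);
 */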
1472
1473
1474
1475
1476
1477
1478
1479void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
1480 struct sk_buff_head *list);
1481static inline void __skb_insert(struct sk_buff *newsk,
1482 struct sk_buff *prev, struct sk_buff *next,
1483 struct sk_buff_head *list)
1484{
1485 newsk->next = next;
1486 newsk->prev = prev;
1487 next->prev = prev->next = newsk;
1488 list->qlen++;
1489}
1490
1491static inline void __skb_queue_splice(const struct sk_buff_head *list,
1492 struct sk_buff *prev,
1493 struct sk_buff *next)
1494{
1495 struct sk_buff *first = list->next;
1496 struct sk_buff *last = list->prev;
1497
1498 first->prev = prev;
1499 prev->next = first;
1500
1501 last->next = next;
1502 next->prev = last;
1503}
1504
1505
1506
1507
1508
1509
1510static inline void skb_queue_splice(const struct sk_buff_head *list,
1511 struct sk_buff_head *head)
1512{
1513 if (!skb_queue_empty(list)) {
1514 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
1515 head->qlen += list->qlen;
1516 }
1517}
1518
1519
1520
1521
1522
1523
1524
1525
1526static inline void skb_queue_splice_init(struct sk_buff_head *list,
1527 struct sk_buff_head *head)
1528{
1529 if (!skb_queue_empty(list)) {
1530 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
1531 head->qlen += list->qlen;
1532 __skb_queue_head_init(list);
1533 }
1534}
1535
1536
1537
1538
1539
1540
1541static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
1542 struct sk_buff_head *head)
1543{
1544 if (!skb_queue_empty(list)) {
1545 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
1546 head->qlen += list->qlen;
1547 }
1548}
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
1559 struct sk_buff_head *head)
1560{
1561 if (!skb_queue_empty(list)) {
1562 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
1563 head->qlen += list->qlen;
1564 __skb_queue_head_init(list);
1565 }
1566}
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579static inline void __skb_queue_after(struct sk_buff_head *list,
1580 struct sk_buff *prev,
1581 struct sk_buff *newsk)
1582{
1583 __skb_insert(newsk, prev, prev->next, list);
1584}
1585
1586void skb_append(struct sk_buff *old, struct sk_buff *newsk,
1587 struct sk_buff_head *list);
1588
1589static inline void __skb_queue_before(struct sk_buff_head *list,
1590 struct sk_buff *next,
1591 struct sk_buff *newsk)
1592{
1593 __skb_insert(newsk, next->prev, next, list);
1594}
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
1607static inline void __skb_queue_head(struct sk_buff_head *list,
1608 struct sk_buff *newsk)
1609{
1610 __skb_queue_after(list, (struct sk_buff *)list, newsk);
1611}
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
1624static inline void __skb_queue_tail(struct sk_buff_head *list,
1625 struct sk_buff *newsk)
1626{
1627 __skb_queue_before(list, (struct sk_buff *)list, newsk);
1628}
1629
1630
1631
1632
1633
1634void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
1635static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
1636{
1637 struct sk_buff *next, *prev;
1638
1639 list->qlen--;
1640 next = skb->next;
1641 prev = skb->prev;
1642 skb->next = skb->prev = NULL;
1643 next->prev = prev;
1644 prev->next = next;
1645}
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655struct sk_buff *skb_dequeue(struct sk_buff_head *list);
1656static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
1657{
1658 struct sk_buff *skb = skb_peek(list);
1659 if (skb)
1660 __skb_unlink(skb, list);
1661 return skb;
1662}
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
1673static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
1674{
1675 struct sk_buff *skb = skb_peek_tail(list);
1676 if (skb)
1677 __skb_unlink(skb, list);
1678 return skb;
1679}
1680
1681
1682static inline bool skb_is_nonlinear(const struct sk_buff *skb)
1683{
1684 return skb->data_len;
1685}
1686
1687static inline unsigned int skb_headlen(const struct sk_buff *skb)
1688{
1689 return skb->len - skb->data_len;
1690}
1691
1692static inline int skb_pagelen(const struct sk_buff *skb)
1693{
1694 int i, len = 0;
1695
1696 for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
1697 len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
1698 return len + skb_headlen(skb);
1699}
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
1715 struct page *page, int off, int size)
1716{
1717 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1718
1719
1720
1721
1722
1723
1724 frag->page.p = page;
1725 frag->page_offset = off;
1726 skb_frag_size_set(frag, size);
1727
1728 page = compound_head(page);
1729 if (page_is_pfmemalloc(page))
1730 skb->pfmemalloc = true;
1731}
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
1748 struct page *page, int off, int size)
1749{
1750 __skb_fill_page_desc(skb, i, page, off, size);
1751 skb_shinfo(skb)->nr_frags = i + 1;
1752}
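
/*
 * Illustrative sketch (not part of the original header): attaching one
 * page as the first fragment of an skb.  The caller takes the page
 * reference and accounts for the added bytes; skb_add_rx_frag(), declared
 * below, combines the fill and the length accounting.
 *
 *	get_page(page);
 *	skb_fill_page_desc(skb, 0, page, offset, size);
 *	skb->len      += size;
 *	skb->data_len += size;
 *	skb->truesize += PAGE_SIZE;
 */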
1753
1754void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
1755 int size, unsigned int truesize);
1756
1757void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
1758 unsigned int truesize);
1759
1760#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags)
1761#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frag_list(skb))
1762#define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
1763
1764#ifdef NET_SKBUFF_DATA_USES_OFFSET
1765static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
1766{
1767 return skb->head + skb->tail;
1768}
1769
1770static inline void skb_reset_tail_pointer(struct sk_buff *skb)
1771{
1772 skb->tail = skb->data - skb->head;
1773}
1774
1775static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
1776{
1777 skb_reset_tail_pointer(skb);
1778 skb->tail += offset;
1779}
1780
1781#else
1782static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
1783{
1784 return skb->tail;
1785}
1786
1787static inline void skb_reset_tail_pointer(struct sk_buff *skb)
1788{
1789 skb->tail = skb->data;
1790}
1791
1792static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
1793{
1794 skb->tail = skb->data + offset;
1795}
1796
1797#endif
1798
1799
1800
1801
1802unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
1803unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
1804static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
1805{
1806 unsigned char *tmp = skb_tail_pointer(skb);
1807 SKB_LINEAR_ASSERT(skb);
1808 skb->tail += len;
1809 skb->len += len;
1810 return tmp;
1811}
1812
1813unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
1814static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
1815{
1816 skb->data -= len;
1817 skb->len += len;
1818 return skb->data;
1819}
1820
1821unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
1822static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
1823{
1824 skb->len -= len;
1825 BUG_ON(skb->len < skb->data_len);
1826 return skb->data += len;
1827}
1828
1829static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
1830{
1831 return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
1832}
1833
1834unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
1835
1836static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
1837{
1838 if (len > skb_headlen(skb) &&
1839 !__pskb_pull_tail(skb, len - skb_headlen(skb)))
1840 return NULL;
1841 skb->len -= len;
1842 return skb->data += len;
1843}
1844
1845static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
1846{
1847 return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
1848}
1849
1850static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
1851{
1852 if (likely(len <= skb_headlen(skb)))
1853 return 1;
1854 if (unlikely(len > skb->len))
1855 return 0;
1856 return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
1857}
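
/*
 * Illustrative sketch (not part of the original header): making sure a
 * header is in the linear area before touching it.  struct myproto_hdr
 * and the drop label are hypothetical.
 *
 *	if (!pskb_may_pull(skb, sizeof(struct myproto_hdr)))
 *		goto drop;
 *	hdr = (struct myproto_hdr *)skb->data;
 *	... parse hdr, then __skb_pull(skb, sizeof(*hdr)) to move past it ...
 */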
1858
1859
1860
1861
1862
1863
1864
1865static inline unsigned int skb_headroom(const struct sk_buff *skb)
1866{
1867 return skb->data - skb->head;
1868}
1869
1870
1871
1872
1873
1874
1875
1876static inline int skb_tailroom(const struct sk_buff *skb)
1877{
1878 return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
1879}
1880
1881
1882
1883
1884
1885
1886
1887
1888static inline int skb_availroom(const struct sk_buff *skb)
1889{
1890 if (skb_is_nonlinear(skb))
1891 return 0;
1892
1893 return skb->end - skb->tail - skb->reserved_tailroom;
1894}
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904static inline void skb_reserve(struct sk_buff *skb, int len)
1905{
1906 skb->data += len;
1907 skb->tail += len;
1908}
1909
1910#define ENCAP_TYPE_ETHER 0
1911#define ENCAP_TYPE_IPPROTO 1
1912
1913static inline void skb_set_inner_protocol(struct sk_buff *skb,
1914 __be16 protocol)
1915{
1916 skb->inner_protocol = protocol;
1917 skb->inner_protocol_type = ENCAP_TYPE_ETHER;
1918}
1919
1920static inline void skb_set_inner_ipproto(struct sk_buff *skb,
1921 __u8 ipproto)
1922{
1923 skb->inner_ipproto = ipproto;
1924 skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
1925}
1926
1927static inline void skb_reset_inner_headers(struct sk_buff *skb)
1928{
1929 skb->inner_mac_header = skb->mac_header;
1930 skb->inner_network_header = skb->network_header;
1931 skb->inner_transport_header = skb->transport_header;
1932}
1933
1934static inline void skb_reset_mac_len(struct sk_buff *skb)
1935{
1936 skb->mac_len = skb->network_header - skb->mac_header;
1937}
1938
1939static inline unsigned char *skb_inner_transport_header(const struct sk_buff
1940 *skb)
1941{
1942 return skb->head + skb->inner_transport_header;
1943}
1944
1945static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
1946{
1947 skb->inner_transport_header = skb->data - skb->head;
1948}
1949
1950static inline void skb_set_inner_transport_header(struct sk_buff *skb,
1951 const int offset)
1952{
1953 skb_reset_inner_transport_header(skb);
1954 skb->inner_transport_header += offset;
1955}
1956
1957static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
1958{
1959 return skb->head + skb->inner_network_header;
1960}
1961
1962static inline void skb_reset_inner_network_header(struct sk_buff *skb)
1963{
1964 skb->inner_network_header = skb->data - skb->head;
1965}
1966
1967static inline void skb_set_inner_network_header(struct sk_buff *skb,
1968 const int offset)
1969{
1970 skb_reset_inner_network_header(skb);
1971 skb->inner_network_header += offset;
1972}
1973
1974static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
1975{
1976 return skb->head + skb->inner_mac_header;
1977}
1978
1979static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
1980{
1981 skb->inner_mac_header = skb->data - skb->head;
1982}
1983
1984static inline void skb_set_inner_mac_header(struct sk_buff *skb,
1985 const int offset)
1986{
1987 skb_reset_inner_mac_header(skb);
1988 skb->inner_mac_header += offset;
1989}
1990static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
1991{
1992 return skb->transport_header != (typeof(skb->transport_header))~0U;
1993}
1994
1995static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
1996{
1997 return skb->head + skb->transport_header;
1998}
1999
2000static inline void skb_reset_transport_header(struct sk_buff *skb)
2001{
2002 skb->transport_header = skb->data - skb->head;
2003}
2004
2005static inline void skb_set_transport_header(struct sk_buff *skb,
2006 const int offset)
2007{
2008 skb_reset_transport_header(skb);
2009 skb->transport_header += offset;
2010}
2011
2012static inline unsigned char *skb_network_header(const struct sk_buff *skb)
2013{
2014 return skb->head + skb->network_header;
2015}
2016
2017static inline void skb_reset_network_header(struct sk_buff *skb)
2018{
2019 skb->network_header = skb->data - skb->head;
2020}
2021
2022static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
2023{
2024 skb_reset_network_header(skb);
2025 skb->network_header += offset;
2026}
2027
2028static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
2029{
2030 return skb->head + skb->mac_header;
2031}
2032
2033static inline int skb_mac_header_was_set(const struct sk_buff *skb)
2034{
2035 return skb->mac_header != (typeof(skb->mac_header))~0U;
2036}
2037
2038static inline void skb_reset_mac_header(struct sk_buff *skb)
2039{
2040 skb->mac_header = skb->data - skb->head;
2041}
2042
2043static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
2044{
2045 skb_reset_mac_header(skb);
2046 skb->mac_header += offset;
2047}
2048
2049static inline void skb_pop_mac_header(struct sk_buff *skb)
2050{
2051 skb->mac_header = skb->network_header;
2052}
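
/*
 * Illustrative sketch (not part of the original header): recording the
 * header offsets while building an outgoing IPv4-over-ethernet frame.
 * Assumes the usual ETH_HLEN/ETH_P_IP definitions; headers are pushed
 * innermost first and the offset is reset after each push.
 *
 *	skb_push(skb, sizeof(struct iphdr));
 *	skb_reset_network_header(skb);
 *	skb_push(skb, ETH_HLEN);
 *	skb_reset_mac_header(skb);
 *	skb->protocol = htons(ETH_P_IP);
 */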
2053
2054static inline void skb_probe_transport_header(struct sk_buff *skb,
2055 const int offset_hint)
2056{
2057 struct flow_keys keys;
2058
2059 if (skb_transport_header_was_set(skb))
2060 return;
2061 else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
2062 skb_set_transport_header(skb, keys.control.thoff);
2063 else
2064 skb_set_transport_header(skb, offset_hint);
2065}
2066
2067static inline void skb_mac_header_rebuild(struct sk_buff *skb)
2068{
2069 if (skb_mac_header_was_set(skb)) {
2070 const unsigned char *old_mac = skb_mac_header(skb);
2071
2072 skb_set_mac_header(skb, -skb->mac_len);
2073 memmove(skb_mac_header(skb), old_mac, skb->mac_len);
2074 }
2075}
2076
2077static inline int skb_checksum_start_offset(const struct sk_buff *skb)
2078{
2079 return skb->csum_start - skb_headroom(skb);
2080}
2081
2082static inline int skb_transport_offset(const struct sk_buff *skb)
2083{
2084 return skb_transport_header(skb) - skb->data;
2085}
2086
2087static inline u32 skb_network_header_len(const struct sk_buff *skb)
2088{
2089 return skb->transport_header - skb->network_header;
2090}
2091
2092static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
2093{
2094 return skb->inner_transport_header - skb->inner_network_header;
2095}
2096
2097static inline int skb_network_offset(const struct sk_buff *skb)
2098{
2099 return skb_network_header(skb) - skb->data;
2100}
2101
2102static inline int skb_inner_network_offset(const struct sk_buff *skb)
2103{
2104 return skb_inner_network_header(skb) - skb->data;
2105}
2106
2107static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
2108{
2109 return pskb_may_pull(skb, skb_network_offset(skb) + len);
2110}
2111

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations.  The actual performance hit varies: it can be small if the
 * hardware handles it, or large if we have to take an alignment exception
 * and fix it up in software.
 *
 * Since an ethernet header is 14 bytes, network drivers often end up with
 * the IP header at an unaligned offset.  The IP header can be aligned by
 * shifting the start of the packet by 2 bytes.  Drivers should do this
 * with:
 *
 *	skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned.  On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, NET_IP_ALIGN may be
 * overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif
2135

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb).  This is used to avoid having to reallocate skb data
 * when the header has to grow.  In the default case, if the header has to
 * grow by 32 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet.  As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures.  An architecture can override this value,
 * perhaps making it equal to NET_IP_ALIGN, by defining NET_SKB_PAD in
 * its asm files.
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom; you should not reduce this.
 *
 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
 * to reduce the average number of cache lines per packet.
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif
2159
2160int ___pskb_trim(struct sk_buff *skb, unsigned int len);
2161
2162static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
2163{
2164 if (unlikely(skb_is_nonlinear(skb))) {
2165 WARN_ON(1);
2166 return;
2167 }
2168 skb->len = len;
2169 skb_set_tail_pointer(skb, len);
2170}
2171
2172void skb_trim(struct sk_buff *skb, unsigned int len);
2173
2174static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
2175{
2176 if (skb->data_len)
2177 return ___pskb_trim(skb, len);
2178 __skb_trim(skb, len);
2179 return 0;
2180}
2181
2182static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
2183{
2184 return (len < skb->len) ? __pskb_trim(skb, len) : 0;
2185}
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
2197{
2198 int err = pskb_trim(skb, len);
2199 BUG_ON(err);
2200}
2201
2202
2203
2204
2205
2206
2207
2208
2209
2210static inline void skb_orphan(struct sk_buff *skb)
2211{
2212 if (skb->destructor) {
2213 skb->destructor(skb);
2214 skb->destructor = NULL;
2215 skb->sk = NULL;
2216 } else {
2217 BUG_ON(skb->sk);
2218 }
2219}
2220
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
2231{
2232 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
2233 return 0;
2234 return skb_copy_ubufs(skb, gfp_mask);
2235}
2236
2237
2238
2239
2240
2241
2242
2243
2244
2245void skb_queue_purge(struct sk_buff_head *list);
2246static inline void __skb_queue_purge(struct sk_buff_head *list)
2247{
2248 struct sk_buff *skb;
2249 while ((skb = __skb_dequeue(list)) != NULL)
2250 kfree_skb(skb);
2251}
2252
2253void *netdev_alloc_frag(unsigned int fragsz);
2254
2255struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
2256 gfp_t gfp_mask);
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
2272 unsigned int length)
2273{
2274 return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
2275}
2276
2277
2278static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
2279 gfp_t gfp_mask)
2280{
2281 return __netdev_alloc_skb(NULL, length, gfp_mask);
2282}
2283
2284
2285static inline struct sk_buff *dev_alloc_skb(unsigned int length)
2286{
2287 return netdev_alloc_skb(NULL, length);
2288}
2289
2290
2291static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
2292 unsigned int length, gfp_t gfp)
2293{
2294 struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
2295
2296 if (NET_IP_ALIGN && skb)
2297 skb_reserve(skb, NET_IP_ALIGN);
2298 return skb;
2299}
2300
2301static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
2302 unsigned int length)
2303{
2304 return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
2305}
2306
2307static inline void skb_free_frag(void *addr)
2308{
2309 __free_page_frag(addr);
2310}
2311
2312void *napi_alloc_frag(unsigned int fragsz);
2313struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
2314 unsigned int length, gfp_t gfp_mask);
2315static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
2316 unsigned int length)
2317{
2318 return __napi_alloc_skb(napi, length, GFP_ATOMIC);
2319}
2320
2321
2322
2323
2324
2325
2326
2327
2328
2329
2330static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
2331 unsigned int order)
2332{
2333
2334
2335
2336
2337
2338
2339
2340
2341 gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC;
2342
2343 return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
2344}
2345
2346static inline struct page *dev_alloc_pages(unsigned int order)
2347{
2348 return __dev_alloc_pages(GFP_ATOMIC, order);
2349}
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
2360{
2361 return __dev_alloc_pages(gfp_mask, 0);
2362}
2363
2364static inline struct page *dev_alloc_page(void)
2365{
2366 return __dev_alloc_page(GFP_ATOMIC);
2367}
2368
2369
2370
2371
2372
2373
2374static inline void skb_propagate_pfmemalloc(struct page *page,
2375 struct sk_buff *skb)
2376{
2377 if (page_is_pfmemalloc(page))
2378 skb->pfmemalloc = true;
2379}
2380
2381
2382
2383
2384
2385
2386
2387static inline struct page *skb_frag_page(const skb_frag_t *frag)
2388{
2389 return frag->page.p;
2390}
2391
2392
2393
2394
2395
2396
2397
2398static inline void __skb_frag_ref(skb_frag_t *frag)
2399{
2400 get_page(skb_frag_page(frag));
2401}
2402
2403
2404
2405
2406
2407
2408
2409
2410static inline void skb_frag_ref(struct sk_buff *skb, int f)
2411{
2412 __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
2413}
2414
2415
2416
2417
2418
2419
2420
2421static inline void __skb_frag_unref(skb_frag_t *frag)
2422{
2423 put_page(skb_frag_page(frag));
2424}
2425
2426
2427
2428
2429
2430
2431
2432
2433static inline void skb_frag_unref(struct sk_buff *skb, int f)
2434{
2435 __skb_frag_unref(&skb_shinfo(skb)->frags[f]);
2436}
2437
2438
2439
2440
2441
2442
2443
2444
2445static inline void *skb_frag_address(const skb_frag_t *frag)
2446{
2447 return page_address(skb_frag_page(frag)) + frag->page_offset;
2448}
2449
2450
2451
2452
2453
2454
2455
2456
2457static inline void *skb_frag_address_safe(const skb_frag_t *frag)
2458{
2459 void *ptr = page_address(skb_frag_page(frag));
2460 if (unlikely(!ptr))
2461 return NULL;
2462
2463 return ptr + frag->page_offset;
2464}
2465
2466
2467
2468
2469
2470
2471
2472
2473static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
2474{
2475 frag->page.p = page;
2476}
2477
2478
2479
2480
2481
2482
2483
2484
2485
2486static inline void skb_frag_set_page(struct sk_buff *skb, int f,
2487 struct page *page)
2488{
2489 __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
2490}
2491
2492bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505static inline dma_addr_t skb_frag_dma_map(struct device *dev,
2506 const skb_frag_t *frag,
2507 size_t offset, size_t size,
2508 enum dma_data_direction dir)
2509{
2510 return dma_map_page(dev, skb_frag_page(frag),
2511 frag->page_offset + offset, size, dir);
2512}
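
/*
 * Illustrative sketch (not part of the original header): mapping every
 * fragment of an skb for device DMA on transmit.  dev is the driver's
 * struct device and the unmap label is hypothetical; error unwinding is
 * omitted.
 *
 *	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 *		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 *		dma_addr_t dma;
 *
 *		dma = skb_frag_dma_map(dev, frag, 0,
 *				       skb_frag_size(frag), DMA_TO_DEVICE);
 *		if (dma_mapping_error(dev, dma))
 *			goto unmap;
 *	}
 */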
2513
2514static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
2515 gfp_t gfp_mask)
2516{
2517 return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
2518}
2519
2520
2521static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
2522 gfp_t gfp_mask)
2523{
2524 return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
2525}
2526
2527
2528
2529
2530
2531
2532
2533
2534
2535
2536static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
2537{
2538 return !skb_header_cloned(skb) &&
2539 skb_headroom(skb) + len <= skb->hdr_len;
2540}
2541
2542static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
2543 int cloned)
2544{
2545 int delta = 0;
2546
2547 if (headroom > skb_headroom(skb))
2548 delta = headroom - skb_headroom(skb);
2549
2550 if (delta || cloned)
2551 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
2552 GFP_ATOMIC);
2553 return 0;
2554}
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
2569{
2570 return __skb_cow(skb, headroom, skb_cloned(skb));
2571}
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
2584{
2585 return __skb_cow(skb, headroom, skb_header_cloned(skb));
2586}
2587
2588
2589
2590
2591
2592
2593
2594
2595
2596
2597
2598static inline int skb_padto(struct sk_buff *skb, unsigned int len)
2599{
2600 unsigned int size = skb->len;
2601 if (likely(size >= len))
2602 return 0;
2603 return skb_pad(skb, len - size);
2604}
2605
2606
2607
2608
2609
2610
2611
2612
2613
2614
2615
2616static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
2617{
2618 unsigned int size = skb->len;
2619
2620 if (unlikely(size < len)) {
2621 len -= size;
2622 if (skb_pad(skb, len))
2623 return -ENOMEM;
2624 __skb_put(skb, len);
2625 }
2626 return 0;
2627}
2628
2629static inline int skb_add_data(struct sk_buff *skb,
2630 struct iov_iter *from, int copy)
2631{
2632 const int off = skb->len;
2633
2634 if (skb->ip_summed == CHECKSUM_NONE) {
2635 __wsum csum = 0;
2636 if (csum_and_copy_from_iter(skb_put(skb, copy), copy,
2637 &csum, from) == copy) {
2638 skb->csum = csum_block_add(skb->csum, csum, off);
2639 return 0;
2640 }
2641 } else if (copy_from_iter(skb_put(skb, copy), copy, from) == copy)
2642 return 0;
2643
2644 __skb_trim(skb, off);
2645 return -EFAULT;
2646}
2647
2648static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
2649 const struct page *page, int off)
2650{
2651 if (i) {
2652 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
2653
2654 return page == skb_frag_page(frag) &&
2655 off == frag->page_offset + skb_frag_size(frag);
2656 }
2657 return false;
2658}
2659
2660static inline int __skb_linearize(struct sk_buff *skb)
2661{
2662 return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
2663}
2664
2665
2666
2667
2668
2669
2670
2671
2672static inline int skb_linearize(struct sk_buff *skb)
2673{
2674 return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
2675}
2676
2677
2678
2679
2680
2681
2682
2683
2684static inline bool skb_has_shared_frag(const struct sk_buff *skb)
2685{
2686 return skb_is_nonlinear(skb) &&
2687 skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
2688}
2689
2690
2691
2692
2693
2694
2695
2696
2697static inline int skb_linearize_cow(struct sk_buff *skb)
2698{
2699 return skb_is_nonlinear(skb) || skb_cloned(skb) ?
2700 __skb_linearize(skb) : 0;
2701}
2702
2703
2704
2705
2706
2707
2708
2709
2710
2711
2712
2713
2714static inline void skb_postpull_rcsum(struct sk_buff *skb,
2715 const void *start, unsigned int len)
2716{
2717 if (skb->ip_summed == CHECKSUM_COMPLETE)
2718 skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
2719 else if (skb->ip_summed == CHECKSUM_PARTIAL &&
2720 skb_checksum_start_offset(skb) < 0)
2721 skb->ip_summed = CHECKSUM_NONE;
2722}
2723
2724unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
2725
2726
2727
2728
2729
2730
2731
2732
2733
2734
2735static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
2736{
2737 if (likely(len >= skb->len))
2738 return 0;
2739 if (skb->ip_summed == CHECKSUM_COMPLETE)
2740 skb->ip_summed = CHECKSUM_NONE;
2741 return __pskb_trim(skb, len);
2742}
2743
2744#define skb_queue_walk(queue, skb) \
2745 for (skb = (queue)->next; \
2746 skb != (struct sk_buff *)(queue); \
2747 skb = skb->next)
2748
2749#define skb_queue_walk_safe(queue, skb, tmp) \
2750 for (skb = (queue)->next, tmp = skb->next; \
2751 skb != (struct sk_buff *)(queue); \
2752 skb = tmp, tmp = skb->next)
2753
2754#define skb_queue_walk_from(queue, skb) \
2755 for (; skb != (struct sk_buff *)(queue); \
2756 skb = skb->next)
2757
2758#define skb_queue_walk_from_safe(queue, skb, tmp) \
2759 for (tmp = skb->next; \
2760 skb != (struct sk_buff *)(queue); \
2761 skb = tmp, tmp = skb->next)
2762
2763#define skb_queue_reverse_walk(queue, skb) \
2764 for (skb = (queue)->prev; \
2765 skb != (struct sk_buff *)(queue); \
2766 skb = skb->prev)
2767
2768#define skb_queue_reverse_walk_safe(queue, skb, tmp) \
2769 for (skb = (queue)->prev, tmp = skb->prev; \
2770 skb != (struct sk_buff *)(queue); \
2771 skb = tmp, tmp = skb->prev)
2772
2773#define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \
2774 for (tmp = skb->prev; \
2775 skb != (struct sk_buff *)(queue); \
2776 skb = tmp, tmp = skb->prev)
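
/*
 * Illustrative sketch (not part of the original header): the _safe walk
 * variants above must be used when the current skb may be unlinked inside
 * the loop body.  should_drop() is hypothetical, and the caller must hold
 * the queue lock when using the unlocked helpers.
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_queue_walk_safe(&queue, skb, tmp) {
 *		if (should_drop(skb)) {
 *			__skb_unlink(skb, &queue);
 *			kfree_skb(skb);
 *		}
 *	}
 */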
2777
2778static inline bool skb_has_frag_list(const struct sk_buff *skb)
2779{
2780 return skb_shinfo(skb)->frag_list != NULL;
2781}
2782
2783static inline void skb_frag_list_init(struct sk_buff *skb)
2784{
2785 skb_shinfo(skb)->frag_list = NULL;
2786}
2787
2788#define skb_walk_frags(skb, iter) \
2789 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
2790
2791struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
2792 int *peeked, int *off, int *err);
2793struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
2794 int *err);
2795unsigned int datagram_poll(struct file *file, struct socket *sock,
2796 struct poll_table_struct *wait);
2797int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
2798 struct iov_iter *to, int size);
2799static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
2800 struct msghdr *msg, int size)
2801{
2802 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
2803}
2804int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
2805 struct msghdr *msg);
2806int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
2807 struct iov_iter *from, int len);
2808int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
2809void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
2810void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb);
2811int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
2812int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
2813int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
2814__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
2815 int len, __wsum csum);
2816ssize_t skb_socket_splice(struct sock *sk,
2817 struct pipe_inode_info *pipe,
2818 struct splice_pipe_desc *spd);
2819int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
2820 struct pipe_inode_info *pipe, unsigned int len,
2821 unsigned int flags,
2822 ssize_t (*splice_cb)(struct sock *,
2823 struct pipe_inode_info *,
2824 struct splice_pipe_desc *));
2825void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
2826unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
2827int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
2828 int len, int hlen);
2829void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
2830int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
2831void skb_scrub_packet(struct sk_buff *skb, bool xnet);
2832unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
2833struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
2834struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
2835int skb_ensure_writable(struct sk_buff *skb, int write_len);
2836int skb_vlan_pop(struct sk_buff *skb);
2837int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
2838
2839static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
2840{
2841 return copy_from_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
2842}
2843
2844static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
2845{
2846 return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
2847}
2848
2849struct skb_checksum_ops {
2850 __wsum (*update)(const void *mem, int len, __wsum wsum);
2851 __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
2852};
2853
2854__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
2855 __wsum csum, const struct skb_checksum_ops *ops);
2856__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
2857 __wsum csum);
2858
2859static inline void * __must_check
2860__skb_header_pointer(const struct sk_buff *skb, int offset,
2861 int len, void *data, int hlen, void *buffer)
2862{
2863 if (hlen - offset >= len)
2864 return data + offset;
2865
2866 if (!skb ||
2867 skb_copy_bits(skb, offset, buffer, len) < 0)
2868 return NULL;
2869
2870 return buffer;
2871}
2872
2873static inline void * __must_check
2874skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
2875{
2876 return __skb_header_pointer(skb, offset, len, skb->data,
2877 skb_headlen(skb), buffer);
2878}
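
/*
 * Illustrative sketch (not part of the original header): reading a TCP
 * header that may or may not be in the linear area, without modifying
 * the skb.  On success th points either into the skb or into the local
 * copy _th.
 *
 *	struct tcphdr _th;
 *	const struct tcphdr *th;
 *
 *	th = skb_header_pointer(skb, skb_transport_offset(skb),
 *				sizeof(_th), &_th);
 *	if (!th)
 *		return;
 *	... parse th ...
 */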

/**
 *	skb_needs_linearize - check if we need to linearize a given skb
 *			      depending on the given device features.
 *	@skb: socket buffer to check
 *	@features: net device features
 *
 *	Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG.
 */
static inline bool skb_needs_linearize(struct sk_buff *skb,
				       netdev_features_t features)
{
	return skb_is_nonlinear(skb) &&
	       ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
		(skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
}
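
/* Illustrative sketch (not part of this header): transmit paths use
 * skb_needs_linearize() to decide whether an skb must be flattened
 * before handing it to hardware that lacks SG or FRAGLIST support.
 * "features" would come from the egress device; the drop label is
 * hypothetical.
 *
 *	if (skb_needs_linearize(skb, features) &&
 *	    __skb_linearize(skb))
 *		goto drop;
 */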

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						     const int offset, void *to,
						     const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						   const int offset,
						   const void *from,
						   const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}

void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and
 *	stores it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct timeval *stamp)
{
	*stamp = ktime_to_timeval(skb->tstamp);
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
				       struct timespec *stamp)
{
	*stamp = ktime_to_timespec(skb->tstamp);
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return ktime_set(0, 0);
}
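
/* Illustrative sketch (not part of this header): a receive path that
 * has no timestamp yet can stamp the skb on entry and later convert it
 * for userspace or compute the queuing delay. "tv" and "delay" are
 * hypothetical locals; the tv64 test assumes the union-based ktime_t of
 * this kernel generation.
 *
 *	if (!skb->tstamp.tv64)
 *		__net_timestamp(skb);
 *
 *	struct timeval tv;
 *	ktime_t delay;
 *
 *	skb_get_timestamp(skb, &tv);
 *	delay = net_timedelta(skb_get_ktime(skb));
 */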

struct sk_buff *skb_clone_sk(struct sk_buff *skb);

#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING

void skb_clone_tx_timestamp(struct sk_buff *skb);
bool skb_defer_rx_timestamp(struct sk_buff *skb);

#else

static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	return false;
}

#endif

/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb back to the stack with a
 * timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps
 */
void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps);

void __skb_tstamp_tx(struct sk_buff *orig_skb,
		     struct skb_shared_hwtstamps *hwtstamps,
		     struct sock *sk, int tstype);

/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb:	the original outgoing packet
 * @hwtstamps:	hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket.  Errors are silently ignored.
 */
void skb_tstamp_tx(struct sk_buff *orig_skb,
		   struct skb_shared_hwtstamps *hwtstamps);

static inline void sw_tx_timestamp(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
	    !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		skb_tstamp_tx(skb, NULL);
}

/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC Drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * Specifically, one should make absolutely sure that this function is
 * called before TX completion of this packet can trigger.  Otherwise
 * the packet could potentially already be freed.
 *
 * @skb: A socket buffer.
 */
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	skb_clone_tx_timestamp(skb);
	sw_tx_timestamp(skb);
}
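
/* Illustrative sketch (not part of this header): a MAC driver's
 * ndo_start_xmit() calls skb_tx_timestamp() after filling its TX
 * descriptors but before telling the hardware to send, so software and
 * PHY timestamps are taken while the skb is still owned by the driver.
 * foo_start_xmit(), foo_fill_descriptors() and foo_ring_doorbell() are
 * hypothetical driver functions.
 *
 *	static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
 *					  struct net_device *dev)
 *	{
 *		foo_fill_descriptors(dev, skb);
 *		skb_tx_timestamp(skb);
 *		foo_ring_doorbell(dev);
 *		return NETDEV_TX_OK;
 *	}
 */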

/**
 * skb_complete_wifi_ack - deliver skb with wifi status
 *
 * @skb: the original outgoing packet
 * @acked: ack status
 */
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
__sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
		skb->csum_valid ||
		(skb->ip_summed == CHECKSUM_PARTIAL &&
		 skb_checksum_start_offset(skb) >= 0));
}

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify that checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is correct.  In particular, this function will return zero
 *	if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
 *	hardware has already verified the correctness of the checksum.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}
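
/* Illustrative sketch (not part of this header): receive handlers for
 * protocols that carry a complete checksum can simply test the return
 * value; the call is a no-op when the device already validated the
 * packet and falls back to __skb_checksum_complete() otherwise. The
 * csum_error label is hypothetical.
 *
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;
 */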

static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level == 0)
			skb->ip_summed = CHECKSUM_NONE;
		else
			skb->csum_level--;
	}
}

static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
			skb->csum_level++;
	} else if (skb->ip_summed == CHECKSUM_NONE) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = 0;
	}
}

static inline void __skb_mark_checksum_bad(struct sk_buff *skb)
{
	/* Mark current checksum as bad (typically called from GRO
	 * path). In the case that ip_summed is CHECKSUM_NONE
	 * this must be the first checksum encountered in the packet.
	 * When ip_summed is CHECKSUM_UNNECESSARY, this is the first
	 * checksum after the last one validated. For UDP, a zero
	 * checksum can not be marked as bad.
	 */
	if (skb->ip_summed == CHECKSUM_NONE ||
	    skb->ip_summed == CHECKSUM_UNNECESSARY)
		skb->csum_bad = 1;
}

/* Check if we need to perform checksum complete validation.
 *
 * Returns true if checksum complete is needed, false otherwise
 * (either checksum is unnecessary or zero checksum is allowed).
 */
static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
						  bool zero_okay,
						  __sum16 check)
{
	if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
		skb->csum_valid = 1;
		__skb_decr_checksum_unnecessary(skb);
		return false;
	}

	return true;
}

/* For small packets <= CHECKSUM_BREAK perform checksum complete directly
 * in checksum_init.
 */
#define CHECKSUM_BREAK 76

/* Unset checksum-complete
 *
 * Unset checksum complete can be done when packet is being modified
 * (uncompressed for instance) and checksum-complete value is
 * invalidated.
 */
static inline void skb_checksum_complete_unset(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/* Validate (init) checksum based on checksum complete.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete. In the latter
 *	case the ip_summed will not be CHECKSUM_UNNECESSARY and the pseudo
 *	checksum is stored in skb->csum for use in __skb_checksum_complete
 *   non-zero: value of invalid checksum
 */
static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
						       bool complete,
						       __wsum psum)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!csum_fold(csum_add(psum, skb->csum))) {
			skb->csum_valid = 1;
			return 0;
		}
	} else if (skb->csum_bad) {
		/* ip_summed == CHECKSUM_NONE in this case */
		return (__force __sum16)1;
	}

	skb->csum = psum;

	if (complete || skb->len <= CHECKSUM_BREAK) {
		__sum16 csum;

		csum = __skb_checksum_complete(skb);
		skb->csum_valid = !csum;
		return csum;
	}

	return 0;
}

static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
{
	return 0;
}

/* Perform checksum validate (init). Note that this is a macro since we only
 * want to calculate the pseudo header which is an input function if necessary.
 * First we try to validate without any computation (checksum unnecessary) and
 * then calculate based on checksum complete calling the function to compute
 * pseudo header.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete
 *   non-zero: value of invalid checksum
 */
#define __skb_checksum_validate(skb, proto, complete,			\
				zero_okay, check, compute_pseudo)	\
({									\
	__sum16 __ret = 0;						\
	skb->csum_valid = 0;						\
	if (__skb_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_checksum_validate_complete(skb,		\
				complete, compute_pseudo(skb, proto));	\
	__ret;								\
})

#define skb_checksum_init(skb, proto, compute_pseudo)			\
	__skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)

#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \
	__skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)

#define skb_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)

#define skb_checksum_validate_zero_check(skb, proto, check,		\
					 compute_pseudo)		\
	__skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)

#define skb_checksum_simple_validate(skb)				\
	__skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
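
/* Illustrative sketch (not part of this header), modelled on how a
 * UDP-over-IPv4 receive path initializes checksum validation: a zero
 * UDP checksum is acceptable, and a non-zero return means the checksum
 * is already known bad. inet_compute_pseudo is assumed to be the IPv4
 * pseudo-header helper used by the real stack; "uh" and the csum_error
 * label are hypothetical.
 *
 *	if (skb_checksum_init_zero_check(skb, IPPROTO_UDP, uh->check,
 *					 inet_compute_pseudo))
 *		goto csum_error;
 */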

static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
{
	return (skb->ip_summed == CHECKSUM_NONE &&
		skb->csum_valid && !skb->csum_bad);
}

static inline void __skb_checksum_convert(struct sk_buff *skb,
					  __sum16 check, __wsum pseudo)
{
	skb->csum = ~pseudo;
	skb->ip_summed = CHECKSUM_COMPLETE;
}

#define skb_checksum_try_convert(skb, proto, check, compute_pseudo)	\
do {									\
	if (__skb_checksum_convert_check(skb))				\
		__skb_checksum_convert(skb, check,			\
				       compute_pseudo(skb, proto));	\
} while (0)

static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
					      u16 start, u16 offset)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
	skb->csum_offset = offset - start;
}

/* Update skbuf and packet to reflect the remote checksum offload operation.
 * When called, ptr indicates the starting point for skb->csum when
 * ip_summed is CHECKSUM_COMPLETE. If we need to create checksum complete
 * here, skb_postpull_rcsum is done so skb->csum start is ptr.
 */
static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
				       int start, int offset, bool nopartial)
{
	__wsum delta;

	if (!nopartial) {
		skb_remcsum_adjust_partial(skb, ptr, start, offset);
		return;
	}

	if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
		__skb_checksum_complete(skb);
		skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
	}

	delta = remcsum_adjust(ptr, skb->csum, start, offset);

	/* Adjust skb->csum since we changed the packet */
	skb->csum = csum_add(skb->csum, delta);
}

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

static inline void nf_reset_trace(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
	skb->nf_trace = 0;
#endif
}

/* Note: This doesn't put any conntrack and bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
			     bool copy)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->nfct = src->nfct;
	nf_conntrack_get(src->nfct);
	if (copy)
		dst->nfctinfo = src->nfctinfo;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	dst->nf_bridge = src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
	if (copy)
		dst->nf_trace = src->nf_trace;
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(dst->nfct);
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src, true);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline bool skb_irq_freeable(const struct sk_buff *skb)
{
	return !skb->destructor &&
#if IS_ENABLED(CONFIG_XFRM)
	       !skb->sp &&
#endif
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	       !skb->nfct &&
#endif
	       !skb->_skb_refdst &&
	       !skb_has_frag_list(skb);
}

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}
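
/* Illustrative sketch (not part of this header): multiqueue drivers
 * record the hardware RX queue on each received skb so that later
 * transmit-queue selection can keep a flow on the paired queue. The
 * queue index "q->index" is hypothetical driver state, and "rxq" is a
 * hypothetical local in the consumer.
 *
 *	skb_record_rx_queue(skb, q->index);
 *
 *	if (skb_rx_queue_recorded(skb))
 *		rxq = skb_get_rx_queue(skb);
 */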

static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
	return skb->sp;
#else
	return NULL;
#endif
}

/* Keeps track of mac header offset relative to skb->head.
 * It is useful for TSO of Tunneling protocol. e.g. GRE.
 * For non-tunnel skb it points to skb_mac_header() and for
 * tunnel skb it points to outer mac header.
 * Keeps track of level of encapsulation of network headers.
 */
struct skb_gso_cb {
	int	mac_offset;
	int	encap_level;
	__u16	csum_start;
};
#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb)

static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
{
	return (skb_mac_header(inner_skb) - inner_skb->head) -
		SKB_GSO_CB(inner_skb)->mac_offset;
}

static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
{
	int new_headroom, headroom;
	int ret;

	headroom = skb_headroom(skb);
	ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
	if (ret)
		return ret;

	new_headroom = skb_headroom(skb);
	SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
	return 0;
}

/* Compute the checksum for a gso segment. First compute the checksum value
 * from the start of the transport header to SKB_GSO_CB(skb)->csum_start,
 * and then add in skb->csum (checksum from csum_start to end of packet).
 * skb->csum and csum_start are then updated to reflect the checksum of the
 * resultant packet starting from the transport header; "res" supplies the
 * pseudo header checksum for that resultant packet.
 */
static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
{
	int plen = SKB_GSO_CB(skb)->csum_start - skb_headroom(skb) -
		   skb_transport_offset(skb);
	__wsum partial;

	partial = csum_partial(skb_transport_header(skb), plen, skb->csum);
	skb->csum = res;
	SKB_GSO_CB(skb)->csum_start -= plen;

	return csum_fold(partial);
}

static inline bool skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}
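
/* Illustrative sketch (not part of this header): forwarding and driver
 * paths use skb_is_gso() to branch between per-packet and per-segment
 * handling, for example when a length check only makes sense for
 * non-GSO packets. "mtu" and the drop label are hypothetical.
 *
 *	if (!skb_is_gso(skb) && skb->len > mtu)
 *		goto drop;
 */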

/* Note: Should be called only if skb_is_gso(skb) is true */
static inline bool skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * happening, both are set.
	 */
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one.  Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper, to document places where we make this assertion.
 */
static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
				     unsigned int transport_len,
				     __sum16(*skb_chkf)(struct sk_buff *skb));

/**
 * skb_head_is_locked - Determine if the skb->head is locked down
 * @skb: skb to check
 *
 * The head on skbs build around a head frag can be removed if they are
 * not cloned.  This function returns true if the skb head is locked down
 * due to either being allocated via kmalloc, or by being a clone with
 * multiple references to the head.
 */
static inline bool skb_head_is_locked(const struct sk_buff *skb)
{
	return !skb->head_frag || skb_cloned(skb);
}

/**
 * skb_gso_network_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_network_seglen is used to determine the real size of the
 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
 *
 * The MAC/L2 header is not accounted for.
 */
static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) -
			       skb_network_header(skb);
	return hdr_len + skb_gso_transport_seglen(skb);
}
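
/* Illustrative sketch (not part of this header): when forwarding a GSO
 * packet, the stack compares the size of the individual segments, not
 * skb->len, against the egress MTU before deciding whether the packet
 * exceeds it. "mtu" and the label are hypothetical caller state.
 *
 *	if (skb_is_gso(skb) &&
 *	    skb_gso_network_seglen(skb) > mtu)
 *		goto send_frag_needed;
 */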

#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */