#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/bvec.h>
#include <linux/cache.h>
#include <linux/rbtree.h>
#include <linux/socket.h>
#include <linux/refcount.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <net/flow_dissector.h>
#include <linux/splice.h>
#include <linux/in6.h>
#include <linux/if_packet.h>
#include <net/flow.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <linux/netfilter/nf_conntrack_common.h>
#endif
#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

#define SKB_MAX_CSUM_LEVEL	3

#define SKB_DATA_ALIGN(X)	ALIGN(X, SMP_CACHE_BYTES)
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
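/*
 * Example (illustrative): how the sizing macros above compose.  For a
 * one-page allocation with NET_SKB_PAD bytes assumed to be reserved as
 * headroom, the usable linear data area is
 *
 *	unsigned int max_data = SKB_MAX_HEAD(NET_SKB_PAD);
 *
 * i.e. PAGE_SIZE - NET_SKB_PAD minus the aligned size of
 * struct skb_shared_info.  SKB_TRUESIZE(len) is the memory-accounting
 * estimate for an skb holding "len" bytes: the payload plus the aligned
 * sk_buff and skb_shared_info metadata.
 */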
struct ahash_request;
struct net_device;
struct scatterlist;
struct pipe_inode_info;
struct iov_iter;
struct napi_struct;
struct bpf_prog;
union bpf_attr;
struct skb_ext;

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
	enum {
		BRNF_PROTO_UNCHANGED,
		BRNF_PROTO_8021Q,
		BRNF_PROTO_PPPOE
	} orig_proto:8;
	u8			pkt_otherhost:1;
	u8			in_prerouting:1;
	u8			bridged_dnat:1;
	__u16			frag_max_size;
	struct net_device	*physindev;

	struct net_device	*physoutdev;
	union {
		__be32		ipv4_daddr;
		struct in6_addr	ipv6_daddr;

		char		neigh_header[8];
	};
};
#endif

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
struct tc_skb_ext {
	__u32 chain;
	__u16 mru;
};
#endif

struct sk_buff_head {
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};
struct sk_buff;

#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif
extern int sysctl_max_skb_frags;

#define GSO_BY_FRAGS	0xFFFF

typedef struct bio_vec skb_frag_t;

static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->bv_len;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->bv_len = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->bv_len += delta;
}

static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->bv_len -= delta;
}

static inline bool skb_frag_must_loop(struct page *p)
{
#if defined(CONFIG_HIGHMEM)
	if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) || PageHighMem(p))
		return true;
#endif
	return false;
}

#define skb_frag_foreach_page(f, f_off, f_len, p, p_off, p_len, copied)	\
	for (p = skb_frag_page(f) + ((f_off) >> PAGE_SHIFT),		\
	     p_off = (f_off) & (PAGE_SIZE - 1),				\
	     p_len = skb_frag_must_loop(p) ?				\
	     min_t(u32, f_len, PAGE_SIZE - p_off) : f_len,		\
	     copied = 0;						\
	     copied < f_len;						\
	     copied += p_len, p++, p_off = 0,				\
	     p_len = min_t(u32, f_len - copied, PAGE_SIZE))		\
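/*
 * Example (illustrative): walking one fragment page-by-page with
 * skb_frag_foreach_page(), e.g. when each page must be mapped individually
 * on HIGHMEM configurations.  "frag" is assumed to be a valid skb_frag_t
 * taken from skb_shinfo(skb)->frags[] and "dst" an assumed destination
 * buffer large enough for the whole fragment.
 *
 *	struct page *p;
 *	u32 p_off, p_len, copied;
 *
 *	skb_frag_foreach_page(frag, skb_frag_off(frag), skb_frag_size(frag),
 *			      p, p_off, p_len, copied) {
 *		void *vaddr = kmap_atomic(p);
 *
 *		memcpy(dst + copied, vaddr + p_off, p_len);
 *		kunmap_atomic(vaddr);
 *	}
 */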
#define HAVE_HW_TIME_STAMP

struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
};

enum {
	SKBTX_HW_TSTAMP = 1 << 0,

	SKBTX_SW_TSTAMP = 1 << 1,

	SKBTX_IN_PROGRESS = 1 << 2,

	SKBTX_DEV_ZEROCOPY = 1 << 3,

	SKBTX_WIFI_STATUS = 1 << 4,

	SKBTX_SHARED_FRAG = 1 << 5,

	SKBTX_SCHED_TSTAMP = 1 << 6,
};

#define SKBTX_ZEROCOPY_FRAG	(SKBTX_DEV_ZEROCOPY | SKBTX_SHARED_FRAG)
#define SKBTX_ANY_SW_TSTAMP	(SKBTX_SW_TSTAMP | \
				 SKBTX_SCHED_TSTAMP)
#define SKBTX_ANY_TSTAMP	(SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP)
struct ubuf_info {
	void (*callback)(struct ubuf_info *, bool zerocopy_success);
	union {
		struct {
			unsigned long desc;
			void *ctx;
		};
		struct {
			u32 id;
			u16 len;
			u16 zerocopy:1;
			u32 bytelen;
		};
	};
	refcount_t refcnt;

	struct mmpin {
		struct user_struct *user;
		unsigned int num_pg;
	} mmp;
};

#define skb_uarg(SKB)	((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg))

int mm_account_pinned_pages(struct mmpin *mmp, size_t size);
void mm_unaccount_pinned_pages(struct mmpin *mmp);

struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size);
struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
					struct ubuf_info *uarg);

static inline void sock_zerocopy_get(struct ubuf_info *uarg)
{
	refcount_inc(&uarg->refcnt);
}

void sock_zerocopy_put(struct ubuf_info *uarg);
void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);

void sock_zerocopy_callback(struct ubuf_info *uarg, bool success);

int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len);
int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
			     struct msghdr *msg, int len,
			     struct ubuf_info *uarg);

struct skb_shared_info {
	__u8		__unused;
	__u8		meta_len;
	__u8		nr_frags;
	__u8		tx_flags;
	unsigned short	gso_size;

	unsigned short	gso_segs;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	unsigned int	gso_type;
	u32		tskey;

	atomic_t	dataref;

	void		*destructor_arg;

	skb_frag_t	frags[MAX_SKB_FRAGS];
};
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)

enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_DODGY = 1 << 1,
	SKB_GSO_TCP_ECN = 1 << 2,
	SKB_GSO_TCP_FIXEDID = 1 << 3,
	SKB_GSO_TCPV6 = 1 << 4,
	SKB_GSO_FCOE = 1 << 5,
	SKB_GSO_GRE = 1 << 6,
	SKB_GSO_GRE_CSUM = 1 << 7,
	SKB_GSO_IPXIP4 = 1 << 8,
	SKB_GSO_IPXIP6 = 1 << 9,
	SKB_GSO_UDP_TUNNEL = 1 << 10,
	SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
	SKB_GSO_PARTIAL = 1 << 12,
	SKB_GSO_TUNNEL_REMCSUM = 1 << 13,
	SKB_GSO_SCTP = 1 << 14,
	SKB_GSO_ESP = 1 << 15,
	SKB_GSO_UDP = 1 << 16,
	SKB_GSO_UDP_L4 = 1 << 17,
	SKB_GSO_FRAGLIST = 1 << 18,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif
struct sk_buff {
	union {
		struct {
			struct sk_buff		*next;
			struct sk_buff		*prev;

			union {
				struct net_device	*dev;
				unsigned long		dev_scratch;
			};
		};
		struct rb_node		rbnode;
		struct list_head	list;
	};

	union {
		struct sock		*sk;
		int			ip_defrag_offset;
	};

	union {
		ktime_t		tstamp;
		u64		skb_mstamp_ns;
	};

	char			cb[48] __aligned(8);

	union {
		struct {
			unsigned long	_skb_refdst;
			void		(*destructor)(struct sk_buff *skb);
		};
		struct list_head	tcp_tsorted_anchor;
	};

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	unsigned long		_nfct;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;

	__u16			queue_mapping;

#ifdef __BIG_ENDIAN_BITFIELD
#define CLONED_MASK	(1 << 7)
#else
#define CLONED_MASK	1
#endif
#define CLONED_OFFSET()		offsetof(struct sk_buff, __cloned_offset)

	__u8			__cloned_offset[0];

	__u8			cloned:1,
				nohdr:1,
				fclone:2,
				peeked:1,
				head_frag:1,
				pfmemalloc:1;
#ifdef CONFIG_SKB_EXTENSIONS
	__u8			active_extensions;
#endif

	__u32			headers_start[0];

#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_TYPE_MAX	(7 << 5)
#else
#define PKT_TYPE_MAX	7
#endif
#define PKT_TYPE_OFFSET()	offsetof(struct sk_buff, __pkt_type_offset)

	__u8			__pkt_type_offset[0];

	__u8			pkt_type:3;
	__u8			ignore_df:1;
	__u8			nf_trace:1;
	__u8			ip_summed:2;
	__u8			ooo_okay:1;

	__u8			l4_hash:1;
	__u8			sw_hash:1;
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;
	__u8			no_fcs:1;

	__u8			encapsulation:1;
	__u8			encap_hdr_csum:1;
	__u8			csum_valid:1;

#ifdef __BIG_ENDIAN_BITFIELD
#define PKT_VLAN_PRESENT_BIT	7
#else
#define PKT_VLAN_PRESENT_BIT	0
#endif
#define PKT_VLAN_PRESENT_OFFSET()	offsetof(struct sk_buff, __pkt_vlan_present_offset)

	__u8			__pkt_vlan_present_offset[0];

	__u8			vlan_present:1;
	__u8			csum_complete_sw:1;
	__u8			csum_level:2;
	__u8			csum_not_inet:1;
	__u8			dst_pending_confirm:1;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif

	__u8			ipvs_property:1;
	__u8			inner_protocol_type:1;
	__u8			remcsum_offload:1;
#ifdef CONFIG_NET_SWITCHDEV
	__u8			offload_fwd_mark:1;
	__u8			offload_l3_fwd_mark:1;
#endif
#ifdef CONFIG_NET_CLS_ACT
	__u8			tc_skip_classify:1;
	__u8			tc_at_ingress:1;
#endif
#ifdef CONFIG_NET_REDIRECT
	__u8			redirected:1;
	__u8			from_ingress:1;
#endif
#ifdef CONFIG_TLS_DEVICE
	__u8			decrypted:1;
#endif

#ifdef CONFIG_NET_SCHED
	__u16			tc_index;
#endif

	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	int			skb_iif;
	__u32			hash;
	__be16			vlan_proto;
	__u16			vlan_tci;
#if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
	union {
		unsigned int	napi_id;
		unsigned int	sender_cpu;
	};
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif

	union {
		__u32		mark;
		__u32		reserved_tailroom;
	};

	union {
		__be16		inner_protocol;
		__u8		inner_ipproto;
	};

	__u16			inner_transport_header;
	__u16			inner_network_header;
	__u16			inner_mac_header;

	__be16			protocol;
	__u16			transport_header;
	__u16			network_header;
	__u16			mac_header;

#ifdef CONFIG_KCOV
	u64			kcov_handle;
#endif

	__u32			headers_end[0];

	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	refcount_t		users;

#ifdef CONFIG_SKB_EXTENSIONS
	struct skb_ext		*extensions;
#endif
};
#ifdef __KERNEL__

#define SKB_ALLOC_FCLONE	0x01
#define SKB_ALLOC_RX		0x02
#define SKB_ALLOC_NAPI		0x04

static inline bool skb_pfmemalloc(const struct sk_buff *skb)
{
	return unlikely(skb->pfmemalloc);
}

#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}

static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
	WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
	skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
}

static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

static inline bool skb_pkt_type_ok(u32 ptype)
{
	return ptype <= PACKET_OTHERHOST;
}

static inline unsigned int skb_napi_id(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	return skb->napi_id;
#else
	return 0;
#endif
}

static inline bool skb_unref(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return false;
	if (likely(refcount_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!refcount_dec_and_test(&skb->users)))
		return false;

	return true;
}

void skb_release_head_state(struct sk_buff *skb);
void kfree_skb(struct sk_buff *skb);
void kfree_skb_list(struct sk_buff *segs);
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
void skb_tx_error(struct sk_buff *skb);

#ifdef CONFIG_TRACEPOINTS
void consume_skb(struct sk_buff *skb);
#else
static inline void consume_skb(struct sk_buff *skb)
{
	return kfree_skb(skb);
}
#endif

void __consume_stateless_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;

void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
		      bool *fragstolen, int *delta_truesize);

struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
			    int node);
struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size);

static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}
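/*
 * Example (illustrative): a typical transmit-path allocation.  The caller
 * reserves headroom for lower-layer headers before copying in payload;
 * "payload" and "payload_len" are assumed to exist in the caller.
 *
 *	struct sk_buff *skb;
 *
 *	skb = alloc_skb(payload_len + NET_SKB_PAD, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, NET_SKB_PAD);			// leave room for headers
 *	skb_put_data(skb, payload, payload_len);	// append the payload
 *	...
 *	kfree_skb(skb);					// drop it on error paths
 */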
1101
1102struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
1103 unsigned long data_len,
1104 int max_page_order,
1105 int *errcode,
1106 gfp_t gfp_mask);
1107struct sk_buff *alloc_skb_for_msg(struct sk_buff *first);
1108
1109
1110struct sk_buff_fclones {
1111 struct sk_buff skb1;
1112
1113 struct sk_buff skb2;
1114
1115 refcount_t fclone_ref;
1116};
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127static inline bool skb_fclone_busy(const struct sock *sk,
1128 const struct sk_buff *skb)
1129{
1130 const struct sk_buff_fclones *fclones;
1131
1132 fclones = container_of(skb, struct sk_buff_fclones, skb1);
1133
1134 return skb->fclone == SKB_FCLONE_ORIG &&
1135 refcount_read(&fclones->fclone_ref) > 1 &&
1136 fclones->skb2.sk == sk;
1137}
1138
1139
1140
1141
1142
1143
1144
1145
1146static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
1147 gfp_t priority)
1148{
1149 return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
1150}
1151
1152struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
1153void skb_headers_offset_update(struct sk_buff *skb, int off);
1154int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
1155struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
1156void skb_copy_header(struct sk_buff *new, const struct sk_buff *old);
1157struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
1158struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
1159 gfp_t gfp_mask, bool fclone);
1160static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom,
1161 gfp_t gfp_mask)
1162{
1163 return __pskb_copy_fclone(skb, headroom, gfp_mask, false);
1164}
1165
1166int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
1167struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
1168 unsigned int headroom);
1169struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
1170 int newtailroom, gfp_t priority);
1171int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
1172 int offset, int len);
1173int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
1174 int offset, int len);
1175int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
1176int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189static inline int skb_pad(struct sk_buff *skb, int pad)
1190{
1191 return __skb_pad(skb, pad, true);
1192}
1193#define dev_kfree_skb(a) consume_skb(a)
1194
1195int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
1196 int offset, size_t size);
1197
1198struct skb_seq_state {
1199 __u32 lower_offset;
1200 __u32 upper_offset;
1201 __u32 frag_idx;
1202 __u32 stepped_offset;
1203 struct sk_buff *root_skb;
1204 struct sk_buff *cur_skb;
1205 __u8 *frag_data;
1206 __u32 frag_off;
1207};
1208
1209void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
1210 unsigned int to, struct skb_seq_state *st);
1211unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
1212 struct skb_seq_state *st);
1213void skb_abort_seq_read(struct skb_seq_state *st);
1214
1215unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
1216 unsigned int to, struct ts_config *config);
1217
1244enum pkt_hash_types {
1245 PKT_HASH_TYPE_NONE,
1246 PKT_HASH_TYPE_L2,
1247 PKT_HASH_TYPE_L3,
1248 PKT_HASH_TYPE_L4,
1249};
1250
1251static inline void skb_clear_hash(struct sk_buff *skb)
1252{
1253 skb->hash = 0;
1254 skb->sw_hash = 0;
1255 skb->l4_hash = 0;
1256}
1257
1258static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
1259{
1260 if (!skb->l4_hash)
1261 skb_clear_hash(skb);
1262}
1263
1264static inline void
1265__skb_set_hash(struct sk_buff *skb, __u32 hash, bool is_sw, bool is_l4)
1266{
1267 skb->l4_hash = is_l4;
1268 skb->sw_hash = is_sw;
1269 skb->hash = hash;
1270}
1271
1272static inline void
1273skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
1274{
1275
1276 __skb_set_hash(skb, hash, false, type == PKT_HASH_TYPE_L4);
1277}
1278
1279static inline void
1280__skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
1281{
1282 __skb_set_hash(skb, hash, true, is_l4);
1283}
1284
1285void __skb_get_hash(struct sk_buff *skb);
1286u32 __skb_get_hash_symmetric(const struct sk_buff *skb);
1287u32 skb_get_poff(const struct sk_buff *skb);
1288u32 __skb_get_poff(const struct sk_buff *skb, void *data,
1289 const struct flow_keys_basic *keys, int hlen);
1290__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
1291 void *data, int hlen_proto);
1292
1293static inline __be32 skb_flow_get_ports(const struct sk_buff *skb,
1294 int thoff, u8 ip_proto)
1295{
1296 return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0);
1297}
1298
1299void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
1300 const struct flow_dissector_key *key,
1301 unsigned int key_count);
1302
1303struct bpf_flow_dissector;
1304bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
1305 __be16 proto, int nhoff, int hlen, unsigned int flags);
1306
1307bool __skb_flow_dissect(const struct net *net,
1308 const struct sk_buff *skb,
1309 struct flow_dissector *flow_dissector,
1310 void *target_container,
1311 void *data, __be16 proto, int nhoff, int hlen,
1312 unsigned int flags);
1313
1314static inline bool skb_flow_dissect(const struct sk_buff *skb,
1315 struct flow_dissector *flow_dissector,
1316 void *target_container, unsigned int flags)
1317{
1318 return __skb_flow_dissect(NULL, skb, flow_dissector,
1319 target_container, NULL, 0, 0, 0, flags);
1320}
1321
1322static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
1323 struct flow_keys *flow,
1324 unsigned int flags)
1325{
1326 memset(flow, 0, sizeof(*flow));
1327 return __skb_flow_dissect(NULL, skb, &flow_keys_dissector,
1328 flow, NULL, 0, 0, 0, flags);
1329}
1330
1331static inline bool
1332skb_flow_dissect_flow_keys_basic(const struct net *net,
1333 const struct sk_buff *skb,
1334 struct flow_keys_basic *flow, void *data,
1335 __be16 proto, int nhoff, int hlen,
1336 unsigned int flags)
1337{
1338 memset(flow, 0, sizeof(*flow));
1339 return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow,
1340 data, proto, nhoff, hlen, flags);
1341}
1342
1343void skb_flow_dissect_meta(const struct sk_buff *skb,
1344 struct flow_dissector *flow_dissector,
1345 void *target_container);
1346
1347
1348
1349
1350
1351void
1352skb_flow_dissect_ct(const struct sk_buff *skb,
1353 struct flow_dissector *flow_dissector,
1354 void *target_container,
1355 u16 *ctinfo_map,
1356 size_t mapsize);
1357void
1358skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
1359 struct flow_dissector *flow_dissector,
1360 void *target_container);
1361
1362void skb_flow_dissect_hash(const struct sk_buff *skb,
1363 struct flow_dissector *flow_dissector,
1364 void *target_container);
1365
1366static inline __u32 skb_get_hash(struct sk_buff *skb)
1367{
1368 if (!skb->l4_hash && !skb->sw_hash)
1369 __skb_get_hash(skb);
1370
1371 return skb->hash;
1372}
1373
1374static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6)
1375{
1376 if (!skb->l4_hash && !skb->sw_hash) {
1377 struct flow_keys keys;
1378 __u32 hash = __get_hash_from_flowi6(fl6, &keys);
1379
1380 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
1381 }
1382
1383 return skb->hash;
1384}
1385
1386__u32 skb_get_hash_perturb(const struct sk_buff *skb,
1387 const siphash_key_t *perturb);
1388
1389static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
1390{
1391 return skb->hash;
1392}
1393
1394static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
1395{
1396 to->hash = from->hash;
1397 to->sw_hash = from->sw_hash;
1398 to->l4_hash = from->l4_hash;
1399};
1400
1401static inline void skb_copy_decrypted(struct sk_buff *to,
1402 const struct sk_buff *from)
1403{
1404#ifdef CONFIG_TLS_DEVICE
1405 to->decrypted = from->decrypted;
1406#endif
1407}
1408
1409#ifdef NET_SKBUFF_DATA_USES_OFFSET
1410static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
1411{
1412 return skb->head + skb->end;
1413}
1414
1415static inline unsigned int skb_end_offset(const struct sk_buff *skb)
1416{
1417 return skb->end;
1418}
1419#else
1420static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
1421{
1422 return skb->end;
1423}
1424
1425static inline unsigned int skb_end_offset(const struct sk_buff *skb)
1426{
1427 return skb->end - skb->head;
1428}
1429#endif
1430
1431
1432#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
1433
1434static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
1435{
1436 return &skb_shinfo(skb)->hwtstamps;
1437}
1438
1439static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
1440{
1441 bool is_zcopy = skb && skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY;
1442
1443 return is_zcopy ? skb_uarg(skb) : NULL;
1444}
1445
1446static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
1447 bool *have_ref)
1448{
1449 if (skb && uarg && !skb_zcopy(skb)) {
1450 if (unlikely(have_ref && *have_ref))
1451 *have_ref = false;
1452 else
1453 sock_zerocopy_get(uarg);
1454 skb_shinfo(skb)->destructor_arg = uarg;
1455 skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
1456 }
1457}
1458
1459static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
1460{
1461 skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
1462 skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
1463}
1464
1465static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
1466{
1467 return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL;
1468}
1469
1470static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
1471{
1472 return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
1473}
1474
1475
1476static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
1477{
1478 struct ubuf_info *uarg = skb_zcopy(skb);
1479
1480 if (uarg) {
1481 if (skb_zcopy_is_nouarg(skb)) {
1482
1483 } else if (uarg->callback == sock_zerocopy_callback) {
1484 uarg->zerocopy = uarg->zerocopy && zerocopy;
1485 sock_zerocopy_put(uarg);
1486 } else {
1487 uarg->callback(uarg, zerocopy);
1488 }
1489
1490 skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
1491 }
1492}
1493
1494
1495static inline void skb_zcopy_abort(struct sk_buff *skb)
1496{
1497 struct ubuf_info *uarg = skb_zcopy(skb);
1498
1499 if (uarg) {
1500 sock_zerocopy_put_abort(uarg, false);
1501 skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
1502 }
1503}
1504
1505static inline void skb_mark_not_on_list(struct sk_buff *skb)
1506{
1507 skb->next = NULL;
1508}
1509
1510
1511#define skb_list_walk_safe(first, skb, next_skb) \
1512 for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb); \
1513 (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL)
1514
1515static inline void skb_list_del_init(struct sk_buff *skb)
1516{
1517 __list_del_entry(&skb->list);
1518 skb_mark_not_on_list(skb);
1519}
1520
1521
1522
1523
1524
1525
1526
1527static inline int skb_queue_empty(const struct sk_buff_head *list)
1528{
1529 return list->next == (const struct sk_buff *) list;
1530}
1531
1532
1533
1534
1535
1536
1537
1538
1539static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
1540{
1541 return READ_ONCE(list->next) == (const struct sk_buff *) list;
1542}
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552static inline bool skb_queue_is_last(const struct sk_buff_head *list,
1553 const struct sk_buff *skb)
1554{
1555 return skb->next == (const struct sk_buff *) list;
1556}
1557
1558
1559
1560
1561
1562
1563
1564
1565static inline bool skb_queue_is_first(const struct sk_buff_head *list,
1566 const struct sk_buff *skb)
1567{
1568 return skb->prev == (const struct sk_buff *) list;
1569}
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
1580 const struct sk_buff *skb)
1581{
1582
1583
1584
1585 BUG_ON(skb_queue_is_last(list, skb));
1586 return skb->next;
1587}
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
1598 const struct sk_buff *skb)
1599{
1600
1601
1602
1603 BUG_ON(skb_queue_is_first(list, skb));
1604 return skb->prev;
1605}
1606
1607
1608
1609
1610
1611
1612
1613
1614static inline struct sk_buff *skb_get(struct sk_buff *skb)
1615{
1616 refcount_inc(&skb->users);
1617 return skb;
1618}
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));

	if (skb_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}
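/*
 * Example (illustrative): data about to be rewritten (e.g. headers) must not
 * be shared with a clone, so a caller that intends to modify the packet
 * typically does:
 *
 *	if (skb_unclone(skb, GFP_ATOMIC))
 *		goto drop;		// could not get a private copy
 *	// safe to modify skb->data contents from here on
 */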
1647
1648
1649
1650
1651
1652
1653
1654
1655static inline int skb_header_cloned(const struct sk_buff *skb)
1656{
1657 int dataref;
1658
1659 if (!skb->cloned)
1660 return 0;
1661
1662 dataref = atomic_read(&skb_shinfo(skb)->dataref);
1663 dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
1664 return dataref != 1;
1665}
1666
1667static inline int skb_header_unclone(struct sk_buff *skb, gfp_t pri)
1668{
1669 might_sleep_if(gfpflags_allow_blocking(pri));
1670
1671 if (skb_header_cloned(skb))
1672 return pskb_expand_head(skb, 0, 0, pri);
1673
1674 return 0;
1675}
1676
1677
1678
1679
1680
1681static inline void __skb_header_release(struct sk_buff *skb)
1682{
1683 skb->nohdr = 1;
1684 atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
1685}
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695static inline int skb_shared(const struct sk_buff *skb)
1696{
1697 return refcount_read(&skb->users) != 1;
1698}
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(gfpflags_allow_blocking(pri));
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);

		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
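/*
 * Example (illustrative): protocol handlers that may queue or modify the
 * buffer first make sure they own it.  On failure the original skb has
 * already been freed, so the caller just bails out:
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return NET_RX_DROP;
 *	// skb is now unshared (users == 1) and safe to keep
 */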
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
1749 gfp_t pri)
1750{
1751 might_sleep_if(gfpflags_allow_blocking(pri));
1752 if (skb_cloned(skb)) {
1753 struct sk_buff *nskb = skb_copy(skb, pri);
1754
1755
1756 if (likely(nskb))
1757 consume_skb(skb);
1758 else
1759 kfree_skb(skb);
1760 skb = nskb;
1761 }
1762 return skb;
1763}
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
1779{
1780 struct sk_buff *skb = list_->next;
1781
1782 if (skb == (struct sk_buff *)list_)
1783 skb = NULL;
1784 return skb;
1785}
1786
1787
1788
1789
1790
1791
1792
1793static inline struct sk_buff *__skb_peek(const struct sk_buff_head *list_)
1794{
1795 return list_->next;
1796}
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
1808 const struct sk_buff_head *list_)
1809{
1810 struct sk_buff *next = skb->next;
1811
1812 if (next == (struct sk_buff *)list_)
1813 next = NULL;
1814 return next;
1815}
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
1831{
1832 struct sk_buff *skb = READ_ONCE(list_->prev);
1833
1834 if (skb == (struct sk_buff *)list_)
1835 skb = NULL;
1836 return skb;
1837
1838}
1839
1840
1841
1842
1843
1844
1845
1846static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
1847{
1848 return list_->qlen;
1849}
1850
1851
1852
1853
1854
1855
1856
1857
1858static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_)
1859{
1860 return READ_ONCE(list_->qlen);
1861}
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873static inline void __skb_queue_head_init(struct sk_buff_head *list)
1874{
1875 list->prev = list->next = (struct sk_buff *)list;
1876 list->qlen = 0;
1877}
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887static inline void skb_queue_head_init(struct sk_buff_head *list)
1888{
1889 spin_lock_init(&list->lock);
1890 __skb_queue_head_init(list);
1891}
1892
1893static inline void skb_queue_head_init_class(struct sk_buff_head *list,
1894 struct lock_class_key *class)
1895{
1896 skb_queue_head_init(list);
1897 lockdep_set_class(&list->lock, class);
1898}
1899
1900
1901
1902
1903
1904
1905
1906static inline void __skb_insert(struct sk_buff *newsk,
1907 struct sk_buff *prev, struct sk_buff *next,
1908 struct sk_buff_head *list)
1909{
1910
1911
1912
1913 WRITE_ONCE(newsk->next, next);
1914 WRITE_ONCE(newsk->prev, prev);
1915 WRITE_ONCE(next->prev, newsk);
1916 WRITE_ONCE(prev->next, newsk);
1917 list->qlen++;
1918}
1919
1920static inline void __skb_queue_splice(const struct sk_buff_head *list,
1921 struct sk_buff *prev,
1922 struct sk_buff *next)
1923{
1924 struct sk_buff *first = list->next;
1925 struct sk_buff *last = list->prev;
1926
1927 WRITE_ONCE(first->prev, prev);
1928 WRITE_ONCE(prev->next, first);
1929
1930 WRITE_ONCE(last->next, next);
1931 WRITE_ONCE(next->prev, last);
1932}
1933
1934
1935
1936
1937
1938
1939static inline void skb_queue_splice(const struct sk_buff_head *list,
1940 struct sk_buff_head *head)
1941{
1942 if (!skb_queue_empty(list)) {
1943 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
1944 head->qlen += list->qlen;
1945 }
1946}
1947
1948
1949
1950
1951
1952
1953
1954
1955static inline void skb_queue_splice_init(struct sk_buff_head *list,
1956 struct sk_buff_head *head)
1957{
1958 if (!skb_queue_empty(list)) {
1959 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
1960 head->qlen += list->qlen;
1961 __skb_queue_head_init(list);
1962 }
1963}
1964
1965
1966
1967
1968
1969
1970static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
1971 struct sk_buff_head *head)
1972{
1973 if (!skb_queue_empty(list)) {
1974 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
1975 head->qlen += list->qlen;
1976 }
1977}
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
1988 struct sk_buff_head *head)
1989{
1990 if (!skb_queue_empty(list)) {
1991 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
1992 head->qlen += list->qlen;
1993 __skb_queue_head_init(list);
1994 }
1995}
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008static inline void __skb_queue_after(struct sk_buff_head *list,
2009 struct sk_buff *prev,
2010 struct sk_buff *newsk)
2011{
2012 __skb_insert(newsk, prev, prev->next, list);
2013}
2014
2015void skb_append(struct sk_buff *old, struct sk_buff *newsk,
2016 struct sk_buff_head *list);
2017
2018static inline void __skb_queue_before(struct sk_buff_head *list,
2019 struct sk_buff *next,
2020 struct sk_buff *newsk)
2021{
2022 __skb_insert(newsk, next->prev, next, list);
2023}
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035static inline void __skb_queue_head(struct sk_buff_head *list,
2036 struct sk_buff *newsk)
2037{
2038 __skb_queue_after(list, (struct sk_buff *)list, newsk);
2039}
2040void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052static inline void __skb_queue_tail(struct sk_buff_head *list,
2053 struct sk_buff *newsk)
2054{
2055 __skb_queue_before(list, (struct sk_buff *)list, newsk);
2056}
2057void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
2058
2059
2060
2061
2062
2063void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
2064static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
2065{
2066 struct sk_buff *next, *prev;
2067
2068 WRITE_ONCE(list->qlen, list->qlen - 1);
2069 next = skb->next;
2070 prev = skb->prev;
2071 skb->next = skb->prev = NULL;
2072 WRITE_ONCE(next->prev, prev);
2073 WRITE_ONCE(prev->next, next);
2074}
2075
2076
2077
2078
2079
2080
2081
2082
2083
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
struct sk_buff *skb_dequeue(struct sk_buff_head *list);

static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
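/*
 * Example (illustrative): a driver-private FIFO of buffers built on
 * sk_buff_head.  skb_queue_tail()/skb_dequeue() take the queue lock
 * internally; the __-prefixed variants above leave locking to the caller.
 * "rxq" is an assumed driver-private queue.
 *
 *	static struct sk_buff_head rxq;
 *
 *	skb_queue_head_init(&rxq);			// once, at setup time
 *	skb_queue_tail(&rxq, skb);			// producer
 *
 *	while ((skb = skb_dequeue(&rxq)) != NULL)	// consumer
 *		netif_rx(skb);
 *
 *	skb_queue_purge(&rxq);				// teardown: free leftovers
 */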
2109
2110
2111static inline bool skb_is_nonlinear(const struct sk_buff *skb)
2112{
2113 return skb->data_len;
2114}
2115
2116static inline unsigned int skb_headlen(const struct sk_buff *skb)
2117{
2118 return skb->len - skb->data_len;
2119}
2120
2121static inline unsigned int __skb_pagelen(const struct sk_buff *skb)
2122{
2123 unsigned int i, len = 0;
2124
2125 for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--)
2126 len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
2127 return len;
2128}
2129
2130static inline unsigned int skb_pagelen(const struct sk_buff *skb)
2131{
2132 return skb_headlen(skb) + __skb_pagelen(skb);
2133}
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->bv_page	= page;
	frag->bv_offset	= off;
	skb_frag_size_set(frag, size);

	page = compound_head(page);
	if (page_is_pfmemalloc(page))
		skb->pfmemalloc	= true;
}

static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	__skb_fill_page_desc(skb, i, page, off, size);
	skb_shinfo(skb)->nr_frags = i + 1;
}

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize);
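/*
 * Example (illustrative): attaching a freshly received page to an skb as
 * fragment data instead of copying it.  skb_add_rx_frag() fills the next
 * frag slot and also updates skb->len, skb->data_len and skb->truesize;
 * "page", "offset" and "len" are assumed to come from the receive ring.
 *
 *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
 *			len, PAGE_SIZE);
 */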
2193
2194#define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
2195
2196#ifdef NET_SKBUFF_DATA_USES_OFFSET
2197static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
2198{
2199 return skb->head + skb->tail;
2200}
2201
2202static inline void skb_reset_tail_pointer(struct sk_buff *skb)
2203{
2204 skb->tail = skb->data - skb->head;
2205}
2206
2207static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
2208{
2209 skb_reset_tail_pointer(skb);
2210 skb->tail += offset;
2211}
2212
2213#else
2214static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
2215{
2216 return skb->tail;
2217}
2218
2219static inline void skb_reset_tail_pointer(struct sk_buff *skb)
2220{
2221 skb->tail = skb->data;
2222}
2223
2224static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
2225{
2226 skb->tail = skb->data + offset;
2227}
2228
2229#endif
2230
2231
2232
2233
2234void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
2235void *skb_put(struct sk_buff *skb, unsigned int len);
2236static inline void *__skb_put(struct sk_buff *skb, unsigned int len)
2237{
2238 void *tmp = skb_tail_pointer(skb);
2239 SKB_LINEAR_ASSERT(skb);
2240 skb->tail += len;
2241 skb->len += len;
2242 return tmp;
2243}
2244
2245static inline void *__skb_put_zero(struct sk_buff *skb, unsigned int len)
2246{
2247 void *tmp = __skb_put(skb, len);
2248
2249 memset(tmp, 0, len);
2250 return tmp;
2251}
2252
2253static inline void *__skb_put_data(struct sk_buff *skb, const void *data,
2254 unsigned int len)
2255{
2256 void *tmp = __skb_put(skb, len);
2257
2258 memcpy(tmp, data, len);
2259 return tmp;
2260}
2261
2262static inline void __skb_put_u8(struct sk_buff *skb, u8 val)
2263{
2264 *(u8 *)__skb_put(skb, 1) = val;
2265}
2266
2267static inline void *skb_put_zero(struct sk_buff *skb, unsigned int len)
2268{
2269 void *tmp = skb_put(skb, len);
2270
2271 memset(tmp, 0, len);
2272
2273 return tmp;
2274}
2275
2276static inline void *skb_put_data(struct sk_buff *skb, const void *data,
2277 unsigned int len)
2278{
2279 void *tmp = skb_put(skb, len);
2280
2281 memcpy(tmp, data, len);
2282
2283 return tmp;
2284}
2285
2286static inline void skb_put_u8(struct sk_buff *skb, u8 val)
2287{
2288 *(u8 *)skb_put(skb, 1) = val;
2289}
2290
void *skb_push(struct sk_buff *skb, unsigned int len);
static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

void *skb_pull(struct sk_buff *skb, unsigned int len);
static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

static inline void *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

void *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline void *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return true;
	if (unlikely(len > skb->len))
		return false;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
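/*
 * Example (illustrative): header manipulation on the linear area.  A
 * tunnel-style transmit path grows the packet at the front, a receive path
 * strips headers it has finished parsing; "struct my_hdr" is a stand-in for
 * the real header type.
 *
 *	// TX: prepend a header (headroom must exist, see skb_cow_head below)
 *	struct my_hdr *h = skb_push(skb, sizeof(*h));
 *
 *	// RX: make sure the header is in the linear area, then consume it
 *	if (!pskb_may_pull(skb, sizeof(struct my_hdr)))
 *		goto drop;
 *	skb_pull(skb, sizeof(struct my_hdr));
 */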
2336
2337void skb_condense(struct sk_buff *skb);
2338
2339
2340
2341
2342
2343
2344
2345static inline unsigned int skb_headroom(const struct sk_buff *skb)
2346{
2347 return skb->data - skb->head;
2348}
2349
2350
2351
2352
2353
2354
2355
2356static inline int skb_tailroom(const struct sk_buff *skb)
2357{
2358 return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
2359}
2360
2361
2362
2363
2364
2365
2366
2367
2368static inline int skb_availroom(const struct sk_buff *skb)
2369{
2370 if (skb_is_nonlinear(skb))
2371 return 0;
2372
2373 return skb->end - skb->tail - skb->reserved_tailroom;
2374}
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384static inline void skb_reserve(struct sk_buff *skb, int len)
2385{
2386 skb->data += len;
2387 skb->tail += len;
2388}
2389
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399
2400
2401
2402static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
2403 unsigned int needed_tailroom)
2404{
2405 SKB_LINEAR_ASSERT(skb);
2406 if (mtu < skb_tailroom(skb) - needed_tailroom)
2407
2408 skb->reserved_tailroom = skb_tailroom(skb) - mtu;
2409 else
2410
2411 skb->reserved_tailroom = needed_tailroom;
2412}
2413
2414#define ENCAP_TYPE_ETHER 0
2415#define ENCAP_TYPE_IPPROTO 1
2416
2417static inline void skb_set_inner_protocol(struct sk_buff *skb,
2418 __be16 protocol)
2419{
2420 skb->inner_protocol = protocol;
2421 skb->inner_protocol_type = ENCAP_TYPE_ETHER;
2422}
2423
2424static inline void skb_set_inner_ipproto(struct sk_buff *skb,
2425 __u8 ipproto)
2426{
2427 skb->inner_ipproto = ipproto;
2428 skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
2429}
2430
2431static inline void skb_reset_inner_headers(struct sk_buff *skb)
2432{
2433 skb->inner_mac_header = skb->mac_header;
2434 skb->inner_network_header = skb->network_header;
2435 skb->inner_transport_header = skb->transport_header;
2436}
2437
2438static inline void skb_reset_mac_len(struct sk_buff *skb)
2439{
2440 skb->mac_len = skb->network_header - skb->mac_header;
2441}
2442
2443static inline unsigned char *skb_inner_transport_header(const struct sk_buff
2444 *skb)
2445{
2446 return skb->head + skb->inner_transport_header;
2447}
2448
2449static inline int skb_inner_transport_offset(const struct sk_buff *skb)
2450{
2451 return skb_inner_transport_header(skb) - skb->data;
2452}
2453
2454static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
2455{
2456 skb->inner_transport_header = skb->data - skb->head;
2457}
2458
2459static inline void skb_set_inner_transport_header(struct sk_buff *skb,
2460 const int offset)
2461{
2462 skb_reset_inner_transport_header(skb);
2463 skb->inner_transport_header += offset;
2464}
2465
2466static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
2467{
2468 return skb->head + skb->inner_network_header;
2469}
2470
2471static inline void skb_reset_inner_network_header(struct sk_buff *skb)
2472{
2473 skb->inner_network_header = skb->data - skb->head;
2474}
2475
2476static inline void skb_set_inner_network_header(struct sk_buff *skb,
2477 const int offset)
2478{
2479 skb_reset_inner_network_header(skb);
2480 skb->inner_network_header += offset;
2481}
2482
2483static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
2484{
2485 return skb->head + skb->inner_mac_header;
2486}
2487
2488static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
2489{
2490 skb->inner_mac_header = skb->data - skb->head;
2491}
2492
2493static inline void skb_set_inner_mac_header(struct sk_buff *skb,
2494 const int offset)
2495{
2496 skb_reset_inner_mac_header(skb);
2497 skb->inner_mac_header += offset;
2498}
2499static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
2500{
2501 return skb->transport_header != (typeof(skb->transport_header))~0U;
2502}
2503
2504static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
2505{
2506 return skb->head + skb->transport_header;
2507}
2508
2509static inline void skb_reset_transport_header(struct sk_buff *skb)
2510{
2511 skb->transport_header = skb->data - skb->head;
2512}
2513
2514static inline void skb_set_transport_header(struct sk_buff *skb,
2515 const int offset)
2516{
2517 skb_reset_transport_header(skb);
2518 skb->transport_header += offset;
2519}
2520
2521static inline unsigned char *skb_network_header(const struct sk_buff *skb)
2522{
2523 return skb->head + skb->network_header;
2524}
2525
2526static inline void skb_reset_network_header(struct sk_buff *skb)
2527{
2528 skb->network_header = skb->data - skb->head;
2529}
2530
2531static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
2532{
2533 skb_reset_network_header(skb);
2534 skb->network_header += offset;
2535}
2536
2537static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
2538{
2539 return skb->head + skb->mac_header;
2540}
2541
2542static inline int skb_mac_offset(const struct sk_buff *skb)
2543{
2544 return skb_mac_header(skb) - skb->data;
2545}
2546
2547static inline u32 skb_mac_header_len(const struct sk_buff *skb)
2548{
2549 return skb->network_header - skb->mac_header;
2550}
2551
2552static inline int skb_mac_header_was_set(const struct sk_buff *skb)
2553{
2554 return skb->mac_header != (typeof(skb->mac_header))~0U;
2555}
2556
2557static inline void skb_unset_mac_header(struct sk_buff *skb)
2558{
2559 skb->mac_header = (typeof(skb->mac_header))~0U;
2560}
2561
2562static inline void skb_reset_mac_header(struct sk_buff *skb)
2563{
2564 skb->mac_header = skb->data - skb->head;
2565}
2566
2567static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
2568{
2569 skb_reset_mac_header(skb);
2570 skb->mac_header += offset;
2571}
2572
2573static inline void skb_pop_mac_header(struct sk_buff *skb)
2574{
2575 skb->mac_header = skb->network_header;
2576}
2577
2578static inline void skb_probe_transport_header(struct sk_buff *skb)
2579{
2580 struct flow_keys_basic keys;
2581
2582 if (skb_transport_header_was_set(skb))
2583 return;
2584
2585 if (skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
2586 NULL, 0, 0, 0, 0))
2587 skb_set_transport_header(skb, keys.control.thoff);
2588}
2589
2590static inline void skb_mac_header_rebuild(struct sk_buff *skb)
2591{
2592 if (skb_mac_header_was_set(skb)) {
2593 const unsigned char *old_mac = skb_mac_header(skb);
2594
2595 skb_set_mac_header(skb, -skb->mac_len);
2596 memmove(skb_mac_header(skb), old_mac, skb->mac_len);
2597 }
2598}
2599
static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}

static inline unsigned char *skb_checksum_start(const struct sk_buff *skb)
{
	return skb->head + skb->csum_start;
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
{
	return skb->inner_transport_header - skb->inner_network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline int skb_inner_network_offset(const struct sk_buff *skb)
{
	return skb_inner_network_header(skb) - skb->data;
}

static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
	return pskb_may_pull(skb, skb_network_offset(skb) + len);
}
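/*
 * Example (illustrative): using the offset helpers when parsing an IPv4/UDP
 * packet whose network header has already been set by the core receive
 * path.  ip_hdr() comes from <linux/ip.h>, not this header.
 *
 *	const struct iphdr *iph = ip_hdr(skb);	// == skb_network_header(skb)
 *	unsigned int ihl = iph->ihl * 4;
 *
 *	if (!pskb_may_pull(skb, skb_network_offset(skb) + ihl +
 *			   sizeof(struct udphdr)))
 *		goto drop;
 *	skb_set_transport_header(skb, skb_network_offset(skb) + ihl);
 */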
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif
2687
2688int ___pskb_trim(struct sk_buff *skb, unsigned int len);
2689
2690static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
2691{
2692 if (WARN_ON(skb_is_nonlinear(skb)))
2693 return;
2694 skb->len = len;
2695 skb_set_tail_pointer(skb, len);
2696}
2697
2698static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
2699{
2700 __skb_set_length(skb, len);
2701}
2702
2703void skb_trim(struct sk_buff *skb, unsigned int len);
2704
2705static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
2706{
2707 if (skb->data_len)
2708 return ___pskb_trim(skb, len);
2709 __skb_trim(skb, len);
2710 return 0;
2711}
2712
2713static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
2714{
2715 return (len < skb->len) ? __pskb_trim(skb, len) : 0;
2716}
2717
2718
2719
2720
2721
2722
2723
2724
2725
2726
2727static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
2728{
2729 int err = pskb_trim(skb, len);
2730 BUG_ON(err);
2731}
2732
2733static inline int __skb_grow(struct sk_buff *skb, unsigned int len)
2734{
2735 unsigned int diff = len - skb->len;
2736
2737 if (skb_tailroom(skb) < diff) {
2738 int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb),
2739 GFP_ATOMIC);
2740 if (ret)
2741 return ret;
2742 }
2743 __skb_set_length(skb, len);
2744 return 0;
2745}
2746
2747
2748
2749
2750
2751
2752
2753
2754
2755static inline void skb_orphan(struct sk_buff *skb)
2756{
2757 if (skb->destructor) {
2758 skb->destructor(skb);
2759 skb->destructor = NULL;
2760 skb->sk = NULL;
2761 } else {
2762 BUG_ON(skb->sk);
2763 }
2764}
2765
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
2776{
2777 if (likely(!skb_zcopy(skb)))
2778 return 0;
2779 if (!skb_zcopy_is_nouarg(skb) &&
2780 skb_uarg(skb)->callback == sock_zerocopy_callback)
2781 return 0;
2782 return skb_copy_ubufs(skb, gfp_mask);
2783}
2784
2785
2786static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask)
2787{
2788 if (likely(!skb_zcopy(skb)))
2789 return 0;
2790 return skb_copy_ubufs(skb, gfp_mask);
2791}
2792
2793
2794
2795
2796
2797
2798
2799
2800
2801static inline void __skb_queue_purge(struct sk_buff_head *list)
2802{
2803 struct sk_buff *skb;
2804 while ((skb = __skb_dequeue(list)) != NULL)
2805 kfree_skb(skb);
2806}
2807void skb_queue_purge(struct sk_buff_head *list);
2808
2809unsigned int skb_rbtree_purge(struct rb_root *root);
2810
2811void *netdev_alloc_frag(unsigned int fragsz);
2812
2813struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
2814 gfp_t gfp_mask);
2815
2816
2817
2818
2819
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
2830 unsigned int length)
2831{
2832 return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
2833}
2834
2835
2836static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
2837 gfp_t gfp_mask)
2838{
2839 return __netdev_alloc_skb(NULL, length, gfp_mask);
2840}
2841
2842
2843static inline struct sk_buff *dev_alloc_skb(unsigned int length)
2844{
2845 return netdev_alloc_skb(NULL, length);
2846}
2847
2848
static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
							   unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
							 unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}
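/*
 * Example (illustrative): RX buffer allocation in a driver that copies
 * small packets.  NET_IP_ALIGN headroom keeps the IP header naturally
 * aligned after the 14-byte Ethernet header; "netdev", "rx_buf" and
 * "pkt_len" are assumed to come from the driver.
 *
 *	struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, pkt_len);
 *
 *	if (unlikely(!skb)) {
 *		netdev->stats.rx_dropped++;
 *		return;
 *	}
 *	skb_put_data(skb, rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, netdev);
 *	netif_rx(skb);
 */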
2864
2865static inline void skb_free_frag(void *addr)
2866{
2867 page_frag_free(addr);
2868}
2869
2870void *napi_alloc_frag(unsigned int fragsz);
2871struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
2872 unsigned int length, gfp_t gfp_mask);
2873static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
2874 unsigned int length)
2875{
2876 return __napi_alloc_skb(napi, length, GFP_ATOMIC);
2877}
2878void napi_consume_skb(struct sk_buff *skb, int budget);
2879
2880void __kfree_skb_flush(void);
2881void __kfree_skb_defer(struct sk_buff *skb);
2882
2883
2884
2885
2886
2887
2888
2889
2890
2891
2892static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
2893 unsigned int order)
2894{
2895
2896
2897
2898
2899
2900
2901
2902
2903 gfp_mask |= __GFP_COMP | __GFP_MEMALLOC;
2904
2905 return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
2906}
2907
2908static inline struct page *dev_alloc_pages(unsigned int order)
2909{
2910 return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order);
2911}
2912
2913
2914
2915
2916
2917
2918
2919
2920
2921static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
2922{
2923 return __dev_alloc_pages(gfp_mask, 0);
2924}
2925
2926static inline struct page *dev_alloc_page(void)
2927{
2928 return dev_alloc_pages(0);
2929}
2930
2931
2932
2933
2934
2935
2936static inline void skb_propagate_pfmemalloc(struct page *page,
2937 struct sk_buff *skb)
2938{
2939 if (page_is_pfmemalloc(page))
2940 skb->pfmemalloc = true;
2941}
2942
2943
2944
2945
2946
2947static inline unsigned int skb_frag_off(const skb_frag_t *frag)
2948{
2949 return frag->bv_offset;
2950}
2951
2952
2953
2954
2955
2956
2957static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
2958{
2959 frag->bv_offset += delta;
2960}
2961
2962
2963
2964
2965
2966
2967static inline void skb_frag_off_set(skb_frag_t *frag, unsigned int offset)
2968{
2969 frag->bv_offset = offset;
2970}
2971
2972
2973
2974
2975
2976
2977static inline void skb_frag_off_copy(skb_frag_t *fragto,
2978 const skb_frag_t *fragfrom)
2979{
2980 fragto->bv_offset = fragfrom->bv_offset;
2981}
2982
2983
2984
2985
2986
2987
2988
2989static inline struct page *skb_frag_page(const skb_frag_t *frag)
2990{
2991 return frag->bv_page;
2992}
2993
2994
2995
2996
2997
2998
2999
3000static inline void __skb_frag_ref(skb_frag_t *frag)
3001{
3002 get_page(skb_frag_page(frag));
3003}
3004
3005
3006
3007
3008
3009
3010
3011
3012static inline void skb_frag_ref(struct sk_buff *skb, int f)
3013{
3014 __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
3015}
3016
3017
3018
3019
3020
3021
3022
3023static inline void __skb_frag_unref(skb_frag_t *frag)
3024{
3025 put_page(skb_frag_page(frag));
3026}
3027
3028
3029
3030
3031
3032
3033
3034
3035static inline void skb_frag_unref(struct sk_buff *skb, int f)
3036{
3037 __skb_frag_unref(&skb_shinfo(skb)->frags[f]);
3038}
3039
/**
 * skb_frag_address - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. The page must already
 * be mapped.
 */
3047static inline void *skb_frag_address(const skb_frag_t *frag)
3048{
3049 return page_address(skb_frag_page(frag)) + skb_frag_off(frag);
3050}
3051
3052
3053
3054
3055
3056
3057
3058
3059static inline void *skb_frag_address_safe(const skb_frag_t *frag)
3060{
3061 void *ptr = page_address(skb_frag_page(frag));
3062 if (unlikely(!ptr))
3063 return NULL;
3064
3065 return ptr + skb_frag_off(frag);
3066}
3067
3068
3069
3070
3071
3072
3073static inline void skb_frag_page_copy(skb_frag_t *fragto,
3074 const skb_frag_t *fragfrom)
3075{
3076 fragto->bv_page = fragfrom->bv_page;
3077}
3078
3079
3080
3081
3082
3083
3084
3085
3086static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
3087{
3088 frag->bv_page = page;
3089}
3090
3091
3092
3093
3094
3095
3096
3097
3098
3099static inline void skb_frag_set_page(struct sk_buff *skb, int f,
3100 struct page *page)
3101{
3102 __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
3103}
3104
3105bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
/**
 * skb_frag_dma_map - maps a paged fragment via the DMA API
 * @dev: the device to map the fragment to
 * @frag: the paged fragment to map
 * @offset: the offset within the fragment (starting at the
 *          fragment's own offset)
 * @size: the number of bytes to map
 * @dir: the direction of the mapping
 *
 * Maps the page associated with @frag to @dev.
 */
3118static inline dma_addr_t skb_frag_dma_map(struct device *dev,
3119 const skb_frag_t *frag,
3120 size_t offset, size_t size,
3121 enum dma_data_direction dir)
3122{
3123 return dma_map_page(dev, skb_frag_page(frag),
3124 skb_frag_off(frag) + offset, size, dir);
3125}
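
/* Usage sketch (illustrative only): a transmit path commonly maps each
 * paged fragment of an skb for device DMA and checks for mapping errors.
 * "dev" is the driver's struct device; "ring" and "tx_fill_desc" are
 * assumed driver-local names.
 *
 *	unsigned int i;
 *
 *	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 *		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 *		dma_addr_t dma;
 *
 *		dma = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
 *				       DMA_TO_DEVICE);
 *		if (dma_mapping_error(dev, dma))
 *			goto unmap_frags;
 *		tx_fill_desc(ring, dma, skb_frag_size(frag));
 *	}
 */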
3126
3127static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
3128 gfp_t gfp_mask)
3129{
3130 return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
3131}
3132
3133
3134static inline struct sk_buff *pskb_copy_for_clone(struct sk_buff *skb,
3135 gfp_t gfp_mask)
3136{
3137 return __pskb_copy_fclone(skb, skb_headroom(skb), gfp_mask, true);
3138}
3139
/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not require the data to be copied.
 */
3149static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
3150{
3151 return !skb_header_cloned(skb) &&
3152 skb_headroom(skb) + len <= skb->hdr_len;
3153}
3154
3155static inline int skb_try_make_writable(struct sk_buff *skb,
3156 unsigned int write_len)
3157{
3158 return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
3159 pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3160}
3161
3162static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
3163 int cloned)
3164{
3165 int delta = 0;
3166
3167 if (headroom > skb_headroom(skb))
3168 delta = headroom - skb_headroom(skb);
3169
3170 if (delta || cloned)
3171 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
3172 GFP_ATOMIC);
3173 return 0;
3174}
3175
/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and the original skb is not changed.
 *
 *	The result is skb with writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
3188static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
3189{
3190 return __skb_cow(skb, headroom, skb_cloned(skb));
3191}
3192
/**
 *	skb_cow_head - skb_cow but only making the head writable
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	This function is identical to skb_cow except that we replace the
 *	skb_cloned check by skb_header_cloned.  It should be used when
 *	you only need to push on some header and do not need to modify
 *	the data.
 */
3203static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
3204{
3205 return __skb_cow(skb, headroom, skb_header_cloned(skb));
3206}
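
/* Usage sketch (illustrative only): before pushing a new header onto a
 * buffer that may be cloned or short on headroom, make the head writable
 * first, e.g. when inserting a VLAN tag:
 *
 *	if (skb_cow_head(skb, VLAN_HLEN))
 *		return -ENOMEM;
 *	__skb_push(skb, VLAN_HLEN);
 *	... rebuild the Ethernet header and insert the tag at skb->data ...
 */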
3207
/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
3218static inline int skb_padto(struct sk_buff *skb, unsigned int len)
3219{
3220 unsigned int size = skb->len;
3221 if (likely(size >= len))
3222 return 0;
3223 return skb_pad(skb, len - size);
3224}
3225
/**
 *	__skb_put_padto - increase size and pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *	@free_on_error: free buffer on error
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error if @free_on_error is true.
 */
3237static inline int __must_check __skb_put_padto(struct sk_buff *skb,
3238 unsigned int len,
3239 bool free_on_error)
3240{
3241 unsigned int size = skb->len;
3242
3243 if (unlikely(size < len)) {
3244 len -= size;
3245 if (__skb_pad(skb, len, free_on_error))
3246 return -ENOMEM;
3247 __skb_put(skb, len);
3248 }
3249 return 0;
3250}
3251
/**
 *	skb_put_padto - increase size and pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */
3262static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len)
3263{
3264 return __skb_put_padto(skb, len, true);
3265}
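
/* Usage sketch (illustrative only): drivers typically pad outgoing frames
 * to the minimum Ethernet length right before handing them to hardware;
 * on failure the skb has already been freed by the helper:
 *
 *	if (skb_put_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;
 */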
3266
3267static inline int skb_add_data(struct sk_buff *skb,
3268 struct iov_iter *from, int copy)
3269{
3270 const int off = skb->len;
3271
3272 if (skb->ip_summed == CHECKSUM_NONE) {
3273 __wsum csum = 0;
3274 if (csum_and_copy_from_iter_full(skb_put(skb, copy), copy,
3275 &csum, from)) {
3276 skb->csum = csum_block_add(skb->csum, csum, off);
3277 return 0;
3278 }
3279 } else if (copy_from_iter_full(skb_put(skb, copy), copy, from))
3280 return 0;
3281
3282 __skb_trim(skb, off);
3283 return -EFAULT;
3284}
3285
3286static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
3287 const struct page *page, int off)
3288{
3289 if (skb_zcopy(skb))
3290 return false;
3291 if (i) {
3292 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
3293
3294 return page == skb_frag_page(frag) &&
3295 off == skb_frag_off(frag) + skb_frag_size(frag);
3296 }
3297 return false;
3298}
3299
3300static inline int __skb_linearize(struct sk_buff *skb)
3301{
3302 return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
3303}
3304
/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
3312static inline int skb_linearize(struct sk_buff *skb)
3313{
3314 return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
3315}
3316
/**
 * skb_has_shared_frag - can any frag be overwritten
 * @skb: buffer to test
 *
 * Return true if the skb has at least one frag that might be modified
 * by an external entity (as in vmsplice()/sendfile())
 */
3324static inline bool skb_has_shared_frag(const struct sk_buff *skb)
3325{
3326 return skb_is_nonlinear(skb) &&
3327 skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
3328}
3329
/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
3337static inline int skb_linearize_cow(struct sk_buff *skb)
3338{
3339 return skb_is_nonlinear(skb) || skb_cloned(skb) ?
3340 __skb_linearize(skb) : 0;
3341}
3342
3343static __always_inline void
3344__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
3345 unsigned int off)
3346{
3347 if (skb->ip_summed == CHECKSUM_COMPLETE)
3348 skb->csum = csum_block_sub(skb->csum,
3349 csum_partial(start, len, 0), off);
3350 else if (skb->ip_summed == CHECKSUM_PARTIAL &&
3351 skb_checksum_start_offset(skb) < 0)
3352 skb->ip_summed = CHECKSUM_NONE;
3353}
3354
/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */
3365static inline void skb_postpull_rcsum(struct sk_buff *skb,
3366 const void *start, unsigned int len)
3367{
3368 __skb_postpull_rcsum(skb, start, len, 0);
3369}
3370
3371static __always_inline void
3372__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
3373 unsigned int off)
3374{
3375 if (skb->ip_summed == CHECKSUM_COMPLETE)
3376 skb->csum = csum_block_add(skb->csum,
3377 csum_partial(start, len, 0), off);
3378}
3379
/**
 *	skb_postpush_rcsum - update checksum for received skb after push
 *	@skb: buffer to update
 *	@start: start of data after push
 *	@len: length of data pushed
 *
 *	After doing a push on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum.
 */
3389static inline void skb_postpush_rcsum(struct sk_buff *skb,
3390 const void *start, unsigned int len)
3391{
3392 __skb_postpush_rcsum(skb, start, len, 0);
3393}
3394
3395void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
3396
/**
 *	skb_push_rcsum - push skb and update receive checksum
 *	@skb: buffer to update
 *	@len: length of data pushed
 *
 *	This function performs an skb_push on the packet and updates
 *	the CHECKSUM_COMPLETE checksum.  It should be used on
 *	receive path processing instead of skb_push unless you know
 *	that the checksum difference is zero (e.g., a valid IP header)
 *	or you are setting ip_summed to CHECKSUM_NONE.
 */
3408static inline void *skb_push_rcsum(struct sk_buff *skb, unsigned int len)
3409{
3410 skb_push(skb, len);
3411 skb_postpush_rcsum(skb, skb->data, len);
3412 return skb->data;
3413}
3414
3415int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
3416
/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets is still valid after the operation.
 *	It can change skb pointers.
 */
3426static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
3427{
3428 if (likely(len >= skb->len))
3429 return 0;
3430 return pskb_trim_rcsum_slow(skb, len);
3431}
3432
3433static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
3434{
3435 if (skb->ip_summed == CHECKSUM_COMPLETE)
3436 skb->ip_summed = CHECKSUM_NONE;
3437 __skb_trim(skb, len);
3438 return 0;
3439}
3440
3441static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
3442{
3443 if (skb->ip_summed == CHECKSUM_COMPLETE)
3444 skb->ip_summed = CHECKSUM_NONE;
3445 return __skb_grow(skb, len);
3446}
3447
3448#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
3449#define skb_rb_first(root) rb_to_skb(rb_first(root))
3450#define skb_rb_last(root) rb_to_skb(rb_last(root))
3451#define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode))
3452#define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode))
3453
3454#define skb_queue_walk(queue, skb) \
3455 for (skb = (queue)->next; \
3456 skb != (struct sk_buff *)(queue); \
3457 skb = skb->next)
3458
3459#define skb_queue_walk_safe(queue, skb, tmp) \
3460 for (skb = (queue)->next, tmp = skb->next; \
3461 skb != (struct sk_buff *)(queue); \
3462 skb = tmp, tmp = skb->next)
3463
3464#define skb_queue_walk_from(queue, skb) \
3465 for (; skb != (struct sk_buff *)(queue); \
3466 skb = skb->next)
3467
3468#define skb_rbtree_walk(skb, root) \
3469 for (skb = skb_rb_first(root); skb != NULL; \
3470 skb = skb_rb_next(skb))
3471
3472#define skb_rbtree_walk_from(skb) \
3473 for (; skb != NULL; \
3474 skb = skb_rb_next(skb))
3475
3476#define skb_rbtree_walk_from_safe(skb, tmp) \
3477 for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \
3478 skb = tmp)
3479
3480#define skb_queue_walk_from_safe(queue, skb, tmp) \
3481 for (tmp = skb->next; \
3482 skb != (struct sk_buff *)(queue); \
3483 skb = tmp, tmp = skb->next)
3484
3485#define skb_queue_reverse_walk(queue, skb) \
3486 for (skb = (queue)->prev; \
3487 skb != (struct sk_buff *)(queue); \
3488 skb = skb->prev)
3489
3490#define skb_queue_reverse_walk_safe(queue, skb, tmp) \
3491 for (skb = (queue)->prev, tmp = skb->prev; \
3492 skb != (struct sk_buff *)(queue); \
3493 skb = tmp, tmp = skb->prev)
3494
3495#define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \
3496 for (tmp = skb->prev; \
3497 skb != (struct sk_buff *)(queue); \
3498 skb = tmp, tmp = skb->prev)
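
/* Usage sketch (illustrative only): the _safe variants allow the current
 * element to be unlinked or freed while walking.  "sk" is an assumed socket
 * and "should_drop" an assumed predicate; the caller must hold the queue
 * lock when using the double-underscore unlink:
 *
 *	struct sk_buff *skb, *tmp;
 *
 *	skb_queue_walk_safe(&sk->sk_receive_queue, skb, tmp) {
 *		if (should_drop(skb)) {
 *			__skb_unlink(skb, &sk->sk_receive_queue);
 *			kfree_skb(skb);
 *		}
 *	}
 */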
3499
3500static inline bool skb_has_frag_list(const struct sk_buff *skb)
3501{
3502 return skb_shinfo(skb)->frag_list != NULL;
3503}
3504
3505static inline void skb_frag_list_init(struct sk_buff *skb)
3506{
3507 skb_shinfo(skb)->frag_list = NULL;
3508}
3509
3510#define skb_walk_frags(skb, iter) \
3511 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
3512
3513
3514int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
3515 int *err, long *timeo_p,
3516 const struct sk_buff *skb);
3517struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
3518 struct sk_buff_head *queue,
3519 unsigned int flags,
3520 int *off, int *err,
3521 struct sk_buff **last);
3522struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
3523 struct sk_buff_head *queue,
3524 unsigned int flags, int *off, int *err,
3525 struct sk_buff **last);
3526struct sk_buff *__skb_recv_datagram(struct sock *sk,
3527 struct sk_buff_head *sk_queue,
3528 unsigned int flags, int *off, int *err);
3529struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
3530 int *err);
3531__poll_t datagram_poll(struct file *file, struct socket *sock,
3532 struct poll_table_struct *wait);
3533int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
3534 struct iov_iter *to, int size);
3535static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
3536 struct msghdr *msg, int size)
3537{
3538 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
3539}
3540int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
3541 struct msghdr *msg);
3542int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
3543 struct iov_iter *to, int len,
3544 struct ahash_request *hash);
3545int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
3546 struct iov_iter *from, int len);
3547int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
3548void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
3549void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
3550static inline void skb_free_datagram_locked(struct sock *sk,
3551 struct sk_buff *skb)
3552{
3553 __skb_free_datagram_locked(sk, skb, 0);
3554}
3555int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
3556int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
3557int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
3558__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
3559 int len);
3560int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
3561 struct pipe_inode_info *pipe, unsigned int len,
3562 unsigned int flags);
3563int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
3564 int len);
3565void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
3566unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
3567int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
3568 int len, int hlen);
3569void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
3570int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
3571void skb_scrub_packet(struct sk_buff *skb, bool xnet);
3572bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
3573bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
3574struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
3575struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features,
3576 unsigned int offset);
3577struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
3578int skb_ensure_writable(struct sk_buff *skb, int write_len);
3579int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
3580int skb_vlan_pop(struct sk_buff *skb);
3581int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
3582int skb_eth_pop(struct sk_buff *skb);
3583int skb_eth_push(struct sk_buff *skb, const unsigned char *dst,
3584 const unsigned char *src);
3585int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
3586 int mac_len, bool ethernet);
3587int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
3588 bool ethernet);
3589int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
3590int skb_mpls_dec_ttl(struct sk_buff *skb);
3591struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
3592 gfp_t gfp);
3593
3594static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
3595{
3596 return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT;
3597}
3598
3599static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
3600{
3601 return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
3602}
3603
3604struct skb_checksum_ops {
3605 __wsum (*update)(const void *mem, int len, __wsum wsum);
3606 __wsum (*combine)(__wsum csum, __wsum csum2, int offset, int len);
3607};
3608
3609extern const struct skb_checksum_ops *crc32c_csum_stub __read_mostly;
3610
3611__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
3612 __wsum csum, const struct skb_checksum_ops *ops);
3613__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
3614 __wsum csum);
3615
3616static inline void * __must_check
3617__skb_header_pointer(const struct sk_buff *skb, int offset,
3618 int len, void *data, int hlen, void *buffer)
3619{
3620 if (hlen - offset >= len)
3621 return data + offset;
3622
3623 if (!skb ||
3624 skb_copy_bits(skb, offset, buffer, len) < 0)
3625 return NULL;
3626
3627 return buffer;
3628}
3629
3630static inline void * __must_check
3631skb_header_pointer(const struct sk_buff *skb, int offset, int len, void *buffer)
3632{
3633 return __skb_header_pointer(skb, offset, len, skb->data,
3634 skb_headlen(skb), buffer);
3635}
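
/* Usage sketch (illustrative only): skb_header_pointer() returns a pointer
 * into the linear data when the requested bytes are already there and
 * otherwise copies them into the caller-provided buffer, so parsers work
 * no matter how the packet is fragmented.  "offset" is an assumed local:
 *
 *	struct tcphdr _th;
 *	const struct tcphdr *th;
 *
 *	th = skb_header_pointer(skb, offset, sizeof(_th), &_th);
 *	if (!th)
 *		return -EINVAL;
 */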
3636
/**
 *	skb_needs_linearize - check if we need to linearize a given skb
 *			      depending on the given device features.
 *	@skb: socket buffer to check
 *	@features: net device features
 *
 *	Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG.
 */
3647static inline bool skb_needs_linearize(struct sk_buff *skb,
3648 netdev_features_t features)
3649{
3650 return skb_is_nonlinear(skb) &&
3651 ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
3652 (skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
3653}
3654
3655static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
3656 void *to,
3657 const unsigned int len)
3658{
3659 memcpy(to, skb->data, len);
3660}
3661
3662static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
3663 const int offset, void *to,
3664 const unsigned int len)
3665{
3666 memcpy(to, skb->data + offset, len);
3667}
3668
3669static inline void skb_copy_to_linear_data(struct sk_buff *skb,
3670 const void *from,
3671 const unsigned int len)
3672{
3673 memcpy(skb->data, from, len);
3674}
3675
3676static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
3677 const int offset,
3678 const void *from,
3679 const unsigned int len)
3680{
3681 memcpy(skb->data + offset, from, len);
3682}
3683
3684void skb_init(void);
3685
3686static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
3687{
3688 return skb->tstamp;
3689}
3690
/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct __kernel_old_timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and stores
 *	it in stamp.
 */
3700static inline void skb_get_timestamp(const struct sk_buff *skb,
3701 struct __kernel_old_timeval *stamp)
3702{
3703 *stamp = ns_to_kernel_old_timeval(skb->tstamp);
3704}
3705
3706static inline void skb_get_new_timestamp(const struct sk_buff *skb,
3707 struct __kernel_sock_timeval *stamp)
3708{
3709 struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
3710
3711 stamp->tv_sec = ts.tv_sec;
3712 stamp->tv_usec = ts.tv_nsec / 1000;
3713}
3714
3715static inline void skb_get_timestampns(const struct sk_buff *skb,
3716 struct __kernel_old_timespec *stamp)
3717{
3718 struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
3719
3720 stamp->tv_sec = ts.tv_sec;
3721 stamp->tv_nsec = ts.tv_nsec;
3722}
3723
3724static inline void skb_get_new_timestampns(const struct sk_buff *skb,
3725 struct __kernel_timespec *stamp)
3726{
3727 struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
3728
3729 stamp->tv_sec = ts.tv_sec;
3730 stamp->tv_nsec = ts.tv_nsec;
3731}
3732
3733static inline void __net_timestamp(struct sk_buff *skb)
3734{
3735 skb->tstamp = ktime_get_real();
3736}
3737
3738static inline ktime_t net_timedelta(ktime_t t)
3739{
3740 return ktime_sub(ktime_get_real(), t);
3741}
3742
3743static inline ktime_t net_invalid_timestamp(void)
3744{
3745 return 0;
3746}
3747
3748static inline u8 skb_metadata_len(const struct sk_buff *skb)
3749{
3750 return skb_shinfo(skb)->meta_len;
3751}
3752
3753static inline void *skb_metadata_end(const struct sk_buff *skb)
3754{
3755 return skb_mac_header(skb);
3756}
3757
3758static inline bool __skb_metadata_differs(const struct sk_buff *skb_a,
3759 const struct sk_buff *skb_b,
3760 u8 meta_len)
3761{
3762 const void *a = skb_metadata_end(skb_a);
3763 const void *b = skb_metadata_end(skb_b);
3764
3765#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
3766 u64 diffs = 0;
3767
3768 switch (meta_len) {
3769#define __it(x, op) (x -= sizeof(u##op))
3770#define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op))
3771 case 32: diffs |= __it_diff(a, b, 64);
3772 fallthrough;
3773 case 24: diffs |= __it_diff(a, b, 64);
3774 fallthrough;
3775 case 16: diffs |= __it_diff(a, b, 64);
3776 fallthrough;
3777 case 8: diffs |= __it_diff(a, b, 64);
3778 break;
3779 case 28: diffs |= __it_diff(a, b, 64);
3780 fallthrough;
3781 case 20: diffs |= __it_diff(a, b, 64);
3782 fallthrough;
3783 case 12: diffs |= __it_diff(a, b, 64);
3784 fallthrough;
3785 case 4: diffs |= __it_diff(a, b, 32);
3786 break;
3787 }
3788 return diffs;
3789#else
3790 return memcmp(a - meta_len, b - meta_len, meta_len);
3791#endif
3792}
3793
3794static inline bool skb_metadata_differs(const struct sk_buff *skb_a,
3795 const struct sk_buff *skb_b)
3796{
3797 u8 len_a = skb_metadata_len(skb_a);
3798 u8 len_b = skb_metadata_len(skb_b);
3799
3800 if (!(len_a | len_b))
3801 return false;
3802
3803 return len_a != len_b ?
3804 true : __skb_metadata_differs(skb_a, skb_b, len_a);
3805}
3806
3807static inline void skb_metadata_set(struct sk_buff *skb, u8 meta_len)
3808{
3809 skb_shinfo(skb)->meta_len = meta_len;
3810}
3811
3812static inline void skb_metadata_clear(struct sk_buff *skb)
3813{
3814 skb_metadata_set(skb, 0);
3815}
3816
3817struct sk_buff *skb_clone_sk(struct sk_buff *skb);
3818
3819#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
3820
3821void skb_clone_tx_timestamp(struct sk_buff *skb);
3822bool skb_defer_rx_timestamp(struct sk_buff *skb);
3823
3824#else
3825
3826static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
3827{
3828}
3829
3830static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
3831{
3832 return false;
3833}
3834
3835#endif
3836
/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb back to the stack with a
 * timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps
 */
3849void skb_complete_tx_timestamp(struct sk_buff *skb,
3850 struct skb_shared_hwtstamps *hwtstamps);
3851
3852void __skb_tstamp_tx(struct sk_buff *orig_skb,
3853 struct skb_shared_hwtstamps *hwtstamps,
3854 struct sock *sk, int tstype);
3855
/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb:	the original outgoing packet
 * @hwtstamps:	hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket.  Errors are silently ignored.
 */
3867void skb_tstamp_tx(struct sk_buff *orig_skb,
3868 struct skb_shared_hwtstamps *hwtstamps);
3869
/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC Drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * Specifically, one should make absolutely sure that this function is
 * called before TX completion of this packet can trigger.  Otherwise
 * the packet could potentially already be freed.
 *
 * @skb: A socket buffer.
 */
3882static inline void skb_tx_timestamp(struct sk_buff *skb)
3883{
3884 skb_clone_tx_timestamp(skb);
3885 if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
3886 skb_tstamp_tx(skb, NULL);
3887}
3888
/**
 * skb_complete_wifi_ack - deliver skb with wifi status
 *
 * @skb: the original outgoing packet
 * @acked: ack status
 */
3896void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
3897
3898__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
3899__sum16 __skb_checksum_complete(struct sk_buff *skb);
3900
3901static inline int skb_csum_unnecessary(const struct sk_buff *skb)
3902{
3903 return ((skb->ip_summed == CHECKSUM_UNNECESSARY) ||
3904 skb->csum_valid ||
3905 (skb->ip_summed == CHECKSUM_PARTIAL &&
3906 skb_checksum_start_offset(skb) >= 0));
3907}
3908
/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify that checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is unnecessary or if it has been verified successfully.
 */
3925static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
3926{
3927 return skb_csum_unnecessary(skb) ?
3928 0 : __skb_checksum_complete(skb);
3929}
3930
3931static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
3932{
3933 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3934 if (skb->csum_level == 0)
3935 skb->ip_summed = CHECKSUM_NONE;
3936 else
3937 skb->csum_level--;
3938 }
3939}
3940
3941static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
3942{
3943 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3944 if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
3945 skb->csum_level++;
3946 } else if (skb->ip_summed == CHECKSUM_NONE) {
3947 skb->ip_summed = CHECKSUM_UNNECESSARY;
3948 skb->csum_level = 0;
3949 }
3950}
3951
3952static inline void __skb_reset_checksum_unnecessary(struct sk_buff *skb)
3953{
3954 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3955 skb->ip_summed = CHECKSUM_NONE;
3956 skb->csum_level = 0;
3957 }
3958}
3959
/* Check if we need to perform checksum complete validation.
 *
 * Returns true if checksum complete is needed, false otherwise
 * (either checksum is unnecessary or zero checksum is allowed).
 */
3965static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
3966 bool zero_okay,
3967 __sum16 check)
3968{
3969 if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
3970 skb->csum_valid = 1;
3971 __skb_decr_checksum_unnecessary(skb);
3972 return false;
3973 }
3974
3975 return true;
3976}
3977
/* For small packets <= CHECKSUM_BREAK perform checksum complete directly
 * in checksum_init.
 */
3981#define CHECKSUM_BREAK 76
3982
/* Unset checksum-complete
 *
 * Unset checksum complete can be done when packet is being modified
 * (uncompressed for instance) and the checksum-complete value is
 * invalidated.
 */
3989static inline void skb_checksum_complete_unset(struct sk_buff *skb)
3990{
3991 if (skb->ip_summed == CHECKSUM_COMPLETE)
3992 skb->ip_summed = CHECKSUM_NONE;
3993}
3994
/* Validate (init) checksum based on checksum complete.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete. In the latter
 *	case the ip_summed will not be CHECKSUM_UNNECESSARY and the pseudo
 *	checksum is stored in skb->csum for use in __skb_checksum_complete
 *   non-zero: value of invalid checksum
 */
4004static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
4005 bool complete,
4006 __wsum psum)
4007{
4008 if (skb->ip_summed == CHECKSUM_COMPLETE) {
4009 if (!csum_fold(csum_add(psum, skb->csum))) {
4010 skb->csum_valid = 1;
4011 return 0;
4012 }
4013 }
4014
4015 skb->csum = psum;
4016
4017 if (complete || skb->len <= CHECKSUM_BREAK) {
4018 __sum16 csum;
4019
4020 csum = __skb_checksum_complete(skb);
4021 skb->csum_valid = !csum;
4022 return csum;
4023 }
4024
4025 return 0;
4026}
4027
4028static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
4029{
4030 return 0;
4031}
4032
/* Perform checksum validate (init). Note that this is a macro since we only
 * want to calculate the pseudo header which is an input function if necessary.
 * First we try to validate without any computation (checksum unnecessary) and
 * then calculate based on checksum complete calling the function to compute
 * pseudo header.
 *
 * Return values:
 *   0: checksum is validated or try to in skb_checksum_complete
 *   non-zero: value of invalid checksum
 */
4043#define __skb_checksum_validate(skb, proto, complete, \
4044 zero_okay, check, compute_pseudo) \
4045({ \
4046 __sum16 __ret = 0; \
4047 skb->csum_valid = 0; \
4048 if (__skb_checksum_validate_needed(skb, zero_okay, check)) \
4049 __ret = __skb_checksum_validate_complete(skb, \
4050 complete, compute_pseudo(skb, proto)); \
4051 __ret; \
4052})
4053
4054#define skb_checksum_init(skb, proto, compute_pseudo) \
4055 __skb_checksum_validate(skb, proto, false, false, 0, compute_pseudo)
4056
4057#define skb_checksum_init_zero_check(skb, proto, check, compute_pseudo) \
4058 __skb_checksum_validate(skb, proto, false, true, check, compute_pseudo)
4059
4060#define skb_checksum_validate(skb, proto, compute_pseudo) \
4061 __skb_checksum_validate(skb, proto, true, false, 0, compute_pseudo)
4062
4063#define skb_checksum_validate_zero_check(skb, proto, check, \
4064 compute_pseudo) \
4065 __skb_checksum_validate(skb, proto, true, true, check, compute_pseudo)
4066
4067#define skb_checksum_simple_validate(skb) \
4068 __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
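
/* Usage sketch (illustrative only): a protocol receive routine validates an
 * incoming checksum with one of the helpers above.  "my_compute_pseudo"
 * stands in for a protocol-specific pseudo-header routine:
 *
 *	if (skb_checksum_init(skb, IPPROTO_UDP, my_compute_pseudo))
 *		goto csum_error;
 */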
4069
4070static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
4071{
4072 return (skb->ip_summed == CHECKSUM_NONE && skb->csum_valid);
4073}
4074
4075static inline void __skb_checksum_convert(struct sk_buff *skb, __wsum pseudo)
4076{
4077 skb->csum = ~pseudo;
4078 skb->ip_summed = CHECKSUM_COMPLETE;
4079}
4080
4081#define skb_checksum_try_convert(skb, proto, compute_pseudo) \
4082do { \
4083 if (__skb_checksum_convert_check(skb)) \
4084 __skb_checksum_convert(skb, compute_pseudo(skb, proto)); \
4085} while (0)
4086
4087static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr,
4088 u16 start, u16 offset)
4089{
4090 skb->ip_summed = CHECKSUM_PARTIAL;
4091 skb->csum_start = ((unsigned char *)ptr + start) - skb->head;
4092 skb->csum_offset = offset - start;
4093}
4094
/* Update skb and packet to reflect the remote checksum offload operation.
 * When called, ptr indicates the starting point for skb->csum when
 * ip_summed is CHECKSUM_COMPLETE. If we need to create checksum complete
 * here, skb_postpull_rcsum is done so skb->csum start is ptr.
 */
4100static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
4101 int start, int offset, bool nopartial)
4102{
4103 __wsum delta;
4104
4105 if (!nopartial) {
4106 skb_remcsum_adjust_partial(skb, ptr, start, offset);
4107 return;
4108 }
4109
4110 if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
4111 __skb_checksum_complete(skb);
4112 skb_postpull_rcsum(skb, skb->data, ptr - (void *)skb->data);
4113 }
4114
4115 delta = remcsum_adjust(ptr, skb->csum, start, offset);
4116
	/* Adjust skb->csum since we changed the packet */
4118 skb->csum = csum_add(skb->csum, delta);
4119}
4120
4121static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
4122{
4123#if IS_ENABLED(CONFIG_NF_CONNTRACK)
4124 return (void *)(skb->_nfct & NFCT_PTRMASK);
4125#else
4126 return NULL;
4127#endif
4128}
4129
4130static inline unsigned long skb_get_nfct(const struct sk_buff *skb)
4131{
4132#if IS_ENABLED(CONFIG_NF_CONNTRACK)
4133 return skb->_nfct;
4134#else
4135 return 0UL;
4136#endif
4137}
4138
4139static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct)
4140{
4141#if IS_ENABLED(CONFIG_NF_CONNTRACK)
4142 skb->_nfct = nfct;
4143#endif
4144}
4145
4146#ifdef CONFIG_SKB_EXTENSIONS
4147enum skb_ext_id {
4148#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
4149 SKB_EXT_BRIDGE_NF,
4150#endif
4151#ifdef CONFIG_XFRM
4152 SKB_EXT_SEC_PATH,
4153#endif
4154#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
4155 TC_SKB_EXT,
4156#endif
4157#if IS_ENABLED(CONFIG_MPTCP)
4158 SKB_EXT_MPTCP,
4159#endif
4160 SKB_EXT_NUM,
4161};
4162
/**
 * struct skb_ext - sk_buff extensions
 * @refcnt: 1 on allocation, deallocated on 0
 * @offset: offset to add to @data to obtain extension address
 * @chunks: size currently allocated, stored in units of 8 bytes
 * @data: start of extension data, variable sized
 *
 * Note: offsets/lengths are stored in chunks of 8 bytes, this allows
 * to use 'u8' types while allowing up to 127 bytes of extension data.
 */
4173struct skb_ext {
4174 refcount_t refcnt;
4175 u8 offset[SKB_EXT_NUM];
4176 u8 chunks;
4177 char data[] __aligned(8);
4178};
4179
4180struct skb_ext *__skb_ext_alloc(gfp_t flags);
4181void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
4182 struct skb_ext *ext);
4183void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
4184void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
4185void __skb_ext_put(struct skb_ext *ext);
4186
4187static inline void skb_ext_put(struct sk_buff *skb)
4188{
4189 if (skb->active_extensions)
4190 __skb_ext_put(skb->extensions);
4191}
4192
4193static inline void __skb_ext_copy(struct sk_buff *dst,
4194 const struct sk_buff *src)
4195{
4196 dst->active_extensions = src->active_extensions;
4197
4198 if (src->active_extensions) {
4199 struct skb_ext *ext = src->extensions;
4200
4201 refcount_inc(&ext->refcnt);
4202 dst->extensions = ext;
4203 }
4204}
4205
4206static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src)
4207{
4208 skb_ext_put(dst);
4209 __skb_ext_copy(dst, src);
4210}
4211
4212static inline bool __skb_ext_exist(const struct skb_ext *ext, enum skb_ext_id i)
4213{
4214 return !!ext->offset[i];
4215}
4216
4217static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id)
4218{
4219 return skb->active_extensions & (1 << id);
4220}
4221
4222static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
4223{
4224 if (skb_ext_exist(skb, id))
4225 __skb_ext_del(skb, id);
4226}
4227
4228static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id)
4229{
4230 if (skb_ext_exist(skb, id)) {
4231 struct skb_ext *ext = skb->extensions;
4232
4233 return (void *)ext + (ext->offset[id] << 3);
4234 }
4235
4236 return NULL;
4237}
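
/* Usage sketch (illustrative only): an extension is attached with
 * skb_ext_add() and later retrieved with skb_ext_find(), both keyed by an
 * skb_ext_id from the enum above.  "chain_index" is an assumed local:
 *
 *	struct tc_skb_ext *ext;
 *
 *	ext = skb_ext_add(skb, TC_SKB_EXT);
 *	if (!ext)
 *		return -ENOMEM;
 *	ext->chain = chain_index;
 */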
4238
4239static inline void skb_ext_reset(struct sk_buff *skb)
4240{
4241 if (unlikely(skb->active_extensions)) {
4242 __skb_ext_put(skb->extensions);
4243 skb->active_extensions = 0;
4244 }
4245}
4246
4247static inline bool skb_has_extensions(struct sk_buff *skb)
4248{
4249 return unlikely(skb->active_extensions);
4250}
4251#else
4252static inline void skb_ext_put(struct sk_buff *skb) {}
4253static inline void skb_ext_reset(struct sk_buff *skb) {}
4254static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
4255static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
4256static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
4257static inline bool skb_has_extensions(struct sk_buff *skb) { return false; }
4258#endif
4259
4260static inline void nf_reset_ct(struct sk_buff *skb)
4261{
4262#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
4263 nf_conntrack_put(skb_nfct(skb));
4264 skb->_nfct = 0;
4265#endif
4266}
4267
4268static inline void nf_reset_trace(struct sk_buff *skb)
4269{
4270#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
4271 skb->nf_trace = 0;
4272#endif
4273}
4274
4275static inline void ipvs_reset(struct sk_buff *skb)
4276{
4277#if IS_ENABLED(CONFIG_IP_VS)
4278 skb->ipvs_property = 0;
4279#endif
4280}
4281
4282
4283static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
4284 bool copy)
4285{
4286#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
4287 dst->_nfct = src->_nfct;
4288 nf_conntrack_get(skb_nfct(src));
4289#endif
4290#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
4291 if (copy)
4292 dst->nf_trace = src->nf_trace;
4293#endif
4294}
4295
4296static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
4297{
4298#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
4299 nf_conntrack_put(skb_nfct(dst));
4300#endif
4301 __nf_copy(dst, src, true);
4302}
4303
4304#ifdef CONFIG_NETWORK_SECMARK
4305static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
4306{
4307 to->secmark = from->secmark;
4308}
4309
4310static inline void skb_init_secmark(struct sk_buff *skb)
4311{
4312 skb->secmark = 0;
4313}
4314#else
4315static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
4316{ }
4317
4318static inline void skb_init_secmark(struct sk_buff *skb)
4319{ }
4320#endif
4321
4322static inline int secpath_exists(const struct sk_buff *skb)
4323{
4324#ifdef CONFIG_XFRM
4325 return skb_ext_exist(skb, SKB_EXT_SEC_PATH);
4326#else
4327 return 0;
4328#endif
4329}
4330
4331static inline bool skb_irq_freeable(const struct sk_buff *skb)
4332{
4333 return !skb->destructor &&
4334 !secpath_exists(skb) &&
4335 !skb_nfct(skb) &&
4336 !skb->_skb_refdst &&
4337 !skb_has_frag_list(skb);
4338}
4339
4340static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
4341{
4342 skb->queue_mapping = queue_mapping;
4343}
4344
4345static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
4346{
4347 return skb->queue_mapping;
4348}
4349
4350static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
4351{
4352 to->queue_mapping = from->queue_mapping;
4353}
4354
4355static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
4356{
4357 skb->queue_mapping = rx_queue + 1;
4358}
4359
4360static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
4361{
4362 return skb->queue_mapping - 1;
4363}
4364
4365static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
4366{
4367 return skb->queue_mapping != 0;
4368}
4369
4370static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val)
4371{
4372 skb->dst_pending_confirm = val;
4373}
4374
4375static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
4376{
4377 return skb->dst_pending_confirm != 0;
4378}
4379
4380static inline struct sec_path *skb_sec_path(const struct sk_buff *skb)
4381{
4382#ifdef CONFIG_XFRM
4383 return skb_ext_find(skb, SKB_EXT_SEC_PATH);
4384#else
4385 return NULL;
4386#endif
4387}
4388
/* Keeps track of mac header offset relative to skb->head.
 * It is useful for TSO of Tunneling protocol. e.g. GRE.
 * For non-tunnel skb it points to skb_mac_header() and for
 * tunnel skb it points to outer mac header.
 * Keeps track of level of encapsulation of network headers.
 */
4395struct skb_gso_cb {
4396 union {
4397 int mac_offset;
4398 int data_offset;
4399 };
4400 int encap_level;
4401 __wsum csum;
4402 __u16 csum_start;
4403};
4404#define SKB_GSO_CB_OFFSET 32
4405#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_GSO_CB_OFFSET))
4406
4407static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
4408{
4409 return (skb_mac_header(inner_skb) - inner_skb->head) -
4410 SKB_GSO_CB(inner_skb)->mac_offset;
4411}
4412
4413static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
4414{
4415 int new_headroom, headroom;
4416 int ret;
4417
4418 headroom = skb_headroom(skb);
4419 ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
4420 if (ret)
4421 return ret;
4422
4423 new_headroom = skb_headroom(skb);
4424 SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
4425 return 0;
4426}
4427
4428static inline void gso_reset_checksum(struct sk_buff *skb, __wsum res)
4429{
	/* Do not update partial checksums if remote checksum is enabled. */
4431 if (skb->remcsum_offload)
4432 return;
4433
4434 SKB_GSO_CB(skb)->csum = res;
4435 SKB_GSO_CB(skb)->csum_start = skb_checksum_start(skb) - skb->head;
4436}
4437
/* Compute the checksum for a gso segment. First compute the checksum value
 * from the start of transport header to SKB_GSO_CB(skb)->csum_start, and
 * then add in skb->csum (checksum from csum_start to end of packet).
 * skb->csum and csum_start are then updated to reflect the checksum of the
 * resultant packet starting from the transport header -- the resultant
 * checksum is in the res argument (i.e. reset to the pseudo header checksum).
 */
4446static inline __sum16 gso_make_checksum(struct sk_buff *skb, __wsum res)
4447{
4448 unsigned char *csum_start = skb_transport_header(skb);
4449 int plen = (skb->head + SKB_GSO_CB(skb)->csum_start) - csum_start;
4450 __wsum partial = SKB_GSO_CB(skb)->csum;
4451
4452 SKB_GSO_CB(skb)->csum = res;
4453 SKB_GSO_CB(skb)->csum_start = csum_start - skb->head;
4454
4455 return csum_fold(csum_partial(csum_start, plen, partial));
4456}
4457
4458static inline bool skb_is_gso(const struct sk_buff *skb)
4459{
4460 return skb_shinfo(skb)->gso_size;
4461}
4462
4463
4464static inline bool skb_is_gso_v6(const struct sk_buff *skb)
4465{
4466 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
4467}
4468
4469
4470static inline bool skb_is_gso_sctp(const struct sk_buff *skb)
4471{
4472 return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP;
4473}
4474
4475
4476static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
4477{
4478 return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
4479}
4480
4481static inline void skb_gso_reset(struct sk_buff *skb)
4482{
4483 skb_shinfo(skb)->gso_size = 0;
4484 skb_shinfo(skb)->gso_segs = 0;
4485 skb_shinfo(skb)->gso_type = 0;
4486}
4487
4488static inline void skb_increase_gso_size(struct skb_shared_info *shinfo,
4489 u16 increment)
4490{
4491 if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
4492 return;
4493 shinfo->gso_size += increment;
4494}
4495
4496static inline void skb_decrease_gso_size(struct skb_shared_info *shinfo,
4497 u16 decrement)
4498{
4499 if (WARN_ON_ONCE(shinfo->gso_size == GSO_BY_FRAGS))
4500 return;
4501 shinfo->gso_size -= decrement;
4502}
4503
4504void __skb_warn_lro_forwarding(const struct sk_buff *skb);
4505
4506static inline bool skb_warn_if_lro(const struct sk_buff *skb)
4507{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set. */
4510 const struct skb_shared_info *shinfo = skb_shinfo(skb);
4511
4512 if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
4513 unlikely(shinfo->gso_type == 0)) {
4514 __skb_warn_lro_forwarding(skb);
4515 return true;
4516 }
4517 return false;
4518}
4519
4520static inline void skb_forward_csum(struct sk_buff *skb)
4521{
4522
4523 if (skb->ip_summed == CHECKSUM_COMPLETE)
4524 skb->ip_summed = CHECKSUM_NONE;
4525}
4526
/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper, to document places where we make this assertion.
 */
4535static inline void skb_checksum_none_assert(const struct sk_buff *skb)
4536{
4537#ifdef DEBUG
4538 BUG_ON(skb->ip_summed != CHECKSUM_NONE);
4539#endif
4540}
4541
4542bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
4543
4544int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
4545struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
4546 unsigned int transport_len,
4547 __sum16(*skb_chkf)(struct sk_buff *skb));
4548
/**
 * skb_head_is_locked - Determine if the skb->head is locked down
 * @skb: skb to check
 *
 * The head on skbs built around a head frag can be removed if they are
 * not cloned. This function returns true if the skb head is locked down
 * due to either being allocated via kmalloc, or by being a clone with
 * multiple references to the head.
 */
4558static inline bool skb_head_is_locked(const struct sk_buff *skb)
4559{
4560 return !skb->head_frag || skb_cloned(skb);
4561}
4562
/* Local Checksum Offload.
 * Compute outer checksum based on the assumption that the
 * inner checksum will be offloaded later.
 * See Documentation/networking/checksum-offloads.rst for
 * explanation of how this works.
 * Fill in outer checksum adjustment (e.g. with sum of outer
 * pseudo-header) before calling.
 * Also ensure that inner checksum is in linear data area.
 */
4572static inline __wsum lco_csum(struct sk_buff *skb)
4573{
4574 unsigned char *csum_start = skb_checksum_start(skb);
4575 unsigned char *l4_hdr = skb_transport_header(skb);
4576 __wsum partial;
4577
4578
4579 partial = ~csum_unfold(*(__force __sum16 *)(csum_start +
4580 skb->csum_offset));
4581
4582
4583
4584
4585 return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
4586}
4587
4588static inline bool skb_is_redirected(const struct sk_buff *skb)
4589{
4590#ifdef CONFIG_NET_REDIRECT
4591 return skb->redirected;
4592#else
4593 return false;
4594#endif
4595}
4596
4597static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
4598{
4599#ifdef CONFIG_NET_REDIRECT
4600 skb->redirected = 1;
4601 skb->from_ingress = from_ingress;
4602 if (skb->from_ingress)
4603 skb->tstamp = 0;
4604#endif
4605}
4606
4607static inline void skb_reset_redirect(struct sk_buff *skb)
4608{
4609#ifdef CONFIG_NET_REDIRECT
4610 skb->redirected = 0;
4611#endif
4612}
4613
4614static inline void skb_set_kcov_handle(struct sk_buff *skb,
4615 const u64 kcov_handle)
4616{
4617#ifdef CONFIG_KCOV
4618 skb->kcov_handle = kcov_handle;
4619#endif
4620}
4621
4622static inline u64 skb_get_kcov_handle(struct sk_buff *skb)
4623{
4624#ifdef CONFIG_KCOV
4625 return skb->kcov_handle;
4626#else
4627 return 0;
4628#endif
4629}
4630
#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */
4633