1
2
3
4
5
6
7
8
9
10
11
12
13
14#ifndef _LINUX_SKBUFF_H
15#define _LINUX_SKBUFF_H
16
17#include <linux/kernel.h>
18#include <linux/compiler.h>
19#include <linux/time.h>
20#include <linux/cache.h>
21
22#include <asm/atomic.h>
23#include <asm/types.h>
24#include <linux/spinlock.h>
25#include <linux/net.h>
26#include <linux/textsearch.h>
27#include <net/checksum.h>
28#include <linux/rcupdate.h>
29#include <linux/dmaengine.h>
30#include <linux/hrtimer.h>
31
32#define HAVE_ALLOC_SKB
33#define HAVE_ALIGNABLE_SKB
34
35
/* Values for skb->ip_summed: how much checksum work is done / remains. */
#define CHECKSUM_NONE 0			/* no checksum computed yet */
#define CHECKSUM_UNNECESSARY 1		/* checksum already verified (e.g. by hw) */
#define CHECKSUM_COMPLETE 2		/* csum holds checksum of the whole packet */
#define CHECKSUM_PARTIAL 3		/* csum_start/csum_offset set; device finishes */

/* Round X up to a multiple of the L1 cache line size. */
#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
/* Usable payload in an allocation of X bytes, after the shared info area. */
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
/* Maximum payload for a given page order, minus X bytes of other overhead. */
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
struct net_device;
struct scatterlist;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
/* Refcounted anchor embedded in a conntrack entry; skbs hold references. */
struct nf_conntrack {
	atomic_t use;
};
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
/* Bridge-netfilter state attached to an skb crossing a bridge. */
struct nf_bridge_info {
	atomic_t use;			/* reference count */
	struct net_device *physindev;	/* physical inbound device */
	struct net_device *physoutdev;	/* physical outbound device */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	struct net_device *netoutdev;
#endif
	unsigned int mask;
	unsigned long data[32 / sizeof(unsigned long)];
};
#endif

/* Head of a doubly-linked skb queue.  The head is cast to a struct
 * sk_buff by the queue helpers, so an empty list has next/prev pointing
 * back at the head itself.
 */
struct sk_buff_head {
	/* These two members must match the start of struct sk_buff. */
	struct sk_buff *next;
	struct sk_buff *prev;

	__u32 qlen;		/* number of buffers on the queue */
	spinlock_t lock;	/* protects the queue */
};

struct sk_buff;

/* Worst-case number of page fragments needed to hold a 64KB packet. */
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)

typedef struct skb_frag_struct skb_frag_t;

/* One paged fragment of an skb's data. */
struct skb_frag_struct {
	struct page *page;	/* page holding the fragment data */
	__u32 page_offset;	/* offset of the data within the page */
	__u32 size;		/* fragment length in bytes */
};

/* Data shared between all clones of an skb; lives just past skb->end. */
struct skb_shared_info {
	atomic_t dataref;	/* split refcount; see SKB_DATAREF_SHIFT */
	unsigned short nr_frags;	/* entries used in frags[] */
	unsigned short gso_size;	/* segment size for segmentation offload */

	unsigned short gso_segs;	/* number of GSO segments */
	unsigned short gso_type;	/* SKB_GSO_* flag bits */
	__be32 ip6_frag_id;		/* IPv6 fragment id for UFO */
	struct sk_buff *frag_list;	/* chain of further skbs (fragments) */
	skb_frag_t frags[MAX_SKB_FRAGS];
};
154
155
156
157
158
159
160
161
162
163
164
165
/* skb_shinfo()->dataref is split in two halves: the low SKB_DATAREF_SHIFT
 * bits count users of the whole payload, the high bits count payload-only
 * users that released the header (see skb_header_release()).
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)

/* skb->fclone states for the fast-clone allocation scheme. */
enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

/* Flag bits for skb_shinfo()->gso_type. */
enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* Segmentation state came from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* ECN-marked TCP segments. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,
};

/* On 64-bit, tail/end/header positions are stored as offsets from
 * skb->head rather than pointers, shrinking struct sk_buff.
 */
#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
/* struct sk_buff - socket buffer: metadata for one network packet.
 * The packet bytes live in the area delimited by head/data/tail/end;
 * the shared info (frags, gso state) sits immediately past 'end'.
 */
struct sk_buff {
	/* These two members must be first (queue heads alias them). */
	struct sk_buff *next;
	struct sk_buff *prev;

	struct sock *sk;		/* owning socket, if any */
	ktime_t tstamp;			/* arrival timestamp */
	struct net_device *dev;		/* device we arrived on / leave by */

	struct dst_entry *dst;		/* routing destination entry */
	struct sec_path *sp;		/* xfrm security path (IPsec) */

	/* Control buffer: free scratch space for the current layer.
	 * A layer that wants it preserved across layers must copy it.
	 */
	char cb[48];

	unsigned int len,		/* total bytes (linear + paged) */
			data_len;	/* bytes in paged/frag data only */
	__u16 mac_len,			/* length of link-layer header */
		hdr_len;		/* writable header length of cloned skb */
	union {
		__wsum csum;		/* checksum (CHECKSUM_COMPLETE) */
		struct {
			__u16 csum_start;	/* head offset where csum starts */
			__u16 csum_offset;	/* where to store csum from there */
		};
	};
	__u32 priority;			/* packet queueing priority */
	__u8 local_df:1,		/* allow local fragmentation */
		cloned:1,		/* data may be shared with a clone */
		ip_summed:2,		/* CHECKSUM_* state */
		nohdr:1,		/* payload-only reference (header released) */
		nfctinfo:3;		/* conntrack relation of this packet */
	__u8 pkt_type:3,		/* packet class (host/broadcast/...) */
		fclone:2,		/* SKB_FCLONE_* fast-clone state */
		ipvs_property:1,	/* skb is owned by IPVS */
		nf_trace:1;		/* netfilter packet trace flag */
	__be16 protocol;		/* ethernet protocol of the packet */

	void (*destructor)(struct sk_buff *skb);	/* destruct hook; see skb_orphan() */
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack *nfct;	/* associated conntrack entry */
	struct sk_buff *nfct_reasm;	/* conntrack reassembled skb */
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info *nf_bridge;	/* bridge netfilter state */
#endif

	int iif;			/* ifindex of arrival device */
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	__u16 queue_mapping;		/* selected TX queue */
#endif
#ifdef CONFIG_NET_SCHED
	__u16 tc_index;			/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16 tc_verd;			/* traffic control verdict */
#endif
#endif

#ifdef CONFIG_NET_DMA
	dma_cookie_t dma_cookie;	/* cookie for async DMA copies */
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32 secmark;			/* security marking */
#endif

	__u32 mark;			/* generic packet mark */

	/* Positions (offsets or pointers; see sk_buff_data_t) of headers. */
	sk_buff_data_t transport_header;
	sk_buff_data_t network_header;
	sk_buff_data_t mac_header;

	/* Buffer layout invariant: head <= data <= tail <= end. */
	sk_buff_data_t tail;
	sk_buff_data_t end;
	unsigned char *head,
		*data;
	unsigned int truesize;		/* buffer size incl. sizeof(sk_buff) */
	atomic_t users;			/* refcount on the sk_buff itself */
};
334
335#ifdef __KERNEL__
336
337
338
339#include <linux/slab.h>
340
341#include <asm/system.h>
342
extern void kfree_skb(struct sk_buff *skb);
extern void __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone, int node);
/* Allocate a plain (non fast-clone) skb on the local node. */
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, -1);
}

/* Allocate an skb from the fast-clone cache (a clone is pre-allocated
 * alongside it, making a later skb_clone() cheap).
 */
static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1, -1);
}

extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *pskb_copy(struct sk_buff *skb,
				 gfp_t gfp_mask);
extern int	       pskb_expand_head(struct sk_buff *skb,
					int nhead, int ntail,
					gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int	       skb_to_sgvec(struct sk_buff *skb,
				    struct scatterlist *sg, int offset,
				    int len);
extern int	       skb_cow_data(struct sk_buff *skb, int tailbits,
				    struct sk_buff **trailer);
extern int	       skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	kfree_skb(a)
extern void	      skb_over_panic(struct sk_buff *skb, int len,
				     void *here);
extern void	      skb_under_panic(struct sk_buff *skb, int len,
				      void *here);
extern void	      skb_truesize_bug(struct sk_buff *skb);

/* Sanity check: truesize must cover the struct plus the payload length. */
static inline void skb_truesize_check(struct sk_buff *skb)
{
	int len = sizeof(struct sk_buff) + skb->len;

	if (unlikely((int)skb->truesize < len))
		skb_truesize_bug(skb);
}

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
			int len,int odd, struct sk_buff *skb),
			void *from, int length);

/* Cursor state for skb_prepare_seq_read()/skb_seq_read(). */
struct skb_seq_state
{
	__u32		lower_offset;	/* window start within the skb */
	__u32		upper_offset;	/* window end within the skb */
	__u32		frag_idx;	/* current fragment index */
	__u32		stepped_offset;	/* bytes already stepped over */
	struct sk_buff	*root_skb;	/* skb the walk started from */
	struct sk_buff	*cur_skb;	/* skb currently being walked */
	__u8		*frag_data;	/* mapped fragment data, if any */
};
410
411extern void skb_prepare_seq_read(struct sk_buff *skb,
412 unsigned int from, unsigned int to,
413 struct skb_seq_state *st);
414extern unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
415 struct skb_seq_state *st);
416extern void skb_abort_seq_read(struct skb_seq_state *st);
417
418extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
419 unsigned int to, struct ts_config *config,
420 struct ts_state *state);
421
#ifdef NET_SKBUFF_DATA_USES_OFFSET
/* skb->end is stored as an offset from skb->head on 64-bit. */
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}
#else
/* skb->end is a raw pointer on 32-bit. */
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}
#endif

/* The shared info area sits immediately past the end of the data buffer. */
#define skb_shinfo(SKB)		((struct skb_shared_info *)(skb_end_pointer(SKB)))
436
437
438
439
440
441
442
443static inline int skb_queue_empty(const struct sk_buff_head *list)
444{
445 return list->next == (struct sk_buff *)list;
446}
447
448
449
450
451
452
453
454
/* Take an additional reference on @skb and return it. */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/* True if the skb's data may be shared with a clone: the cloned bit is
 * set and more than one full-payload user is counted in the low half
 * of shinfo->dataref.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

/* True if the skb's header area may be shared with a clone.  Users that
 * released the header are counted in the high half of dataref, so the
 * header is private iff (payload users - payload-only users) == 1.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/* Release this skb's claim on the header portion of its data: mark it
 * payload-only and move its reference into the high half of dataref.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}
513
514
515
516
517
518
519
520
/* True if more than one entity holds a reference to the skb itself. */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/* If @skb is shared, replace it with a clone and drop the caller's
 * reference to the original.  Returns the now-private skb, or NULL if
 * cloning failed (the original reference is dropped either way).
 * May sleep if @pri allows.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
					      gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

/* If @skb is a clone (shared data), replace it with a private full copy
 * and drop the caller's reference to the original.  Returns the private
 * skb, or NULL on allocation failure.  May sleep if @pri allows.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
597{
598 struct sk_buff *list = ((struct sk_buff *)list_)->next;
599 if (list == (struct sk_buff *)list_)
600 list = NULL;
601 return list;
602}
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
618{
619 struct sk_buff *list = ((struct sk_buff *)list_)->prev;
620 if (list == (struct sk_buff *)list_)
621 list = NULL;
622 return list;
623}
624
625
626
627
628
629
630
/* Number of buffers currently queued on @list_ (reading without the
 * queue lock is inherently racy).
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/* Initialise a queue head to empty: next/prev point at the head itself. */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/* As skb_queue_head_init(), with a lockdep class for the queue lock. */
static inline void skb_queue_head_init_class(struct sk_buff_head *list,
		struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
/* Link @newsk into @list immediately after @prev.  Caller must hold the
 * queue lock or otherwise serialise access.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	struct sk_buff *next;
	list->qlen++;

	next = prev->next;
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
}

extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
/* Queue @newsk at the front of @list; caller must hold the lock. */
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	/* Inserting after the head itself makes @newsk the new first entry. */
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
/* Queue @newsk at the end of @list; caller must hold the lock. */
static inline void __skb_queue_tail(struct sk_buff_head *list,
				   struct sk_buff *newsk)
{
	struct sk_buff *prev, *next;

	list->qlen++;
	next = (struct sk_buff *)list;
	prev = next->prev;
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
}
729
730
731
732
733
734
735
736
737
738
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
/* Remove and return the head of @list, or NULL if the queue is empty.
 * Caller must hold the queue lock.
 */
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *next, *prev, *result;

	prev = (struct sk_buff *) list;
	next = prev->next;
	result = NULL;
	if (next != prev) {	/* non-empty: head entry is a real skb */
		result	     = next;
		next	     = next->next;
		list->qlen--;
		next->prev   = prev;
		prev->next   = next;
		result->next = result->prev = NULL;
	}
	return result;
}

extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
/* Link @newsk between @prev and @next on @list; lock held by caller. */
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

extern void	   skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
/* Insert @newsk immediately after @old on @list; lock held by caller. */
static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	__skb_insert(newsk, old, old->next, list);
}

extern void	   skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
/* Unlink @skb from @list; lock held by caller. */
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}
798
799
800
801
802
803
804
805
806
807
808
809
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
/* Remove and return the tail of @list, or NULL if empty; lock held. */
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

/* Nonzero if some data lives outside the linear buffer (frags/frag_list). */
static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

/* Bytes of data in the linear part of the buffer. */
static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}
829
830static inline int skb_pagelen(const struct sk_buff *skb)
831{
832 int i, len = 0;
833
834 for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
835 len += skb_shinfo(skb)->frags[i].size;
836 return len + skb_headlen(skb);
837}
838
/* Fill in page-fragment slot @i and set nr_frags to i + 1.  Caller is
 * responsible for filling slots in order and holding a page reference.
 */
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page		  = page;
	frag->page_offset	  = off;
	frag->size		  = size;
	skb_shinfo(skb)->nr_frags = i + 1;
}

#define SKB_PAGE_ASSERT(skb) 	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb) 	BUG_ON(skb_shinfo(skb)->frag_list)
#define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
/* skb->tail is an offset from skb->head on 64-bit. */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}
#else /* NET_SKBUFF_DATA_USES_OFFSET */
/* skb->tail is a raw pointer on 32-bit. */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */
887
888
889
890
/* Extend the used data area by @len bytes at the tail, without the
 * overflow check; the caller guarantees there is room.  Returns a
 * pointer to the first byte of the added area.
 */
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

/* Extend the used data area by @len bytes at the tail and return a
 * pointer to the first new byte; panics if the buffer would overflow.
 */
static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, current_text_addr());
	return tmp;
}

/* Extend the data area by @len bytes at the front; no underflow check. */
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

/* Extend the data area by @len bytes at the front and return the new
 * data pointer; panics if there is not enough headroom.
 */
static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	if (unlikely(skb->data<skb->head))
		skb_under_panic(skb, len, current_text_addr());
	return skb->data;
}

/* Remove @len bytes from the start; the caller guarantees the bytes
 * exist and are in the linear area (BUG otherwise).
 */
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}
951
952
953
954
955
956
957
958
959
960
961
/* Remove @len bytes from the start of the buffer, returning the new
 * data pointer, or NULL if @len exceeds the buffer's total length.
 */
static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

/* As __skb_pull(), but first pulls fragmented data into the linear
 * area when needed; NULL if that fails.
 */
static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len-skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

/* Ensure the first @len bytes are in the linear area; returns 1 on
 * success, 0 if the skb is too short or linearising failed.
 */
static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL;
}

/* Bytes of free space in front of skb->data. */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/* Bytes of free space at the tail (0 for non-linear buffers). */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/* Reserve headroom on an empty buffer by moving data and tail down;
 * must only be used before any data is added.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}
1027
#ifdef NET_SKBUFF_DATA_USES_OFFSET
/* Header positions are stored as offsets from skb->head on 64-bit;
 * reset_* records the current data pointer, set_* adds a delta to it.
 */
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

/* An offset of ~0U marks the mac header as not yet set. */
static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != ~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */

/* Header positions are stored as raw pointers on 32-bit. */
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb->transport_header = skb->data + offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb->network_header = skb->data + offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->mac_header;
}

/* A NULL pointer marks the mac header as not yet set. */
static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != NULL;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb->mac_header = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/* Offset of the transport header from the current data pointer. */
static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

/* Length of the network header (distance to the transport header). */
static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

/* Offset of the network header from the current data pointer. */
static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
/* Extra headroom so the IP header lands word-aligned after a 14-byte
 * ethernet header; architectures may override.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/* Default headroom reserved for lower-layer headers on allocation. */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	16
#endif

extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

/* Trim a purely linear buffer to @len bytes; warns and bails on
 * non-linear skbs (use the pskb variants for those).
 */
static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb->data_len)) {
		WARN_ON(1);
		return;
	}
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

/* Cut @skb down to @len bytes; no-op if it is already short enough. */
static inline void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}

/* Trim possibly-fragmented data to @len; 0 on success, negative errno
 * otherwise.
 */
static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/* As pskb_trim(), for callers that know the skb is not cloned: a trim
 * failure would indicate a bug, hence BUG_ON instead of error return.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259static inline void skb_orphan(struct sk_buff *skb)
1260{
1261 if (skb->destructor)
1262 skb->destructor(skb);
1263 skb->destructor = NULL;
1264 skb->sk = NULL;
1265}
1266
1267
1268
1269
1270
1271
1272
1273
1274
extern void skb_queue_purge(struct sk_buff_head *list);
/* Free every buffer on @list; caller must hold the queue lock or have
 * exclusive access.
 */
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

/* Allocate an skb for device RX with NET_SKB_PAD bytes of headroom
 * already reserved; NULL on allocation failure.
 */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
	if (likely(skb))
		skb_reserve(skb, NET_SKB_PAD);
	return skb;
}

/* GFP_ATOMIC variant of __dev_alloc_skb() for interrupt context. */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return __dev_alloc_skb(length, GFP_ATOMIC);
}

extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
		unsigned int length, gfp_t gfp_mask);

/* Allocate an RX skb associated with @dev (GFP_ATOMIC); NULL on failure. */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}
1342
1343
1344
1345
1346
1347
1348
1349
1350
/* True if @len bytes starting at skb->data can be modified in place:
 * the header is not shared with a clone and the region lies within the
 * writable header area (hdr_len).
 */
static inline int skb_clone_writable(struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

/* Reallocate the header if @cloned, or if headroom is below @headroom
 * (raised to at least NET_SKB_PAD).  0 on success, negative errno
 * otherwise.
 */
static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom < NET_SKB_PAD)
		headroom = NET_SKB_PAD;
	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/* Copy-on-write: make the buffer private and ensure @headroom bytes of
 * headroom for header modification.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/* As skb_cow(), but only copies when the header itself is shared. */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416static inline int skb_padto(struct sk_buff *skb, unsigned int len)
1417{
1418 unsigned int size = skb->len;
1419 if (likely(size >= len))
1420 return 0;
1421 return skb_pad(skb, len-size);
1422}
1423
/* Copy @copy bytes from user space to the tail of @skb, folding the
 * bytes into skb->csum when ip_summed is CHECKSUM_NONE.  On fault the
 * skb is trimmed back to its original length and -EFAULT is returned.
 */
static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
							    copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

/* True if (@page, @off) starts exactly where fragment i-1 ends, so the
 * new data can be merged into that fragment instead of using a slot.
 */
static inline int skb_can_coalesce(struct sk_buff *skb, int i,
				   struct page *page, int off)
{
	if (i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == frag->page &&
		       off == frag->page_offset + frag->size;
	}
	return 0;
}

/* Pull all fragmented data into the linear area. */
static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/* Convert the skb to purely linear data; 0 on success, -ENOMEM if the
 * linear area could not be grown.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/* Linearize, also making the data private when the skb is cloned. */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
/* After pulling @len bytes starting at @start out of the packet,
 * subtract their partial checksum from skb->csum so that a
 * CHECKSUM_COMPLETE value stays correct.
 */
static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}

unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/* Trim to @len bytes, downgrading a now-stale CHECKSUM_COMPLETE to
 * CHECKSUM_NONE; 0 on success, negative errno otherwise.
 */
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

/* Iterate over a queue; @skb must not be removed inside the loop body. */
#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;					\
		     prefetch(skb->next), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->next)

/* Queue iteration that is safe against removal of the current @skb. */
#define skb_queue_walk_safe(queue, skb, tmp)					\
		for (skb = (queue)->next, tmp = skb->next;			\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

/* Iterate over a queue from the tail towards the head. */
#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;					\
		     prefetch(skb->prev), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->prev)
1538
1539
1540extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
1541 int noblock, int *err);
1542extern unsigned int datagram_poll(struct file *file, struct socket *sock,
1543 struct poll_table_struct *wait);
1544extern int skb_copy_datagram_iovec(const struct sk_buff *from,
1545 int offset, struct iovec *to,
1546 int size);
1547extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
1548 int hlen,
1549 struct iovec *iov);
1550extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
1551extern void skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
1552 unsigned int flags);
1553extern __wsum skb_checksum(const struct sk_buff *skb, int offset,
1554 int len, __wsum csum);
1555extern int skb_copy_bits(const struct sk_buff *skb, int offset,
1556 void *to, int len);
1557extern int skb_store_bits(struct sk_buff *skb, int offset,
1558 const void *from, int len);
1559extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb,
1560 int offset, u8 *to, int len,
1561 __wsum csum);
1562extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
1563extern void skb_split(struct sk_buff *skb,
1564 struct sk_buff *skb1, const u32 len);
1565
1566extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
1567
/* Return a pointer to @len bytes at @offset: directly into the linear
 * area when possible, otherwise copied into @buffer.  NULL if the skb
 * is too short.
 */
static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
{
	int hlen = skb_headlen(skb);

	if (hlen - offset >= len)
		return skb->data + offset;

	if (skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}

/* Copy @len bytes out of the linear area, starting at skb->data. */
static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

/* Copy @len bytes out of the linear area, starting at skb->data + @offset. */
static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

/* Copy @len bytes into the linear area, starting at skb->data. */
static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

/* Copy @len bytes into the linear area, starting at skb->data + @offset. */
static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}
1610
1611extern void skb_init(void);
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
/* Report the skb's timestamp as a struct timeval. */
static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *stamp)
{
	*stamp = ktime_to_timeval(skb->tstamp);
}

/* Stamp the skb with the current wall-clock time. */
static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

/* Time elapsed since @t, measured against the wall clock. */
static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

/* The zero ktime serves as the "no timestamp" sentinel. */
static inline ktime_t net_invalid_timestamp(void)
{
	return ktime_set(0, 0);
}

extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
extern __sum16 __skb_checksum_complete(struct sk_buff *skb);

/* Nonzero when no further software checksum verification is needed
 * (UNNECESSARY or PARTIAL; both have the low bit set).
 */
static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return skb->ip_summed & CHECKSUM_UNNECESSARY;
}
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
/* Validate the packet checksum in software unless hardware already
 * verified it; returns 0 when the checksum is OK.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
/* Drop a conntrack reference, destroying the entry on the last put. */
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
/* Take a conntrack reference (NULL-safe). */
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
/* Reference counting for the conntrack reassembly skb (NULL-safe). */
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
	if (skb)
		atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
/* Drop a bridge-netfilter reference, freeing the state on the last put. */
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif
/* Release and clear all netfilter state attached to @skb. */
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
	nf_conntrack_put_reasm(skb->nfct_reasm);
	skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}
1720
1721
/* Copy netfilter state from @src to @dst, taking new references.
 * Assumes @dst holds no prior references; nf_copy() handles that case.
 */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->nfct = src->nfct;
	nf_conntrack_get(src->nfct);
	dst->nfctinfo = src->nfctinfo;
	dst->nfct_reasm = src->nfct_reasm;
	nf_conntrack_get_reasm(src->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	dst->nf_bridge = src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
}

/* As __nf_copy(), but first drops @dst's existing references. */
static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(dst->nfct);
	nf_conntrack_put_reasm(dst->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src);
}

#ifdef CONFIG_NETWORK_SECMARK
/* Propagate the security mark from @from to @to. */
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
/* No-op stubs when CONFIG_NETWORK_SECMARK is disabled. */
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif
1766
/* Record the selected TX queue (no-op without multiqueue support). */
static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	skb->queue_mapping = queue_mapping;
#endif
}

/* Selected TX queue, or 0 without multiqueue support. */
static inline u16 skb_get_queue_mapping(struct sk_buff *skb)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	return skb->queue_mapping;
#else
	return 0;
#endif
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	to->queue_mapping = from->queue_mapping;
#endif
}

/* Nonzero if the skb carries segmentation-offload (GSO) data. */
static inline int skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/* Nonzero if the skb carries TCPv6 GSO data. */
static inline int skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

/* An RX-path CHECKSUM_COMPLETE value has no meaning once the packet is
 * forwarded for transmit, so downgrade it to CHECKSUM_NONE.
 */
static inline void skb_forward_csum(struct sk_buff *skb)
{

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}
1806
1807#endif
1808#endif
1809