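/*
 * AF_PACKET (raw packet) socket support: TPACKET ring buffers (v1/v2/v3),
 * fanout demultiplexing and the packet_rcv()/tpacket_rcv() receive paths.
 */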
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/percpu.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
#include <linux/bpf.h>
#include <net/compat.h>

#include "internal.h"
156struct packet_mreq_max {
157 int mr_ifindex;
158 unsigned short mr_type;
159 unsigned short mr_alen;
160 unsigned char mr_address[MAX_ADDR_LEN];
161};
162
163union tpacket_uhdr {
164 struct tpacket_hdr *h1;
165 struct tpacket2_hdr *h2;
166 struct tpacket3_hdr *h3;
167 void *raw;
168};
169
170static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
171 int closing, int tx_ring);
172
173#define V3_ALIGNMENT (8)
174
175#define BLK_HDR_LEN (ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
176
177#define BLK_PLUS_PRIV(sz_of_priv) \
178 (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
179
180#define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status)
181#define BLOCK_NUM_PKTS(x) ((x)->hdr.bh1.num_pkts)
182#define BLOCK_O2FP(x) ((x)->hdr.bh1.offset_to_first_pkt)
183#define BLOCK_LEN(x) ((x)->hdr.bh1.blk_len)
184#define BLOCK_SNUM(x) ((x)->hdr.bh1.seq_num)
185#define BLOCK_O2PRIV(x) ((x)->offset_to_priv)
186#define BLOCK_PRIV(x) ((void *)((char *)(x) + BLOCK_O2PRIV(x)))
187
188struct packet_sock;
189static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
190 struct packet_type *pt, struct net_device *orig_dev);
191
192static void *packet_previous_frame(struct packet_sock *po,
193 struct packet_ring_buffer *rb,
194 int status);
195static void packet_increment_head(struct packet_ring_buffer *buff);
196static int prb_curr_blk_in_use(struct tpacket_block_desc *);
197static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
198 struct packet_sock *);
199static void prb_retire_current_block(struct tpacket_kbdq_core *,
200 struct packet_sock *, unsigned int status);
201static int prb_queue_frozen(struct tpacket_kbdq_core *);
202static void prb_open_block(struct tpacket_kbdq_core *,
203 struct tpacket_block_desc *);
204static void prb_retire_rx_blk_timer_expired(struct timer_list *);
205static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
206static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
207static void prb_clear_rxhash(struct tpacket_kbdq_core *,
208 struct tpacket3_hdr *);
209static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
210 struct tpacket3_hdr *);
211static void packet_flush_mclist(struct sock *sk);
212static u16 packet_pick_tx_queue(struct sk_buff *skb);
213
214struct packet_skb_cb {
215 union {
216 struct sockaddr_pkt pkt;
217 union {
			/* Trick: alias the skb's original length with
			 * ll.sll_family and ll.sll_protocol in order
			 * to save room.
			 */
222 unsigned int origlen;
223 struct sockaddr_ll ll;
224 };
225 } sa;
226};
227
228#define vio_le() virtio_legacy_is_little_endian()
229
230#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
231
232#define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
233#define GET_PBLOCK_DESC(x, bid) \
234 ((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
235#define GET_CURR_PBLOCK_DESC_FROM_CORE(x) \
236 ((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
237#define GET_NEXT_PRB_BLK_NUM(x) \
238 (((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
239 ((x)->kactive_blk_num+1) : 0)
240
241static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
242static void __fanout_link(struct sock *sk, struct packet_sock *po);
243
244static int packet_direct_xmit(struct sk_buff *skb)
245{
246 return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
247}
248
249static struct net_device *packet_cached_dev_get(struct packet_sock *po)
250{
251 struct net_device *dev;
252
253 rcu_read_lock();
254 dev = rcu_dereference(po->cached_dev);
255 if (likely(dev))
256 dev_hold(dev);
257 rcu_read_unlock();
258
259 return dev;
260}
261
262static void packet_cached_dev_assign(struct packet_sock *po,
263 struct net_device *dev)
264{
265 rcu_assign_pointer(po->cached_dev, dev);
266}
267
268static void packet_cached_dev_reset(struct packet_sock *po)
269{
270 RCU_INIT_POINTER(po->cached_dev, NULL);
271}
272
273static bool packet_use_direct_xmit(const struct packet_sock *po)
274{
275 return po->xmit == packet_direct_xmit;
276}
277
278static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
279 struct net_device *sb_dev)
280{
281 return dev_pick_tx_cpu_id(dev, skb, sb_dev, NULL);
282}
283
284static u16 packet_pick_tx_queue(struct sk_buff *skb)
285{
286 struct net_device *dev = skb->dev;
287 const struct net_device_ops *ops = dev->netdev_ops;
288 u16 queue_index;
289
290 if (ops->ndo_select_queue) {
291 queue_index = ops->ndo_select_queue(dev, skb, NULL,
292 __packet_pick_tx_queue);
293 queue_index = netdev_cap_txqueue(dev, queue_index);
294 } else {
295 queue_index = __packet_pick_tx_queue(dev, skb, NULL);
296 }
297
298 return queue_index;
299}

/* __register_prot_hook() must be invoked through register_prot_hook() or from
 * a context in which asynchronous accesses to the packet socket are not
 * possible (packet_create()).
 */
305static void __register_prot_hook(struct sock *sk)
306{
307 struct packet_sock *po = pkt_sk(sk);
308
309 if (!po->running) {
310 if (po->fanout)
311 __fanout_link(sk, po);
312 else
313 dev_add_pack(&po->prot_hook);
314
315 sock_hold(sk);
316 po->running = 1;
317 }
318}
319
320static void register_prot_hook(struct sock *sk)
321{
322 lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
323 __register_prot_hook(sk);
324}
325

/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock held.
 * If the sync parameter is true, we temporarily drop the bind_lock and do a
 * synchronize_net() to make sure no asynchronous packet processing paths
 * still refer to the elements of po->prot_hook.  If sync is false, it is the
 * caller's responsibility to take care of this.
 */
332static void __unregister_prot_hook(struct sock *sk, bool sync)
333{
334 struct packet_sock *po = pkt_sk(sk);
335
336 lockdep_assert_held_once(&po->bind_lock);
337
338 po->running = 0;
339
340 if (po->fanout)
341 __fanout_unlink(sk, po);
342 else
343 __dev_remove_pack(&po->prot_hook);
344
345 __sock_put(sk);
346
347 if (sync) {
348 spin_unlock(&po->bind_lock);
349 synchronize_net();
350 spin_lock(&po->bind_lock);
351 }
352}
353
354static void unregister_prot_hook(struct sock *sk, bool sync)
355{
356 struct packet_sock *po = pkt_sk(sk);
357
358 if (po->running)
359 __unregister_prot_hook(sk, sync);
360}
361
362static inline struct page * __pure pgv_to_page(void *addr)
363{
364 if (is_vmalloc_addr(addr))
365 return vmalloc_to_page(addr);
366 return virt_to_page(addr);
367}
368
369static void __packet_set_status(struct packet_sock *po, void *frame, int status)
370{
371 union tpacket_uhdr h;
372
373 h.raw = frame;
374 switch (po->tp_version) {
375 case TPACKET_V1:
376 h.h1->tp_status = status;
377 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
378 break;
379 case TPACKET_V2:
380 h.h2->tp_status = status;
381 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
382 break;
383 case TPACKET_V3:
384 h.h3->tp_status = status;
385 flush_dcache_page(pgv_to_page(&h.h3->tp_status));
386 break;
387 default:
388 WARN(1, "TPACKET version not supported.\n");
389 BUG();
390 }
391
392 smp_wmb();
393}
394
395static int __packet_get_status(struct packet_sock *po, void *frame)
396{
397 union tpacket_uhdr h;
398
399 smp_rmb();
400
401 h.raw = frame;
402 switch (po->tp_version) {
403 case TPACKET_V1:
404 flush_dcache_page(pgv_to_page(&h.h1->tp_status));
405 return h.h1->tp_status;
406 case TPACKET_V2:
407 flush_dcache_page(pgv_to_page(&h.h2->tp_status));
408 return h.h2->tp_status;
409 case TPACKET_V3:
410 flush_dcache_page(pgv_to_page(&h.h3->tp_status));
411 return h.h3->tp_status;
412 default:
413 WARN(1, "TPACKET version not supported.\n");
414 BUG();
415 return 0;
416 }
417}
418
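/* Return which TP_STATUS_TS_* flag describes the timestamp written to *ts:
 * raw hardware if requested and available, otherwise software, or 0 if the
 * skb carries no usable timestamp.
 */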
419static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
420 unsigned int flags)
421{
422 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
423
424 if (shhwtstamps &&
425 (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
426 ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
427 return TP_STATUS_TS_RAW_HARDWARE;
428
429 if (ktime_to_timespec_cond(skb->tstamp, ts))
430 return TP_STATUS_TS_SOFTWARE;
431
432 return 0;
433}
434
435static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
436 struct sk_buff *skb)
437{
438 union tpacket_uhdr h;
439 struct timespec ts;
440 __u32 ts_status;
441
442 if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
443 return 0;
444
445 h.raw = frame;
446 switch (po->tp_version) {
447 case TPACKET_V1:
448 h.h1->tp_sec = ts.tv_sec;
449 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
450 break;
451 case TPACKET_V2:
452 h.h2->tp_sec = ts.tv_sec;
453 h.h2->tp_nsec = ts.tv_nsec;
454 break;
455 case TPACKET_V3:
456 h.h3->tp_sec = ts.tv_sec;
457 h.h3->tp_nsec = ts.tv_nsec;
458 break;
459 default:
460 WARN(1, "TPACKET version not supported.\n");
461 BUG();
462 }

	/* one flush is safe, as both fields always lie on the same cacheline */
465 flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
466 smp_wmb();
467
468 return ts_status;
469}
470
471static void *packet_lookup_frame(struct packet_sock *po,
472 struct packet_ring_buffer *rb,
473 unsigned int position,
474 int status)
475{
476 unsigned int pg_vec_pos, frame_offset;
477 union tpacket_uhdr h;
478
479 pg_vec_pos = position / rb->frames_per_block;
480 frame_offset = position % rb->frames_per_block;
481
482 h.raw = rb->pg_vec[pg_vec_pos].buffer +
483 (frame_offset * rb->frame_size);
484
485 if (status != __packet_get_status(po, h.raw))
486 return NULL;
487
488 return h.raw;
489}
490
491static void *packet_current_frame(struct packet_sock *po,
492 struct packet_ring_buffer *rb,
493 int status)
494{
495 return packet_lookup_frame(po, rb, rb->head, status);
496}
497
498static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
499{
500 del_timer_sync(&pkc->retire_blk_timer);
501}
502
503static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
504 struct sk_buff_head *rb_queue)
505{
506 struct tpacket_kbdq_core *pkc;
507
508 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
509
510 spin_lock_bh(&rb_queue->lock);
511 pkc->delete_blk_timer = 1;
512 spin_unlock_bh(&rb_queue->lock);
513
514 prb_del_retire_blk_timer(pkc);
515}
516
517static void prb_setup_retire_blk_timer(struct packet_sock *po)
518{
519 struct tpacket_kbdq_core *pkc;
520
521 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
522 timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
523 0);
524 pkc->retire_blk_timer.expires = jiffies;
525}
526
527static int prb_calc_retire_blk_tmo(struct packet_sock *po,
528 int blk_size_in_bytes)
529{
530 struct net_device *dev;
531 unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
532 struct ethtool_link_ksettings ecmd;
533 int err;
534
535 rtnl_lock();
536 dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
537 if (unlikely(!dev)) {
538 rtnl_unlock();
539 return DEFAULT_PRB_RETIRE_TOV;
540 }
541 err = __ethtool_get_link_ksettings(dev, &ecmd);
542 rtnl_unlock();
	if (err)
		return DEFAULT_PRB_RETIRE_TOV;

	/* Below 1Gbps (or if the speed is unknown) the default timeout is
	 * good enough; otherwise scale the block timeout with link speed.
	 */
	if (ecmd.base.speed < SPEED_1000 ||
	    ecmd.base.speed == SPEED_UNKNOWN)
		return DEFAULT_PRB_RETIRE_TOV;

	msec = 1;
	div = ecmd.base.speed / 1000;
556
557 mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
558
559 if (div)
560 mbits /= div;
561
562 tmo = mbits * msec;
563
564 if (div)
565 return tmo+1;
566 return tmo;
567}
568
569static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
570 union tpacket_req_u *req_u)
571{
572 p1->feature_req_word = req_u->req3.tp_feature_req_word;
573}
574
575static void init_prb_bdqc(struct packet_sock *po,
576 struct packet_ring_buffer *rb,
577 struct pgv *pg_vec,
578 union tpacket_req_u *req_u)
579{
580 struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
581 struct tpacket_block_desc *pbd;
582
583 memset(p1, 0x0, sizeof(*p1));
584
585 p1->knxt_seq_num = 1;
586 p1->pkbdq = pg_vec;
587 pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
588 p1->pkblk_start = pg_vec[0].buffer;
589 p1->kblk_size = req_u->req3.tp_block_size;
590 p1->knum_blocks = req_u->req3.tp_block_nr;
591 p1->hdrlen = po->tp_hdrlen;
592 p1->version = po->tp_version;
593 p1->last_kactive_blk_num = 0;
594 po->stats.stats3.tp_freeze_q_cnt = 0;
595 if (req_u->req3.tp_retire_blk_tov)
596 p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
597 else
598 p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
599 req_u->req3.tp_block_size);
600 p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
601 p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
602
603 p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
604 prb_init_ft_ops(p1, req_u);
605 prb_setup_retire_blk_timer(po);
606 prb_open_block(p1, pbd);
607}

/* Re-arm the retire timer, then record which block was active at refresh
 * time so the timer callback can tell whether the ring has advanced since.
 */
612static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
613{
614 mod_timer(&pkc->retire_blk_timer,
615 jiffies + pkc->tov_in_jiffies);
616 pkc->last_kactive_blk_num = pkc->kactive_blk_num;
617}

/*
 * Block-retire timer callback.
 *
 * If the active block has not changed since the last refresh and it already
 * holds packets, retire it (hand it to user space) and try to open the next
 * block.  If the queue is frozen because user space still owns that block,
 * keep waiting; once user space releases it, re-open it and thaw the queue.
 * Empty blocks are simply left open and the timer is re-armed.
 */
642static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
643{
644 struct packet_sock *po =
645 from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
646 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
647 unsigned int frozen;
648 struct tpacket_block_desc *pbd;
649
650 spin_lock(&po->sk.sk_receive_queue.lock);
651
652 frozen = prb_queue_frozen(pkc);
653 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
654
655 if (unlikely(pkc->delete_blk_timer))
656 goto out;
	/* We only need to plug the race when the block is partially filled:
	 * tpacket_rcv() bumps blk_fill_in_prog while it copies bits into the
	 * block, so if this timer fires on another CPU we must wait for that
	 * copy to finish before the block can be closed.
	 */
667 if (BLOCK_NUM_PKTS(pbd)) {
668 while (atomic_read(&pkc->blk_fill_in_prog)) {
669
670 cpu_relax();
671 }
672 }
673
674 if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
675 if (!frozen) {
676 if (!BLOCK_NUM_PKTS(pbd)) {
677
678 goto refresh_timer;
679 }
680 prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
681 if (!prb_dispatch_next_block(pkc, po))
682 goto refresh_timer;
683 else
684 goto out;
685 } else {
686
687
688
689 if (prb_curr_blk_in_use(pbd)) {
690
691
692
693
694 goto refresh_timer;
695 } else {
696
697
698
699
700
701
702
703 prb_open_block(pkc, pbd);
704 goto out;
705 }
706 }
707 }
708
709refresh_timer:
710 _prb_refresh_rx_retire_blk_timer(pkc);
711
712out:
713 spin_unlock(&po->sk.sk_receive_queue.lock);
714}
715
716static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
717 struct tpacket_block_desc *pbd1, __u32 status)
718{
719
720
721#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
722 u8 *start, *end;
723
724 start = (u8 *)pbd1;
725
726
727 start += PAGE_SIZE;
728
729 end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
730 for (; start < end; start += PAGE_SIZE)
731 flush_dcache_page(pgv_to_page(start));
732
733 smp_wmb();
734#endif
735
736
737
738 BLOCK_STATUS(pbd1) = status;
739
740
741
742#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
743 start = (u8 *)pbd1;
744 flush_dcache_page(pgv_to_page(start));
745
746 smp_wmb();
747#endif
748}

/*
 * Close the currently active block: record the timestamp of the last packet
 * (or the current time if the block is empty), mark the block TP_STATUS_USER,
 * flush it out to user space, wake the socket and advance to the next block
 * number.
 */
759static void prb_close_block(struct tpacket_kbdq_core *pkc1,
760 struct tpacket_block_desc *pbd1,
761 struct packet_sock *po, unsigned int stat)
762{
763 __u32 status = TP_STATUS_USER | stat;
764
765 struct tpacket3_hdr *last_pkt;
766 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
767 struct sock *sk = &po->sk;
768
769 if (po->stats.stats3.tp_drops)
770 status |= TP_STATUS_LOSING;
771
772 last_pkt = (struct tpacket3_hdr *)pkc1->prev;
773 last_pkt->tp_next_offset = 0;
774
775
776 if (BLOCK_NUM_PKTS(pbd1)) {
777 h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
778 h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
779 } else {
780
781
782
783
784
785 struct timespec ts;
786 getnstimeofday(&ts);
787 h1->ts_last_pkt.ts_sec = ts.tv_sec;
788 h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
789 }
790
791 smp_wmb();
792
793
794 prb_flush_block(pkc1, pbd1, status);
795
796 sk->sk_data_ready(sk);
797
798 pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
799}
800
801static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
802{
803 pkc->reset_pending_on_curr_blk = 0;
804}

/*
 * Open a block for kernel filling: give it the next sequence number, reset
 * its packet count and length, stamp the open time, and point nxt_offset at
 * the first frame slot past the block header and private area.  Opening a
 * block also thaws the queue and re-arms the retire timer.
 */
813static void prb_open_block(struct tpacket_kbdq_core *pkc1,
814 struct tpacket_block_desc *pbd1)
815{
816 struct timespec ts;
817 struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
818
819 smp_rmb();
820
821
822
823
824
825 BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
826 BLOCK_NUM_PKTS(pbd1) = 0;
827 BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
828
829 getnstimeofday(&ts);
830
831 h1->ts_first_pkt.ts_sec = ts.tv_sec;
832 h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
833
834 pkc1->pkblk_start = (char *)pbd1;
835 pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
836
837 BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
838 BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
839
840 pbd1->version = pkc1->version;
841 pkc1->prev = pkc1->nxt_offset;
842 pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
843
844 prb_thaw_queue(pkc1);
845 _prb_refresh_rx_retire_blk_timer(pkc1);
846
847 smp_wmb();
848}

/*
 * Queue freezing: if the block we want to dispatch next is still owned by
 * user space (TP_STATUS_USER), we must not overwrite it.  We freeze the
 * queue instead and drop packets until user space returns the block; the
 * tp_freeze_q_cnt statistic counts how often this happens.  The queue is
 * thawed again from prb_open_block().
 */
873static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
874 struct packet_sock *po)
875{
876 pkc->reset_pending_on_curr_blk = 1;
877 po->stats.stats3.tp_freeze_q_cnt++;
878}
879
880#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * Hand out the next block for kernel filling, or freeze the queue if user
 * space still owns it.  Caller holds the sk_receive_queue lock.
 */
888static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
889 struct packet_sock *po)
890{
891 struct tpacket_block_desc *pbd;
892
893 smp_rmb();
894
895
896 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
897
898
899 if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
900 prb_freeze_queue(pkc, po);
901 return NULL;
902 }
903
904
905
906
907
908
909 prb_open_block(pkc, pbd);
910 return (void *)pkc->nxt_offset;
911}
912
913static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
914 struct packet_sock *po, unsigned int status)
915{
916 struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
917
918
919 if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/* If the block timed out (TP_STATUS_BLK_TMO) the timer callback
		 * already waited for in-flight writers; otherwise spin until
		 * every tpacket_rcv() still copying into this block
		 * (blk_fill_in_prog) has finished.
		 */
929 if (!(status & TP_STATUS_BLK_TMO)) {
930 while (atomic_read(&pkc->blk_fill_in_prog)) {
931
932 cpu_relax();
933 }
934 }
935 prb_close_block(pkc, pbd, po, status);
936 return;
937 }
938}
939
940static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
941{
942 return TP_STATUS_USER & BLOCK_STATUS(pbd);
943}
944
945static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
946{
947 return pkc->reset_pending_on_curr_blk;
948}
949
950static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
951{
952 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
953 atomic_dec(&pkc->blk_fill_in_prog);
954}
955
956static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
957 struct tpacket3_hdr *ppd)
958{
959 ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
960}
961
962static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
963 struct tpacket3_hdr *ppd)
964{
965 ppd->hv1.tp_rxhash = 0;
966}
967
968static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
969 struct tpacket3_hdr *ppd)
970{
971 if (skb_vlan_tag_present(pkc->skb)) {
972 ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
973 ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
974 ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
975 } else {
976 ppd->hv1.tp_vlan_tci = 0;
977 ppd->hv1.tp_vlan_tpid = 0;
978 ppd->tp_status = TP_STATUS_AVAILABLE;
979 }
980}
981
982static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
983 struct tpacket3_hdr *ppd)
984{
985 ppd->hv1.tp_padding = 0;
986 prb_fill_vlan_info(pkc, ppd);
987
988 if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
989 prb_fill_rxhash(pkc, ppd);
990 else
991 prb_clear_rxhash(pkc, ppd);
992}
993
994static void prb_fill_curr_block(char *curr,
995 struct tpacket_kbdq_core *pkc,
996 struct tpacket_block_desc *pbd,
997 unsigned int len)
998{
999 struct tpacket3_hdr *ppd;
1000
1001 ppd = (struct tpacket3_hdr *)curr;
1002 ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
1003 pkc->prev = curr;
1004 pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
1005 BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
1006 BLOCK_NUM_PKTS(pbd) += 1;
1007 atomic_inc(&pkc->blk_fill_in_prog);
1008 prb_run_all_ft_ops(pkc, ppd);
1009}
1010
1011
1012static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1013 struct sk_buff *skb,
1014 int status,
1015 unsigned int len
1016 )
1017{
1018 struct tpacket_kbdq_core *pkc;
1019 struct tpacket_block_desc *pbd;
1020 char *curr, *end;
1021
1022 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
1023 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1024
1025
1026 if (prb_queue_frozen(pkc)) {
1027
1028
1029
1030
1031 if (prb_curr_blk_in_use(pbd)) {
1032
1033 return NULL;
1034 } else {
1035
1036
1037
1038
1039
1040
1041 prb_open_block(pkc, pbd);
1042 }
1043 }
1044
1045 smp_mb();
1046 curr = pkc->nxt_offset;
1047 pkc->skb = skb;
1048 end = (char *)pbd + pkc->kblk_size;
1049
1050
1051 if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1052 prb_fill_curr_block(curr, pkc, pbd, len);
1053 return (void *)curr;
1054 }
1055
1056
1057 prb_retire_current_block(pkc, po, 0);
1058
1059
1060 curr = (char *)prb_dispatch_next_block(pkc, po);
1061 if (curr) {
1062 pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1063 prb_fill_curr_block(curr, pkc, pbd, len);
1064 return (void *)curr;
1065 }
	/*
	 * No free blocks are available: user space has not caught up yet, so
	 * the queue stays frozen and the caller drops this packet.
	 */
1071 return NULL;
1072}
1073
1074static void *packet_current_rx_frame(struct packet_sock *po,
1075 struct sk_buff *skb,
1076 int status, unsigned int len)
1077{
1078 char *curr = NULL;
1079 switch (po->tp_version) {
1080 case TPACKET_V1:
1081 case TPACKET_V2:
1082 curr = packet_lookup_frame(po, &po->rx_ring,
1083 po->rx_ring.head, status);
1084 return curr;
1085 case TPACKET_V3:
1086 return __packet_lookup_frame_in_block(po, skb, status, len);
1087 default:
1088 WARN(1, "TPACKET version not supported\n");
1089 BUG();
1090 return NULL;
1091 }
1092}
1093
1094static void *prb_lookup_block(struct packet_sock *po,
1095 struct packet_ring_buffer *rb,
1096 unsigned int idx,
1097 int status)
1098{
1099 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
1100 struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
1101
1102 if (status != BLOCK_STATUS(pbd))
1103 return NULL;
1104 return pbd;
1105}
1106
1107static int prb_previous_blk_num(struct packet_ring_buffer *rb)
1108{
1109 unsigned int prev;
1110 if (rb->prb_bdqc.kactive_blk_num)
1111 prev = rb->prb_bdqc.kactive_blk_num-1;
1112 else
1113 prev = rb->prb_bdqc.knum_blocks-1;
1114 return prev;
1115}
1116
1117
1118static void *__prb_previous_block(struct packet_sock *po,
1119 struct packet_ring_buffer *rb,
1120 int status)
1121{
1122 unsigned int previous = prb_previous_blk_num(rb);
1123 return prb_lookup_block(po, rb, previous, status);
1124}
1125
1126static void *packet_previous_rx_frame(struct packet_sock *po,
1127 struct packet_ring_buffer *rb,
1128 int status)
1129{
1130 if (po->tp_version <= TPACKET_V2)
1131 return packet_previous_frame(po, rb, status);
1132
1133 return __prb_previous_block(po, rb, status);
1134}
1135
1136static void packet_increment_rx_head(struct packet_sock *po,
1137 struct packet_ring_buffer *rb)
1138{
1139 switch (po->tp_version) {
1140 case TPACKET_V1:
1141 case TPACKET_V2:
1142 return packet_increment_head(rb);
1143 case TPACKET_V3:
1144 default:
1145 WARN(1, "TPACKET version not supported.\n");
1146 BUG();
1147 return;
1148 }
1149}
1150
1151static void *packet_previous_frame(struct packet_sock *po,
1152 struct packet_ring_buffer *rb,
1153 int status)
1154{
1155 unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1156 return packet_lookup_frame(po, rb, previous, status);
1157}
1158
1159static void packet_increment_head(struct packet_ring_buffer *buff)
1160{
1161 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1162}
1163
1164static void packet_inc_pending(struct packet_ring_buffer *rb)
1165{
1166 this_cpu_inc(*rb->pending_refcnt);
1167}
1168
1169static void packet_dec_pending(struct packet_ring_buffer *rb)
1170{
1171 this_cpu_dec(*rb->pending_refcnt);
1172}
1173
1174static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
1175{
1176 unsigned int refcnt = 0;
1177 int cpu;

	/* Only the tx ring uses a pending refcount; the rx ring has none. */
1180 if (rb->pending_refcnt == NULL)
1181 return 0;
1182
1183 for_each_possible_cpu(cpu)
1184 refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
1185
1186 return refcnt;
1187}
1188
1189static int packet_alloc_pending(struct packet_sock *po)
1190{
1191 po->rx_ring.pending_refcnt = NULL;
1192
1193 po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
1194 if (unlikely(po->tx_ring.pending_refcnt == NULL))
1195 return -ENOBUFS;
1196
1197 return 0;
1198}
1199
1200static void packet_free_pending(struct packet_sock *po)
1201{
1202 free_percpu(po->tx_ring.pending_refcnt);
1203}
1204
1205#define ROOM_POW_OFF 2
1206#define ROOM_NONE 0x0
1207#define ROOM_LOW 0x1
1208#define ROOM_NORMAL 0x2
1209
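/* Ring "room" probes: with pow_off == ROOM_POW_OFF we check the slot a
 * quarter of the ring ahead of head (plenty of room => ROOM_NORMAL); with
 * pow_off == 0 we only check the very next slot (ROOM_LOW).
 */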
1210static bool __tpacket_has_room(struct packet_sock *po, int pow_off)
1211{
1212 int idx, len;
1213
1214 len = po->rx_ring.frame_max + 1;
1215 idx = po->rx_ring.head;
1216 if (pow_off)
1217 idx += len >> pow_off;
1218 if (idx >= len)
1219 idx -= len;
1220 return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1221}
1222
1223static bool __tpacket_v3_has_room(struct packet_sock *po, int pow_off)
1224{
1225 int idx, len;
1226
1227 len = po->rx_ring.prb_bdqc.knum_blocks;
1228 idx = po->rx_ring.prb_bdqc.kactive_blk_num;
1229 if (pow_off)
1230 idx += len >> pow_off;
1231 if (idx >= len)
1232 idx -= len;
1233 return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1234}
1235
1236static int __packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1237{
1238 struct sock *sk = &po->sk;
1239 int ret = ROOM_NONE;
1240
1241 if (po->prot_hook.func != tpacket_rcv) {
1242 int avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc)
1243 - (skb ? skb->truesize : 0);
1244 if (avail > (sk->sk_rcvbuf >> ROOM_POW_OFF))
1245 return ROOM_NORMAL;
1246 else if (avail > 0)
1247 return ROOM_LOW;
1248 else
1249 return ROOM_NONE;
1250 }
1251
1252 if (po->tp_version == TPACKET_V3) {
1253 if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
1254 ret = ROOM_NORMAL;
1255 else if (__tpacket_v3_has_room(po, 0))
1256 ret = ROOM_LOW;
1257 } else {
1258 if (__tpacket_has_room(po, ROOM_POW_OFF))
1259 ret = ROOM_NORMAL;
1260 else if (__tpacket_has_room(po, 0))
1261 ret = ROOM_LOW;
1262 }
1263
1264 return ret;
1265}
1266
1267static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1268{
1269 int ret;
1270 bool has_room;
1271
1272 spin_lock_bh(&po->sk.sk_receive_queue.lock);
1273 ret = __packet_rcv_has_room(po, skb);
1274 has_room = ret == ROOM_NORMAL;
1275 if (po->pressure == has_room)
1276 po->pressure = !has_room;
1277 spin_unlock_bh(&po->sk.sk_receive_queue.lock);
1278
1279 return ret;
1280}
1281
1282static void packet_sock_destruct(struct sock *sk)
1283{
1284 skb_queue_purge(&sk->sk_error_queue);
1285
1286 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1287 WARN_ON(refcount_read(&sk->sk_wmem_alloc));
1288
1289 if (!sock_flag(sk, SOCK_DEAD)) {
1290 pr_err("Attempt to release alive packet socket: %p\n", sk);
1291 return;
1292 }
1293
1294 sk_refcnt_debug_dec(sk);
1295}
1296
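/* A flow counts as "huge" if its rxhash fills more than half of the recent
 * rollover history; under ROOM_LOW only such huge flows are rolled over to
 * another socket, smaller flows stay put.
 */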
1297static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
1298{
1299 u32 rxhash;
1300 int i, count = 0;
1301
1302 rxhash = skb_get_hash(skb);
1303 for (i = 0; i < ROLLOVER_HLEN; i++)
1304 if (po->rollover->history[i] == rxhash)
1305 count++;
1306
1307 po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
1308 return count > (ROLLOVER_HLEN >> 1);
1309}
1310
1311static unsigned int fanout_demux_hash(struct packet_fanout *f,
1312 struct sk_buff *skb,
1313 unsigned int num)
1314{
1315 return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
1316}
1317
1318static unsigned int fanout_demux_lb(struct packet_fanout *f,
1319 struct sk_buff *skb,
1320 unsigned int num)
1321{
1322 unsigned int val = atomic_inc_return(&f->rr_cur);
1323
1324 return val % num;
1325}
1326
1327static unsigned int fanout_demux_cpu(struct packet_fanout *f,
1328 struct sk_buff *skb,
1329 unsigned int num)
1330{
1331 return smp_processor_id() % num;
1332}
1333
1334static unsigned int fanout_demux_rnd(struct packet_fanout *f,
1335 struct sk_buff *skb,
1336 unsigned int num)
1337{
1338 return prandom_u32_max(num);
1339}
1340
1341static unsigned int fanout_demux_rollover(struct packet_fanout *f,
1342 struct sk_buff *skb,
1343 unsigned int idx, bool try_self,
1344 unsigned int num)
1345{
1346 struct packet_sock *po, *po_next, *po_skip = NULL;
1347 unsigned int i, j, room = ROOM_NONE;
1348
1349 po = pkt_sk(f->arr[idx]);
1350
1351 if (try_self) {
1352 room = packet_rcv_has_room(po, skb);
1353 if (room == ROOM_NORMAL ||
1354 (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
1355 return idx;
1356 po_skip = po;
1357 }
1358
1359 i = j = min_t(int, po->rollover->sock, num - 1);
1360 do {
1361 po_next = pkt_sk(f->arr[i]);
1362 if (po_next != po_skip && !po_next->pressure &&
1363 packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
1364 if (i != j)
1365 po->rollover->sock = i;
1366 atomic_long_inc(&po->rollover->num);
1367 if (room == ROOM_LOW)
1368 atomic_long_inc(&po->rollover->num_huge);
1369 return i;
1370 }
1371
1372 if (++i == num)
1373 i = 0;
1374 } while (i != j);
1375
1376 atomic_long_inc(&po->rollover->num_failed);
1377 return idx;
1378}
1379
1380static unsigned int fanout_demux_qm(struct packet_fanout *f,
1381 struct sk_buff *skb,
1382 unsigned int num)
1383{
1384 return skb_get_queue_mapping(skb) % num;
1385}
1386
1387static unsigned int fanout_demux_bpf(struct packet_fanout *f,
1388 struct sk_buff *skb,
1389 unsigned int num)
1390{
1391 struct bpf_prog *prog;
1392 unsigned int ret = 0;
1393
1394 rcu_read_lock();
1395 prog = rcu_dereference(f->bpf_prog);
1396 if (prog)
1397 ret = bpf_prog_run_clear_cb(prog, skb) % num;
1398 rcu_read_unlock();
1399
1400 return ret;
1401}
1402
1403static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
1404{
1405 return f->flags & (flag >> 8);
1406}
1407
1408static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1409 struct packet_type *pt, struct net_device *orig_dev)
1410{
1411 struct packet_fanout *f = pt->af_packet_priv;
1412 unsigned int num = READ_ONCE(f->num_members);
1413 struct net *net = read_pnet(&f->net);
1414 struct packet_sock *po;
1415 unsigned int idx;
1416
1417 if (!net_eq(dev_net(dev), net) || !num) {
1418 kfree_skb(skb);
1419 return 0;
1420 }
1421
1422 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1423 skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
1424 if (!skb)
1425 return 0;
1426 }
1427 switch (f->type) {
1428 case PACKET_FANOUT_HASH:
1429 default:
1430 idx = fanout_demux_hash(f, skb, num);
1431 break;
1432 case PACKET_FANOUT_LB:
1433 idx = fanout_demux_lb(f, skb, num);
1434 break;
1435 case PACKET_FANOUT_CPU:
1436 idx = fanout_demux_cpu(f, skb, num);
1437 break;
1438 case PACKET_FANOUT_RND:
1439 idx = fanout_demux_rnd(f, skb, num);
1440 break;
1441 case PACKET_FANOUT_QM:
1442 idx = fanout_demux_qm(f, skb, num);
1443 break;
1444 case PACKET_FANOUT_ROLLOVER:
1445 idx = fanout_demux_rollover(f, skb, 0, false, num);
1446 break;
1447 case PACKET_FANOUT_CBPF:
1448 case PACKET_FANOUT_EBPF:
1449 idx = fanout_demux_bpf(f, skb, num);
1450 break;
1451 }
1452
1453 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
1454 idx = fanout_demux_rollover(f, skb, idx, true, num);
1455
1456 po = pkt_sk(f->arr[idx]);
1457 return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1458}
1459
1460DEFINE_MUTEX(fanout_mutex);
1461EXPORT_SYMBOL_GPL(fanout_mutex);
1462static LIST_HEAD(fanout_list);
1463static u16 fanout_next_id;
1464
1465static void __fanout_link(struct sock *sk, struct packet_sock *po)
1466{
1467 struct packet_fanout *f = po->fanout;
1468
1469 spin_lock(&f->lock);
1470 f->arr[f->num_members] = sk;
1471 smp_wmb();
1472 f->num_members++;
1473 if (f->num_members == 1)
1474 dev_add_pack(&f->prot_hook);
1475 spin_unlock(&f->lock);
1476}
1477
1478static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1479{
1480 struct packet_fanout *f = po->fanout;
1481 int i;
1482
1483 spin_lock(&f->lock);
1484 for (i = 0; i < f->num_members; i++) {
1485 if (f->arr[i] == sk)
1486 break;
1487 }
1488 BUG_ON(i >= f->num_members);
1489 f->arr[i] = f->arr[f->num_members - 1];
1490 f->num_members--;
1491 if (f->num_members == 0)
1492 __dev_remove_pack(&f->prot_hook);
1493 spin_unlock(&f->lock);
1494}
1495
1496static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1497{
1498 if (sk->sk_family != PF_PACKET)
1499 return false;
1500
1501 return ptype->af_packet_priv == pkt_sk(sk)->fanout;
1502}
1503
1504static void fanout_init_data(struct packet_fanout *f)
1505{
1506 switch (f->type) {
1507 case PACKET_FANOUT_LB:
1508 atomic_set(&f->rr_cur, 0);
1509 break;
1510 case PACKET_FANOUT_CBPF:
1511 case PACKET_FANOUT_EBPF:
1512 RCU_INIT_POINTER(f->bpf_prog, NULL);
1513 break;
1514 }
1515}
1516
1517static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
1518{
1519 struct bpf_prog *old;
1520
1521 spin_lock(&f->lock);
1522 old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
1523 rcu_assign_pointer(f->bpf_prog, new);
1524 spin_unlock(&f->lock);
1525
1526 if (old) {
1527 synchronize_net();
1528 bpf_prog_destroy(old);
1529 }
1530}
1531
1532static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data,
1533 unsigned int len)
1534{
1535 struct bpf_prog *new;
1536 struct sock_fprog fprog;
1537 int ret;
1538
1539 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1540 return -EPERM;
1541 if (len != sizeof(fprog))
1542 return -EINVAL;
1543 if (copy_from_user(&fprog, data, len))
1544 return -EFAULT;
1545
1546 ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
1547 if (ret)
1548 return ret;
1549
1550 __fanout_set_data_bpf(po->fanout, new);
1551 return 0;
1552}
1553
1554static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
1555 unsigned int len)
1556{
1557 struct bpf_prog *new;
1558 u32 fd;
1559
1560 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1561 return -EPERM;
1562 if (len != sizeof(fd))
1563 return -EINVAL;
1564 if (copy_from_user(&fd, data, len))
1565 return -EFAULT;
1566
1567 new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
1568 if (IS_ERR(new))
1569 return PTR_ERR(new);
1570
1571 __fanout_set_data_bpf(po->fanout, new);
1572 return 0;
1573}
1574
1575static int fanout_set_data(struct packet_sock *po, char __user *data,
1576 unsigned int len)
1577{
1578 switch (po->fanout->type) {
1579 case PACKET_FANOUT_CBPF:
1580 return fanout_set_data_cbpf(po, data, len);
1581 case PACKET_FANOUT_EBPF:
1582 return fanout_set_data_ebpf(po, data, len);
1583 default:
1584 return -EINVAL;
	}
1586}
1587
1588static void fanout_release_data(struct packet_fanout *f)
1589{
1590 switch (f->type) {
1591 case PACKET_FANOUT_CBPF:
1592 case PACKET_FANOUT_EBPF:
1593 __fanout_set_data_bpf(f, NULL);
	}
1595}
1596
1597static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
1598{
1599 struct packet_fanout *f;
1600
1601 list_for_each_entry(f, &fanout_list, list) {
1602 if (f->id == candidate_id &&
1603 read_pnet(&f->net) == sock_net(sk)) {
1604 return false;
1605 }
1606 }
1607 return true;
1608}
1609
1610static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
1611{
1612 u16 id = fanout_next_id;
1613
1614 do {
1615 if (__fanout_id_is_free(sk, id)) {
1616 *new_id = id;
1617 fanout_next_id = id + 1;
1618 return true;
1619 }
1620
1621 id++;
1622 } while (id != fanout_next_id);
1623
1624 return false;
1625}
1626
1627static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1628{
1629 struct packet_rollover *rollover = NULL;
1630 struct packet_sock *po = pkt_sk(sk);
1631 struct packet_fanout *f, *match;
1632 u8 type = type_flags & 0xff;
1633 u8 flags = type_flags >> 8;
1634 int err;
1635
1636 switch (type) {
1637 case PACKET_FANOUT_ROLLOVER:
1638 if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1639 return -EINVAL;
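		/* fall through */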
1640 case PACKET_FANOUT_HASH:
1641 case PACKET_FANOUT_LB:
1642 case PACKET_FANOUT_CPU:
1643 case PACKET_FANOUT_RND:
1644 case PACKET_FANOUT_QM:
1645 case PACKET_FANOUT_CBPF:
1646 case PACKET_FANOUT_EBPF:
1647 break;
1648 default:
1649 return -EINVAL;
1650 }
1651
1652 mutex_lock(&fanout_mutex);
1653
1654 err = -EALREADY;
1655 if (po->fanout)
1656 goto out;
1657
1658 if (type == PACKET_FANOUT_ROLLOVER ||
1659 (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
1660 err = -ENOMEM;
1661 rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
1662 if (!rollover)
1663 goto out;
1664 atomic_long_set(&rollover->num, 0);
1665 atomic_long_set(&rollover->num_huge, 0);
1666 atomic_long_set(&rollover->num_failed, 0);
1667 }
1668
1669 if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
1670 if (id != 0) {
1671 err = -EINVAL;
1672 goto out;
1673 }
1674 if (!fanout_find_new_id(sk, &id)) {
1675 err = -ENOMEM;
1676 goto out;
1677 }
1678
1679 flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
1680 }
1681
1682 match = NULL;
1683 list_for_each_entry(f, &fanout_list, list) {
1684 if (f->id == id &&
1685 read_pnet(&f->net) == sock_net(sk)) {
1686 match = f;
1687 break;
1688 }
1689 }
1690 err = -EINVAL;
1691 if (match && match->flags != flags)
1692 goto out;
1693 if (!match) {
1694 err = -ENOMEM;
1695 match = kzalloc(sizeof(*match), GFP_KERNEL);
1696 if (!match)
1697 goto out;
1698 write_pnet(&match->net, sock_net(sk));
1699 match->id = id;
1700 match->type = type;
1701 match->flags = flags;
1702 INIT_LIST_HEAD(&match->list);
1703 spin_lock_init(&match->lock);
1704 refcount_set(&match->sk_ref, 0);
1705 fanout_init_data(match);
1706 match->prot_hook.type = po->prot_hook.type;
1707 match->prot_hook.dev = po->prot_hook.dev;
1708 match->prot_hook.func = packet_rcv_fanout;
1709 match->prot_hook.af_packet_priv = match;
1710 match->prot_hook.id_match = match_fanout_group;
1711 list_add(&match->list, &fanout_list);
1712 }
1713 err = -EINVAL;
1714
1715 spin_lock(&po->bind_lock);
1716 if (po->running &&
1717 match->type == type &&
1718 match->prot_hook.type == po->prot_hook.type &&
1719 match->prot_hook.dev == po->prot_hook.dev) {
1720 err = -ENOSPC;
1721 if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
1722 __dev_remove_pack(&po->prot_hook);
1723 po->fanout = match;
1724 po->rollover = rollover;
1725 rollover = NULL;
1726 refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
1727 __fanout_link(sk, po);
1728 err = 0;
1729 }
1730 }
1731 spin_unlock(&po->bind_lock);
1732
1733 if (err && !refcount_read(&match->sk_ref)) {
1734 list_del(&match->list);
1735 kfree(match);
1736 }
1737
1738out:
1739 kfree(rollover);
1740 mutex_unlock(&fanout_mutex);
1741 return err;
1742}

/*
 * Remove the socket from its fanout group.  Returns the group if this was
 * the last member (so the caller can unregister its prot_hook and free it
 * once it is safe to do so), or NULL while other members remain.
 */
1749static struct packet_fanout *fanout_release(struct sock *sk)
1750{
1751 struct packet_sock *po = pkt_sk(sk);
1752 struct packet_fanout *f;
1753
1754 mutex_lock(&fanout_mutex);
1755 f = po->fanout;
1756 if (f) {
1757 po->fanout = NULL;
1758
1759 if (refcount_dec_and_test(&f->sk_ref))
1760 list_del(&f->list);
1761 else
1762 f = NULL;
1763 }
1764 mutex_unlock(&fanout_mutex);
1765
1766 return f;
1767}
1768
1769static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1770 struct sk_buff *skb)
1771{
1772
1773
1774
1775
1776 if (unlikely(dev->type != ARPHRD_ETHER))
1777 return false;
1778
1779 skb_reset_mac_header(skb);
1780 return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1781}
1782
1783static const struct proto_ops packet_ops;
1784
1785static const struct proto_ops packet_ops_spkt;
1786
1787static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1788 struct packet_type *pt, struct net_device *orig_dev)
1789{
1790 struct sock *sk;
1791 struct sockaddr_pkt *spkt;

	/* When the protocol hook was registered, the owning socket was stashed
	 * in af_packet_priv; recover it here.
	 */
	sk = pt->af_packet_priv;

	/* Loopback copies and frames from other network namespaces are
	 * ignored.  Everything else gets its link-layer header pushed back on
	 * and is queued, otherwise unmodified, on this SOCK_PACKET socket.
	 */
1811 if (skb->pkt_type == PACKET_LOOPBACK)
1812 goto out;
1813
1814 if (!net_eq(dev_net(dev), sock_net(sk)))
1815 goto out;
1816
1817 skb = skb_share_check(skb, GFP_ATOMIC);
1818 if (skb == NULL)
1819 goto oom;
1820
1821
1822 skb_dst_drop(skb);
1823
1824
1825 nf_reset(skb);
1826
1827 spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1828
1829 skb_push(skb, skb->data - skb_mac_header(skb));
1830
1831
1832
1833
1834
1835 spkt->spkt_family = dev->type;
1836 strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1837 spkt->spkt_protocol = skb->protocol;
1838
1839
1840
1841
1842
1843
1844 if (sock_queue_rcv_skb(sk, skb) == 0)
1845 return 0;
1846
1847out:
1848 kfree_skb(skb);
1849oom:
1850 return 0;
1851}

/*
 * Output a raw packet to a device layer.  This bypasses all the other
 * protocol layers, so the caller must supply a complete link-layer frame
 * and name the device via spkt_device.
 */
1859static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
1860 size_t len)
1861{
1862 struct sock *sk = sock->sk;
1863 DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
1864 struct sk_buff *skb = NULL;
1865 struct net_device *dev;
1866 struct sockcm_cookie sockc;
1867 __be16 proto = 0;
1868 int err;
1869 int extra_len = 0;
1870
1871
1872
1873
1874
1875 if (saddr) {
1876 if (msg->msg_namelen < sizeof(struct sockaddr))
1877 return -EINVAL;
1878 if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1879 proto = saddr->spkt_protocol;
1880 } else
1881 return -ENOTCONN;
1882
1883
1884
1885
1886
1887 saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1888retry:
1889 rcu_read_lock();
1890 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1891 err = -ENODEV;
1892 if (dev == NULL)
1893 goto out_unlock;
1894
1895 err = -ENETDOWN;
1896 if (!(dev->flags & IFF_UP))
1897 goto out_unlock;
1898
1899
1900
1901
1902
1903
1904 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1905 if (!netif_supports_nofcs(dev)) {
1906 err = -EPROTONOSUPPORT;
1907 goto out_unlock;
1908 }
1909 extra_len = 4;
1910 }
1911
1912 err = -EMSGSIZE;
1913 if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1914 goto out_unlock;
1915
1916 if (!skb) {
1917 size_t reserved = LL_RESERVED_SPACE(dev);
1918 int tlen = dev->needed_tailroom;
1919 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
1920
1921 rcu_read_unlock();
1922 skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
1923 if (skb == NULL)
1924 return -ENOBUFS;
1925
1926
1927
1928
1929 skb_reserve(skb, reserved);
1930 skb_reset_network_header(skb);
1931
1932
1933 if (hhlen) {
1934 skb->data -= hhlen;
1935 skb->tail -= hhlen;
1936 if (len < hhlen)
1937 skb_reset_network_header(skb);
1938 }
1939 err = memcpy_from_msg(skb_put(skb, len), msg, len);
1940 if (err)
1941 goto out_free;
1942 goto retry;
1943 }
1944
1945 if (!dev_validate_header(dev, skb->data, len)) {
1946 err = -EINVAL;
1947 goto out_unlock;
1948 }
1949 if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
1950 !packet_extra_vlan_len_allowed(dev, skb)) {
1951 err = -EMSGSIZE;
1952 goto out_unlock;
1953 }
1954
1955 sockc.transmit_time = 0;
1956 sockc.tsflags = sk->sk_tsflags;
1957 if (msg->msg_controllen) {
1958 err = sock_cmsg_send(sk, msg, &sockc);
1959 if (unlikely(err))
1960 goto out_unlock;
1961 }
1962
1963 skb->protocol = proto;
1964 skb->dev = dev;
1965 skb->priority = sk->sk_priority;
1966 skb->mark = sk->sk_mark;
1967 skb->tstamp = sockc.transmit_time;
1968
1969 sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);
1970
1971 if (unlikely(extra_len == 4))
1972 skb->no_fcs = 1;
1973
1974 skb_probe_transport_header(skb, 0);
1975
1976 dev_queue_xmit(skb);
1977 rcu_read_unlock();
1978 return len;
1979
1980out_unlock:
1981 rcu_read_unlock();
1982out_free:
1983 kfree_skb(skb);
1984 return err;
1985}
1986
1987static unsigned int run_filter(struct sk_buff *skb,
1988 const struct sock *sk,
1989 unsigned int res)
1990{
1991 struct sk_filter *filter;
1992
1993 rcu_read_lock();
1994 filter = rcu_dereference(sk->sk_filter);
1995 if (filter != NULL)
1996 res = bpf_prog_run_clear_cb(filter->prog, skb);
1997 rcu_read_unlock();
1998
1999 return res;
2000}
2001
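/* When PACKET_VNET_HDR is enabled, emit a virtio_net_hdr describing the skb
 * ahead of the packet data and shrink the remaining buffer length.
 */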
2002static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
2003 size_t *len)
2004{
2005 struct virtio_net_hdr vnet_hdr;
2006
2007 if (*len < sizeof(vnet_hdr))
2008 return -EINVAL;
2009 *len -= sizeof(vnet_hdr);
2010
2011 if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
2012 return -EINVAL;
2013
2014 return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
2015}

/*
 * packet_rcv() does lazy skb cloning in the hope that most packets are
 * discarded by the attached BPF filter.  Note the tricky part: a shared skb
 * is deliberately mangled (skb->data, skb->len and skb->cb) and restored on
 * the drop paths; a private clone is only made once the packet is actually
 * going to be queued.
 */
2029static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
2030 struct packet_type *pt, struct net_device *orig_dev)
2031{
2032 struct sock *sk;
2033 struct sockaddr_ll *sll;
2034 struct packet_sock *po;
2035 u8 *skb_head = skb->data;
2036 int skb_len = skb->len;
2037 unsigned int snaplen, res;
2038 bool is_drop_n_account = false;
2039
2040 if (skb->pkt_type == PACKET_LOOPBACK)
2041 goto drop;
2042
2043 sk = pt->af_packet_priv;
2044 po = pkt_sk(sk);
2045
2046 if (!net_eq(dev_net(dev), sock_net(sk)))
2047 goto drop;
2048
2049 skb->dev = dev;
2050
2051 if (dev->header_ops) {
		/* The device has an explicit notion of a link-layer header,
		 * exported to higher levels: push it back for SOCK_RAW
		 * sockets.  SOCK_DGRAM sockets never see it, and outgoing
		 * copies are trimmed back to the network header below.
		 */
2059 if (sk->sk_type != SOCK_DGRAM)
2060 skb_push(skb, skb->data - skb_mac_header(skb));
2061 else if (skb->pkt_type == PACKET_OUTGOING) {
2062
2063 skb_pull(skb, skb_network_offset(skb));
2064 }
2065 }
2066
2067 snaplen = skb->len;
2068
2069 res = run_filter(skb, sk, snaplen);
2070 if (!res)
2071 goto drop_n_restore;
2072 if (snaplen > res)
2073 snaplen = res;
2074
2075 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2076 goto drop_n_acct;
2077
2078 if (skb_shared(skb)) {
2079 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2080 if (nskb == NULL)
2081 goto drop_n_acct;
2082
2083 if (skb_head != skb->data) {
2084 skb->data = skb_head;
2085 skb->len = skb_len;
2086 }
2087 consume_skb(skb);
2088 skb = nskb;
2089 }
2090
2091 sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
2092
2093 sll = &PACKET_SKB_CB(skb)->sa.ll;
2094 sll->sll_hatype = dev->type;
2095 sll->sll_pkttype = skb->pkt_type;
2096 if (unlikely(po->origdev))
2097 sll->sll_ifindex = orig_dev->ifindex;
2098 else
2099 sll->sll_ifindex = dev->ifindex;
2100
2101 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);

	/* sll->sll_family and sll->sll_protocol are set in packet_recvmsg();
	 * use their space here to stash the original skb length.
	 */
2106 PACKET_SKB_CB(skb)->sa.origlen = skb->len;
2107
2108 if (pskb_trim(skb, snaplen))
2109 goto drop_n_acct;
2110
2111 skb_set_owner_r(skb, sk);
2112 skb->dev = NULL;
2113 skb_dst_drop(skb);
2114
2115
2116 nf_reset(skb);
2117
2118 spin_lock(&sk->sk_receive_queue.lock);
2119 po->stats.stats1.tp_packets++;
2120 sock_skb_set_dropcount(sk, skb);
2121 __skb_queue_tail(&sk->sk_receive_queue, skb);
2122 spin_unlock(&sk->sk_receive_queue.lock);
2123 sk->sk_data_ready(sk);
2124 return 0;
2125
2126drop_n_acct:
2127 is_drop_n_account = true;
2128 spin_lock(&sk->sk_receive_queue.lock);
2129 po->stats.stats1.tp_drops++;
2130 atomic_inc(&sk->sk_drops);
2131 spin_unlock(&sk->sk_receive_queue.lock);
2132
2133drop_n_restore:
2134 if (skb_head != skb->data && skb_shared(skb)) {
2135 skb->data = skb_head;
2136 skb->len = skb_len;
2137 }
2138drop:
2139 if (!is_drop_n_account)
2140 consume_skb(skb);
2141 else
2142 kfree_skb(skb);
2143 return 0;
2144}
2145
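/* Ring-buffer receive path: reserve a frame (or block slot for TPACKET_V3),
 * copy the possibly truncated packet into it and fill in the tpacket header
 * plus the link-layer address that follows it.
 */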
2146static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2147 struct packet_type *pt, struct net_device *orig_dev)
2148{
2149 struct sock *sk;
2150 struct packet_sock *po;
2151 struct sockaddr_ll *sll;
2152 union tpacket_uhdr h;
2153 u8 *skb_head = skb->data;
2154 int skb_len = skb->len;
2155 unsigned int snaplen, res;
2156 unsigned long status = TP_STATUS_USER;
2157 unsigned short macoff, netoff, hdrlen;
2158 struct sk_buff *copy_skb = NULL;
2159 struct timespec ts;
2160 __u32 ts_status;
2161 bool is_drop_n_account = false;
2162 bool do_vnet = false;

	/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
	 * New members may be added up to the current aligned size without
	 * forcing userspace to call getsockopt(..., PACKET_HDRLEN, ...).
	 */
2168 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
2169 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
2170
2171 if (skb->pkt_type == PACKET_LOOPBACK)
2172 goto drop;
2173
2174 sk = pt->af_packet_priv;
2175 po = pkt_sk(sk);
2176
2177 if (!net_eq(dev_net(dev), sock_net(sk)))
2178 goto drop;
2179
2180 if (dev->header_ops) {
2181 if (sk->sk_type != SOCK_DGRAM)
2182 skb_push(skb, skb->data - skb_mac_header(skb));
2183 else if (skb->pkt_type == PACKET_OUTGOING) {
2184
2185 skb_pull(skb, skb_network_offset(skb));
2186 }
2187 }
2188
2189 snaplen = skb->len;
2190
2191 res = run_filter(skb, sk, snaplen);
2192 if (!res)
2193 goto drop_n_restore;
2194
2195 if (skb->ip_summed == CHECKSUM_PARTIAL)
2196 status |= TP_STATUS_CSUMNOTREADY;
2197 else if (skb->pkt_type != PACKET_OUTGOING &&
2198 (skb->ip_summed == CHECKSUM_COMPLETE ||
2199 skb_csum_unnecessary(skb)))
2200 status |= TP_STATUS_CSUM_VALID;
2201
2202 if (snaplen > res)
2203 snaplen = res;
2204
2205 if (sk->sk_type == SOCK_DGRAM) {
2206 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2207 po->tp_reserve;
2208 } else {
2209 unsigned int maclen = skb_network_offset(skb);
2210 netoff = TPACKET_ALIGN(po->tp_hdrlen +
2211 (maclen < 16 ? 16 : maclen)) +
2212 po->tp_reserve;
2213 if (po->has_vnet_hdr) {
2214 netoff += sizeof(struct virtio_net_hdr);
2215 do_vnet = true;
2216 }
2217 macoff = netoff - maclen;
2218 }
2219 if (po->tp_version <= TPACKET_V2) {
2220 if (macoff + snaplen > po->rx_ring.frame_size) {
2221 if (po->copy_thresh &&
2222 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
2223 if (skb_shared(skb)) {
2224 copy_skb = skb_clone(skb, GFP_ATOMIC);
2225 } else {
2226 copy_skb = skb_get(skb);
2227 skb_head = skb->data;
2228 }
2229 if (copy_skb)
2230 skb_set_owner_r(copy_skb, sk);
2231 }
2232 snaplen = po->rx_ring.frame_size - macoff;
2233 if ((int)snaplen < 0) {
2234 snaplen = 0;
2235 do_vnet = false;
2236 }
2237 }
2238 } else if (unlikely(macoff + snaplen >
2239 GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2240 u32 nval;
2241
2242 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2243 pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2244 snaplen, nval, macoff);
2245 snaplen = nval;
2246 if (unlikely((int)snaplen < 0)) {
2247 snaplen = 0;
2248 macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2249 do_vnet = false;
2250 }
2251 }
2252 spin_lock(&sk->sk_receive_queue.lock);
2253 h.raw = packet_current_rx_frame(po, skb,
2254 TP_STATUS_KERNEL, (macoff+snaplen));
2255 if (!h.raw)
2256 goto drop_n_account;
2257 if (po->tp_version <= TPACKET_V2) {
2258 packet_increment_rx_head(po, &po->rx_ring);
		/*
		 * LOSING will be reported till you read the stats,
		 * because it's counted in the ring.
		 */
2265 if (po->stats.stats1.tp_drops)
2266 status |= TP_STATUS_LOSING;
2267 }
2268
2269 if (do_vnet &&
2270 virtio_net_hdr_from_skb(skb, h.raw + macoff -
2271 sizeof(struct virtio_net_hdr),
2272 vio_le(), true, 0))
2273 goto drop_n_account;
2274
2275 po->stats.stats1.tp_packets++;
2276 if (copy_skb) {
2277 status |= TP_STATUS_COPY;
2278 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
2279 }
2280 spin_unlock(&sk->sk_receive_queue.lock);
2281
2282 skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
2283
2284 if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
2285 getnstimeofday(&ts);
2286
2287 status |= ts_status;
2288
2289 switch (po->tp_version) {
2290 case TPACKET_V1:
2291 h.h1->tp_len = skb->len;
2292 h.h1->tp_snaplen = snaplen;
2293 h.h1->tp_mac = macoff;
2294 h.h1->tp_net = netoff;
2295 h.h1->tp_sec = ts.tv_sec;
2296 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2297 hdrlen = sizeof(*h.h1);
2298 break;
2299 case TPACKET_V2:
2300 h.h2->tp_len = skb->len;
2301 h.h2->tp_snaplen = snaplen;
2302 h.h2->tp_mac = macoff;
2303 h.h2->tp_net = netoff;
2304 h.h2->tp_sec = ts.tv_sec;
2305 h.h2->tp_nsec = ts.tv_nsec;
2306 if (skb_vlan_tag_present(skb)) {
2307 h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2308 h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2309 status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2310 } else {
2311 h.h2->tp_vlan_tci = 0;
2312 h.h2->tp_vlan_tpid = 0;
2313 }
2314 memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2315 hdrlen = sizeof(*h.h2);
2316 break;
2317 case TPACKET_V3:
		/* tp_next_offset and the vlan fields were already filled in by
		 * prb_fill_curr_block(), so OR the status in rather than
		 * overwriting it.
		 */
2321 h.h3->tp_status |= status;
2322 h.h3->tp_len = skb->len;
2323 h.h3->tp_snaplen = snaplen;
2324 h.h3->tp_mac = macoff;
2325 h.h3->tp_net = netoff;
2326 h.h3->tp_sec = ts.tv_sec;
2327 h.h3->tp_nsec = ts.tv_nsec;
2328 memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2329 hdrlen = sizeof(*h.h3);
2330 break;
2331 default:
2332 BUG();
2333 }
2334
2335 sll = h.raw + TPACKET_ALIGN(hdrlen);
2336 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2337 sll->sll_family = AF_PACKET;
2338 sll->sll_hatype = dev->type;
2339 sll->sll_protocol = skb->protocol;
2340 sll->sll_pkttype = skb->pkt_type;
2341 if (unlikely(po->origdev))
2342 sll->sll_ifindex = orig_dev->ifindex;
2343 else
2344 sll->sll_ifindex = dev->ifindex;
2345
2346 smp_mb();
2347
2348#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2349 if (po->tp_version <= TPACKET_V2) {
2350 u8 *start, *end;
2351
2352 end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2353 macoff + snaplen);
2354
2355 for (start = h.raw; start < end; start += PAGE_SIZE)
2356 flush_dcache_page(pgv_to_page(start));
2357 }
2358 smp_wmb();
2359#endif
2360
2361 if (po->tp_version <= TPACKET_V2) {
2362 __packet_set_status(po, h.raw, status);
2363 sk->sk_data_ready(sk);
2364 } else {
2365 prb_clear_blk_fill_status(&po->rx_ring);
2366 }
2367
2368drop_n_restore:
2369 if (skb_head != skb->data && skb_shared(skb)) {
2370 skb->data = skb_head;
2371 skb->len = skb_len;
2372 }
2373drop:
2374 if (!is_drop_n_account)
2375 consume_skb(skb);
2376 else
2377 kfree_skb(skb);
2378 return 0;
2379
2380drop_n_account:
2381 is_drop_n_account = true;
2382 po->stats.stats1.tp_drops++;
2383 spin_unlock(&sk->sk_receive_queue.lock);
2384
2385 sk->sk_data_ready(sk);
2386 kfree_skb(copy_skb);
2387 goto drop_n_restore;
2388}
2389
2390static void tpacket_destruct_skb(struct sk_buff *skb)
2391{
2392 struct packet_sock *po = pkt_sk(skb->sk);
2393
2394 if (likely(po->tx_ring.pg_vec)) {
2395 void *ph;
2396 __u32 ts;
2397
2398 ph = skb_zcopy_get_nouarg(skb);
2399 packet_dec_pending(&po->tx_ring);
2400
2401 ts = __packet_set_timestamp(po, ph, skb);
2402 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2403 }
2404
2405 sock_wfree(skb);
2406}
2407
2408static void tpacket_set_protocol(const struct net_device *dev,
2409 struct sk_buff *skb)
2410{
2411 if (dev->type == ARPHRD_ETHER) {
2412 skb_reset_mac_header(skb);
2413 skb->protocol = eth_hdr(skb)->h_proto;
2414 }
2415}
2416
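/* Sanity-check a user-supplied virtio_net_hdr: grow hdr_len to cover the
 * checksum start/offset if needed and reject headers longer than the packet.
 */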
2417static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
2418{
2419 if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2420 (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2421 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
2422 __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
2423 vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
2424 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2425 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
2426
2427 if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
2428 return -EINVAL;
2429
2430 return 0;
2431}
2432
2433static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
2434 struct virtio_net_hdr *vnet_hdr)
2435{
2436 if (*len < sizeof(*vnet_hdr))
2437 return -EINVAL;
2438 *len -= sizeof(*vnet_hdr);
2439
2440 if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
2441 return -EFAULT;
2442
2443 return __packet_snd_vnet_parse(vnet_hdr, *len);
2444}
2445
2446static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2447 void *frame, struct net_device *dev, void *data, int tp_len,
2448 __be16 proto, unsigned char *addr, int hlen, int copylen,
2449 const struct sockcm_cookie *sockc)
2450{
2451 union tpacket_uhdr ph;
2452 int to_write, offset, len, nr_frags, len_max;
2453 struct socket *sock = po->sk.sk_socket;
2454 struct page *page;
2455 int err;
2456
2457 ph.raw = frame;
2458
2459 skb->protocol = proto;
2460 skb->dev = dev;
2461 skb->priority = po->sk.sk_priority;
2462 skb->mark = po->sk.sk_mark;
2463 skb->tstamp = sockc->transmit_time;
2464 sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
2465 skb_zcopy_set_nouarg(skb, ph.raw);
2466
2467 skb_reserve(skb, hlen);
2468 skb_reset_network_header(skb);
2469
2470 to_write = tp_len;
2471
2472 if (sock->type == SOCK_DGRAM) {
2473 err = dev_hard_header(skb, dev, ntohs(proto), addr,
2474 NULL, tp_len);
2475 if (unlikely(err < 0))
2476 return -EINVAL;
2477 } else if (copylen) {
2478 int hdrlen = min_t(int, copylen, tp_len);
2479
2480 skb_push(skb, dev->hard_header_len);
2481 skb_put(skb, copylen - dev->hard_header_len);
2482 err = skb_store_bits(skb, 0, data, hdrlen);
2483 if (unlikely(err))
2484 return err;
2485 if (!dev_validate_header(dev, skb->data, hdrlen))
2486 return -EINVAL;
2487 if (!skb->protocol)
2488 tpacket_set_protocol(dev, skb);
2489
2490 data += hdrlen;
2491 to_write -= hdrlen;
2492 }
2493
2494 offset = offset_in_page(data);
2495 len_max = PAGE_SIZE - offset;
2496 len = ((to_write > len_max) ? len_max : to_write);
2497
2498 skb->data_len = to_write;
2499 skb->len += to_write;
2500 skb->truesize += to_write;
2501 refcount_add(to_write, &po->sk.sk_wmem_alloc);
2502
2503 while (likely(to_write)) {
2504 nr_frags = skb_shinfo(skb)->nr_frags;
2505
2506 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2507 pr_err("Packet exceed the number of skb frags(%lu)\n",
2508 MAX_SKB_FRAGS);
2509 return -EFAULT;
2510 }
2511
2512 page = pgv_to_page(data);
2513 data += len;
2514 flush_dcache_page(page);
2515 get_page(page);
2516 skb_fill_page_desc(skb, nr_frags, page, offset, len);
2517 to_write -= len;
2518 offset = 0;
2519 len_max = PAGE_SIZE;
2520 len = ((to_write > len_max) ? len_max : to_write);
2521 }
2522
2523 skb_probe_transport_header(skb, 0);
2524
2525 return tp_len;
2526}
2527
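/* Read one tx ring slot header: return the frame length and point *data at
 * the start of the packet payload inside the slot.
 */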
2528static int tpacket_parse_header(struct packet_sock *po, void *frame,
2529 int size_max, void **data)
2530{
2531 union tpacket_uhdr ph;
2532 int tp_len, off;
2533
2534 ph.raw = frame;
2535
2536 switch (po->tp_version) {
2537 case TPACKET_V3:
2538 if (ph.h3->tp_next_offset != 0) {
2539 pr_warn_once("variable sized slot not supported\n");
2540 return -EINVAL;
2541 }
2542 tp_len = ph.h3->tp_len;
2543 break;
2544 case TPACKET_V2:
2545 tp_len = ph.h2->tp_len;
2546 break;
2547 default:
2548 tp_len = ph.h1->tp_len;
2549 break;
2550 }
2551 if (unlikely(tp_len > size_max)) {
2552 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
2553 return -EMSGSIZE;
2554 }
2555
2556 if (unlikely(po->tp_tx_has_off)) {
2557 int off_min, off_max;
2558
2559 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2560 off_max = po->tx_ring.frame_size - tp_len;
2561 if (po->sk.sk_type == SOCK_DGRAM) {
2562 switch (po->tp_version) {
2563 case TPACKET_V3:
2564 off = ph.h3->tp_net;
2565 break;
2566 case TPACKET_V2:
2567 off = ph.h2->tp_net;
2568 break;
2569 default:
2570 off = ph.h1->tp_net;
2571 break;
2572 }
2573 } else {
2574 switch (po->tp_version) {
2575 case TPACKET_V3:
2576 off = ph.h3->tp_mac;
2577 break;
2578 case TPACKET_V2:
2579 off = ph.h2->tp_mac;
2580 break;
2581 default:
2582 off = ph.h1->tp_mac;
2583 break;
2584 }
2585 }
2586 if (unlikely((off < off_min) || (off_max < off)))
2587 return -EINVAL;
2588 } else {
2589 off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2590 }
2591
2592 *data = frame + off;
2593 return tp_len;
2594}
2595
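/* Transmit path for the tx ring: walk the ring, turn every slot marked
 * TP_STATUS_SEND_REQUEST into an skb and hand it to the device.
 */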
2596static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2597{
2598 struct sk_buff *skb;
2599 struct net_device *dev;
2600 struct virtio_net_hdr *vnet_hdr = NULL;
2601 struct sockcm_cookie sockc;
2602 __be16 proto;
2603 int err, reserve = 0;
2604 void *ph;
2605 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2606 bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2607 int tp_len, size_max;
2608 unsigned char *addr;
2609 void *data;
2610 int len_sum = 0;
2611 int status = TP_STATUS_AVAILABLE;
2612 int hlen, tlen, copylen = 0;
2613
2614 mutex_lock(&po->pg_vec_lock);
2615
2616 if (likely(saddr == NULL)) {
2617 dev = packet_cached_dev_get(po);
2618 proto = po->num;
2619 addr = NULL;
2620 } else {
2621 err = -EINVAL;
2622 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2623 goto out;
2624 if (msg->msg_namelen < (saddr->sll_halen
2625 + offsetof(struct sockaddr_ll,
2626 sll_addr)))
2627 goto out;
2628 proto = saddr->sll_protocol;
2629 addr = saddr->sll_addr;
2630 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2631 }
2632
2633 err = -ENXIO;
2634 if (unlikely(dev == NULL))
2635 goto out;
2636 err = -ENETDOWN;
2637 if (unlikely(!(dev->flags & IFF_UP)))
2638 goto out_put;
2639
2640 sockc.transmit_time = 0;
2641 sockc.tsflags = po->sk.sk_tsflags;
2642 if (msg->msg_controllen) {
2643 err = sock_cmsg_send(&po->sk, msg, &sockc);
2644 if (unlikely(err))
2645 goto out_put;
2646 }
2647
2648 if (po->sk.sk_socket->type == SOCK_RAW)
2649 reserve = dev->hard_header_len;
2650 size_max = po->tx_ring.frame_size
2651 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2652
2653 if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
2654 size_max = dev->mtu + reserve + VLAN_HLEN;
2655
2656 do {
2657 ph = packet_current_frame(po, &po->tx_ring,
2658 TP_STATUS_SEND_REQUEST);
2659 if (unlikely(ph == NULL)) {
2660 if (need_wait && need_resched())
2661 schedule();
2662 continue;
2663 }
2664
2665 skb = NULL;
2666 tp_len = tpacket_parse_header(po, ph, size_max, &data);
2667 if (tp_len < 0)
2668 goto tpacket_error;
2669
2670 status = TP_STATUS_SEND_REQUEST;
2671 hlen = LL_RESERVED_SPACE(dev);
2672 tlen = dev->needed_tailroom;
2673 if (po->has_vnet_hdr) {
2674 vnet_hdr = data;
2675 data += sizeof(*vnet_hdr);
2676 tp_len -= sizeof(*vnet_hdr);
2677 if (tp_len < 0 ||
2678 __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
2679 tp_len = -EINVAL;
2680 goto tpacket_error;
2681 }
2682 copylen = __virtio16_to_cpu(vio_le(),
2683 vnet_hdr->hdr_len);
2684 }
2685 copylen = max_t(int, copylen, dev->hard_header_len);
2686 skb = sock_alloc_send_skb(&po->sk,
2687 hlen + tlen + sizeof(struct sockaddr_ll) +
2688 (copylen - dev->hard_header_len),
2689 !need_wait, &err);
2690
2691 if (unlikely(skb == NULL)) {
2692
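 /* skb allocation failed; if some frames were already queued,
  * report the partial byte count instead of the error.
  */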
2693 if (likely(len_sum > 0))
2694 err = len_sum;
2695 goto out_status;
2696 }
2697 tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2698 addr, hlen, copylen, &sockc);
2699 if (likely(tp_len >= 0) &&
2700 tp_len > dev->mtu + reserve &&
2701 !po->has_vnet_hdr &&
2702 !packet_extra_vlan_len_allowed(dev, skb))
2703 tp_len = -EMSGSIZE;
2704
2705 if (unlikely(tp_len < 0)) {
2706tpacket_error:
2707 if (po->tp_loss) {
2708 __packet_set_status(po, ph,
2709 TP_STATUS_AVAILABLE);
2710 packet_increment_head(&po->tx_ring);
2711 kfree_skb(skb);
2712 continue;
2713 } else {
2714 status = TP_STATUS_WRONG_FORMAT;
2715 err = tp_len;
2716 goto out_status;
2717 }
2718 }
2719
2720 if (po->has_vnet_hdr) {
2721 if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
2722 tp_len = -EINVAL;
2723 goto tpacket_error;
2724 }
2725 virtio_net_hdr_set_proto(skb, vnet_hdr);
2726 }
2727
2728 skb->destructor = tpacket_destruct_skb;
2729 __packet_set_status(po, ph, TP_STATUS_SENDING);
2730 packet_inc_pending(&po->tx_ring);
2731
2732 status = TP_STATUS_SEND_REQUEST;
2733 err = po->xmit(skb);
2734 if (unlikely(err > 0)) {
2735 err = net_xmit_errno(err);
2736 if (err && __packet_get_status(po, ph) ==
2737 TP_STATUS_AVAILABLE) {
2738
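 /* skb was destructed already */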
2739 skb = NULL;
2740 goto out_status;
2741 }
2742
2743
2744
2745
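 /* skb was dropped but not destructed yet;
  * treat it like congestion or err < 0
  */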
2746 err = 0;
2747 }
2748 packet_increment_head(&po->tx_ring);
2749 len_sum += tp_len;
2750 } while (likely((ph != NULL) ||
2751
2752
2753
2754
2755
2756
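 /* Note: packet_read_pending() may be slow since it reads a
  * per-cpu counter, but the first condition above short-circuits
  * it in the common case.
  */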
2757 (need_wait && packet_read_pending(&po->tx_ring))));
2758
2759 err = len_sum;
2760 goto out_put;
2761
2762out_status:
2763 __packet_set_status(po, ph, status);
2764 kfree_skb(skb);
2765out_put:
2766 dev_put(dev);
2767out:
2768 mutex_unlock(&po->pg_vec_lock);
2769 return err;
2770}
2771
2772static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2773 size_t reserve, size_t len,
2774 size_t linear, int noblock,
2775 int *err)
2776{
2777 struct sk_buff *skb;
2778
2779
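 /* If the packet fits in a single page (or no linear size was
  * requested), put everything in the linear area.
  */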
2780 if (prepad + len < PAGE_SIZE || !linear)
2781 linear = len;
2782
2783 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2784 err, 0);
2785 if (!skb)
2786 return NULL;
2787
2788 skb_reserve(skb, reserve);
2789 skb_put(skb, linear);
2790 skb->data_len = len - linear;
2791 skb->len += len - linear;
2792
2793 return skb;
2794}
2795
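/* Transmit path for sockets without a tx ring: build a fresh skb from the
 * msghdr and hand it to the device.
 */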
2796static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2797{
2798 struct sock *sk = sock->sk;
2799 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2800 struct sk_buff *skb;
2801 struct net_device *dev;
2802 __be16 proto;
2803 unsigned char *addr;
2804 int err, reserve = 0;
2805 struct sockcm_cookie sockc;
2806 struct virtio_net_hdr vnet_hdr = { 0 };
2807 int offset = 0;
2808 struct packet_sock *po = pkt_sk(sk);
2809 bool has_vnet_hdr = false;
2810 int hlen, tlen, linear;
2811 int extra_len = 0;
2812
2813
2814
2815
2816
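 /*
  *	Get and verify the address.
  */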
2817 if (likely(saddr == NULL)) {
2818 dev = packet_cached_dev_get(po);
2819 proto = po->num;
2820 addr = NULL;
2821 } else {
2822 err = -EINVAL;
2823 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2824 goto out;
2825 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2826 goto out;
2827 proto = saddr->sll_protocol;
2828 addr = saddr->sll_addr;
2829 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2830 }
2831
2832 err = -ENXIO;
2833 if (unlikely(dev == NULL))
2834 goto out_unlock;
2835 err = -ENETDOWN;
2836 if (unlikely(!(dev->flags & IFF_UP)))
2837 goto out_unlock;
2838
2839 sockc.transmit_time = 0;
2840 sockc.tsflags = sk->sk_tsflags;
2841 sockc.mark = sk->sk_mark;
2842 if (msg->msg_controllen) {
2843 err = sock_cmsg_send(sk, msg, &sockc);
2844 if (unlikely(err))
2845 goto out_unlock;
2846 }
2847
2848 if (sock->type == SOCK_RAW)
2849 reserve = dev->hard_header_len;
2850 if (po->has_vnet_hdr) {
2851 err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
2852 if (err)
2853 goto out_unlock;
2854 has_vnet_hdr = true;
2855 }
2856
2857 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2858 if (!netif_supports_nofcs(dev)) {
2859 err = -EPROTONOSUPPORT;
2860 goto out_unlock;
2861 }
2862 extra_len = 4;
2863 }
2864
2865 err = -EMSGSIZE;
2866 if (!vnet_hdr.gso_type &&
2867 (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2868 goto out_unlock;
2869
2870 err = -ENOBUFS;
2871 hlen = LL_RESERVED_SPACE(dev);
2872 tlen = dev->needed_tailroom;
2873 linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
2874 linear = max(linear, min_t(int, len, dev->hard_header_len));
2875 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
2876 msg->msg_flags & MSG_DONTWAIT, &err);
2877 if (skb == NULL)
2878 goto out_unlock;
2879
2880 skb_reset_network_header(skb);
2881
2882 err = -EINVAL;
2883 if (sock->type == SOCK_DGRAM) {
2884 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2885 if (unlikely(offset < 0))
2886 goto out_free;
2887 } else if (reserve) {
2888 skb_reserve(skb, -reserve);
2889 if (len < reserve + sizeof(struct ipv6hdr) &&
2890 dev->min_header_len != dev->hard_header_len)
2891 skb_reset_network_header(skb);
2892 }
2893
2894
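 /* Copy the payload from user space; returns -EFAULT on a faulting buffer. */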
2895 err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
2896 if (err)
2897 goto out_free;
2898
2899 if (sock->type == SOCK_RAW &&
2900 !dev_validate_header(dev, skb->data, len)) {
2901 err = -EINVAL;
2902 goto out_free;
2903 }
2904
2905 sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);
2906
2907 if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
2908 !packet_extra_vlan_len_allowed(dev, skb)) {
2909 err = -EMSGSIZE;
2910 goto out_free;
2911 }
2912
2913 skb->protocol = proto;
2914 skb->dev = dev;
2915 skb->priority = sk->sk_priority;
2916 skb->mark = sockc.mark;
2917 skb->tstamp = sockc.transmit_time;
2918
2919 if (has_vnet_hdr) {
2920 err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
2921 if (err)
2922 goto out_free;
2923 len += sizeof(vnet_hdr);
2924 virtio_net_hdr_set_proto(skb, &vnet_hdr);
2925 }
2926
2927 skb_probe_transport_header(skb, reserve);
2928
2929 if (unlikely(extra_len == 4))
2930 skb->no_fcs = 1;
2931
2932 err = po->xmit(skb);
2933 if (err > 0 && (err = net_xmit_errno(err)) != 0)
2934 goto out_unlock;
2935
2936 dev_put(dev);
2937
2938 return len;
2939
2940out_free:
2941 kfree_skb(skb);
2942out_unlock:
2943 if (dev)
2944 dev_put(dev);
2945out:
2946 return err;
2947}
2948
2949static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
2950{
2951 struct sock *sk = sock->sk;
2952 struct packet_sock *po = pkt_sk(sk);
2953
2954 if (po->tx_ring.pg_vec)
2955 return tpacket_snd(po, msg);
2956 else
2957 return packet_snd(sock, msg, len);
2958}
2959
2960
2961
2962
2963
2964
2965static int packet_release(struct socket *sock)
2966{
2967 struct sock *sk = sock->sk;
2968 struct packet_sock *po;
2969 struct packet_fanout *f;
2970 struct net *net;
2971 union tpacket_req_u req_u;
2972
2973 if (!sk)
2974 return 0;
2975
2976 net = sock_net(sk);
2977 po = pkt_sk(sk);
2978
2979 mutex_lock(&net->packet.sklist_lock);
2980 sk_del_node_init_rcu(sk);
2981 mutex_unlock(&net->packet.sklist_lock);
2982
2983 preempt_disable();
2984 sock_prot_inuse_add(net, sk->sk_prot, -1);
2985 preempt_enable();
2986
2987 spin_lock(&po->bind_lock);
2988 unregister_prot_hook(sk, false);
2989 packet_cached_dev_reset(po);
2990
2991 if (po->prot_hook.dev) {
2992 dev_put(po->prot_hook.dev);
2993 po->prot_hook.dev = NULL;
2994 }
2995 spin_unlock(&po->bind_lock);
2996
2997 packet_flush_mclist(sk);
2998
2999 lock_sock(sk);
3000 if (po->rx_ring.pg_vec) {
3001 memset(&req_u, 0, sizeof(req_u));
3002 packet_set_ring(sk, &req_u, 1, 0);
3003 }
3004
3005 if (po->tx_ring.pg_vec) {
3006 memset(&req_u, 0, sizeof(req_u));
3007 packet_set_ring(sk, &req_u, 1, 1);
3008 }
3009 release_sock(sk);
3010
3011 f = fanout_release(sk);
3012
3013 synchronize_net();
3014
3015 if (f) {
3016 kfree(po->rollover);
3017 fanout_release_data(f);
3018 kfree(f);
3019 }
3020
3021
3022
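 /*
  *	Now the socket is dead.  No more input will appear.
  */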
3023 sock_orphan(sk);
3024 sock->sk = NULL;
3025
3026
3027
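 /* Purge queues */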
3028 skb_queue_purge(&sk->sk_receive_queue);
3029 packet_free_pending(po);
3030 sk_refcnt_debug_release(sk);
3031
3032 sock_put(sk);
3033 return 0;
3034}
3035
3036
3037
3038
3039
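/*
 *	Attach a packet hook.
 */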
3040static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
3041 __be16 proto)
3042{
3043 struct packet_sock *po = pkt_sk(sk);
3044 struct net_device *dev_curr;
3045 __be16 proto_curr;
3046 bool need_rehook;
3047 struct net_device *dev = NULL;
3048 int ret = 0;
3049 bool unlisted = false;
3050
3051 lock_sock(sk);
3052 spin_lock(&po->bind_lock);
3053 rcu_read_lock();
3054
3055 if (po->fanout) {
3056 ret = -EINVAL;
3057 goto out_unlock;
3058 }
3059
3060 if (name) {
3061 dev = dev_get_by_name_rcu(sock_net(sk), name);
3062 if (!dev) {
3063 ret = -ENODEV;
3064 goto out_unlock;
3065 }
3066 } else if (ifindex) {
3067 dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3068 if (!dev) {
3069 ret = -ENODEV;
3070 goto out_unlock;
3071 }
3072 }
3073
3074 if (dev)
3075 dev_hold(dev);
3076
3077 proto_curr = po->prot_hook.type;
3078 dev_curr = po->prot_hook.dev;
3079
3080 need_rehook = proto_curr != proto || dev_curr != dev;
3081
3082 if (need_rehook) {
3083 if (po->running) {
3084 rcu_read_unlock();
3085
3086
3087
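 /* Clearing po->num prevents packet_notifier() from
  * re-registering the prot hook while it is torn down.
  */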
3088 po->num = 0;
3089 __unregister_prot_hook(sk, true);
3090 rcu_read_lock();
3091 dev_curr = po->prot_hook.dev;
3092 if (dev)
3093 unlisted = !dev_get_by_index_rcu(sock_net(sk),
3094 dev->ifindex);
3095 }
3096
3097 BUG_ON(po->running);
3098 po->num = proto;
3099 po->prot_hook.type = proto;
3100
3101 if (unlikely(unlisted)) {
3102 dev_put(dev);
3103 po->prot_hook.dev = NULL;
3104 po->ifindex = -1;
3105 packet_cached_dev_reset(po);
3106 } else {
3107 po->prot_hook.dev = dev;
3108 po->ifindex = dev ? dev->ifindex : 0;
3109 packet_cached_dev_assign(po, dev);
3110 }
3111 }
3112 if (dev_curr)
3113 dev_put(dev_curr);
3114
3115 if (proto == 0 || !need_rehook)
3116 goto out_unlock;
3117
3118 if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
3119 register_prot_hook(sk);
3120 } else {
3121 sk->sk_err = ENETDOWN;
3122 if (!sock_flag(sk, SOCK_DEAD))
3123 sk->sk_error_report(sk);
3124 }
3125
3126out_unlock:
3127 rcu_read_unlock();
3128 spin_unlock(&po->bind_lock);
3129 release_sock(sk);
3130 return ret;
3131}
3132
3133
3134
3135
3136
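/*
 *	Bind a packet socket to a device (SOCK_PACKET, name-based interface).
 */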
3137static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3138 int addr_len)
3139{
3140 struct sock *sk = sock->sk;
3141 char name[sizeof(uaddr->sa_data) + 1];
3142
3143
3144
3145
3146
3147 if (addr_len != sizeof(struct sockaddr))
3148 return -EINVAL;
3149
3150
3151
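 /* uaddr->sa_data comes from user space and is not guaranteed
  * to be NUL-terminated.
  */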
3152 memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
3153 name[sizeof(uaddr->sa_data)] = 0;
3154
3155 return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
3156}
3157
3158static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3159{
3160 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3161 struct sock *sk = sock->sk;
3162
3163
3164
3165
3166
3167 if (addr_len < sizeof(struct sockaddr_ll))
3168 return -EINVAL;
3169 if (sll->sll_family != AF_PACKET)
3170 return -EINVAL;
3171
3172 return packet_do_bind(sk, NULL, sll->sll_ifindex,
3173 sll->sll_protocol ? : pkt_sk(sk)->num);
3174}
3175
3176static struct proto packet_proto = {
3177 .name = "PACKET",
3178 .owner = THIS_MODULE,
3179 .obj_size = sizeof(struct packet_sock),
3180};
3181
3182
3183
3184
3185
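/*
 *	Create a packet socket.
 */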
3186static int packet_create(struct net *net, struct socket *sock, int protocol,
3187 int kern)
3188{
3189 struct sock *sk;
3190 struct packet_sock *po;
3191 __be16 proto = (__force __be16)protocol;
3192 int err;
3193
3194 if (!ns_capable(net->user_ns, CAP_NET_RAW))
3195 return -EPERM;
3196 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3197 sock->type != SOCK_PACKET)
3198 return -ESOCKTNOSUPPORT;
3199
3200 sock->state = SS_UNCONNECTED;
3201
3202 err = -ENOBUFS;
3203 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3204 if (sk == NULL)
3205 goto out;
3206
3207 sock->ops = &packet_ops;
3208 if (sock->type == SOCK_PACKET)
3209 sock->ops = &packet_ops_spkt;
3210
3211 sock_init_data(sock, sk);
3212
3213 po = pkt_sk(sk);
3214 sk->sk_family = PF_PACKET;
3215 po->num = proto;
3216 po->xmit = dev_queue_xmit;
3217
3218 err = packet_alloc_pending(po);
3219 if (err)
3220 goto out2;
3221
3222 packet_cached_dev_reset(po);
3223
3224 sk->sk_destruct = packet_sock_destruct;
3225 sk_refcnt_debug_inc(sk);
3226
3227
3228
3229
3230
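 /*
  *	Attach a protocol block
  */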
3231 spin_lock_init(&po->bind_lock);
3232 mutex_init(&po->pg_vec_lock);
3233 po->rollover = NULL;
3234 po->prot_hook.func = packet_rcv;
3235
3236 if (sock->type == SOCK_PACKET)
3237 po->prot_hook.func = packet_rcv_spkt;
3238
3239 po->prot_hook.af_packet_priv = sk;
3240
3241 if (proto) {
3242 po->prot_hook.type = proto;
3243 __register_prot_hook(sk);
3244 }
3245
3246 mutex_lock(&net->packet.sklist_lock);
3247 sk_add_node_tail_rcu(sk, &net->packet.sklist);
3248 mutex_unlock(&net->packet.sklist_lock);
3249
3250 preempt_disable();
3251 sock_prot_inuse_add(net, &packet_proto, 1);
3252 preempt_enable();
3253
3254 return 0;
3255out2:
3256 sk_free(sk);
3257out:
3258 return err;
3259}
3260
3261
3262
3263
3264
3265
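/*
 *	Pull a packet from our receive queue and hand it to the user.
 *	If necessary we block.
 */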
3266static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3267 int flags)
3268{
3269 struct sock *sk = sock->sk;
3270 struct sk_buff *skb;
3271 int copied, err;
3272 int vnet_hdr_len = 0;
3273 unsigned int origlen = 0;
3274
3275 err = -EINVAL;
3276 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3277 goto out;
3278
3279#if 0
3280
3281 if (pkt_sk(sk)->ifindex < 0)
3282 return -ENODEV;
3283#endif
3284
3285 if (flags & MSG_ERRQUEUE) {
3286 err = sock_recv_errqueue(sk, msg, len,
3287 SOL_PACKET, PACKET_TX_TIMESTAMP);
3288 goto out;
3289 }
3290
3291
3292
3293
3294
3295
3296
3297
3298
3299
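 /*
  *	Call the generic datagram receiver. This handles all sorts
  *	of horrible races and re-entrancy so we can forget about it
  *	in the protocol layers.
  */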
3300 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
3301
3302
3303
3304
3305
3306
3307
3308 if (skb == NULL)
3309 goto out;
3310
3311 if (pkt_sk(sk)->pressure)
3312 packet_rcv_has_room(pkt_sk(sk), NULL);
3313
3314 if (pkt_sk(sk)->has_vnet_hdr) {
3315 err = packet_rcv_vnet(msg, skb, &len);
3316 if (err)
3317 goto out_free;
3318 vnet_hdr_len = sizeof(struct virtio_net_hdr);
3319 }
3320
3321
3322
3323
3324
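 /* You lose any data beyond the buffer you gave. If it worries
  * a user program they can ask the device for its MTU anyway.
  */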
3325 copied = skb->len;
3326 if (copied > len) {
3327 copied = len;
3328 msg->msg_flags |= MSG_TRUNC;
3329 }
3330
3331 err = skb_copy_datagram_msg(skb, 0, msg, copied);
3332 if (err)
3333 goto out_free;
3334
3335 if (sock->type != SOCK_PACKET) {
3336 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3337
3338
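 /* The original length was stored in the sockaddr_ll fields. */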
3339 origlen = PACKET_SKB_CB(skb)->sa.origlen;
3340 sll->sll_family = AF_PACKET;
3341 sll->sll_protocol = skb->protocol;
3342 }
3343
3344 sock_recv_ts_and_drops(msg, sk, skb);
3345
3346 if (msg->msg_name) {
3347
3348
3349
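 /* If the caller supplied an address buffer, fill it in now;
  * its length depends on the socket type.
  */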
3350 if (sock->type == SOCK_PACKET) {
3351 __sockaddr_check_size(sizeof(struct sockaddr_pkt));
3352 msg->msg_namelen = sizeof(struct sockaddr_pkt);
3353 } else {
3354 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3355
3356 msg->msg_namelen = sll->sll_halen +
3357 offsetof(struct sockaddr_ll, sll_addr);
3358 }
3359 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
3360 msg->msg_namelen);
3361 }
3362
3363 if (pkt_sk(sk)->auxdata) {
3364 struct tpacket_auxdata aux;
3365
3366 aux.tp_status = TP_STATUS_USER;
3367 if (skb->ip_summed == CHECKSUM_PARTIAL)
3368 aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3369 else if (skb->pkt_type != PACKET_OUTGOING &&
3370 (skb->ip_summed == CHECKSUM_COMPLETE ||
3371 skb_csum_unnecessary(skb)))
3372 aux.tp_status |= TP_STATUS_CSUM_VALID;
3373
3374 aux.tp_len = origlen;
3375 aux.tp_snaplen = skb->len;
3376 aux.tp_mac = 0;
3377 aux.tp_net = skb_network_offset(skb);
3378 if (skb_vlan_tag_present(skb)) {
3379 aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3380 aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3381 aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3382 } else {
3383 aux.tp_vlan_tci = 0;
3384 aux.tp_vlan_tpid = 0;
3385 }
3386 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3387 }
3388
3389
3390
3391
3392
3393 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3394
3395out_free:
3396 skb_free_datagram(sk, skb);
3397out:
3398 return err;
3399}
3400
3401static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3402 int peer)
3403{
3404 struct net_device *dev;
3405 struct sock *sk = sock->sk;
3406
3407 if (peer)
3408 return -EOPNOTSUPP;
3409
3410 uaddr->sa_family = AF_PACKET;
3411 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3412 rcu_read_lock();
3413 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
3414 if (dev)
3415 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3416 rcu_read_unlock();
3417
3418 return sizeof(*uaddr);
3419}
3420
3421static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3422 int peer)
3423{
3424 struct net_device *dev;
3425 struct sock *sk = sock->sk;
3426 struct packet_sock *po = pkt_sk(sk);
3427 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3428
3429 if (peer)
3430 return -EOPNOTSUPP;
3431
3432 sll->sll_family = AF_PACKET;
3433 sll->sll_ifindex = po->ifindex;
3434 sll->sll_protocol = po->num;
3435 sll->sll_pkttype = 0;
3436 rcu_read_lock();
3437 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
3438 if (dev) {
3439 sll->sll_hatype = dev->type;
3440 sll->sll_halen = dev->addr_len;
3441 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3442 } else {
3443 sll->sll_hatype = 0;
3444 sll->sll_halen = 0;
3445 }
3446 rcu_read_unlock();
3447
3448 return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3449}
3450
3451static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3452 int what)
3453{
3454 switch (i->type) {
3455 case PACKET_MR_MULTICAST:
3456 if (i->alen != dev->addr_len)
3457 return -EINVAL;
3458 if (what > 0)
3459 return dev_mc_add(dev, i->addr);
3460 else
3461 return dev_mc_del(dev, i->addr);
3462 break;
3463 case PACKET_MR_PROMISC:
3464 return dev_set_promiscuity(dev, what);
3465 case PACKET_MR_ALLMULTI:
3466 return dev_set_allmulti(dev, what);
3467 case PACKET_MR_UNICAST:
3468 if (i->alen != dev->addr_len)
3469 return -EINVAL;
3470 if (what > 0)
3471 return dev_uc_add(dev, i->addr);
3472 else
3473 return dev_uc_del(dev, i->addr);
3474 break;
3475 default:
3476 break;
3477 }
3478 return 0;
3479}
3480
3481static void packet_dev_mclist_delete(struct net_device *dev,
3482 struct packet_mclist **mlp)
3483{
3484 struct packet_mclist *ml;
3485
3486 while ((ml = *mlp) != NULL) {
3487 if (ml->ifindex == dev->ifindex) {
3488 packet_dev_mc(dev, ml, -1);
3489 *mlp = ml->next;
3490 kfree(ml);
3491 } else
3492 mlp = &ml->next;
3493 }
3494}
3495
3496static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3497{
3498 struct packet_sock *po = pkt_sk(sk);
3499 struct packet_mclist *ml, *i;
3500 struct net_device *dev;
3501 int err;
3502
3503 rtnl_lock();
3504
3505 err = -ENODEV;
3506 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3507 if (!dev)
3508 goto done;
3509
3510 err = -EINVAL;
3511 if (mreq->mr_alen > dev->addr_len)
3512 goto done;
3513
3514 err = -ENOBUFS;
3515 i = kmalloc(sizeof(*i), GFP_KERNEL);
3516 if (i == NULL)
3517 goto done;
3518
3519 err = 0;
3520 for (ml = po->mclist; ml; ml = ml->next) {
3521 if (ml->ifindex == mreq->mr_ifindex &&
3522 ml->type == mreq->mr_type &&
3523 ml->alen == mreq->mr_alen &&
3524 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3525 ml->count++;
3526
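 /* An identical entry already exists; free the new one. */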
3527 kfree(i);
3528 goto done;
3529 }
3530 }
3531
3532 i->type = mreq->mr_type;
3533 i->ifindex = mreq->mr_ifindex;
3534 i->alen = mreq->mr_alen;
3535 memcpy(i->addr, mreq->mr_address, i->alen);
3536 memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3537 i->count = 1;
3538 i->next = po->mclist;
3539 po->mclist = i;
3540 err = packet_dev_mc(dev, i, 1);
3541 if (err) {
3542 po->mclist = i->next;
3543 kfree(i);
3544 }
3545
3546done:
3547 rtnl_unlock();
3548 return err;
3549}
3550
3551static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3552{
3553 struct packet_mclist *ml, **mlp;
3554
3555 rtnl_lock();
3556
3557 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3558 if (ml->ifindex == mreq->mr_ifindex &&
3559 ml->type == mreq->mr_type &&
3560 ml->alen == mreq->mr_alen &&
3561 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3562 if (--ml->count == 0) {
3563 struct net_device *dev;
3564 *mlp = ml->next;
3565 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3566 if (dev)
3567 packet_dev_mc(dev, ml, -1);
3568 kfree(ml);
3569 }
3570 break;
3571 }
3572 }
3573 rtnl_unlock();
3574 return 0;
3575}
3576
3577static void packet_flush_mclist(struct sock *sk)
3578{
3579 struct packet_sock *po = pkt_sk(sk);
3580 struct packet_mclist *ml;
3581
3582 if (!po->mclist)
3583 return;
3584
3585 rtnl_lock();
3586 while ((ml = po->mclist) != NULL) {
3587 struct net_device *dev;
3588
3589 po->mclist = ml->next;
3590 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3591 if (dev != NULL)
3592 packet_dev_mc(dev, ml, -1);
3593 kfree(ml);
3594 }
3595 rtnl_unlock();
3596}
3597
3598static int
3599packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
3600{
3601 struct sock *sk = sock->sk;
3602 struct packet_sock *po = pkt_sk(sk);
3603 int ret;
3604
3605 if (level != SOL_PACKET)
3606 return -ENOPROTOOPT;
3607
3608 switch (optname) {
3609 case PACKET_ADD_MEMBERSHIP:
3610 case PACKET_DROP_MEMBERSHIP:
3611 {
3612 struct packet_mreq_max mreq;
3613 int len = optlen;
3614 memset(&mreq, 0, sizeof(mreq));
3615 if (len < sizeof(struct packet_mreq))
3616 return -EINVAL;
3617 if (len > sizeof(mreq))
3618 len = sizeof(mreq);
3619 if (copy_from_user(&mreq, optval, len))
3620 return -EFAULT;
3621 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3622 return -EINVAL;
3623 if (optname == PACKET_ADD_MEMBERSHIP)
3624 ret = packet_mc_add(sk, &mreq);
3625 else
3626 ret = packet_mc_drop(sk, &mreq);
3627 return ret;
3628 }
3629
3630 case PACKET_RX_RING:
3631 case PACKET_TX_RING:
3632 {
3633 union tpacket_req_u req_u;
3634 int len;
3635
3636 lock_sock(sk);
3637 switch (po->tp_version) {
3638 case TPACKET_V1:
3639 case TPACKET_V2:
3640 len = sizeof(req_u.req);
3641 break;
3642 case TPACKET_V3:
3643 default:
3644 len = sizeof(req_u.req3);
3645 break;
3646 }
3647 if (optlen < len) {
3648 ret = -EINVAL;
3649 } else {
3650 if (copy_from_user(&req_u.req, optval, len))
3651 ret = -EFAULT;
3652 else
3653 ret = packet_set_ring(sk, &req_u, 0,
3654 optname == PACKET_TX_RING);
3655 }
3656 release_sock(sk);
3657 return ret;
3658 }
3659 case PACKET_COPY_THRESH:
3660 {
3661 int val;
3662
3663 if (optlen != sizeof(val))
3664 return -EINVAL;
3665 if (copy_from_user(&val, optval, sizeof(val)))
3666 return -EFAULT;
3667
3668 pkt_sk(sk)->copy_thresh = val;
3669 return 0;
3670 }
3671 case PACKET_VERSION:
3672 {
3673 int val;
3674
3675 if (optlen != sizeof(val))
3676 return -EINVAL;
3677 if (copy_from_user(&val, optval, sizeof(val)))
3678 return -EFAULT;
3679 switch (val) {
3680 case TPACKET_V1:
3681 case TPACKET_V2:
3682 case TPACKET_V3:
3683 break;
3684 default:
3685 return -EINVAL;
3686 }
3687 lock_sock(sk);
3688 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3689 ret = -EBUSY;
3690 } else {
3691 po->tp_version = val;
3692 ret = 0;
3693 }
3694 release_sock(sk);
3695 return ret;
3696 }
3697 case PACKET_RESERVE:
3698 {
3699 unsigned int val;
3700
3701 if (optlen != sizeof(val))
3702 return -EINVAL;
3703 if (copy_from_user(&val, optval, sizeof(val)))
3704 return -EFAULT;
3705 if (val > INT_MAX)
3706 return -EINVAL;
3707 lock_sock(sk);
3708 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3709 ret = -EBUSY;
3710 } else {
3711 po->tp_reserve = val;
3712 ret = 0;
3713 }
3714 release_sock(sk);
3715 return ret;
3716 }
3717 case PACKET_LOSS:
3718 {
3719 unsigned int val;
3720
3721 if (optlen != sizeof(val))
3722 return -EINVAL;
3723 if (copy_from_user(&val, optval, sizeof(val)))
3724 return -EFAULT;
3725
3726 lock_sock(sk);
3727 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3728 ret = -EBUSY;
3729 } else {
3730 po->tp_loss = !!val;
3731 ret = 0;
3732 }
3733 release_sock(sk);
3734 return ret;
3735 }
3736 case PACKET_AUXDATA:
3737 {
3738 int val;
3739
3740 if (optlen < sizeof(val))
3741 return -EINVAL;
3742 if (copy_from_user(&val, optval, sizeof(val)))
3743 return -EFAULT;
3744
3745 lock_sock(sk);
3746 po->auxdata = !!val;
3747 release_sock(sk);
3748 return 0;
3749 }
3750 case PACKET_ORIGDEV:
3751 {
3752 int val;
3753
3754 if (optlen < sizeof(val))
3755 return -EINVAL;
3756 if (copy_from_user(&val, optval, sizeof(val)))
3757 return -EFAULT;
3758
3759 lock_sock(sk);
3760 po->origdev = !!val;
3761 release_sock(sk);
3762 return 0;
3763 }
3764 case PACKET_VNET_HDR:
3765 {
3766 int val;
3767
3768 if (sock->type != SOCK_RAW)
3769 return -EINVAL;
3770 if (optlen < sizeof(val))
3771 return -EINVAL;
3772 if (copy_from_user(&val, optval, sizeof(val)))
3773 return -EFAULT;
3774
3775 lock_sock(sk);
3776 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3777 ret = -EBUSY;
3778 } else {
3779 po->has_vnet_hdr = !!val;
3780 ret = 0;
3781 }
3782 release_sock(sk);
3783 return ret;
3784 }
3785 case PACKET_TIMESTAMP:
3786 {
3787 int val;
3788
3789 if (optlen != sizeof(val))
3790 return -EINVAL;
3791 if (copy_from_user(&val, optval, sizeof(val)))
3792 return -EFAULT;
3793
3794 po->tp_tstamp = val;
3795 return 0;
3796 }
3797 case PACKET_FANOUT:
3798 {
3799 int val;
3800
3801 if (optlen != sizeof(val))
3802 return -EINVAL;
3803 if (copy_from_user(&val, optval, sizeof(val)))
3804 return -EFAULT;
3805
3806 return fanout_add(sk, val & 0xffff, val >> 16);
3807 }
3808 case PACKET_FANOUT_DATA:
3809 {
3810 if (!po->fanout)
3811 return -EINVAL;
3812
3813 return fanout_set_data(po, optval, optlen);
3814 }
3815 case PACKET_TX_HAS_OFF:
3816 {
3817 unsigned int val;
3818
3819 if (optlen != sizeof(val))
3820 return -EINVAL;
3821 if (copy_from_user(&val, optval, sizeof(val)))
3822 return -EFAULT;
3823
3824 lock_sock(sk);
3825 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3826 ret = -EBUSY;
3827 } else {
3828 po->tp_tx_has_off = !!val;
3829 ret = 0;
3830 }
3831 release_sock(sk);
3832 return ret;
3833 }
3834 case PACKET_QDISC_BYPASS:
3835 {
3836 int val;
3837
3838 if (optlen != sizeof(val))
3839 return -EINVAL;
3840 if (copy_from_user(&val, optval, sizeof(val)))
3841 return -EFAULT;
3842
3843 po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3844 return 0;
3845 }
3846 default:
3847 return -ENOPROTOOPT;
3848 }
3849}
3850
3851static int packet_getsockopt(struct socket *sock, int level, int optname,
3852 char __user *optval, int __user *optlen)
3853{
3854 int len;
3855 int val, lv = sizeof(val);
3856 struct sock *sk = sock->sk;
3857 struct packet_sock *po = pkt_sk(sk);
3858 void *data = &val;
3859 union tpacket_stats_u st;
3860 struct tpacket_rollover_stats rstats;
3861
3862 if (level != SOL_PACKET)
3863 return -ENOPROTOOPT;
3864
3865 if (get_user(len, optlen))
3866 return -EFAULT;
3867
3868 if (len < 0)
3869 return -EINVAL;
3870
3871 switch (optname) {
3872 case PACKET_STATISTICS:
3873 spin_lock_bh(&sk->sk_receive_queue.lock);
3874 memcpy(&st, &po->stats, sizeof(st));
3875 memset(&po->stats, 0, sizeof(po->stats));
3876 spin_unlock_bh(&sk->sk_receive_queue.lock);
3877
3878 if (po->tp_version == TPACKET_V3) {
3879 lv = sizeof(struct tpacket_stats_v3);
3880 st.stats3.tp_packets += st.stats3.tp_drops;
3881 data = &st.stats3;
3882 } else {
3883 lv = sizeof(struct tpacket_stats);
3884 st.stats1.tp_packets += st.stats1.tp_drops;
3885 data = &st.stats1;
3886 }
3887
3888 break;
3889 case PACKET_AUXDATA:
3890 val = po->auxdata;
3891 break;
3892 case PACKET_ORIGDEV:
3893 val = po->origdev;
3894 break;
3895 case PACKET_VNET_HDR:
3896 val = po->has_vnet_hdr;
3897 break;
3898 case PACKET_VERSION:
3899 val = po->tp_version;
3900 break;
3901 case PACKET_HDRLEN:
3902 if (len > sizeof(int))
3903 len = sizeof(int);
3904 if (len < sizeof(int))
3905 return -EINVAL;
3906 if (copy_from_user(&val, optval, len))
3907 return -EFAULT;
3908 switch (val) {
3909 case TPACKET_V1:
3910 val = sizeof(struct tpacket_hdr);
3911 break;
3912 case TPACKET_V2:
3913 val = sizeof(struct tpacket2_hdr);
3914 break;
3915 case TPACKET_V3:
3916 val = sizeof(struct tpacket3_hdr);
3917 break;
3918 default:
3919 return -EINVAL;
3920 }
3921 break;
3922 case PACKET_RESERVE:
3923 val = po->tp_reserve;
3924 break;
3925 case PACKET_LOSS:
3926 val = po->tp_loss;
3927 break;
3928 case PACKET_TIMESTAMP:
3929 val = po->tp_tstamp;
3930 break;
3931 case PACKET_FANOUT:
3932 val = (po->fanout ?
3933 ((u32)po->fanout->id |
3934 ((u32)po->fanout->type << 16) |
3935 ((u32)po->fanout->flags << 24)) :
3936 0);
3937 break;
3938 case PACKET_ROLLOVER_STATS:
3939 if (!po->rollover)
3940 return -EINVAL;
3941 rstats.tp_all = atomic_long_read(&po->rollover->num);
3942 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
3943 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
3944 data = &rstats;
3945 lv = sizeof(rstats);
3946 break;
3947 case PACKET_TX_HAS_OFF:
3948 val = po->tp_tx_has_off;
3949 break;
3950 case PACKET_QDISC_BYPASS:
3951 val = packet_use_direct_xmit(po);
3952 break;
3953 default:
3954 return -ENOPROTOOPT;
3955 }
3956
3957 if (len > lv)
3958 len = lv;
3959 if (put_user(len, optlen))
3960 return -EFAULT;
3961 if (copy_to_user(optval, data, len))
3962 return -EFAULT;
3963 return 0;
3964}
3965
3966
3967#ifdef CONFIG_COMPAT
3968static int compat_packet_setsockopt(struct socket *sock, int level, int optname,
3969 char __user *optval, unsigned int optlen)
3970{
3971 struct packet_sock *po = pkt_sk(sock->sk);
3972
3973 if (level != SOL_PACKET)
3974 return -ENOPROTOOPT;
3975
3976 if (optname == PACKET_FANOUT_DATA &&
3977 po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) {
3978 optval = (char __user *)get_compat_bpf_fprog(optval);
3979 if (!optval)
3980 return -EFAULT;
3981 optlen = sizeof(struct sock_fprog);
3982 }
3983
3984 return packet_setsockopt(sock, level, optname, optval, optlen);
3985}
3986#endif
3987
3988static int packet_notifier(struct notifier_block *this,
3989 unsigned long msg, void *ptr)
3990{
3991 struct sock *sk;
3992 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3993 struct net *net = dev_net(dev);
3994
3995 rcu_read_lock();
3996 sk_for_each_rcu(sk, &net->packet.sklist) {
3997 struct packet_sock *po = pkt_sk(sk);
3998
3999 switch (msg) {
4000 case NETDEV_UNREGISTER:
4001 if (po->mclist)
4002 packet_dev_mclist_delete(dev, &po->mclist);
4003
4004
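 /* fallthrough */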
4005 case NETDEV_DOWN:
4006 if (dev->ifindex == po->ifindex) {
4007 spin_lock(&po->bind_lock);
4008 if (po->running) {
4009 __unregister_prot_hook(sk, false);
4010 sk->sk_err = ENETDOWN;
4011 if (!sock_flag(sk, SOCK_DEAD))
4012 sk->sk_error_report(sk);
4013 }
4014 if (msg == NETDEV_UNREGISTER) {
4015 packet_cached_dev_reset(po);
4016 po->ifindex = -1;
4017 if (po->prot_hook.dev)
4018 dev_put(po->prot_hook.dev);
4019 po->prot_hook.dev = NULL;
4020 }
4021 spin_unlock(&po->bind_lock);
4022 }
4023 break;
4024 case NETDEV_UP:
4025 if (dev->ifindex == po->ifindex) {
4026 spin_lock(&po->bind_lock);
4027 if (po->num)
4028 register_prot_hook(sk);
4029 spin_unlock(&po->bind_lock);
4030 }
4031 break;
4032 }
4033 }
4034 rcu_read_unlock();
4035 return NOTIFY_DONE;
4036}
4037
4038
4039static int packet_ioctl(struct socket *sock, unsigned int cmd,
4040 unsigned long arg)
4041{
4042 struct sock *sk = sock->sk;
4043
4044 switch (cmd) {
4045 case SIOCOUTQ:
4046 {
4047 int amount = sk_wmem_alloc_get(sk);
4048
4049 return put_user(amount, (int __user *)arg);
4050 }
4051 case SIOCINQ:
4052 {
4053 struct sk_buff *skb;
4054 int amount = 0;
4055
4056 spin_lock_bh(&sk->sk_receive_queue.lock);
4057 skb = skb_peek(&sk->sk_receive_queue);
4058 if (skb)
4059 amount = skb->len;
4060 spin_unlock_bh(&sk->sk_receive_queue.lock);
4061 return put_user(amount, (int __user *)arg);
4062 }
4063 case SIOCGSTAMP:
4064 return sock_get_timestamp(sk, (struct timeval __user *)arg);
4065 case SIOCGSTAMPNS:
4066 return sock_get_timestampns(sk, (struct timespec __user *)arg);
4067
4068#ifdef CONFIG_INET
4069 case SIOCADDRT:
4070 case SIOCDELRT:
4071 case SIOCDARP:
4072 case SIOCGARP:
4073 case SIOCSARP:
4074 case SIOCGIFADDR:
4075 case SIOCSIFADDR:
4076 case SIOCGIFBRDADDR:
4077 case SIOCSIFBRDADDR:
4078 case SIOCGIFNETMASK:
4079 case SIOCSIFNETMASK:
4080 case SIOCGIFDSTADDR:
4081 case SIOCSIFDSTADDR:
4082 case SIOCSIFFLAGS:
4083 return inet_dgram_ops.ioctl(sock, cmd, arg);
4084#endif
4085
4086 default:
4087 return -ENOIOCTLCMD;
4088 }
4089 return 0;
4090}
4091
4092static __poll_t packet_poll(struct file *file, struct socket *sock,
4093 poll_table *wait)
4094{
4095 struct sock *sk = sock->sk;
4096 struct packet_sock *po = pkt_sk(sk);
4097 __poll_t mask = datagram_poll(file, sock, wait);
4098
4099 spin_lock_bh(&sk->sk_receive_queue.lock);
4100 if (po->rx_ring.pg_vec) {
4101 if (!packet_previous_rx_frame(po, &po->rx_ring,
4102 TP_STATUS_KERNEL))
4103 mask |= EPOLLIN | EPOLLRDNORM;
4104 }
4105 if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
4106 po->pressure = 0;
4107 spin_unlock_bh(&sk->sk_receive_queue.lock);
4108 spin_lock_bh(&sk->sk_write_queue.lock);
4109 if (po->tx_ring.pg_vec) {
4110 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4111 mask |= EPOLLOUT | EPOLLWRNORM;
4112 }
4113 spin_unlock_bh(&sk->sk_write_queue.lock);
4114 return mask;
4115}
4116
4117
4118
4119
4120
4121
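/* Keep a count of user mmaps of the ring so that packet_set_ring() can
 * refuse to resize or free a ring that is still mapped.
 */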
4122static void packet_mm_open(struct vm_area_struct *vma)
4123{
4124 struct file *file = vma->vm_file;
4125 struct socket *sock = file->private_data;
4126 struct sock *sk = sock->sk;
4127
4128 if (sk)
4129 atomic_inc(&pkt_sk(sk)->mapped);
4130}
4131
4132static void packet_mm_close(struct vm_area_struct *vma)
4133{
4134 struct file *file = vma->vm_file;
4135 struct socket *sock = file->private_data;
4136 struct sock *sk = sock->sk;
4137
4138 if (sk)
4139 atomic_dec(&pkt_sk(sk)->mapped);
4140}
4141
4142static const struct vm_operations_struct packet_mmap_ops = {
4143 .open = packet_mm_open,
4144 .close = packet_mm_close,
4145};
4146
4147static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4148 unsigned int len)
4149{
4150 int i;
4151
4152 for (i = 0; i < len; i++) {
4153 if (likely(pg_vec[i].buffer)) {
4154 if (is_vmalloc_addr(pg_vec[i].buffer))
4155 vfree(pg_vec[i].buffer);
4156 else
4157 free_pages((unsigned long)pg_vec[i].buffer,
4158 order);
4159 pg_vec[i].buffer = NULL;
4160 }
4161 }
4162 kfree(pg_vec);
4163}
4164
4165static char *alloc_one_pg_vec_page(unsigned long order)
4166{
4167 char *buffer;
4168 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4169 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4170
4171 buffer = (char *) __get_free_pages(gfp_flags, order);
4172 if (buffer)
4173 return buffer;
4174
4175
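 /* __get_free_pages() failed; fall back to vmalloc */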
4176 buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
4177 if (buffer)
4178 return buffer;
4179
4180
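 /* vmalloc also failed; retry the page allocator, this time
  * allowing it to retry and reclaim.
  */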
4181 gfp_flags &= ~__GFP_NORETRY;
4182 buffer = (char *) __get_free_pages(gfp_flags, order);
4183 if (buffer)
4184 return buffer;
4185
4186
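 /* complete failure */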
4187 return NULL;
4188}
4189
4190static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4191{
4192 unsigned int block_nr = req->tp_block_nr;
4193 struct pgv *pg_vec;
4194 int i;
4195
4196 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
4197 if (unlikely(!pg_vec))
4198 goto out;
4199
4200 for (i = 0; i < block_nr; i++) {
4201 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4202 if (unlikely(!pg_vec[i].buffer))
4203 goto out_free_pgvec;
4204 }
4205
4206out:
4207 return pg_vec;
4208
4209out_free_pgvec:
4210 free_pg_vec(pg_vec, order, block_nr);
4211 pg_vec = NULL;
4212 goto out;
4213}
4214
4215static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4216 int closing, int tx_ring)
4217{
4218 struct pgv *pg_vec = NULL;
4219 struct packet_sock *po = pkt_sk(sk);
4220 int was_running, order = 0;
4221 struct packet_ring_buffer *rb;
4222 struct sk_buff_head *rb_queue;
4223 __be16 num;
4224 int err = -EINVAL;
4225
4226 struct tpacket_req *req = &req_u->req;
4227
4228 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4229 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4230
4231 err = -EBUSY;
4232 if (!closing) {
4233 if (atomic_read(&po->mapped))
4234 goto out;
4235 if (packet_read_pending(rb))
4236 goto out;
4237 }
4238
4239 if (req->tp_block_nr) {
4240 unsigned int min_frame_size;
4241
4242
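 /* Sanity tests and some calculations */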
4243 err = -EBUSY;
4244 if (unlikely(rb->pg_vec))
4245 goto out;
4246
4247 switch (po->tp_version) {
4248 case TPACKET_V1:
4249 po->tp_hdrlen = TPACKET_HDRLEN;
4250 break;
4251 case TPACKET_V2:
4252 po->tp_hdrlen = TPACKET2_HDRLEN;
4253 break;
4254 case TPACKET_V3:
4255 po->tp_hdrlen = TPACKET3_HDRLEN;
4256 break;
4257 }
4258
4259 err = -EINVAL;
4260 if (unlikely((int)req->tp_block_size <= 0))
4261 goto out;
4262 if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4263 goto out;
4264 min_frame_size = po->tp_hdrlen + po->tp_reserve;
4265 if (po->tp_version >= TPACKET_V3 &&
4266 req->tp_block_size <
4267 BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
4268 goto out;
4269 if (unlikely(req->tp_frame_size < min_frame_size))
4270 goto out;
4271 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4272 goto out;
4273
4274 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4275 if (unlikely(rb->frames_per_block == 0))
4276 goto out;
4277 if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
4278 goto out;
4279 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4280 req->tp_frame_nr))
4281 goto out;
4282
4283 err = -ENOMEM;
4284 order = get_order(req->tp_block_size);
4285 pg_vec = alloc_pg_vec(req, order);
4286 if (unlikely(!pg_vec))
4287 goto out;
4288 switch (po->tp_version) {
4289 case TPACKET_V3:
4290
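 /* Block transmit is not supported yet */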
4291 if (!tx_ring) {
4292 init_prb_bdqc(po, rb, pg_vec, req_u);
4293 } else {
4294 struct tpacket_req3 *req3 = &req_u->req3;
4295
4296 if (req3->tp_retire_blk_tov ||
4297 req3->tp_sizeof_priv ||
4298 req3->tp_feature_req_word) {
4299 err = -EINVAL;
4300 goto out;
4301 }
4302 }
4303 break;
4304 default:
4305 break;
4306 }
4307 }
4308
4309 else {
4310 err = -EINVAL;
4311 if (unlikely(req->tp_frame_nr))
4312 goto out;
4313 }
4314
4315
4316
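 /* Detach the socket from the device while the ring is swapped. */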
4317 spin_lock(&po->bind_lock);
4318 was_running = po->running;
4319 num = po->num;
4320 if (was_running) {
4321 po->num = 0;
4322 __unregister_prot_hook(sk, false);
4323 }
4324 spin_unlock(&po->bind_lock);
4325
4326 synchronize_net();
4327
4328 err = -EBUSY;
4329 mutex_lock(&po->pg_vec_lock);
4330 if (closing || atomic_read(&po->mapped) == 0) {
4331 err = 0;
4332 spin_lock_bh(&rb_queue->lock);
4333 swap(rb->pg_vec, pg_vec);
4334 rb->frame_max = (req->tp_frame_nr - 1);
4335 rb->head = 0;
4336 rb->frame_size = req->tp_frame_size;
4337 spin_unlock_bh(&rb_queue->lock);
4338
4339 swap(rb->pg_vec_order, order);
4340 swap(rb->pg_vec_len, req->tp_block_nr);
4341
4342 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
4343 po->prot_hook.func = (po->rx_ring.pg_vec) ?
4344 tpacket_rcv : packet_rcv;
4345 skb_queue_purge(rb_queue);
4346 if (atomic_read(&po->mapped))
4347 pr_err("packet_mmap: vma is busy: %d\n",
4348 atomic_read(&po->mapped));
4349 }
4350 mutex_unlock(&po->pg_vec_lock);
4351
4352 spin_lock(&po->bind_lock);
4353 if (was_running) {
4354 po->num = num;
4355 register_prot_hook(sk);
4356 }
4357 spin_unlock(&po->bind_lock);
4358 if (pg_vec && (po->tp_version > TPACKET_V2)) {
4359
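 /* The V3 rx ring uses a retire-block timer; shut it down
  * before the old pages are freed.
  */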
4360 if (!tx_ring)
4361 prb_shutdown_retire_blk_timer(po, rb_queue);
4362 }
4363
4364 if (pg_vec)
4365 free_pg_vec(pg_vec, order, req->tp_block_nr);
4366out:
4367 return err;
4368}
4369
4370static int packet_mmap(struct file *file, struct socket *sock,
4371 struct vm_area_struct *vma)
4372{
4373 struct sock *sk = sock->sk;
4374 struct packet_sock *po = pkt_sk(sk);
4375 unsigned long size, expected_size;
4376 struct packet_ring_buffer *rb;
4377 unsigned long start;
4378 int err = -EINVAL;
4379 int i;
4380
4381 if (vma->vm_pgoff)
4382 return -EINVAL;
4383
4384 mutex_lock(&po->pg_vec_lock);
4385
4386 expected_size = 0;
4387 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4388 if (rb->pg_vec) {
4389 expected_size += rb->pg_vec_len
4390 * rb->pg_vec_pages
4391 * PAGE_SIZE;
4392 }
4393 }
4394
4395 if (expected_size == 0)
4396 goto out;
4397
4398 size = vma->vm_end - vma->vm_start;
4399 if (size != expected_size)
4400 goto out;
4401
4402 start = vma->vm_start;
4403 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4404 if (rb->pg_vec == NULL)
4405 continue;
4406
4407 for (i = 0; i < rb->pg_vec_len; i++) {
4408 struct page *page;
4409 void *kaddr = rb->pg_vec[i].buffer;
4410 int pg_num;
4411
4412 for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4413 page = pgv_to_page(kaddr);
4414 err = vm_insert_page(vma, start, page);
4415 if (unlikely(err))
4416 goto out;
4417 start += PAGE_SIZE;
4418 kaddr += PAGE_SIZE;
4419 }
4420 }
4421 }
4422
4423 atomic_inc(&po->mapped);
4424 vma->vm_ops = &packet_mmap_ops;
4425 err = 0;
4426
4427out:
4428 mutex_unlock(&po->pg_vec_lock);
4429 return err;
4430}
4431
4432static const struct proto_ops packet_ops_spkt = {
4433 .family = PF_PACKET,
4434 .owner = THIS_MODULE,
4435 .release = packet_release,
4436 .bind = packet_bind_spkt,
4437 .connect = sock_no_connect,
4438 .socketpair = sock_no_socketpair,
4439 .accept = sock_no_accept,
4440 .getname = packet_getname_spkt,
4441 .poll = datagram_poll,
4442 .ioctl = packet_ioctl,
4443 .listen = sock_no_listen,
4444 .shutdown = sock_no_shutdown,
4445 .setsockopt = sock_no_setsockopt,
4446 .getsockopt = sock_no_getsockopt,
4447 .sendmsg = packet_sendmsg_spkt,
4448 .recvmsg = packet_recvmsg,
4449 .mmap = sock_no_mmap,
4450 .sendpage = sock_no_sendpage,
4451};
4452
4453static const struct proto_ops packet_ops = {
4454 .family = PF_PACKET,
4455 .owner = THIS_MODULE,
4456 .release = packet_release,
4457 .bind = packet_bind,
4458 .connect = sock_no_connect,
4459 .socketpair = sock_no_socketpair,
4460 .accept = sock_no_accept,
4461 .getname = packet_getname,
4462 .poll = packet_poll,
4463 .ioctl = packet_ioctl,
4464 .listen = sock_no_listen,
4465 .shutdown = sock_no_shutdown,
4466 .setsockopt = packet_setsockopt,
4467 .getsockopt = packet_getsockopt,
4468#ifdef CONFIG_COMPAT
4469 .compat_setsockopt = compat_packet_setsockopt,
4470#endif
4471 .sendmsg = packet_sendmsg,
4472 .recvmsg = packet_recvmsg,
4473 .mmap = packet_mmap,
4474 .sendpage = sock_no_sendpage,
4475};
4476
4477static const struct net_proto_family packet_family_ops = {
4478 .family = PF_PACKET,
4479 .create = packet_create,
4480 .owner = THIS_MODULE,
4481};
4482
4483static struct notifier_block packet_netdev_notifier = {
4484 .notifier_call = packet_notifier,
4485};
4486
4487#ifdef CONFIG_PROC_FS
4488
4489static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4490 __acquires(RCU)
4491{
4492 struct net *net = seq_file_net(seq);
4493
4494 rcu_read_lock();
4495 return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4496}
4497
4498static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4499{
4500 struct net *net = seq_file_net(seq);
4501 return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4502}
4503
4504static void packet_seq_stop(struct seq_file *seq, void *v)
4505 __releases(RCU)
4506{
4507 rcu_read_unlock();
4508}
4509
4510static int packet_seq_show(struct seq_file *seq, void *v)
4511{
4512 if (v == SEQ_START_TOKEN)
4513 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
4514 else {
4515 struct sock *s = sk_entry(v);
4516 const struct packet_sock *po = pkt_sk(s);
4517
4518 seq_printf(seq,
4519 "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
4520 s,
4521 refcount_read(&s->sk_refcnt),
4522 s->sk_type,
4523 ntohs(po->num),
4524 po->ifindex,
4525 po->running,
4526 atomic_read(&s->sk_rmem_alloc),
4527 from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4528 sock_i_ino(s));
4529 }
4530
4531 return 0;
4532}
4533
4534static const struct seq_operations packet_seq_ops = {
4535 .start = packet_seq_start,
4536 .next = packet_seq_next,
4537 .stop = packet_seq_stop,
4538 .show = packet_seq_show,
4539};
4540#endif
4541
4542static int __net_init packet_net_init(struct net *net)
4543{
4544 mutex_init(&net->packet.sklist_lock);
4545 INIT_HLIST_HEAD(&net->packet.sklist);
4546
4547 if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
4548 sizeof(struct seq_net_private)))
4549 return -ENOMEM;
4550
4551 return 0;
4552}
4553
4554static void __net_exit packet_net_exit(struct net *net)
4555{
4556 remove_proc_entry("packet", net->proc_net);
4557 WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
4558}
4559
4560static struct pernet_operations packet_net_ops = {
4561 .init = packet_net_init,
4562 .exit = packet_net_exit,
4563};
4564
4565
4566static void __exit packet_exit(void)
4567{
4568 unregister_netdevice_notifier(&packet_netdev_notifier);
4569 unregister_pernet_subsys(&packet_net_ops);
4570 sock_unregister(PF_PACKET);
4571 proto_unregister(&packet_proto);
4572}
4573
4574static int __init packet_init(void)
4575{
4576 int rc = proto_register(&packet_proto, 0);
4577
4578 if (rc != 0)
4579 goto out;
4580
4581 sock_register(&packet_family_ops);
4582 register_pernet_subsys(&packet_net_ops);
4583 register_netdevice_notifier(&packet_netdev_notifier);
4584out:
4585 return rc;
4586}
4587
4588module_init(packet_init);
4589module_exit(packet_exit);
4590MODULE_LICENSE("GPL");
4591MODULE_ALIAS_NETPROTO(PF_PACKET);
4592