/*
 * PACKET - implements raw packet sockets.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/percpu.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
#include <linux/bpf.h>

#include "internal.h"

/*
 * Assumptions:
 * - if device has no dev->hard_header routine, it adds and removes ll header
 *   inside itself. In this case ll header is invisible outside of device,
 *   but higher levels still should reserve dev->hard_header_len.
 *   Some devices are clever enough to reallocate skb when the header
 *   will not fit into the reserved space (tunnel), others are silly
 *   (PPP).
 * - packet socket receives packets with pulled ll header,
 *   so that SOCK_RAW should push it back.
 *
 * On receive:
 * -----------
 *
 * Incoming, dev->hard_header != NULL
 *   mac_header -> ll header
 *   data       -> data
 *
 * Outgoing, dev->hard_header != NULL
 *   mac_header -> ll header
 *   data       -> ll header
 *
 * Incoming, dev->hard_header == NULL
 *   mac_header -> UNKNOWN position. It is very likely that it points to the
 *                 ll header. PPP makes it so, which is wrong, because it
 *                 introduces asymmetry between rx and tx paths.
 *   data       -> data
 *
 * Outgoing, dev->hard_header == NULL
 *   mac_header -> data. ll header is still not built!
 *   data       -> data
 *
 * Resume
 *   If dev->hard_header == NULL we are unlikely to restore a sensible
 *   ll header.
 *
 * On transmit:
 * ------------
 *
 * dev->hard_header != NULL
 *   mac_header -> ll header
 *   data       -> ll header
 *
 * dev->hard_header == NULL (ll header is added by device, we cannot control it)
 *   mac_header -> data
 *   data       -> data
 *
 * We should set nh.raw on output to the correct position,
 * the packet classifier depends on it.
 */

struct packet_mreq_max {
	int mr_ifindex;
	unsigned short mr_type;
	unsigned short mr_alen;
	unsigned char mr_address[MAX_ADDR_LEN];
};

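/* A frame in the mmap()ed ring can carry any of the three TPACKET header
 * versions; this union lets the code select the layout at run time from
 * po->tp_version.
 */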
union tpacket_uhdr {
	struct tpacket_hdr *h1;
	struct tpacket2_hdr *h2;
	struct tpacket3_hdr *h3;
	void *raw;
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);

#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

#define PGV_FROM_VMALLOC 1

#define BLOCK_STATUS(x)		((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)
#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))

struct packet_sock;
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
			struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(unsigned long);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_init_blk_timer(struct packet_sock *,
		struct tpacket_kbdq_core *,
		void (*func) (unsigned long));
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);

struct packet_skb_cb {
	union {
		struct sockaddr_pkt pkt;
		union {
			/* Trick: alias skb original length with
			 * ll.sll_family and ll.protocol in order
			 * to save room.
			 */
			unsigned int origlen;
			struct sockaddr_ll ll;
		};
	} sa;
};

#define vio_le() virtio_legacy_is_little_endian()

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

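/* Hand the skb straight to the device's start_xmit, bypassing the qdisc
 * layer; this is the transmit path used when po->xmit points here instead
 * of at dev_queue_xmit (see packet_use_direct_xmit() below).
 */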
static int packet_direct_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	netdev_features_t features;
	struct netdev_queue *txq;
	int ret = NETDEV_TX_BUSY;

	if (unlikely(!netif_running(dev) ||
		     !netif_carrier_ok(dev)))
		goto drop;

	features = netif_skb_features(skb);
	if (skb_needs_linearize(skb, features) &&
	    __skb_linearize(skb))
		goto drop;

	txq = skb_get_tx_queue(dev, skb);

	local_bh_disable();

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_drv_stopped(txq))
		ret = netdev_start_xmit(skb, dev, txq, false);
	HARD_TX_UNLOCK(dev, txq);

	local_bh_enable();

	if (!dev_xmit_complete(ret))
		kfree_skb(skb);

	return ret;
drop:
	atomic_long_inc(&dev->tx_dropped);
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(po->cached_dev);
	if (likely(dev))
		dev_hold(dev);
	rcu_read_unlock();

	return dev;
}

static void packet_cached_dev_assign(struct packet_sock *po,
				     struct net_device *dev)
{
	rcu_assign_pointer(po->cached_dev, dev);
}

static void packet_cached_dev_reset(struct packet_sock *po)
{
	RCU_INIT_POINTER(po->cached_dev, NULL);
}

static bool packet_use_direct_xmit(const struct packet_sock *po)
{
	return po->xmit == packet_direct_xmit;
}

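/* Default TX queue selection: spread load across the device's real TX
 * queues by CPU id when the driver does not supply ndo_select_queue.
 */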
static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
	return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
}

static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	u16 queue_index;

	if (ops->ndo_select_queue) {
		queue_index = ops->ndo_select_queue(dev, skb, NULL,
						    __packet_pick_tx_queue);
		queue_index = netdev_cap_txqueue(dev, queue_index);
	} else {
		queue_index = __packet_pick_tx_queue(dev, skb);
	}

	skb_set_queue_mapping(skb, queue_index);
}

/* register_prot_hook must be invoked with the po->bind_lock held,
 * so that the hook cannot be registered twice and po->running always
 * reflects whether the hook is installed.
 */
static void register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);

	if (!po->running) {
		if (po->fanout)
			__fanout_link(sk, po);
		else
			dev_add_pack(&po->prot_hook);

		sock_hold(sk);
		po->running = 1;
	}
}

/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
 * held.  If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net() to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook.  If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	po->running = 0;

	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);

	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->running)
		__unregister_prot_hook(sk, sync);
}

static inline struct page * __pure pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

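/* The tp_status word of a ring frame is the handoff point between kernel
 * and user space; the barriers below order payload writes against the
 * status update that publishes the frame.
 */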
static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union tpacket_uhdr h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(struct packet_sock *po, void *frame)
{
	union tpacket_uhdr h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return 0;
	}
}

static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
				   unsigned int flags)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (shhwtstamps &&
	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
	    ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
		return TP_STATUS_TS_RAW_HARDWARE;

	if (ktime_to_timespec_cond(skb->tstamp, ts))
		return TP_STATUS_TS_SOFTWARE;

	return 0;
}
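
/* Store the send timestamp in the TX ring frame header and return the
 * TP_STATUS_TS_* flag saying which clock supplied it, or 0 if none did.
 */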
static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
				    struct sk_buff *skb)
{
	union tpacket_uhdr h;
	struct timespec ts;
	__u32 ts_status;

	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
		return 0;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_sec = ts.tv_sec;
		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
		break;
	case TPACKET_V2:
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		break;
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	/* one flush is safe, as both fields always lie on the same cacheline */
	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
	smp_wmb();

	return ts_status;
}

static void *packet_lookup_frame(struct packet_sock *po,
				 struct packet_ring_buffer *rb,
				 unsigned int position,
				 int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union tpacket_uhdr h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

static void *packet_current_frame(struct packet_sock *po,
				  struct packet_ring_buffer *rb,
				  int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

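/* TPACKET_V3: a per-ring timer retires the currently open block after
 * retire_blk_tov ms, so user space is not left waiting on a partially
 * filled block when the link goes idle.
 */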
static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
					  struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);

	spin_lock_bh(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock_bh(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);
}

static void prb_init_blk_timer(struct packet_sock *po,
			       struct tpacket_kbdq_core *pkc,
			       void (*func) (unsigned long))
{
	init_timer(&pkc->retire_blk_timer);
	pkc->retire_blk_timer.data = (long)po;
	pkc->retire_blk_timer.function = func;
	pkc->retire_blk_timer.expires = jiffies;
}

static void prb_setup_retire_blk_timer(struct packet_sock *po)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
}

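/* Estimate how long one block takes to fill at line rate and use that as
 * the retire timeout. As a rough worked example (not from the original
 * source): a 4 MiB block on a 1 Gb/s link gives mbits = 32, div = 1, so
 * the block would be retired after about 33 ms if it has not filled
 * first. Links slower than 1 Gb/s, or with unknown speed, fall back to
 * DEFAULT_PRB_RETIRE_TOV.
 */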
static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				   int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
	struct ethtool_cmd ecmd;
	int err;
	u32 speed;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_settings(dev, &ecmd);
	speed = ethtool_cmd_speed(&ecmd);
	rtnl_unlock();
	if (!err) {
		/*
		 * If the link speed is so slow you don't really
		 * need to worry about perf anyways
		 */
		if (speed < SPEED_1000 || speed == SPEED_UNKNOWN) {
			return DEFAULT_PRB_RETIRE_TOV;
		} else {
			msec = 1;
			div = speed / 1000;
		}
	}

	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	tmo = mbits * msec;

	if (div)
		return tmo+1;
	return tmo;
}

static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			    union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
			  struct packet_ring_buffer *rb,
			  struct pgv *pg_vec,
			  union tpacket_req_u *req_u)
{
	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	p1->pkbdq = pg_vec;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start	= pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks	= req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
	else
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;

	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po);
	prb_open_block(p1, pbd);
}

/* Do NOT update the last_blk_num first.
 * Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	mod_timer(&pkc->retire_blk_timer,
			jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * Retire-block timer logic:
 *
 * The timer fires every tov_in_jiffies. If the currently open block has
 * seen traffic since the last tick we only re-arm the timer. Otherwise,
 * if the queue is not frozen, a non-empty block is retired so user space
 * can consume it, and the next block is dispatched. If the queue is
 * frozen we either keep waiting for user space to release the block
 * (re-arm), or, once it has been released, re-open it, which also thaws
 * the queue.
 */
static void prb_retire_rx_blk_timer_expired(unsigned long data)
{
	struct packet_sock *po = (struct packet_sock *)data;
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))
		goto out;

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 *		timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		while (atomic_read(&pkc->blk_fill_in_prog)) {
			/* Waiting for skb_copy_bits to finish... */
			cpu_relax();
		}
	}

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
		if (!frozen) {
			if (!BLOCK_NUM_PKTS(pbd)) {
				/* An empty block. Just refresh the timer. */
				goto refresh_timer;
			}
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))
				goto refresh_timer;
			else
				goto out;
		} else {
			/* Case 1. Queue was frozen because user-space was
			 *	   lagging behind.
			 */
			if (prb_curr_blk_in_use(pkc, pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				goto refresh_timer;
			} else {
				/* Case 2. Queue was frozen, user-space caught
				 * up, now the link went idle and the timer
				 * fired. We don't have a block to close, so we
				 * open this block and restart the timer.
				 * Opening a block thaws the queue and restarts
				 * the timer; thawing/timer-refresh is a side
				 * effect.
				 */
				prb_open_block(pkc, pbd);
				goto out;
			}
		}
	}

refresh_timer:
	_prb_refresh_rx_retire_blk_timer(pkc);

out:
	spin_unlock(&po->sk.sk_receive_queue.lock);
}

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
	struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif
}

/*
 * Side effect of the block deallocation:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 * Note: We DON'T refresh the timer on purpose,
 *	 because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1,
		struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
	struct sock *sk = &po->sk;

	if (po->stats.stats3.tp_drops)
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time.
		 *
		 * It shouldn't really happen as we don't close empty
		 * blocks. See prb_retire_rx_blk_timer_expired().
		 */
		struct timespec ts;
		getnstimeofday(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
	}

	smp_wmb();

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	sk->sk_data_ready(sk);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effect of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
	struct tpacket_block_desc *pbd1)
{
	struct timespec ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	smp_rmb();

	/* We could have just memset this but we will lose the
	 * flexibility of making the priv area sticky
	 */

	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
	BLOCK_NUM_PKTS(pbd1) = 0;
	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	getnstimeofday(&ts);

	h1->ts_first_pkt.ts_sec = ts.tv_sec;
	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;

	pkc1->pkblk_start = (char *)pbd1;
	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;

	pbd1->version = pkc1->version;
	pkc1->prev = pkc1->nxt_offset;
	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;

	prb_thaw_queue(pkc1);
	_prb_refresh_rx_retire_blk_timer(pkc1);

	smp_wmb();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
			     struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	po->stats.stats3.tp_freeze_q_cnt++;
}

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, the caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	smp_rmb();

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3.
	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po, unsigned int status)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			while (atomic_read(&pkc->blk_fill_in_prog)) {
				/* Waiting for skb_copy_bits to finish... */
				cpu_relax();
			}
		}
		prb_close_block(pkc, pbd, po, status);
		return;
	}
}

static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
			       struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	atomic_dec(&pkc->blk_fill_in_prog);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	if (skb_vlan_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = 0;
		ppd->hv1.tp_vlan_tpid = 0;
		ppd->tp_status = TP_STATUS_AVAILABLE;
	}
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_padding = 0;
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}

static void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
				unsigned int len)
{
	struct tpacket3_hdr *ppd;

	ppd = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	atomic_inc(&pkc->blk_fill_in_prog);
	prb_run_all_ft_ops(pkc, ppd);
}

/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status,
					    unsigned int len)
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if that last block which caused the queue to freeze
		 * is still in_use by user-space.
		 */
		if (prb_curr_blk_in_use(pkc, pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * Opening a block also thaws the queue.
			 * Thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	smp_mb();
	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *)pbd + pkc->kblk_size;

	/* first try the current block */
	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available; user space hasn't caught up yet.
	 * The queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}

static void *packet_current_rx_frame(struct packet_sock *po,
				     struct sk_buff *skb,
				     int status, unsigned int len)
{
	char *curr = NULL;
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		curr = packet_lookup_frame(po, &po->rx_ring,
					   po->rx_ring.head, status);
		return curr;
	case TPACKET_V3:
		return __packet_lookup_frame_in_block(po, skb, status, len);
	default:
		WARN(1, "TPACKET version not supported\n");
		BUG();
		return NULL;
	}
}

static void *prb_lookup_block(struct packet_sock *po,
			      struct packet_ring_buffer *rb,
			      unsigned int idx,
			      int status)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);

	if (status != BLOCK_STATUS(pbd))
		return NULL;
	return pbd;
}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
	unsigned int prev;
	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num-1;
	else
		prev = rb->prb_bdqc.knum_blocks-1;
	return prev;
}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
				  struct packet_ring_buffer *rb,
				  int status)
{
	unsigned int previous = prb_previous_blk_num(rb);
	return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
				      struct packet_ring_buffer *rb,
				      int status)
{
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);
}

static void packet_increment_rx_head(struct packet_sock *po,
				     struct packet_ring_buffer *rb)
{
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return packet_increment_head(rb);
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return;
	}
}

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}

static void packet_inc_pending(struct packet_ring_buffer *rb)
{
	this_cpu_inc(*rb->pending_refcnt);
}

static void packet_dec_pending(struct packet_ring_buffer *rb)
{
	this_cpu_dec(*rb->pending_refcnt);
}

static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
{
	unsigned int refcnt = 0;
	int cpu;

	/* We don't use pending refcount in rx_ring. */
	if (rb->pending_refcnt == NULL)
		return 0;

	for_each_possible_cpu(cpu)
		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);

	return refcnt;
}

static int packet_alloc_pending(struct packet_sock *po)
{
	po->rx_ring.pending_refcnt = NULL;

	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
	if (unlikely(po->tx_ring.pending_refcnt == NULL))
		return -ENOBUFS;

	return 0;
}

static void packet_free_pending(struct packet_sock *po)
{
	free_percpu(po->tx_ring.pending_refcnt);
}

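/* Headroom levels reported by __packet_rcv_has_room() and used by the
 * fanout rollover logic: ROOM_NORMAL means at least a 1/(2^ROOM_POW_OFF)
 * share of the ring (or receive buffer) is still free, ROOM_LOW means
 * some space is left, ROOM_NONE means the socket is effectively full.
 */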
#define ROOM_POW_OFF	2
#define ROOM_NONE	0x0
#define ROOM_LOW	0x1
#define ROOM_NORMAL	0x2

static bool __tpacket_has_room(struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = po->rx_ring.frame_max + 1;
	idx = po->rx_ring.head;
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

static bool __tpacket_v3_has_room(struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = po->rx_ring.prb_bdqc.knum_blocks;
	idx = po->rx_ring.prb_bdqc.kactive_blk_num;
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

static int __packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
	struct sock *sk = &po->sk;
	int ret = ROOM_NONE;

	if (po->prot_hook.func != tpacket_rcv) {
		int avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc)
					  - (skb ? skb->truesize : 0);
		if (avail > (sk->sk_rcvbuf >> ROOM_POW_OFF))
			return ROOM_NORMAL;
		else if (avail > 0)
			return ROOM_LOW;
		else
			return ROOM_NONE;
	}

	if (po->tp_version == TPACKET_V3) {
		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_v3_has_room(po, 0))
			ret = ROOM_LOW;
	} else {
		if (__tpacket_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_has_room(po, 0))
			ret = ROOM_LOW;
	}

	return ret;
}

static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
	int ret;
	bool has_room;

	spin_lock_bh(&po->sk.sk_receive_queue.lock);
	ret = __packet_rcv_has_room(po, skb);
	has_room = ret == ROOM_NORMAL;
	if (po->pressure == has_room)
		po->pressure = !has_room;
	spin_unlock_bh(&po->sk.sk_receive_queue.lock);

	return ret;
}

static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}

static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
{
	u32 rxhash;
	int i, count = 0;

	rxhash = skb_get_hash(skb);
	for (i = 0; i < ROLLOVER_HLEN; i++)
		if (po->rollover->history[i] == rxhash)
			count++;

	po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
	return count > (ROLLOVER_HLEN >> 1);
}

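/* Fanout demux functions: each maps an incoming skb to one member socket
 * of the group (an index below num) according to the PACKET_FANOUT_*
 * policy the group was created with.
 */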
static unsigned int fanout_demux_hash(struct packet_fanout *f,
				      struct sk_buff *skb,
				      unsigned int num)
{
	return reciprocal_scale(skb_get_hash(skb), num);
}

static unsigned int fanout_demux_lb(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	unsigned int val = atomic_inc_return(&f->rr_cur);

	return val % num;
}

static unsigned int fanout_demux_cpu(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return smp_processor_id() % num;
}

static unsigned int fanout_demux_rnd(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return prandom_u32_max(num);
}

static unsigned int fanout_demux_rollover(struct packet_fanout *f,
					  struct sk_buff *skb,
					  unsigned int idx, bool try_self,
					  unsigned int num)
{
	struct packet_sock *po, *po_next, *po_skip = NULL;
	unsigned int i, j, room = ROOM_NONE;

	po = pkt_sk(f->arr[idx]);

	if (try_self) {
		room = packet_rcv_has_room(po, skb);
		if (room == ROOM_NORMAL ||
		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
			return idx;
		po_skip = po;
	}

	i = j = min_t(int, po->rollover->sock, num - 1);
	do {
		po_next = pkt_sk(f->arr[i]);
		if (po_next != po_skip && !po_next->pressure &&
		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
			if (i != j)
				po->rollover->sock = i;
			atomic_long_inc(&po->rollover->num);
			if (room == ROOM_LOW)
				atomic_long_inc(&po->rollover->num_huge);
			return i;
		}

		if (++i == num)
			i = 0;
	} while (i != j);

	atomic_long_inc(&po->rollover->num_failed);
	return idx;
}

static unsigned int fanout_demux_qm(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	return skb_get_queue_mapping(skb) % num;
}

static unsigned int fanout_demux_bpf(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	struct bpf_prog *prog;
	unsigned int ret = 0;

	rcu_read_lock();
	prog = rcu_dereference(f->bpf_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog, skb) % num;
	rcu_read_unlock();

	return ret;
}

static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{
	return f->flags & (flag >> 8);
}

static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = READ_ONCE(f->num_members);
	struct net *net = read_pnet(&f->net);
	struct packet_sock *po;
	unsigned int idx;

	if (!net_eq(dev_net(dev), net) || !num) {
		kfree_skb(skb);
		return 0;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
		if (!skb)
			return 0;
	}
	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		idx = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		idx = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		idx = fanout_demux_cpu(f, skb, num);
		break;
	case PACKET_FANOUT_RND:
		idx = fanout_demux_rnd(f, skb, num);
		break;
	case PACKET_FANOUT_QM:
		idx = fanout_demux_qm(f, skb, num);
		break;
	case PACKET_FANOUT_ROLLOVER:
		idx = fanout_demux_rollover(f, skb, 0, false, num);
		break;
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		idx = fanout_demux_bpf(f, skb, num);
		break;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
		idx = fanout_demux_rollover(f, skb, idx, true, num);

	po = pkt_sk(f->arr[idx]);
	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}

DEFINE_MUTEX(fanout_mutex);
EXPORT_SYMBOL_GPL(fanout_mutex);
static LIST_HEAD(fanout_list);

static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	f->arr[f->num_members] = sk;
	smp_wmb();
	f->num_members++;
	spin_unlock(&f->lock);
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;
	int i;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++) {
		if (f->arr[i] == sk)
			break;
	}
	BUG_ON(i >= f->num_members);
	f->arr[i] = f->arr[f->num_members - 1];
	f->num_members--;
	spin_unlock(&f->lock);
}

static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
{
	if (sk->sk_family != PF_PACKET)
		return false;

	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
}

static void fanout_init_data(struct packet_fanout *f)
{
	switch (f->type) {
	case PACKET_FANOUT_LB:
		atomic_set(&f->rr_cur, 0);
		break;
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		RCU_INIT_POINTER(f->bpf_prog, NULL);
		break;
	}
}

static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
{
	struct bpf_prog *old;

	spin_lock(&f->lock);
	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
	rcu_assign_pointer(f->bpf_prog, new);
	spin_unlock(&f->lock);

	if (old) {
		synchronize_net();
		bpf_prog_destroy(old);
	}
}

static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data,
				unsigned int len)
{
	struct bpf_prog *new;
	struct sock_fprog fprog;
	int ret;

	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
		return -EPERM;
	if (len != sizeof(fprog))
		return -EINVAL;
	if (copy_from_user(&fprog, data, len))
		return -EFAULT;

	ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
	if (ret)
		return ret;

	__fanout_set_data_bpf(po->fanout, new);
	return 0;
}

static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
				unsigned int len)
{
	struct bpf_prog *new;
	u32 fd;

	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
		return -EPERM;
	if (len != sizeof(fd))
		return -EINVAL;
	if (copy_from_user(&fd, data, len))
		return -EFAULT;

	new = bpf_prog_get(fd);
	if (IS_ERR(new))
		return PTR_ERR(new);
	if (new->type != BPF_PROG_TYPE_SOCKET_FILTER) {
		bpf_prog_put(new);
		return -EINVAL;
	}

	__fanout_set_data_bpf(po->fanout, new);
	return 0;
}

static int fanout_set_data(struct packet_sock *po, char __user *data,
			   unsigned int len)
{
	switch (po->fanout->type) {
	case PACKET_FANOUT_CBPF:
		return fanout_set_data_cbpf(po, data, len);
	case PACKET_FANOUT_EBPF:
		return fanout_set_data_ebpf(po, data, len);
	default:
		return -EINVAL;
	}
}

static void fanout_release_data(struct packet_fanout *f)
{
	switch (f->type) {
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		__fanout_set_data_bpf(f, NULL);
	}
}


static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f, *match;
	u8 type = type_flags & 0xff;
	u8 flags = type_flags >> 8;
	int err;

	switch (type) {
	case PACKET_FANOUT_ROLLOVER:
		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
			return -EINVAL;
		/* fall through */
	case PACKET_FANOUT_HASH:
	case PACKET_FANOUT_LB:
	case PACKET_FANOUT_CPU:
	case PACKET_FANOUT_RND:
	case PACKET_FANOUT_QM:
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		break;
	default:
		return -EINVAL;
	}

	if (!po->running)
		return -EINVAL;

	if (po->fanout)
		return -EALREADY;

	if (type == PACKET_FANOUT_ROLLOVER ||
	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
		po->rollover = kzalloc(sizeof(*po->rollover), GFP_KERNEL);
		if (!po->rollover)
			return -ENOMEM;
		atomic_long_set(&po->rollover->num, 0);
		atomic_long_set(&po->rollover->num_huge, 0);
		atomic_long_set(&po->rollover->num_failed, 0);
	}

	mutex_lock(&fanout_mutex);
	match = NULL;
	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			match = f;
			break;
		}
	}
	err = -EINVAL;
	if (match && match->flags != flags)
		goto out;
	if (!match) {
		err = -ENOMEM;
		match = kzalloc(sizeof(*match), GFP_KERNEL);
		if (!match)
			goto out;
		write_pnet(&match->net, sock_net(sk));
		match->id = id;
		match->type = type;
		match->flags = flags;
		INIT_LIST_HEAD(&match->list);
		spin_lock_init(&match->lock);
		atomic_set(&match->sk_ref, 0);
		fanout_init_data(match);
		match->prot_hook.type = po->prot_hook.type;
		match->prot_hook.dev = po->prot_hook.dev;
		match->prot_hook.func = packet_rcv_fanout;
		match->prot_hook.af_packet_priv = match;
		match->prot_hook.id_match = match_fanout_group;
		dev_add_pack(&match->prot_hook);
		list_add(&match->list, &fanout_list);
	}
	err = -EINVAL;
	if (match->type == type &&
	    match->prot_hook.type == po->prot_hook.type &&
	    match->prot_hook.dev == po->prot_hook.dev) {
		err = -ENOSPC;
		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
			__dev_remove_pack(&po->prot_hook);
			po->fanout = match;
			atomic_inc(&match->sk_ref);
			__fanout_link(sk, po);
			err = 0;
		}
	}
out:
	mutex_unlock(&fanout_mutex);
	if (err) {
		kfree(po->rollover);
		po->rollover = NULL;
	}
	return err;
}

static void fanout_release(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f;

	f = po->fanout;
	if (!f)
		return;

	mutex_lock(&fanout_mutex);
	po->fanout = NULL;

	if (atomic_dec_and_test(&f->sk_ref)) {
		list_del(&f->list);
		dev_remove_pack(&f->prot_hook);
		fanout_release_data(f);
		kfree(f);
	}
	mutex_unlock(&fanout_mutex);

	if (po->rollover)
		kfree_rcu(po->rollover, rcu);
}

static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
					  struct sk_buff *skb)
{
	/* Earlier code assumed this would be a VLAN pkt, double-check
	 * this now that we have the actual packet in hand. We can only
	 * do this check on Ethernet devices.
	 */
	if (unlikely(dev->type != ARPHRD_ETHER))
		return false;

	skb_reset_mac_header(skb);
	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
}

static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;

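/* SOCK_PACKET receive handler: deliver every frame seen on the bound
 * device to the legacy PF_PACKET/SOCK_PACKET socket, tagging it with a
 * sockaddr_pkt source address.
 */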
static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 *	When we registered the protocol we saved the socket in the data
	 *	field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 *	Yank back the headers [hope the device set this
	 *	right or kerboom...]
	 *
	 *	Incoming packets have ll header pulled,
	 *	push it back.
	 *
	 *	For outgoing ones skb->data == skb_mac_header(skb)
	 *	so that this procedure is noop.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto out;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto oom;

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 *	The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 *	Charge the memory to the socket. This is done specifically
	 *	to prevent sockets using all the memory up.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}

/*
 *	Output a raw packet to a device layer. This bypasses all the other
 *	protocol layers and you must therefore supply it with a complete frame
 */
static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
			       size_t len)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
	struct sk_buff *skb = NULL;
	struct net_device *dev;
	__be16 proto = 0;
	int err;
	int extra_len = 0;

	/*
	 *	Get and verify the address.
	 */

	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */

	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
retry:
	rcu_read_lock();
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	 */

	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
		if (!netif_supports_nofcs(dev)) {
			err = -EPROTONOSUPPORT;
			goto out_unlock;
		}
		extra_len = 4; /* We're doing our own CRC */
	}

	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
		goto out_unlock;

	if (!skb) {
		size_t reserved = LL_RESERVED_SPACE(dev);
		int tlen = dev->needed_tailroom;
		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

		rcu_read_unlock();
		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
		/* FIXME: Save some space for broken drivers that write a hard
		 * header at transmission time by themselves. PPP is the notable
		 * one here. This should really be fixed at the driver level.
		 */
		skb_reserve(skb, reserved);
		skb_reset_network_header(skb);

		/* Try to align data part correctly */
		if (hhlen) {
			skb->data -= hhlen;
			skb->tail -= hhlen;
			if (len < hhlen)
				skb_reset_network_header(skb);
		}
		err = memcpy_from_msg(skb_put(skb, len), msg, len);
		if (err)
			goto out_free;
		goto retry;
	}

	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
	    !packet_extra_vlan_len_allowed(dev, skb)) {
		err = -EMSGSIZE;
		goto out_unlock;
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);

	if (unlikely(extra_len == 4))
		skb->no_fcs = 1;

	skb_probe_transport_header(skb, 0);

	dev_queue_xmit(skb);
	rcu_read_unlock();
	return len;

out_unlock:
	rcu_read_unlock();
out_free:
	kfree_skb(skb);
	return err;
}

static unsigned int run_filter(struct sk_buff *skb,
			       const struct sock *sk,
			       unsigned int res)
{
	struct sk_filter *filter;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter != NULL)
		res = bpf_prog_run_clear_cb(filter->prog, skb);
	rcu_read_unlock();

	return res;
}

/*
 * This function makes lazy skb cloning in hope that most of packets
 * are discarded by BPF.
 *
 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
 * and skb->cb are mangled. It works because (and until) packets
 * falling here are owned by current CPU. Output packets are cloned
 * by dev_queue_xmit_nit(), input packets are processed by net_bh
 * sequence, so that if we return skb to original state on exit,
 * we will not harm anyone.
 */
static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	skb->dev = dev;

	if (dev->header_ops) {
		/* The device has an explicit notion of ll header,
		 * exported to higher levels.
		 *
		 * Otherwise, the device hides details of its frame
		 * structure, so that corresponding packet head is
		 * never delivered to user.
		 */
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets
			 * have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		goto drop_n_acct;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
		if (nskb == NULL)
			goto drop_n_acct;

		if (skb_head != skb->data) {
			skb->data = skb_head;
			skb->len = skb_len;
		}
		consume_skb(skb);
		skb = nskb;
	}

	sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	sll->sll_hatype = dev->type;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);

	/* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
	 * Use their space for storing the original skb length.
	 */
	PACKET_SKB_CB(skb)->sa.origlen = skb->len;

	if (pskb_trim(skb, snaplen))
		goto drop_n_acct;

	skb_set_owner_r(skb, sk);
	skb->dev = NULL;
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.stats1.tp_packets++;
	sock_skb_set_dropcount(sk, skb);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock(&sk->sk_receive_queue.lock);
	sk->sk_data_ready(sk);
	return 0;

drop_n_acct:
	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.stats1.tp_drops++;
	atomic_inc(&sk->sk_drops);
	spin_unlock(&sk->sk_receive_queue.lock);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	consume_skb(skb);
	return 0;
}

static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct packet_sock *po;
	struct sockaddr_ll *sll;
	union tpacket_uhdr h;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;
	unsigned long status = TP_STATUS_USER;
	unsigned short macoff, netoff, hdrlen;
	struct sk_buff *copy_skb = NULL;
	struct timespec ts;
	__u32 ts_status;

	/* struct tpacket{,2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
	 * We may add members to them until current aligned size without forcing
	 * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
	 */
	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	if (dev->header_ops) {
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets
			 * have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		status |= TP_STATUS_CSUMNOTREADY;
	else if (skb->pkt_type != PACKET_OUTGOING &&
		 (skb->ip_summed == CHECKSUM_COMPLETE ||
		  skb_csum_unnecessary(skb)))
		status |= TP_STATUS_CSUM_VALID;

	if (snaplen > res)
		snaplen = res;

	if (sk->sk_type == SOCK_DGRAM) {
		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
				  po->tp_reserve;
	} else {
		unsigned int maclen = skb_network_offset(skb);
		netoff = TPACKET_ALIGN(po->tp_hdrlen +
				       (maclen < 16 ? 16 : maclen)) +
			po->tp_reserve;
		macoff = netoff - maclen;
	}
	if (po->tp_version <= TPACKET_V2) {
		if (macoff + snaplen > po->rx_ring.frame_size) {
			if (po->copy_thresh &&
			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
				if (skb_shared(skb)) {
					copy_skb = skb_clone(skb, GFP_ATOMIC);
				} else {
					copy_skb = skb_get(skb);
					skb_head = skb->data;
				}
				if (copy_skb)
					skb_set_owner_r(copy_skb, sk);
			}
			snaplen = po->rx_ring.frame_size - macoff;
			if ((int)snaplen < 0)
				snaplen = 0;
		}
	} else if (unlikely(macoff + snaplen >
			    GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
		u32 nval;

		nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
		pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
			    snaplen, nval, macoff);
		snaplen = nval;
		if (unlikely((int)snaplen < 0)) {
			snaplen = 0;
			macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
		}
	}
	spin_lock(&sk->sk_receive_queue.lock);
	h.raw = packet_current_rx_frame(po, skb,
					TP_STATUS_KERNEL, (macoff+snaplen));
	if (!h.raw)
		goto ring_is_full;
	if (po->tp_version <= TPACKET_V2) {
		packet_increment_rx_head(po, &po->rx_ring);
		/*
		 * LOSING will be reported till you read the stats,
		 * because it's COR - clear on read.
		 * Anyways, moving it for V1/V2 only as V3 doesn't need this
		 * at packet level.
		 */
		if (po->stats.stats1.tp_drops)
			status |= TP_STATUS_LOSING;
	}
	po->stats.stats1.tp_packets++;
	if (copy_skb) {
		status |= TP_STATUS_COPY;
		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
	}
	spin_unlock(&sk->sk_receive_queue.lock);

	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);

	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
		getnstimeofday(&ts);

	status |= ts_status;

	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_len = skb->len;
		h.h1->tp_snaplen = snaplen;
		h.h1->tp_mac = macoff;
		h.h1->tp_net = netoff;
		h.h1->tp_sec = ts.tv_sec;
		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
		hdrlen = sizeof(*h.h1);
		break;
	case TPACKET_V2:
		h.h2->tp_len = skb->len;
		h.h2->tp_snaplen = snaplen;
		h.h2->tp_mac = macoff;
		h.h2->tp_net = netoff;
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		if (skb_vlan_tag_present(skb)) {
			h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
			h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
			status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
		} else {
			h.h2->tp_vlan_tci = 0;
			h.h2->tp_vlan_tpid = 0;
		}
		memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
		hdrlen = sizeof(*h.h2);
		break;
	case TPACKET_V3:
		/* tp_next_offset and the vlan fields are already
		 * populated above, so DON'T clear those fields here.
		 */
		h.h3->tp_status |= status;
		h.h3->tp_len = skb->len;
		h.h3->tp_snaplen = snaplen;
		h.h3->tp_mac = macoff;
		h.h3->tp_net = netoff;
		h.h3->tp_sec = ts.tv_sec;
		h.h3->tp_nsec = ts.tv_nsec;
		memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
		hdrlen = sizeof(*h.h3);
		break;
	default:
		BUG();
	}

	sll = h.raw + TPACKET_ALIGN(hdrlen);
	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	smp_mb();

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	if (po->tp_version <= TPACKET_V2) {
		u8 *start, *end;

		end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
					macoff + snaplen);

		for (start = h.raw; start < end; start += PAGE_SIZE)
			flush_dcache_page(pgv_to_page(start));
	}
	smp_wmb();
#endif

	if (po->tp_version <= TPACKET_V2) {
		__packet_set_status(po, h.raw, status);
		sk->sk_data_ready(sk);
	} else {
		prb_clear_blk_fill_status(&po->rx_ring);
	}

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	kfree_skb(skb);
	return 0;

ring_is_full:
	po->stats.stats1.tp_drops++;
	spin_unlock(&sk->sk_receive_queue.lock);

	sk->sk_data_ready(sk);
	kfree_skb(copy_skb);
	goto drop_n_restore;
}

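/* TX ring skb destructor: runs once the driver is done with the skb;
 * drops the pending count, stores the send timestamp and marks the ring
 * frame TP_STATUS_AVAILABLE for user space to reuse.
 */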
static void tpacket_destruct_skb(struct sk_buff *skb)
{
	struct packet_sock *po = pkt_sk(skb->sk);

	if (likely(po->tx_ring.pg_vec)) {
		void *ph;
		__u32 ts;

		ph = skb_shinfo(skb)->destructor_arg;
		packet_dec_pending(&po->tx_ring);

		ts = __packet_set_timestamp(po, ph, skb);
		__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
	}

	sock_wfree(skb);
}

static bool ll_header_truncated(const struct net_device *dev, int len)
{
	/* net device doesn't like empty head */
	if (unlikely(len < dev->hard_header_len)) {
		net_warn_ratelimited("%s: packet size is too short (%d < %d)\n",
				     current->comm, len, dev->hard_header_len);
		return true;
	}

	return false;
}

static void tpacket_set_protocol(const struct net_device *dev,
				 struct sk_buff *skb)
{
	if (dev->type == ARPHRD_ETHER) {
		skb_reset_mac_header(skb);
		skb->protocol = eth_hdr(skb)->h_proto;
	}
}
2349
2350static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2351 void *frame, struct net_device *dev, int size_max,
2352 __be16 proto, unsigned char *addr, int hlen)
2353{
2354 union tpacket_uhdr ph;
2355 int to_write, offset, len, tp_len, nr_frags, len_max;
2356 struct socket *sock = po->sk.sk_socket;
2357 struct page *page;
2358 void *data;
2359 int err;
2360
2361 ph.raw = frame;
2362
2363 skb->protocol = proto;
2364 skb->dev = dev;
2365 skb->priority = po->sk.sk_priority;
2366 skb->mark = po->sk.sk_mark;
2367 sock_tx_timestamp(&po->sk, &skb_shinfo(skb)->tx_flags);
2368 skb_shinfo(skb)->destructor_arg = ph.raw;
2369
2370 switch (po->tp_version) {
2371 case TPACKET_V2:
2372 tp_len = ph.h2->tp_len;
2373 break;
2374 default:
2375 tp_len = ph.h1->tp_len;
2376 break;
2377 }
2378 if (unlikely(tp_len > size_max)) {
2379 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
2380 return -EMSGSIZE;
2381 }
2382
2383 skb_reserve(skb, hlen);
2384 skb_reset_network_header(skb);
2385
2386 if (unlikely(po->tp_tx_has_off)) {
2387 int off_min, off_max, off;
2388 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2389 off_max = po->tx_ring.frame_size - tp_len;
2390 if (sock->type == SOCK_DGRAM) {
2391 switch (po->tp_version) {
2392 case TPACKET_V2:
2393 off = ph.h2->tp_net;
2394 break;
2395 default:
2396 off = ph.h1->tp_net;
2397 break;
2398 }
2399 } else {
2400 switch (po->tp_version) {
2401 case TPACKET_V2:
2402 off = ph.h2->tp_mac;
2403 break;
2404 default:
2405 off = ph.h1->tp_mac;
2406 break;
2407 }
2408 }
2409 if (unlikely((off < off_min) || (off_max < off)))
2410 return -EINVAL;
2411 data = ph.raw + off;
2412 } else {
2413 data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
2414 }
2415 to_write = tp_len;
2416
2417 if (sock->type == SOCK_DGRAM) {
2418 err = dev_hard_header(skb, dev, ntohs(proto), addr,
2419 NULL, tp_len);
2420 if (unlikely(err < 0))
2421 return -EINVAL;
2422 } else if (dev->hard_header_len) {
2423 if (ll_header_truncated(dev, tp_len))
2424 return -EINVAL;
2425
2426 skb_push(skb, dev->hard_header_len);
2427 err = skb_store_bits(skb, 0, data,
2428 dev->hard_header_len);
2429 if (unlikely(err))
2430 return err;
2431 if (!skb->protocol)
2432 tpacket_set_protocol(dev, skb);
2433
2434 data += dev->hard_header_len;
2435 to_write -= dev->hard_header_len;
2436 }
2437
2438 offset = offset_in_page(data);
2439 len_max = PAGE_SIZE - offset;
2440 len = ((to_write > len_max) ? len_max : to_write);
2441
2442 skb->data_len = to_write;
2443 skb->len += to_write;
2444 skb->truesize += to_write;
2445 atomic_add(to_write, &po->sk.sk_wmem_alloc);
2446
2447 while (likely(to_write)) {
2448 nr_frags = skb_shinfo(skb)->nr_frags;
2449
2450 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2451 pr_err("Packet exceed the number of skb frags(%lu)\n",
2452 MAX_SKB_FRAGS);
2453 return -EFAULT;
2454 }
2455
2456 page = pgv_to_page(data);
2457 data += len;
2458 flush_dcache_page(page);
2459 get_page(page);
2460 skb_fill_page_desc(skb, nr_frags, page, offset, len);
2461 to_write -= len;
2462 offset = 0;
2463 len_max = PAGE_SIZE;
2464 len = ((to_write > len_max) ? len_max : to_write);
2465 }
2466
2467 skb_probe_transport_header(skb, 0);
2468
2469 return tp_len;
2470}
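/*
 * Usage sketch (illustrative, not part of this file): tpacket_fill_skb()
 * consumes frames that user space laid out in the mmap'ed TX ring. Assuming
 * TPACKET_V2 and hypothetical names "ring", "i" and "pkt", the expected
 * default frame layout is:
 *
 *	struct tpacket2_hdr *hdr =
 *		(struct tpacket2_hdr *)(ring + i * req.tp_frame_size);
 *	uint8_t *data = (uint8_t *)hdr + TPACKET2_HDRLEN
 *			- sizeof(struct sockaddr_ll);
 *	memcpy(data, pkt, pkt_len);	// full frame, link-layer header included
 *	hdr->tp_len = pkt_len;		// read back above as tp_len
 *	hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *
 * With PACKET_TX_HAS_OFF enabled, hdr->tp_mac (SOCK_RAW) or hdr->tp_net
 * (SOCK_DGRAM) selects the data offset instead, as the code above shows.
 */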
2471
2472static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2473{
2474 struct sk_buff *skb;
2475 struct net_device *dev;
2476 __be16 proto;
2477 int err, reserve = 0;
2478 void *ph;
2479 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2480 bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2481 int tp_len, size_max;
2482 unsigned char *addr;
2483 int len_sum = 0;
2484 int status = TP_STATUS_AVAILABLE;
2485 int hlen, tlen;
2486
2487 mutex_lock(&po->pg_vec_lock);
2488
2489 if (likely(saddr == NULL)) {
2490 dev = packet_cached_dev_get(po);
2491 proto = po->num;
2492 addr = NULL;
2493 } else {
2494 err = -EINVAL;
2495 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2496 goto out;
2497 if (msg->msg_namelen < (saddr->sll_halen
2498 + offsetof(struct sockaddr_ll,
2499 sll_addr)))
2500 goto out;
2501 proto = saddr->sll_protocol;
2502 addr = saddr->sll_addr;
2503 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2504 }
2505
2506 err = -ENXIO;
2507 if (unlikely(dev == NULL))
2508 goto out;
2509 err = -ENETDOWN;
2510 if (unlikely(!(dev->flags & IFF_UP)))
2511 goto out_put;
2512
2513 if (po->sk.sk_socket->type == SOCK_RAW)
2514 reserve = dev->hard_header_len;
2515 size_max = po->tx_ring.frame_size
2516 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2517
2518 if (size_max > dev->mtu + reserve + VLAN_HLEN)
2519 size_max = dev->mtu + reserve + VLAN_HLEN;
2520
2521 do {
2522 ph = packet_current_frame(po, &po->tx_ring,
2523 TP_STATUS_SEND_REQUEST);
2524 if (unlikely(ph == NULL)) {
2525 if (need_wait && need_resched())
2526 schedule();
2527 continue;
2528 }
2529
2530 status = TP_STATUS_SEND_REQUEST;
2531 hlen = LL_RESERVED_SPACE(dev);
2532 tlen = dev->needed_tailroom;
2533 skb = sock_alloc_send_skb(&po->sk,
2534 hlen + tlen + sizeof(struct sockaddr_ll),
2535 !need_wait, &err);
2536
2537 if (unlikely(skb == NULL)) {
2538 /* allocation failed: report the bytes already sent, if any */
2539 if (likely(len_sum > 0))
2540 err = len_sum;
2541 goto out_status;
2542 }
2543 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
2544 addr, hlen);
2545 if (likely(tp_len >= 0) &&
2546 tp_len > dev->mtu + reserve &&
2547 !packet_extra_vlan_len_allowed(dev, skb))
2548 tp_len = -EMSGSIZE;
2549
2550 if (unlikely(tp_len < 0)) {
2551 if (po->tp_loss) {
2552 __packet_set_status(po, ph,
2553 TP_STATUS_AVAILABLE);
2554 packet_increment_head(&po->tx_ring);
2555 kfree_skb(skb);
2556 continue;
2557 } else {
2558 status = TP_STATUS_WRONG_FORMAT;
2559 err = tp_len;
2560 goto out_status;
2561 }
2562 }
2563
2564 packet_pick_tx_queue(dev, skb);
2565
2566 skb->destructor = tpacket_destruct_skb;
2567 __packet_set_status(po, ph, TP_STATUS_SENDING);
2568 packet_inc_pending(&po->tx_ring);
2569
2570 status = TP_STATUS_SEND_REQUEST;
2571 err = po->xmit(skb);
2572 if (unlikely(err > 0)) {
2573 err = net_xmit_errno(err);
2574 if (err && __packet_get_status(po, ph) ==
2575 TP_STATUS_AVAILABLE) {
2576 /* skb was destructed already */
2577 skb = NULL;
2578 goto out_status;
2579 }
2580 /* skb was dropped but not destructed yet;
2581 * let's treat it like congestion or err < 0
2582 */
2583
2584 err = 0;
2585 }
2586 packet_increment_head(&po->tx_ring);
2587 len_sum += tp_len;
2588 } while (likely((ph != NULL) ||
2589 /* Note: packet_read_pending() might be slow if we have
2590 * to call it as it's per_cpu variable, but in fast-path
2591 * we already short-circuit the loop with the first
2592 * condition, and luckily don't have to go that path
2593 * anyway.
2594 */
2595 (need_wait && packet_read_pending(&po->tx_ring))));
2596
2597 err = len_sum;
2598 goto out_put;
2599
2600out_status:
2601 __packet_set_status(po, ph, status);
2602 kfree_skb(skb);
2603out_put:
2604 dev_put(dev);
2605out:
2606 mutex_unlock(&po->pg_vec_lock);
2607 return err;
2608}
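/*
 * Usage sketch (a hedged userspace example, not part of this file; the ring
 * geometry values are assumptions and error handling is omitted) of driving
 * tpacket_snd() through a PACKET_TX_RING:
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096, .tp_block_nr = 64,
 *		.tp_frame_size = 2048, .tp_frame_nr = 128,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_TX_RING, &req, sizeof(req));
 *	void *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	// ... fill frames and mark them TP_STATUS_SEND_REQUEST ...
 *	send(fd, NULL, 0, 0);	// enters tpacket_snd() and flushes the ring
 *
 * A zero-length send() (optionally with MSG_DONTWAIT) is the conventional
 * "kick"; the loop above then walks every TP_STATUS_SEND_REQUEST frame.
 */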
2609
2610static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2611 size_t reserve, size_t len,
2612 size_t linear, int noblock,
2613 int *err)
2614{
2615 struct sk_buff *skb;
2616
2617 /* Under a page? Don't bother with paged skb. */
2618 if (prepad + len < PAGE_SIZE || !linear)
2619 linear = len;
2620
2621 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2622 err, 0);
2623 if (!skb)
2624 return NULL;
2625
2626 skb_reserve(skb, reserve);
2627 skb_put(skb, linear);
2628 skb->data_len = len - linear;
2629 skb->len += len - linear;
2630
2631 return skb;
2632}
2633
2634static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2635{
2636 struct sock *sk = sock->sk;
2637 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2638 struct sk_buff *skb;
2639 struct net_device *dev;
2640 __be16 proto;
2641 unsigned char *addr;
2642 int err, reserve = 0;
2643 struct sockcm_cookie sockc;
2644 struct virtio_net_hdr vnet_hdr = { 0 };
2645 int offset = 0;
2646 int vnet_hdr_len;
2647 struct packet_sock *po = pkt_sk(sk);
2648 unsigned short gso_type = 0;
2649 int hlen, tlen;
2650 int extra_len = 0;
2651 ssize_t n;
2652
2653 /*
2654 *	Get and verify the address.
2655 */
2656
2657 if (likely(saddr == NULL)) {
2658 dev = packet_cached_dev_get(po);
2659 proto = po->num;
2660 addr = NULL;
2661 } else {
2662 err = -EINVAL;
2663 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2664 goto out;
2665 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2666 goto out;
2667 proto = saddr->sll_protocol;
2668 addr = saddr->sll_addr;
2669 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2670 }
2671
2672 err = -ENXIO;
2673 if (unlikely(dev == NULL))
2674 goto out_unlock;
2675 err = -ENETDOWN;
2676 if (unlikely(!(dev->flags & IFF_UP)))
2677 goto out_unlock;
2678
2679 sockc.mark = sk->sk_mark;
2680 if (msg->msg_controllen) {
2681 err = sock_cmsg_send(sk, msg, &sockc);
2682 if (unlikely(err))
2683 goto out_unlock;
2684 }
2685
2686 if (sock->type == SOCK_RAW)
2687 reserve = dev->hard_header_len;
2688 if (po->has_vnet_hdr) {
2689 vnet_hdr_len = sizeof(vnet_hdr);
2690
2691 err = -EINVAL;
2692 if (len < vnet_hdr_len)
2693 goto out_unlock;
2694
2695 len -= vnet_hdr_len;
2696
2697 err = -EFAULT;
2698 n = copy_from_iter(&vnet_hdr, vnet_hdr_len, &msg->msg_iter);
2699 if (n != vnet_hdr_len)
2700 goto out_unlock;
2701
2702 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2703 (__virtio16_to_cpu(vio_le(), vnet_hdr.csum_start) +
2704 __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset) + 2 >
2705 __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len)))
2706 vnet_hdr.hdr_len = __cpu_to_virtio16(vio_le(),
2707 __virtio16_to_cpu(vio_le(), vnet_hdr.csum_start) +
2708 __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset) + 2);
2709
2710 err = -EINVAL;
2711 if (__virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len) > len)
2712 goto out_unlock;
2713
2714 if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2715 switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2716 case VIRTIO_NET_HDR_GSO_TCPV4:
2717 gso_type = SKB_GSO_TCPV4;
2718 break;
2719 case VIRTIO_NET_HDR_GSO_TCPV6:
2720 gso_type = SKB_GSO_TCPV6;
2721 break;
2722 case VIRTIO_NET_HDR_GSO_UDP:
2723 gso_type = SKB_GSO_UDP;
2724 break;
2725 default:
2726 goto out_unlock;
2727 }
2728
2729 if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
2730 gso_type |= SKB_GSO_TCP_ECN;
2731
2732 if (vnet_hdr.gso_size == 0)
2733 goto out_unlock;
2734
2735 }
2736 }
2737
2738 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2739 if (!netif_supports_nofcs(dev)) {
2740 err = -EPROTONOSUPPORT;
2741 goto out_unlock;
2742 }
2743 extra_len = 4;
2744 }
2745
2746 err = -EMSGSIZE;
2747 if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2748 goto out_unlock;
2749
2750 err = -ENOBUFS;
2751 hlen = LL_RESERVED_SPACE(dev);
2752 tlen = dev->needed_tailroom;
2753 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len,
2754 __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len),
2755 msg->msg_flags & MSG_DONTWAIT, &err);
2756 if (skb == NULL)
2757 goto out_unlock;
2758
2759 skb_set_network_header(skb, reserve);
2760
2761 err = -EINVAL;
2762 if (sock->type == SOCK_DGRAM) {
2763 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2764 if (unlikely(offset < 0))
2765 goto out_free;
2766 } else {
2767 if (ll_header_truncated(dev, len))
2768 goto out_free;
2769 }
2770
2771 /* Returns -EFAULT on error */
2772 err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
2773 if (err)
2774 goto out_free;
2775
2776 sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
2777
2778 if (!gso_type && (len > dev->mtu + reserve + extra_len) &&
2779 !packet_extra_vlan_len_allowed(dev, skb)) {
2780 err = -EMSGSIZE;
2781 goto out_free;
2782 }
2783
2784 skb->protocol = proto;
2785 skb->dev = dev;
2786 skb->priority = sk->sk_priority;
2787 skb->mark = sockc.mark;
2788
2789 packet_pick_tx_queue(dev, skb);
2790
2791 if (po->has_vnet_hdr) {
2792 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2793 u16 s = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_start);
2794 u16 o = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset);
2795 if (!skb_partial_csum_set(skb, s, o)) {
2796 err = -EINVAL;
2797 goto out_free;
2798 }
2799 }
2800
2801 skb_shinfo(skb)->gso_size =
2802 __virtio16_to_cpu(vio_le(), vnet_hdr.gso_size);
2803 skb_shinfo(skb)->gso_type = gso_type;
2804
2805 /* Header must be checked, and gso_segs computed. */
2806 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
2807 skb_shinfo(skb)->gso_segs = 0;
2808
2809 len += vnet_hdr_len;
2810 }
2811
2812 skb_probe_transport_header(skb, reserve);
2813
2814 if (unlikely(extra_len == 4))
2815 skb->no_fcs = 1;
2816
2817 err = po->xmit(skb);
2818 if (err > 0 && (err = net_xmit_errno(err)) != 0)
2819 goto out_unlock;
2820
2821 dev_put(dev);
2822
2823 return len;
2824
2825out_free:
2826 kfree_skb(skb);
2827out_unlock:
2828 if (dev)
2829 dev_put(dev);
2830out:
2831 return err;
2832}
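/*
 * Usage sketch (illustrative, not part of this file): packet_snd() is the
 * non-ring transmit path. Assuming a frame already built in a hypothetical
 * buffer "frame" and an interface named "eth0":
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family  = AF_PACKET,
 *		.sll_ifindex = if_nametoindex("eth0"),
 *		.sll_halen   = ETH_ALEN,	// dest MAC goes in sll_addr
 *	};
 *	sendto(fd, frame, frame_len, 0,
 *	       (struct sockaddr *)&sll, sizeof(sll));
 *
 * On a SOCK_RAW socket the frame is sent as-is; on SOCK_DGRAM the kernel
 * prepends the link-layer header via dev_hard_header(), as seen above.
 */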
2833
2834static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
2835{
2836 struct sock *sk = sock->sk;
2837 struct packet_sock *po = pkt_sk(sk);
2838
2839 if (po->tx_ring.pg_vec)
2840 return tpacket_snd(po, msg);
2841 else
2842 return packet_snd(sock, msg, len);
2843}
2844
2845 /*
2846 *	Close a PACKET socket. This is fairly simple. We immediately go
2847 *	to 'closed' state and remove our protocol entry in the device list.
2848 */
2849
2850static int packet_release(struct socket *sock)
2851{
2852 struct sock *sk = sock->sk;
2853 struct packet_sock *po;
2854 struct net *net;
2855 union tpacket_req_u req_u;
2856
2857 if (!sk)
2858 return 0;
2859
2860 net = sock_net(sk);
2861 po = pkt_sk(sk);
2862
2863 mutex_lock(&net->packet.sklist_lock);
2864 sk_del_node_init_rcu(sk);
2865 mutex_unlock(&net->packet.sklist_lock);
2866
2867 preempt_disable();
2868 sock_prot_inuse_add(net, sk->sk_prot, -1);
2869 preempt_enable();
2870
2871 spin_lock(&po->bind_lock);
2872 unregister_prot_hook(sk, false);
2873 packet_cached_dev_reset(po);
2874
2875 if (po->prot_hook.dev) {
2876 dev_put(po->prot_hook.dev);
2877 po->prot_hook.dev = NULL;
2878 }
2879 spin_unlock(&po->bind_lock);
2880
2881 packet_flush_mclist(sk);
2882
2883 if (po->rx_ring.pg_vec) {
2884 memset(&req_u, 0, sizeof(req_u));
2885 packet_set_ring(sk, &req_u, 1, 0);
2886 }
2887
2888 if (po->tx_ring.pg_vec) {
2889 memset(&req_u, 0, sizeof(req_u));
2890 packet_set_ring(sk, &req_u, 1, 1);
2891 }
2892
2893 fanout_release(sk);
2894
2895 synchronize_net();
2896 /*
2897 *	Now the socket is dead. No more input will appear.
2898 */
2899 sock_orphan(sk);
2900 sock->sk = NULL;
2901
2902 /* Purge queues */
2903
2904 skb_queue_purge(&sk->sk_receive_queue);
2905 packet_free_pending(po);
2906 sk_refcnt_debug_release(sk);
2907
2908 sock_put(sk);
2909 return 0;
2910}
2911
2912 /*
2913 *	Attach a packet hook.
2914 */
2915
2916static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
2917 __be16 proto)
2918{
2919 struct packet_sock *po = pkt_sk(sk);
2920 struct net_device *dev_curr;
2921 __be16 proto_curr;
2922 bool need_rehook;
2923 struct net_device *dev = NULL;
2924 int ret = 0;
2925 bool unlisted = false;
2926
2927 if (po->fanout)
2928 return -EINVAL;
2929
2930 lock_sock(sk);
2931 spin_lock(&po->bind_lock);
2932 rcu_read_lock();
2933
2934 if (name) {
2935 dev = dev_get_by_name_rcu(sock_net(sk), name);
2936 if (!dev) {
2937 ret = -ENODEV;
2938 goto out_unlock;
2939 }
2940 } else if (ifindex) {
2941 dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
2942 if (!dev) {
2943 ret = -ENODEV;
2944 goto out_unlock;
2945 }
2946 }
2947
2948 if (dev)
2949 dev_hold(dev);
2950
2951 proto_curr = po->prot_hook.type;
2952 dev_curr = po->prot_hook.dev;
2953
2954 need_rehook = proto_curr != proto || dev_curr != dev;
2955
2956 if (need_rehook) {
2957 if (po->running) {
2958 rcu_read_unlock();
2959 __unregister_prot_hook(sk, true);
2960 rcu_read_lock();
2961 dev_curr = po->prot_hook.dev;
2962 if (dev)
2963 unlisted = !dev_get_by_index_rcu(sock_net(sk),
2964 dev->ifindex);
2965 }
2966
2967 po->num = proto;
2968 po->prot_hook.type = proto;
2969
2970 if (unlikely(unlisted)) {
2971 dev_put(dev);
2972 po->prot_hook.dev = NULL;
2973 po->ifindex = -1;
2974 packet_cached_dev_reset(po);
2975 } else {
2976 po->prot_hook.dev = dev;
2977 po->ifindex = dev ? dev->ifindex : 0;
2978 packet_cached_dev_assign(po, dev);
2979 }
2980 }
2981 if (dev_curr)
2982 dev_put(dev_curr);
2983
2984 if (proto == 0 || !need_rehook)
2985 goto out_unlock;
2986
2987 if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
2988 register_prot_hook(sk);
2989 } else {
2990 sk->sk_err = ENETDOWN;
2991 if (!sock_flag(sk, SOCK_DEAD))
2992 sk->sk_error_report(sk);
2993 }
2994
2995out_unlock:
2996 rcu_read_unlock();
2997 spin_unlock(&po->bind_lock);
2998 release_sock(sk);
2999 return ret;
3000}
3001
3002 /*
3003 *	Bind a packet socket to a device.
3004 */
3005
3006static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3007 int addr_len)
3008{
3009 struct sock *sk = sock->sk;
3010 char name[sizeof(uaddr->sa_data) + 1] = { 0 };
3011
3012 /*
3013 *	Check legality
3014 */
3015
3016 if (addr_len != sizeof(struct sockaddr))
3017 return -EINVAL;
3018 memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data)); /* sa_data from user space need not be NUL-terminated */
3019
3020 return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
3021}
3022
3023static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3024{
3025 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3026 struct sock *sk = sock->sk;
3027
3028 /*
3029 *	Check legality
3030 */
3031
3032 if (addr_len < sizeof(struct sockaddr_ll))
3033 return -EINVAL;
3034 if (sll->sll_family != AF_PACKET)
3035 return -EINVAL;
3036
3037 return packet_do_bind(sk, NULL, sll->sll_ifindex,
3038 sll->sll_protocol ? : pkt_sk(sk)->num);
3039}
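/*
 * Usage sketch (illustrative, not part of this file) matching the checks in
 * packet_bind(); the device name is an assumption:
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 *
 * A zero sll_protocol keeps the socket's current protocol (the "?:" above),
 * and binding with sll_ifindex == 0 listens on all interfaces.
 */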
3040
3041static struct proto packet_proto = {
3042 .name = "PACKET",
3043 .owner = THIS_MODULE,
3044 .obj_size = sizeof(struct packet_sock),
3045};
3046
3047 /*
3048 *	Create a packet of type SOCK_PACKET.
3049 */
3050
3051static int packet_create(struct net *net, struct socket *sock, int protocol,
3052 int kern)
3053{
3054 struct sock *sk;
3055 struct packet_sock *po;
3056 __be16 proto = (__force __be16)protocol;
3057 int err;
3058
3059 if (!ns_capable(net->user_ns, CAP_NET_RAW))
3060 return -EPERM;
3061 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3062 sock->type != SOCK_PACKET)
3063 return -ESOCKTNOSUPPORT;
3064
3065 sock->state = SS_UNCONNECTED;
3066
3067 err = -ENOBUFS;
3068 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3069 if (sk == NULL)
3070 goto out;
3071
3072 sock->ops = &packet_ops;
3073 if (sock->type == SOCK_PACKET)
3074 sock->ops = &packet_ops_spkt;
3075
3076 sock_init_data(sock, sk);
3077
3078 po = pkt_sk(sk);
3079 sk->sk_family = PF_PACKET;
3080 po->num = proto;
3081 po->xmit = dev_queue_xmit;
3082
3083 err = packet_alloc_pending(po);
3084 if (err)
3085 goto out2;
3086
3087 packet_cached_dev_reset(po);
3088
3089 sk->sk_destruct = packet_sock_destruct;
3090 sk_refcnt_debug_inc(sk);
3091
3092 /*
3093 *	Attach a protocol block
3094 */
3095
3096 spin_lock_init(&po->bind_lock);
3097 mutex_init(&po->pg_vec_lock);
3098 po->rollover = NULL;
3099 po->prot_hook.func = packet_rcv;
3100
3101 if (sock->type == SOCK_PACKET)
3102 po->prot_hook.func = packet_rcv_spkt;
3103
3104 po->prot_hook.af_packet_priv = sk;
3105
3106 if (proto) {
3107 po->prot_hook.type = proto;
3108 register_prot_hook(sk);
3109 }
3110
3111 mutex_lock(&net->packet.sklist_lock);
3112 sk_add_node_rcu(sk, &net->packet.sklist);
3113 mutex_unlock(&net->packet.sklist_lock);
3114
3115 preempt_disable();
3116 sock_prot_inuse_add(net, &packet_proto, 1);
3117 preempt_enable();
3118
3119 return 0;
3120out2:
3121 sk_free(sk);
3122out:
3123 return err;
3124}
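/*
 * Usage sketch (illustrative, not part of this file): the userspace entry
 * point for packet_create() is a plain socket() call:
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	if (fd < 0)
 *		perror("socket");	// EPERM without CAP_NET_RAW, per above
 *
 * Passing a non-zero protocol registers the receive hook immediately;
 * protocol 0 defers delivery until the socket is bound.
 */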
3125
3126 /*
3127 *	Pull a packet from our receive queue and hand it to the user.
3128 *	If necessary we block.
3129 */
3130
3131static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3132 int flags)
3133{
3134 struct sock *sk = sock->sk;
3135 struct sk_buff *skb;
3136 int copied, err;
3137 int vnet_hdr_len = 0;
3138 unsigned int origlen = 0;
3139
3140 err = -EINVAL;
3141 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3142 goto out;
3143
3144#if 0
3145 /* What error should we return now? EUNATTACH? */
3146 if (pkt_sk(sk)->ifindex < 0)
3147 return -ENODEV;
3148#endif
3149
3150 if (flags & MSG_ERRQUEUE) {
3151 err = sock_recv_errqueue(sk, msg, len,
3152 SOL_PACKET, PACKET_TX_TIMESTAMP);
3153 goto out;
3154 }
3155
3156 /*
3157 *	Call the generic datagram receiver. This handles all sorts
3158 *	of horrible races and re-entrancy so we can forget about it
3159 *	in the protocol layers.
3160 *
3161 *	Now it will return ENETDOWN, if the device has just gone down,
3162 *	but then it will block.
3163 */
3164
3165 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
3166
3167 /*
3168 *	An error occurred, so return it. Because skb_recv_datagram()
3169 *	handles the blocking for us, we don't have to worry about
3170 *	blocking retries here.
3171 */
3172
3173 if (skb == NULL)
3174 goto out;
3175
3176 if (pkt_sk(sk)->pressure)
3177 packet_rcv_has_room(pkt_sk(sk), NULL);
3178
3179 if (pkt_sk(sk)->has_vnet_hdr) {
3180 struct virtio_net_hdr vnet_hdr = { 0 };
3181
3182 err = -EINVAL;
3183 vnet_hdr_len = sizeof(vnet_hdr);
3184 if (len < vnet_hdr_len)
3185 goto out_free;
3186
3187 len -= vnet_hdr_len;
3188
3189 if (skb_is_gso(skb)) {
3190 struct skb_shared_info *sinfo = skb_shinfo(skb);
3191
3192 /* This is a hint as to how much should be linear. */
3193 vnet_hdr.hdr_len =
3194 __cpu_to_virtio16(vio_le(), skb_headlen(skb));
3195 vnet_hdr.gso_size =
3196 __cpu_to_virtio16(vio_le(), sinfo->gso_size);
3197 if (sinfo->gso_type & SKB_GSO_TCPV4)
3198 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
3199 else if (sinfo->gso_type & SKB_GSO_TCPV6)
3200 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
3201 else if (sinfo->gso_type & SKB_GSO_UDP)
3202 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
3203 else if (sinfo->gso_type & SKB_GSO_FCOE)
3204 goto out_free;
3205 else
3206 BUG();
3207 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
3208 vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
3209 } else
3210 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
3211
3212 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3213 vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
3214 vnet_hdr.csum_start = __cpu_to_virtio16(vio_le(),
3215 skb_checksum_start_offset(skb));
3216 vnet_hdr.csum_offset = __cpu_to_virtio16(vio_le(),
3217 skb->csum_offset);
3218 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3219 vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
3220 }
3221
3222 err = memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_len);
3223 if (err < 0)
3224 goto out_free;
3225 }
3226
3227 /* You lose any data beyond the buffer you gave. If it worries
3228 * a user program they can ask the device for its MTU
3229 * anyway.
3230 */
3231 copied = skb->len;
3232 if (copied > len) {
3233 copied = len;
3234 msg->msg_flags |= MSG_TRUNC;
3235 }
3236
3237 err = skb_copy_datagram_msg(skb, 0, msg, copied);
3238 if (err)
3239 goto out_free;
3240
3241 if (sock->type != SOCK_PACKET) {
3242 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3243
3244 /* Original length was stored in sockaddr_ll fields */
3245 origlen = PACKET_SKB_CB(skb)->sa.origlen;
3246 sll->sll_family = AF_PACKET;
3247 sll->sll_protocol = skb->protocol;
3248 }
3249
3250 sock_recv_ts_and_drops(msg, sk, skb);
3251
3252 if (msg->msg_name) {
3253 /* If the address length field is there to be filled
3254 * in, we fill it in now.
3255 */
3256 if (sock->type == SOCK_PACKET) {
3257 __sockaddr_check_size(sizeof(struct sockaddr_pkt));
3258 msg->msg_namelen = sizeof(struct sockaddr_pkt);
3259 } else {
3260 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3261
3262 msg->msg_namelen = sll->sll_halen +
3263 offsetof(struct sockaddr_ll, sll_addr);
3264 }
3265 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
3266 msg->msg_namelen);
3267 }
3268
3269 if (pkt_sk(sk)->auxdata) {
3270 struct tpacket_auxdata aux;
3271
3272 aux.tp_status = TP_STATUS_USER;
3273 if (skb->ip_summed == CHECKSUM_PARTIAL)
3274 aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3275 else if (skb->pkt_type != PACKET_OUTGOING &&
3276 (skb->ip_summed == CHECKSUM_COMPLETE ||
3277 skb_csum_unnecessary(skb)))
3278 aux.tp_status |= TP_STATUS_CSUM_VALID;
3279
3280 aux.tp_len = origlen;
3281 aux.tp_snaplen = skb->len;
3282 aux.tp_mac = 0;
3283 aux.tp_net = skb_network_offset(skb);
3284 if (skb_vlan_tag_present(skb)) {
3285 aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3286 aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3287 aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3288 } else {
3289 aux.tp_vlan_tci = 0;
3290 aux.tp_vlan_tpid = 0;
3291 }
3292 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3293 }
3294
3295 /*
3296 *	Free or return the buffer as appropriate. Again this
3297 *	hides all the races and re-entrancy issues from us.
3298 */
3299 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3300
3301out_free:
3302 skb_free_datagram(sk, skb);
3303out:
3304 return err;
3305}
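/*
 * Usage sketch (a hedged userspace example, not part of this file; buffer
 * sizes are assumptions) of reading the PACKET_AUXDATA control message that
 * packet_recvmsg() emits. PACKET_AUXDATA must have been enabled with
 * setsockopt() first:
 *
 *	union {
 *		struct cmsghdr cm;
 *		char buf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *	} ctl;
 *	unsigned char pkt[2048];
 *	struct iovec iov = { .iov_base = pkt, .iov_len = sizeof(pkt) };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = &ctl,
 *			      .msg_controllen = sizeof(ctl) };
 *	ssize_t n = recvmsg(fd, &msg, 0);
 *	struct cmsghdr *c;
 *	for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
 *		if (c->cmsg_level == SOL_PACKET &&
 *		    c->cmsg_type == PACKET_AUXDATA) {
 *			struct tpacket_auxdata *aux = (void *)CMSG_DATA(c);
 *			// aux->tp_vlan_tci is only meaningful when
 *			// aux->tp_status has TP_STATUS_VLAN_VALID set
 *		}
 */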
3306
3307static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3308 int *uaddr_len, int peer)
3309{
3310 struct net_device *dev;
3311 struct sock *sk = sock->sk;
3312
3313 if (peer)
3314 return -EOPNOTSUPP;
3315
3316 uaddr->sa_family = AF_PACKET;
3317 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3318 rcu_read_lock();
3319 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
3320 if (dev)
3321 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3322 rcu_read_unlock();
3323 *uaddr_len = sizeof(*uaddr);
3324
3325 return 0;
3326}
3327
3328static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3329 int *uaddr_len, int peer)
3330{
3331 struct net_device *dev;
3332 struct sock *sk = sock->sk;
3333 struct packet_sock *po = pkt_sk(sk);
3334 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3335
3336 if (peer)
3337 return -EOPNOTSUPP;
3338
3339 sll->sll_family = AF_PACKET;
3340 sll->sll_ifindex = po->ifindex;
3341 sll->sll_protocol = po->num;
3342 sll->sll_pkttype = 0;
3343 rcu_read_lock();
3344 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
3345 if (dev) {
3346 sll->sll_hatype = dev->type;
3347 sll->sll_halen = dev->addr_len;
3348 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3349 } else {
3350 sll->sll_hatype = 0;
3351 sll->sll_halen = 0;
3352 }
3353 rcu_read_unlock();
3354 *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3355
3356 return 0;
3357}
3358
3359static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3360 int what)
3361{
3362 switch (i->type) {
3363 case PACKET_MR_MULTICAST:
3364 if (i->alen != dev->addr_len)
3365 return -EINVAL;
3366 if (what > 0)
3367 return dev_mc_add(dev, i->addr);
3368 else
3369 return dev_mc_del(dev, i->addr);
3370 break;
3371 case PACKET_MR_PROMISC:
3372 return dev_set_promiscuity(dev, what);
3373 case PACKET_MR_ALLMULTI:
3374 return dev_set_allmulti(dev, what);
3375 case PACKET_MR_UNICAST:
3376 if (i->alen != dev->addr_len)
3377 return -EINVAL;
3378 if (what > 0)
3379 return dev_uc_add(dev, i->addr);
3380 else
3381 return dev_uc_del(dev, i->addr);
3382 break;
3383 default:
3384 break;
3385 }
3386 return 0;
3387}
3388
3389static void packet_dev_mclist_delete(struct net_device *dev,
3390 struct packet_mclist **mlp)
3391{
3392 struct packet_mclist *ml;
3393
3394 while ((ml = *mlp) != NULL) {
3395 if (ml->ifindex == dev->ifindex) {
3396 packet_dev_mc(dev, ml, -1);
3397 *mlp = ml->next;
3398 kfree(ml);
3399 } else
3400 mlp = &ml->next;
3401 }
3402}
3403
3404static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3405{
3406 struct packet_sock *po = pkt_sk(sk);
3407 struct packet_mclist *ml, *i;
3408 struct net_device *dev;
3409 int err;
3410
3411 rtnl_lock();
3412
3413 err = -ENODEV;
3414 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3415 if (!dev)
3416 goto done;
3417
3418 err = -EINVAL;
3419 if (mreq->mr_alen > dev->addr_len)
3420 goto done;
3421
3422 err = -ENOBUFS;
3423 i = kmalloc(sizeof(*i), GFP_KERNEL);
3424 if (i == NULL)
3425 goto done;
3426
3427 err = 0;
3428 for (ml = po->mclist; ml; ml = ml->next) {
3429 if (ml->ifindex == mreq->mr_ifindex &&
3430 ml->type == mreq->mr_type &&
3431 ml->alen == mreq->mr_alen &&
3432 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3433 ml->count++;
3434 /* already subscribed: just bump the refcount and drop the new entry */
3435 kfree(i);
3436 goto done;
3437 }
3438 }
3439
3440 i->type = mreq->mr_type;
3441 i->ifindex = mreq->mr_ifindex;
3442 i->alen = mreq->mr_alen;
3443 memcpy(i->addr, mreq->mr_address, i->alen);
3444 i->count = 1;
3445 i->next = po->mclist;
3446 po->mclist = i;
3447 err = packet_dev_mc(dev, i, 1);
3448 if (err) {
3449 po->mclist = i->next;
3450 kfree(i);
3451 }
3452
3453done:
3454 rtnl_unlock();
3455 return err;
3456}
3457
3458static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3459{
3460 struct packet_mclist *ml, **mlp;
3461
3462 rtnl_lock();
3463
3464 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3465 if (ml->ifindex == mreq->mr_ifindex &&
3466 ml->type == mreq->mr_type &&
3467 ml->alen == mreq->mr_alen &&
3468 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3469 if (--ml->count == 0) {
3470 struct net_device *dev;
3471 *mlp = ml->next;
3472 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3473 if (dev)
3474 packet_dev_mc(dev, ml, -1);
3475 kfree(ml);
3476 }
3477 break;
3478 }
3479 }
3480 rtnl_unlock();
3481 return 0;
3482}
3483
3484static void packet_flush_mclist(struct sock *sk)
3485{
3486 struct packet_sock *po = pkt_sk(sk);
3487 struct packet_mclist *ml;
3488
3489 if (!po->mclist)
3490 return;
3491
3492 rtnl_lock();
3493 while ((ml = po->mclist) != NULL) {
3494 struct net_device *dev;
3495
3496 po->mclist = ml->next;
3497 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3498 if (dev != NULL)
3499 packet_dev_mc(dev, ml, -1);
3500 kfree(ml);
3501 }
3502 rtnl_unlock();
3503}
3504
3505static int
3506packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
3507{
3508 struct sock *sk = sock->sk;
3509 struct packet_sock *po = pkt_sk(sk);
3510 int ret;
3511
3512 if (level != SOL_PACKET)
3513 return -ENOPROTOOPT;
3514
3515 switch (optname) {
3516 case PACKET_ADD_MEMBERSHIP:
3517 case PACKET_DROP_MEMBERSHIP:
3518 {
3519 struct packet_mreq_max mreq;
3520 int len = optlen;
3521 memset(&mreq, 0, sizeof(mreq));
3522 if (len < sizeof(struct packet_mreq))
3523 return -EINVAL;
3524 if (len > sizeof(mreq))
3525 len = sizeof(mreq);
3526 if (copy_from_user(&mreq, optval, len))
3527 return -EFAULT;
3528 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3529 return -EINVAL;
3530 if (optname == PACKET_ADD_MEMBERSHIP)
3531 ret = packet_mc_add(sk, &mreq);
3532 else
3533 ret = packet_mc_drop(sk, &mreq);
3534 return ret;
3535 }
3536
3537 case PACKET_RX_RING:
3538 case PACKET_TX_RING:
3539 {
3540 union tpacket_req_u req_u;
3541 int len;
3542
3543 switch (po->tp_version) {
3544 case TPACKET_V1:
3545 case TPACKET_V2:
3546 len = sizeof(req_u.req);
3547 break;
3548 case TPACKET_V3:
3549 default:
3550 len = sizeof(req_u.req3);
3551 break;
3552 }
3553 if (optlen < len)
3554 return -EINVAL;
3555 if (pkt_sk(sk)->has_vnet_hdr)
3556 return -EINVAL;
3557 if (copy_from_user(&req_u.req, optval, len))
3558 return -EFAULT;
3559 return packet_set_ring(sk, &req_u, 0,
3560 optname == PACKET_TX_RING);
3561 }
3562 case PACKET_COPY_THRESH:
3563 {
3564 int val;
3565
3566 if (optlen != sizeof(val))
3567 return -EINVAL;
3568 if (copy_from_user(&val, optval, sizeof(val)))
3569 return -EFAULT;
3570
3571 pkt_sk(sk)->copy_thresh = val;
3572 return 0;
3573 }
3574 case PACKET_VERSION:
3575 {
3576 int val;
3577
3578 if (optlen != sizeof(val))
3579 return -EINVAL;
3580 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3581 return -EBUSY;
3582 if (copy_from_user(&val, optval, sizeof(val)))
3583 return -EFAULT;
3584 switch (val) {
3585 case TPACKET_V1:
3586 case TPACKET_V2:
3587 case TPACKET_V3:
3588 po->tp_version = val;
3589 return 0;
3590 default:
3591 return -EINVAL;
3592 }
3593 }
3594 case PACKET_RESERVE:
3595 {
3596 unsigned int val;
3597
3598 if (optlen != sizeof(val))
3599 return -EINVAL;
3600 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3601 return -EBUSY;
3602 if (copy_from_user(&val, optval, sizeof(val)))
3603 return -EFAULT;
3604 po->tp_reserve = val;
3605 return 0;
3606 }
3607 case PACKET_LOSS:
3608 {
3609 unsigned int val;
3610
3611 if (optlen != sizeof(val))
3612 return -EINVAL;
3613 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3614 return -EBUSY;
3615 if (copy_from_user(&val, optval, sizeof(val)))
3616 return -EFAULT;
3617 po->tp_loss = !!val;
3618 return 0;
3619 }
3620 case PACKET_AUXDATA:
3621 {
3622 int val;
3623
3624 if (optlen < sizeof(val))
3625 return -EINVAL;
3626 if (copy_from_user(&val, optval, sizeof(val)))
3627 return -EFAULT;
3628
3629 po->auxdata = !!val;
3630 return 0;
3631 }
3632 case PACKET_ORIGDEV:
3633 {
3634 int val;
3635
3636 if (optlen < sizeof(val))
3637 return -EINVAL;
3638 if (copy_from_user(&val, optval, sizeof(val)))
3639 return -EFAULT;
3640
3641 po->origdev = !!val;
3642 return 0;
3643 }
3644 case PACKET_VNET_HDR:
3645 {
3646 int val;
3647
3648 if (sock->type != SOCK_RAW)
3649 return -EINVAL;
3650 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3651 return -EBUSY;
3652 if (optlen < sizeof(val))
3653 return -EINVAL;
3654 if (copy_from_user(&val, optval, sizeof(val)))
3655 return -EFAULT;
3656
3657 po->has_vnet_hdr = !!val;
3658 return 0;
3659 }
3660 case PACKET_TIMESTAMP:
3661 {
3662 int val;
3663
3664 if (optlen != sizeof(val))
3665 return -EINVAL;
3666 if (copy_from_user(&val, optval, sizeof(val)))
3667 return -EFAULT;
3668
3669 po->tp_tstamp = val;
3670 return 0;
3671 }
3672 case PACKET_FANOUT:
3673 {
3674 int val;
3675
3676 if (optlen != sizeof(val))
3677 return -EINVAL;
3678 if (copy_from_user(&val, optval, sizeof(val)))
3679 return -EFAULT;
3680
3681 return fanout_add(sk, val & 0xffff, val >> 16);
3682 }
3683 case PACKET_FANOUT_DATA:
3684 {
3685 if (!po->fanout)
3686 return -EINVAL;
3687
3688 return fanout_set_data(po, optval, optlen);
3689 }
3690 case PACKET_TX_HAS_OFF:
3691 {
3692 unsigned int val;
3693
3694 if (optlen != sizeof(val))
3695 return -EINVAL;
3696 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3697 return -EBUSY;
3698 if (copy_from_user(&val, optval, sizeof(val)))
3699 return -EFAULT;
3700 po->tp_tx_has_off = !!val;
3701 return 0;
3702 }
3703 case PACKET_QDISC_BYPASS:
3704 {
3705 int val;
3706
3707 if (optlen != sizeof(val))
3708 return -EINVAL;
3709 if (copy_from_user(&val, optval, sizeof(val)))
3710 return -EFAULT;
3711
3712 po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3713 return 0;
3714 }
3715 default:
3716 return -ENOPROTOOPT;
3717 }
3718}
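/*
 * Usage sketch (illustrative, not part of this file) for the membership
 * options handled above; the interface name is an assumption:
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = if_nametoindex("eth0"),
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 *
 * Unlike toggling IFF_PROMISC via SIOCSIFFLAGS, this form is reference-
 * counted per socket (see packet_mc_add() above) and is unwound
 * automatically when the socket is closed.
 */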
3719
3720static int packet_getsockopt(struct socket *sock, int level, int optname,
3721 char __user *optval, int __user *optlen)
3722{
3723 int len;
3724 int val, lv = sizeof(val);
3725 struct sock *sk = sock->sk;
3726 struct packet_sock *po = pkt_sk(sk);
3727 void *data = &val;
3728 union tpacket_stats_u st;
3729 struct tpacket_rollover_stats rstats;
3730
3731 if (level != SOL_PACKET)
3732 return -ENOPROTOOPT;
3733
3734 if (get_user(len, optlen))
3735 return -EFAULT;
3736
3737 if (len < 0)
3738 return -EINVAL;
3739
3740 switch (optname) {
3741 case PACKET_STATISTICS:
3742 spin_lock_bh(&sk->sk_receive_queue.lock);
3743 memcpy(&st, &po->stats, sizeof(st));
3744 memset(&po->stats, 0, sizeof(po->stats));
3745 spin_unlock_bh(&sk->sk_receive_queue.lock);
3746
3747 if (po->tp_version == TPACKET_V3) {
3748 lv = sizeof(struct tpacket_stats_v3);
3749 st.stats3.tp_packets += st.stats3.tp_drops;
3750 data = &st.stats3;
3751 } else {
3752 lv = sizeof(struct tpacket_stats);
3753 st.stats1.tp_packets += st.stats1.tp_drops;
3754 data = &st.stats1;
3755 }
3756
3757 break;
3758 case PACKET_AUXDATA:
3759 val = po->auxdata;
3760 break;
3761 case PACKET_ORIGDEV:
3762 val = po->origdev;
3763 break;
3764 case PACKET_VNET_HDR:
3765 val = po->has_vnet_hdr;
3766 break;
3767 case PACKET_VERSION:
3768 val = po->tp_version;
3769 break;
3770 case PACKET_HDRLEN:
3771 if (len > sizeof(int))
3772 len = sizeof(int);
3773 if (copy_from_user(&val, optval, len))
3774 return -EFAULT;
3775 switch (val) {
3776 case TPACKET_V1:
3777 val = sizeof(struct tpacket_hdr);
3778 break;
3779 case TPACKET_V2:
3780 val = sizeof(struct tpacket2_hdr);
3781 break;
3782 case TPACKET_V3:
3783 val = sizeof(struct tpacket3_hdr);
3784 break;
3785 default:
3786 return -EINVAL;
3787 }
3788 break;
3789 case PACKET_RESERVE:
3790 val = po->tp_reserve;
3791 break;
3792 case PACKET_LOSS:
3793 val = po->tp_loss;
3794 break;
3795 case PACKET_TIMESTAMP:
3796 val = po->tp_tstamp;
3797 break;
3798 case PACKET_FANOUT:
3799 val = (po->fanout ?
3800 ((u32)po->fanout->id |
3801 ((u32)po->fanout->type << 16) |
3802 ((u32)po->fanout->flags << 24)) :
3803 0);
3804 break;
3805 case PACKET_ROLLOVER_STATS:
3806 if (!po->rollover)
3807 return -EINVAL;
3808 rstats.tp_all = atomic_long_read(&po->rollover->num);
3809 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
3810 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
3811 data = &rstats;
3812 lv = sizeof(rstats);
3813 break;
3814 case PACKET_TX_HAS_OFF:
3815 val = po->tp_tx_has_off;
3816 break;
3817 case PACKET_QDISC_BYPASS:
3818 val = packet_use_direct_xmit(po);
3819 break;
3820 default:
3821 return -ENOPROTOOPT;
3822 }
3823
3824 if (len > lv)
3825 len = lv;
3826 if (put_user(len, optlen))
3827 return -EFAULT;
3828 if (copy_to_user(optval, data, len))
3829 return -EFAULT;
3830 return 0;
3831}
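/*
 * Usage sketch (a hedged userspace example, not part of this file) of
 * reading PACKET_STATISTICS. Per the code above, the counters are zeroed by
 * the read and tp_packets already includes tp_drops:
 *
 *	struct tpacket_stats st;
 *	socklen_t optlen = sizeof(st);
 *	getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &optlen);
 *	printf("received %u, dropped %u\n", st.tp_packets, st.tp_drops);
 *
 * With PACKET_VERSION set to TPACKET_V3, pass a struct tpacket_stats_v3
 * instead.
 */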
3832
3833
3834static int packet_notifier(struct notifier_block *this,
3835 unsigned long msg, void *ptr)
3836{
3837 struct sock *sk;
3838 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3839 struct net *net = dev_net(dev);
3840
3841 rcu_read_lock();
3842 sk_for_each_rcu(sk, &net->packet.sklist) {
3843 struct packet_sock *po = pkt_sk(sk);
3844
3845 switch (msg) {
3846 case NETDEV_UNREGISTER:
3847 if (po->mclist)
3848 packet_dev_mclist_delete(dev, &po->mclist);
3849 /* fallthrough */
3850
3851 case NETDEV_DOWN:
3852 if (dev->ifindex == po->ifindex) {
3853 spin_lock(&po->bind_lock);
3854 if (po->running) {
3855 __unregister_prot_hook(sk, false);
3856 sk->sk_err = ENETDOWN;
3857 if (!sock_flag(sk, SOCK_DEAD))
3858 sk->sk_error_report(sk);
3859 }
3860 if (msg == NETDEV_UNREGISTER) {
3861 packet_cached_dev_reset(po);
3862 po->ifindex = -1;
3863 if (po->prot_hook.dev)
3864 dev_put(po->prot_hook.dev);
3865 po->prot_hook.dev = NULL;
3866 }
3867 spin_unlock(&po->bind_lock);
3868 }
3869 break;
3870 case NETDEV_UP:
3871 if (dev->ifindex == po->ifindex) {
3872 spin_lock(&po->bind_lock);
3873 if (po->num)
3874 register_prot_hook(sk);
3875 spin_unlock(&po->bind_lock);
3876 }
3877 break;
3878 }
3879 }
3880 rcu_read_unlock();
3881 return NOTIFY_DONE;
3882}
3883
3884
3885static int packet_ioctl(struct socket *sock, unsigned int cmd,
3886 unsigned long arg)
3887{
3888 struct sock *sk = sock->sk;
3889
3890 switch (cmd) {
3891 case SIOCOUTQ:
3892 {
3893 int amount = sk_wmem_alloc_get(sk);
3894
3895 return put_user(amount, (int __user *)arg);
3896 }
3897 case SIOCINQ:
3898 {
3899 struct sk_buff *skb;
3900 int amount = 0;
3901
3902 spin_lock_bh(&sk->sk_receive_queue.lock);
3903 skb = skb_peek(&sk->sk_receive_queue);
3904 if (skb)
3905 amount = skb->len;
3906 spin_unlock_bh(&sk->sk_receive_queue.lock);
3907 return put_user(amount, (int __user *)arg);
3908 }
3909 case SIOCGSTAMP:
3910 return sock_get_timestamp(sk, (struct timeval __user *)arg);
3911 case SIOCGSTAMPNS:
3912 return sock_get_timestampns(sk, (struct timespec __user *)arg);
3913
3914#ifdef CONFIG_INET
3915 case SIOCADDRT:
3916 case SIOCDELRT:
3917 case SIOCDARP:
3918 case SIOCGARP:
3919 case SIOCSARP:
3920 case SIOCGIFADDR:
3921 case SIOCSIFADDR:
3922 case SIOCGIFBRDADDR:
3923 case SIOCSIFBRDADDR:
3924 case SIOCGIFNETMASK:
3925 case SIOCSIFNETMASK:
3926 case SIOCGIFDSTADDR:
3927 case SIOCSIFDSTADDR:
3928 case SIOCSIFFLAGS:
3929 return inet_dgram_ops.ioctl(sock, cmd, arg);
3930#endif
3931
3932 default:
3933 return -ENOIOCTLCMD;
3934 }
3935 return 0;
3936}
3937
3938static unsigned int packet_poll(struct file *file, struct socket *sock,
3939 poll_table *wait)
3940{
3941 struct sock *sk = sock->sk;
3942 struct packet_sock *po = pkt_sk(sk);
3943 unsigned int mask = datagram_poll(file, sock, wait);
3944
3945 spin_lock_bh(&sk->sk_receive_queue.lock);
3946 if (po->rx_ring.pg_vec) {
3947 if (!packet_previous_rx_frame(po, &po->rx_ring,
3948 TP_STATUS_KERNEL))
3949 mask |= POLLIN | POLLRDNORM;
3950 }
3951 if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
3952 po->pressure = 0;
3953 spin_unlock_bh(&sk->sk_receive_queue.lock);
3954 spin_lock_bh(&sk->sk_write_queue.lock);
3955 if (po->tx_ring.pg_vec) {
3956 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
3957 mask |= POLLOUT | POLLWRNORM;
3958 }
3959 spin_unlock_bh(&sk->sk_write_queue.lock);
3960 return mask;
3961}
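/*
 * Usage sketch (a hedged userspace RX-ring consumer, not part of this file;
 * "ring", "head" and the TPACKET_V2 geometry are assumptions) built on the
 * poll() semantics above:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	for (;;) {
 *		struct tpacket2_hdr *hdr = (struct tpacket2_hdr *)
 *			(ring + head * req.tp_frame_size);
 *		if (!(hdr->tp_status & TP_STATUS_USER)) {
 *			poll(&pfd, 1, -1);	// sleep until a frame lands
 *			continue;
 *		}
 *		// consume the packet at (uint8_t *)hdr + hdr->tp_mac
 *		hdr->tp_status = TP_STATUS_KERNEL;	// return slot to kernel
 *		head = (head + 1) % req.tp_frame_nr;
 *	}
 */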
3962
3963
3964 /* Dirty? Well, I still did not learn a better way to account
3965 * for user mmaps.
3966 */
3967
3968static void packet_mm_open(struct vm_area_struct *vma)
3969{
3970 struct file *file = vma->vm_file;
3971 struct socket *sock = file->private_data;
3972 struct sock *sk = sock->sk;
3973
3974 if (sk)
3975 atomic_inc(&pkt_sk(sk)->mapped);
3976}
3977
3978static void packet_mm_close(struct vm_area_struct *vma)
3979{
3980 struct file *file = vma->vm_file;
3981 struct socket *sock = file->private_data;
3982 struct sock *sk = sock->sk;
3983
3984 if (sk)
3985 atomic_dec(&pkt_sk(sk)->mapped);
3986}
3987
3988static const struct vm_operations_struct packet_mmap_ops = {
3989 .open = packet_mm_open,
3990 .close = packet_mm_close,
3991};
3992
3993static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
3994 unsigned int len)
3995{
3996 int i;
3997
3998 for (i = 0; i < len; i++) {
3999 if (likely(pg_vec[i].buffer)) {
4000 if (is_vmalloc_addr(pg_vec[i].buffer))
4001 vfree(pg_vec[i].buffer);
4002 else
4003 free_pages((unsigned long)pg_vec[i].buffer,
4004 order);
4005 pg_vec[i].buffer = NULL;
4006 }
4007 }
4008 kfree(pg_vec);
4009}
4010
4011static char *alloc_one_pg_vec_page(unsigned long order)
4012{
4013 char *buffer;
4014 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4015 __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4016
4017 buffer = (char *) __get_free_pages(gfp_flags, order);
4018 if (buffer)
4019 return buffer;
4020
4021 /* __get_free_pages failed, fall back to vmalloc */
4022 buffer = vzalloc((1 << order) * PAGE_SIZE);
4023 if (buffer)
4024 return buffer;
4025
4026 /* vmalloc failed too; retry the page allocator with __GFP_NORETRY cleared */
4027 gfp_flags &= ~__GFP_NORETRY;
4028 buffer = (char *) __get_free_pages(gfp_flags, order);
4029 if (buffer)
4030 return buffer;
4031
4032 /* complete and utter failure */
4033 return NULL;
4034}
4035
4036static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4037{
4038 unsigned int block_nr = req->tp_block_nr;
4039 struct pgv *pg_vec;
4040 int i;
4041
4042 pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
4043 if (unlikely(!pg_vec))
4044 goto out;
4045
4046 for (i = 0; i < block_nr; i++) {
4047 pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4048 if (unlikely(!pg_vec[i].buffer))
4049 goto out_free_pgvec;
4050 }
4051
4052out:
4053 return pg_vec;
4054
4055out_free_pgvec:
4056 free_pg_vec(pg_vec, order, block_nr);
4057 pg_vec = NULL;
4058 goto out;
4059}
4060
4061static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4062 int closing, int tx_ring)
4063{
4064 struct pgv *pg_vec = NULL;
4065 struct packet_sock *po = pkt_sk(sk);
4066 int was_running, order = 0;
4067 struct packet_ring_buffer *rb;
4068 struct sk_buff_head *rb_queue;
4069 __be16 num;
4070 int err = -EINVAL;
4071
4072 struct tpacket_req *req = &req_u->req;
4073
4074 /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
4075 if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
4076 WARN(1, "Tx-ring is not supported.\n");
4077 goto out;
4078 }
4079
4080 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4081 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4082
4083 err = -EBUSY;
4084 if (!closing) {
4085 if (atomic_read(&po->mapped))
4086 goto out;
4087 if (packet_read_pending(rb))
4088 goto out;
4089 }
4090
4091 if (req->tp_block_nr) {
4092 /* Sanity tests and some calculations */
4093 err = -EBUSY;
4094 if (unlikely(rb->pg_vec))
4095 goto out;
4096
4097 switch (po->tp_version) {
4098 case TPACKET_V1:
4099 po->tp_hdrlen = TPACKET_HDRLEN;
4100 break;
4101 case TPACKET_V2:
4102 po->tp_hdrlen = TPACKET2_HDRLEN;
4103 break;
4104 case TPACKET_V3:
4105 po->tp_hdrlen = TPACKET3_HDRLEN;
4106 break;
4107 }
4108
4109 err = -EINVAL;
4110 if (unlikely((int)req->tp_block_size <= 0))
4111 goto out;
4112 if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4113 goto out;
4114 if (po->tp_version >= TPACKET_V3 &&
4115 (int)(req->tp_block_size -
4116 BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
4117 goto out;
4118 if (unlikely(req->tp_frame_size < po->tp_hdrlen +
4119 po->tp_reserve))
4120 goto out;
4121 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4122 goto out;
4123
4124 rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4125 if (unlikely(rb->frames_per_block == 0))
4126 goto out;
4127 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4128 req->tp_frame_nr))
4129 goto out;
4130
4131 err = -ENOMEM;
4132 order = get_order(req->tp_block_size);
4133 pg_vec = alloc_pg_vec(req, order);
4134 if (unlikely(!pg_vec))
4135 goto out;
4136 switch (po->tp_version) {
4137 case TPACKET_V3:
4138 /* Transmit path is not supported. We checked
4139 * it above but just being paranoid
4140 */
4141 if (!tx_ring)
4142 init_prb_bdqc(po, rb, pg_vec, req_u);
4143 break;
4144 default:
4145 break;
4146 }
4147 }
4148 /* Done */
4149 else {
4150 err = -EINVAL;
4151 if (unlikely(req->tp_frame_nr))
4152 goto out;
4153 }
4154
4155 lock_sock(sk);
4156
4157 /* Detach socket from network */
4158 spin_lock(&po->bind_lock);
4159 was_running = po->running;
4160 num = po->num;
4161 if (was_running) {
4162 po->num = 0;
4163 __unregister_prot_hook(sk, false);
4164 }
4165 spin_unlock(&po->bind_lock);
4166
4167 synchronize_net();
4168
4169 err = -EBUSY;
4170 mutex_lock(&po->pg_vec_lock);
4171 if (closing || atomic_read(&po->mapped) == 0) {
4172 err = 0;
4173 spin_lock_bh(&rb_queue->lock);
4174 swap(rb->pg_vec, pg_vec);
4175 rb->frame_max = (req->tp_frame_nr - 1);
4176 rb->head = 0;
4177 rb->frame_size = req->tp_frame_size;
4178 spin_unlock_bh(&rb_queue->lock);
4179
4180 swap(rb->pg_vec_order, order);
4181 swap(rb->pg_vec_len, req->tp_block_nr);
4182
4183 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
4184 po->prot_hook.func = (po->rx_ring.pg_vec) ?
4185 tpacket_rcv : packet_rcv;
4186 skb_queue_purge(rb_queue);
4187 if (atomic_read(&po->mapped))
4188 pr_err("packet_mmap: vma is busy: %d\n",
4189 atomic_read(&po->mapped));
4190 }
4191 mutex_unlock(&po->pg_vec_lock);
4192
4193 spin_lock(&po->bind_lock);
4194 if (was_running) {
4195 po->num = num;
4196 register_prot_hook(sk);
4197 }
4198 spin_unlock(&po->bind_lock);
4199 if (closing && (po->tp_version > TPACKET_V2)) {
4200 /* Because we don't support block-based V3 on tx-ring */
4201 if (!tx_ring)
4202 prb_shutdown_retire_blk_timer(po, rb_queue);
4203 }
4204 release_sock(sk);
4205
4206 if (pg_vec)
4207 free_pg_vec(pg_vec, order, req->tp_block_nr);
4208out:
4209 return err;
4210}
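/*
 * Summarizing the geometry checks above: tp_block_size must be a positive
 * multiple of PAGE_SIZE, tp_frame_size a multiple of TPACKET_ALIGNMENT and
 * at least tp_hdrlen + tp_reserve, frames may not straddle blocks, and
 * tp_frame_nr must equal frames_per_block * tp_block_nr exactly. A worked
 * example with 4 KiB pages (values are illustrative):
 *
 *	tp_block_size = 4096;	// one page
 *	tp_frame_size = 2048;	// frames_per_block = 4096 / 2048 = 2
 *	tp_block_nr   = 64;
 *	tp_frame_nr   = 2 * 64;	// anything else is rejected with -EINVAL
 */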
4211
4212static int packet_mmap(struct file *file, struct socket *sock,
4213 struct vm_area_struct *vma)
4214{
4215 struct sock *sk = sock->sk;
4216 struct packet_sock *po = pkt_sk(sk);
4217 unsigned long size, expected_size;
4218 struct packet_ring_buffer *rb;
4219 unsigned long start;
4220 int err = -EINVAL;
4221 int i;
4222
4223 if (vma->vm_pgoff)
4224 return -EINVAL;
4225
4226 mutex_lock(&po->pg_vec_lock);
4227
4228 expected_size = 0;
4229 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4230 if (rb->pg_vec) {
4231 expected_size += rb->pg_vec_len
4232 * rb->pg_vec_pages
4233 * PAGE_SIZE;
4234 }
4235 }
4236
4237 if (expected_size == 0)
4238 goto out;
4239
4240 size = vma->vm_end - vma->vm_start;
4241 if (size != expected_size)
4242 goto out;
4243
4244 start = vma->vm_start;
4245 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4246 if (rb->pg_vec == NULL)
4247 continue;
4248
4249 for (i = 0; i < rb->pg_vec_len; i++) {
4250 struct page *page;
4251 void *kaddr = rb->pg_vec[i].buffer;
4252 int pg_num;
4253
4254 for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4255 page = pgv_to_page(kaddr);
4256 err = vm_insert_page(vma, start, page);
4257 if (unlikely(err))
4258 goto out;
4259 start += PAGE_SIZE;
4260 kaddr += PAGE_SIZE;
4261 }
4262 }
4263 }
4264
4265 atomic_inc(&po->mapped);
4266 vma->vm_ops = &packet_mmap_ops;
4267 err = 0;
4268
4269out:
4270 mutex_unlock(&po->pg_vec_lock);
4271 return err;
4272}
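/*
 * Usage sketch (a hedged userspace example, not part of this file): both
 * rings must be mapped in one contiguous mmap() at offset 0 whose length is
 * exactly the sum of the two ring sizes checked above:
 *
 *	size_t rx_sz = (size_t)rx_req.tp_block_size * rx_req.tp_block_nr;
 *	size_t tx_sz = (size_t)tx_req.tp_block_size * tx_req.tp_block_nr;
 *	uint8_t *ring = mmap(NULL, rx_sz + tx_sz, PROT_READ | PROT_WRITE,
 *			     MAP_SHARED, fd, 0);
 *	// RX frames live in [ring, ring + rx_sz); TX frames follow directly.
 */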
4273
4274static const struct proto_ops packet_ops_spkt = {
4275 .family = PF_PACKET,
4276 .owner = THIS_MODULE,
4277 .release = packet_release,
4278 .bind = packet_bind_spkt,
4279 .connect = sock_no_connect,
4280 .socketpair = sock_no_socketpair,
4281 .accept = sock_no_accept,
4282 .getname = packet_getname_spkt,
4283 .poll = datagram_poll,
4284 .ioctl = packet_ioctl,
4285 .listen = sock_no_listen,
4286 .shutdown = sock_no_shutdown,
4287 .setsockopt = sock_no_setsockopt,
4288 .getsockopt = sock_no_getsockopt,
4289 .sendmsg = packet_sendmsg_spkt,
4290 .recvmsg = packet_recvmsg,
4291 .mmap = sock_no_mmap,
4292 .sendpage = sock_no_sendpage,
4293};
4294
4295static const struct proto_ops packet_ops = {
4296 .family = PF_PACKET,
4297 .owner = THIS_MODULE,
4298 .release = packet_release,
4299 .bind = packet_bind,
4300 .connect = sock_no_connect,
4301 .socketpair = sock_no_socketpair,
4302 .accept = sock_no_accept,
4303 .getname = packet_getname,
4304 .poll = packet_poll,
4305 .ioctl = packet_ioctl,
4306 .listen = sock_no_listen,
4307 .shutdown = sock_no_shutdown,
4308 .setsockopt = packet_setsockopt,
4309 .getsockopt = packet_getsockopt,
4310 .sendmsg = packet_sendmsg,
4311 .recvmsg = packet_recvmsg,
4312 .mmap = packet_mmap,
4313 .sendpage = sock_no_sendpage,
4314};
4315
4316static const struct net_proto_family packet_family_ops = {
4317 .family = PF_PACKET,
4318 .create = packet_create,
4319 .owner = THIS_MODULE,
4320};
4321
4322static struct notifier_block packet_netdev_notifier = {
4323 .notifier_call = packet_notifier,
4324};
4325
4326#ifdef CONFIG_PROC_FS
4327
4328static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4329 __acquires(RCU)
4330{
4331 struct net *net = seq_file_net(seq);
4332
4333 rcu_read_lock();
4334 return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4335}
4336
4337static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4338{
4339 struct net *net = seq_file_net(seq);
4340 return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4341}
4342
4343static void packet_seq_stop(struct seq_file *seq, void *v)
4344 __releases(RCU)
4345{
4346 rcu_read_unlock();
4347}
4348
4349static int packet_seq_show(struct seq_file *seq, void *v)
4350{
4351 if (v == SEQ_START_TOKEN)
4352 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
4353 else {
4354 struct sock *s = sk_entry(v);
4355 const struct packet_sock *po = pkt_sk(s);
4356
4357 seq_printf(seq,
4358 "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
4359 s,
4360 atomic_read(&s->sk_refcnt),
4361 s->sk_type,
4362 ntohs(po->num),
4363 po->ifindex,
4364 po->running,
4365 atomic_read(&s->sk_rmem_alloc),
4366 from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4367 sock_i_ino(s));
4368 }
4369
4370 return 0;
4371}
4372
4373static const struct seq_operations packet_seq_ops = {
4374 .start = packet_seq_start,
4375 .next = packet_seq_next,
4376 .stop = packet_seq_stop,
4377 .show = packet_seq_show,
4378};
4379
4380static int packet_seq_open(struct inode *inode, struct file *file)
4381{
4382 return seq_open_net(inode, file, &packet_seq_ops,
4383 sizeof(struct seq_net_private));
4384}
4385
4386static const struct file_operations packet_seq_fops = {
4387 .owner = THIS_MODULE,
4388 .open = packet_seq_open,
4389 .read = seq_read,
4390 .llseek = seq_lseek,
4391 .release = seq_release_net,
4392};
4393
4394#endif
4395
4396static int __net_init packet_net_init(struct net *net)
4397{
4398 mutex_init(&net->packet.sklist_lock);
4399 INIT_HLIST_HEAD(&net->packet.sklist);
4400
4401 if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops))
4402 return -ENOMEM;
4403
4404 return 0;
4405}
4406
4407static void __net_exit packet_net_exit(struct net *net)
4408{
4409 remove_proc_entry("packet", net->proc_net);
4410}
4411
4412static struct pernet_operations packet_net_ops = {
4413 .init = packet_net_init,
4414 .exit = packet_net_exit,
4415};
4416
4417
4418static void __exit packet_exit(void)
4419{
4420 unregister_netdevice_notifier(&packet_netdev_notifier);
4421 unregister_pernet_subsys(&packet_net_ops);
4422 sock_unregister(PF_PACKET);
4423 proto_unregister(&packet_proto);
4424}
4425
4426static int __init packet_init(void)
4427{
4428 int rc = proto_register(&packet_proto, 0);
4429
4430 if (rc != 0)
4431 goto out;
4432
4433 sock_register(&packet_family_ops);
4434 register_pernet_subsys(&packet_net_ops);
4435 register_netdevice_notifier(&packet_netdev_notifier);
4436out:
4437 return rc;
4438}
4439
4440module_init(packet_init);
4441module_exit(packet_exit);
4442MODULE_LICENSE("GPL");
4443MODULE_ALIAS_NETPROTO(PF_PACKET);
4444