// SPDX-License-Identifier: GPL-2.0-or-later
/* A network driver using virtio. */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <net/route.h>
#include <net/xdp.h>
#include <net/net_failover.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true, napi_tx = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
module_param(napi_tx, bool, 0644);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
#define VIRTIO_XDP_HEADROOM 256

/* Separating two types of XDP xmit */
#define VIRTIO_XDP_TX		BIT(0)
#define VIRTIO_XDP_REDIR	BIT(1)

/* Marks a send-queue token as carrying an XDP frame (see is_xdp_frame()) */
#define VIRTIO_XDP_FLAG	BIT(0)

/* RX packet size EWMA. The average packet size is used to determine the packet
 * buffer size when refilling RX rings. As the entire RX ring may be refilled
 * at once, the weight is chosen so that the EWMA will be insensitive to
 * short-term, transient changes in packet size.
 */
DECLARE_EWMA(pkt_len, 0, 64)

#define VIRTNET_DRIVER_VERSION "1.0.0"

static const unsigned long guest_offloads[] = {
	VIRTIO_NET_F_GUEST_TSO4,
	VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN,
	VIRTIO_NET_F_GUEST_UFO
};

struct virtnet_stat_desc {
	char desc[ETH_GSTRING_LEN];
	size_t offset;
};

struct virtnet_sq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
};

struct virtnet_rq_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
};

#define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
#define VIRTNET_RQ_STAT(m)	offsetof(struct virtnet_rq_stats, m)

static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
	{ "packets",	VIRTNET_SQ_STAT(packets) },
	{ "bytes",	VIRTNET_SQ_STAT(bytes) },
};

static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
	{ "packets",	VIRTNET_RQ_STAT(packets) },
	{ "bytes",	VIRTNET_RQ_STAT(bytes) },
};

#define VIRTNET_SQ_STATS_LEN	ARRAY_SIZE(virtnet_sq_stats_desc)
#define VIRTNET_RQ_STATS_LEN	ARRAY_SIZE(virtnet_rq_stats_desc)

/* Internal representation of a send virtqueue */
struct send_queue {
	/* Virtqueue associated with this send_queue */
	struct virtqueue *vq;

	/* TX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Name of the send queue: output.$index */
	char name[40];

	struct virtnet_sq_stats stats;

	struct napi_struct napi;
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
	/* Virtqueue associated with this receive_queue */
	struct virtqueue *vq;

	struct napi_struct napi;

	struct bpf_prog __rcu *xdp_prog;

	struct virtnet_rq_stats stats;

	/* Chain pages by the private ptr. */
	struct page *pages;

	/* Average packet length for mergeable receive buffers. */
	struct ewma_pkt_len mrg_avg_pkt_len;

	/* Page frag for packet buffer allocation. */
	struct page_frag alloc_frag;

	/* RX: fragments + linear part + virtio header */
	struct scatterlist sg[MAX_SKB_FRAGS + 2];

	/* Min single buffer size for mergeable buffers case. */
	unsigned int min_buf_len;

	/* Name of this receive queue: input.$index */
	char name[40];

	struct xdp_rxq_info xdp_rxq;
};

struct control_buf {
	struct virtio_net_ctrl_hdr hdr;
	virtio_net_ctrl_ack status;
	struct virtio_net_ctrl_mq mq;
	u8 promisc;
	u8 allmulti;
	__virtio16 vid;
	__virtio64 offloads;
};

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *cvq;
	struct net_device *dev;
	struct send_queue *sq;
	struct receive_queue *rq;
	unsigned int status;

	/* Max # of queue pairs supported by the device */
	u16 max_queue_pairs;

	/* # of queue pairs currently used by the driver */
	u16 curr_queue_pairs;

	/* # of XDP queue pairs currently used by the driver */
	u16 xdp_queue_pairs;

	/* Device can receive "big" (multi-page) packets */
	bool big_packets;

	/* Host will merge rx buffers for big packets */
	bool mergeable_rx_bufs;

	/* Has control virtqueue */
	bool has_cvq;

	/* Host can handle any s/g split between our header and packet data */
	bool any_header_sg;

	/* Packet virtio header size */
	u8 hdr_len;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Work struct for config space updates */
	struct work_struct config_work;

	/* Is the affinity hint set for the virtqueues? */
	bool affinity_hint_set;

	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;

	struct control_buf *ctrl;

	/* Ethtool settings */
	u8 duplex;
	u32 speed;

	unsigned long guest_offloads;

	/* failover when STANDBY feature enabled */
	struct failover *failover;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr_mrg_rxbuf hdr;
	/*
	 * hdr is in a separate sg buffer, and data sg buffer shares same page
	 * with this header sg. This padding makes next sg 16 byte aligned
	 * after the header.
	 */
	char padding[4];
};

static bool is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & VIRTIO_XDP_FLAG;
}

static void *xdp_to_ptr(struct xdp_frame *ptr)
{
	return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
}

static struct xdp_frame *ptr_to_xdp(void *ptr)
{
	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
}
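
/* A send-queue completion token is either a struct sk_buff * or a
 * struct xdp_frame * tagged with VIRTIO_XDP_FLAG in bit 0. Pointer
 * alignment guarantees bit 0 is otherwise clear, so the tag is enough to
 * tell the two apart when buffers are reaped from the virtqueue.
 */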

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN
 */
static int vq2txq(struct virtqueue *vq)
{
	return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
	return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
	return vq->index / 2;
}

static int rxq2vq(int rxq)
{
	return rxq * 2;
}

static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
}

/*
 * private is used to chain pages for big packets, put the whole
 * most recent used list in the beginning for reuse
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->rq->pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)rq->pages;
	rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
	struct page *p = rq->pages;

	if (p) {
		rq->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

static void virtqueue_napi_schedule(struct napi_struct *napi,
				    struct virtqueue *vq)
{
	if (napi_schedule_prep(napi)) {
		virtqueue_disable_cb(vq);
		__napi_schedule(napi);
	}
}

static void virtqueue_napi_complete(struct napi_struct *napi,
				    struct virtqueue *vq, int processed)
{
	int opaque;

	opaque = virtqueue_enable_cb_prepare(vq);
	if (napi_complete_done(napi, processed)) {
		if (unlikely(virtqueue_poll(vq, opaque)))
			virtqueue_napi_schedule(napi, vq);
	} else {
		virtqueue_disable_cb(vq);
	}
}

static void skb_xmit_done(struct virtqueue *vq)
{
	struct virtnet_info *vi = vq->vdev->priv;
	struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;

	/* Suppress further interrupts. */
	virtqueue_disable_cb(vq);

	if (napi->weight)
		virtqueue_napi_schedule(napi, vq);
	else
		/* We were probably waiting for more output buffers. */
		netif_wake_subqueue(vi->dev, vq2txq(vq));
}

#define MRG_CTX_HEADER_SHIFT 22
static void *mergeable_len_to_ctx(unsigned int truesize,
				  unsigned int headroom)
{
	return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize);
}

static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT;
}

static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
{
	return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1);
}
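
/* The mergeable-buffer context packs two values into one pointer-sized
 * word: the buffer truesize in the low MRG_CTX_HEADER_SHIFT (22) bits and
 * the headroom in the bits above, avoiding a per-buffer allocation for
 * the virtqueue ctx.
 */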

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct receive_queue *rq,
				   struct page *page, unsigned int offset,
				   unsigned int len, unsigned int truesize)
{
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	unsigned int copy, hdr_len, hdr_padded_len;
	char *p;

	p = page_address(page) + offset;

	/* copy small packet so we can reuse these pages for small data */
	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	hdr_len = vi->hdr_len;
	if (vi->mergeable_rx_bufs)
		hdr_padded_len = sizeof(*hdr);
	else
		hdr_padded_len = sizeof(struct padded_vnet_hdr);

	memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	offset += hdr_padded_len;
	p += hdr_padded_len;

	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	skb_put_data(skb, p, copy);

	len -= copy;
	offset += copy;

	if (vi->mergeable_rx_bufs) {
		if (len)
			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
		else
			put_page(page);
		return skb;
	}

	/*
	 * Verify that we can indeed put this data into a skb.
	 * This is here to handle cases when the device erroneously
	 * tries to receive more than is possible. This is usually
	 * the case of a broken device.
	 */
	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
		dev_kfree_skb(skb);
		return NULL;
	}
	BUG_ON(offset >= PAGE_SIZE);
	while (len) {
		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
				frag_size, truesize);
		len -= frag_size;
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(rq, page);

	return skb;
}

static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
				  struct send_queue *sq,
				  struct xdp_frame *xdpf)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	int err;

	/* virtqueue wants to use the data area in front of the packet */
	if (unlikely(xdpf->metasize > 0))
		return -EOPNOTSUPP;

	if (unlikely(xdpf->headroom < vi->hdr_len))
		return -EOVERFLOW;

	/* Make room for the virtqueue header */
	xdpf->data -= vi->hdr_len;
	/* Zero header and leave csum up to XDP layers */
	hdr = xdpf->data;
	memset(hdr, 0, vi->hdr_len);
	xdpf->len += vi->hdr_len;

	sg_init_one(sq->sg, xdpf->data, xdpf->len);

	err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
				   GFP_ATOMIC);
	if (unlikely(err))
		return -ENOSPC; /* Caller handles free/refcnt */

	return 0;
}

static int __virtnet_xdp_tx_xmit(struct virtnet_info *vi,
				 struct xdp_frame *xdpf)
{
	struct xdp_frame *xdpf_sent;
	struct send_queue *sq;
	unsigned int len;
	unsigned int qp;

	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
	sq = &vi->sq[qp];

	/* Free up any pending old buffers before queueing new ones. */
	while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
		xdp_return_frame(xdpf_sent);

	return __virtnet_xdp_xmit_one(vi, sq, xdpf);
}

static int virtnet_xdp_xmit(struct net_device *dev,
			    int n, struct xdp_frame **frames, u32 flags)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct receive_queue *rq = vi->rq;
	struct bpf_prog *xdp_prog;
	struct send_queue *sq;
	unsigned int len;
	unsigned int qp;
	int drops = 0;
	int err;
	void *ptr;
	int i;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
	sq = &vi->sq[qp];

	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
	 * indicates XDP resources have been successfully allocated.
	 */
	xdp_prog = rcu_access_pointer(rq->xdp_prog);
	if (!xdp_prog)
		return -ENXIO;

	/* Free up any pending old buffers before queueing new ones. */
	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		if (likely(is_xdp_frame(ptr)))
			xdp_return_frame(ptr_to_xdp(ptr));
		else
			dev_consume_skb_any(ptr);
	}

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];

		err = __virtnet_xdp_xmit_one(vi, sq, xdpf);
		if (err) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		}
	}

	if (flags & XDP_XMIT_FLUSH)
		virtqueue_kick(sq->vq);

	return n - drops;
}
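
/* Summary of the ndo_xdp_xmit behaviour implemented above: completed
 * descriptors are reaped before new frames are queued, frames that don't
 * fit are returned via xdp_return_frame_rx_napi() and counted as drops,
 * the queue is kicked only when XDP_XMIT_FLUSH is set, and the return
 * value is the number of frames accepted (n - drops).
 */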

static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
	return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0;
}

/* We copy the packet for XDP in the following cases:
 *
 * 1) Packet is scattered across multiple rx buffers.
 * 2) Headroom space is insufficient.
 *
 * This is inefficient but it's a temporary condition that
 * we hit right after XDP is enabled and until the queue is refilled
 * with large buffers with sufficient headroom - so it should affect
 * few packets.
 */
static struct page *xdp_linearize_page(struct receive_queue *rq,
				       u16 *num_buf,
				       struct page *p,
				       int offset,
				       int page_off,
				       unsigned int *len)
{
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return NULL;

	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
	page_off += *len;

	while (--*num_buf) {
		int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		unsigned int buflen;
		void *buf;
		int off;

		buf = virtqueue_get_buf(rq->vq, &buflen);
		if (unlikely(!buf))
			goto err_buf;

		p = virt_to_head_page(buf);
		off = buf - page_address(p);

		/* guard against a misconfigured or uncooperative backend that
		 * is sending packets larger than the MTU.
		 */
		if ((page_off + buflen + tailroom) > PAGE_SIZE) {
			put_page(p);
			goto err_buf;
		}

		memcpy(page_address(page) + page_off,
		       page_address(p) + off, buflen);
		page_off += buflen;
		put_page(p);
	}

	/* Headroom does not contribute to packet length */
	*len = page_off - VIRTIO_XDP_HEADROOM;
	return page;
err_buf:
	__free_pages(page, 0);
	return NULL;
}

static struct sk_buff *receive_small(struct net_device *dev,
				     struct virtnet_info *vi,
				     struct receive_queue *rq,
				     void *buf, void *ctx,
				     unsigned int len,
				     unsigned int *xdp_xmit,
				     unsigned int *rbytes)
{
	struct sk_buff *skb;
	struct bpf_prog *xdp_prog;
	unsigned int xdp_headroom = (unsigned long)ctx;
	unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom;
	unsigned int headroom = vi->hdr_len + header_offset;
	unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct page *page = virt_to_head_page(buf);
	unsigned int delta = 0;
	struct page *xdp_page;
	int err;

	len -= vi->hdr_len;
	*rbytes += len;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct virtio_net_hdr_mrg_rxbuf *hdr = buf + header_offset;
		struct xdp_frame *xdpf;
		struct xdp_buff xdp;
		void *orig_data;
		u32 act;

		if (unlikely(hdr->hdr.gso_type))
			goto err_xdp;

		if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
			int offset = buf - page_address(page) + header_offset;
			unsigned int tlen = len + vi->hdr_len;
			u16 num_buf = 1;

			xdp_headroom = virtnet_get_headroom(vi);
			header_offset = VIRTNET_RX_PAD + xdp_headroom;
			headroom = vi->hdr_len + header_offset;
			buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
				 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
			xdp_page = xdp_linearize_page(rq, &num_buf, page,
						      offset, header_offset,
						      &tlen);
			if (!xdp_page)
				goto err_xdp;

			buf = page_address(xdp_page);
			put_page(page);
			page = xdp_page;
		}

		xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
		xdp.data = xdp.data_hard_start + xdp_headroom;
		xdp_set_data_meta_invalid(&xdp);
		xdp.data_end = xdp.data + len;
		xdp.rxq = &rq->xdp_rxq;
		orig_data = xdp.data;
		act = bpf_prog_run_xdp(xdp_prog, &xdp);

		switch (act) {
		case XDP_PASS:
			/* Recalculate length in case bpf program changed it */
			delta = orig_data - xdp.data;
			len = xdp.data_end - xdp.data;
			break;
		case XDP_TX:
			xdpf = convert_to_xdp_frame(&xdp);
			if (unlikely(!xdpf))
				goto err_xdp;
			err = __virtnet_xdp_tx_xmit(vi, xdpf);
			if (unlikely(err)) {
				trace_xdp_exception(vi->dev, xdp_prog, act);
				goto err_xdp;
			}
			*xdp_xmit |= VIRTIO_XDP_TX;
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			err = xdp_do_redirect(dev, &xdp, xdp_prog);
			if (err)
				goto err_xdp;
			*xdp_xmit |= VIRTIO_XDP_REDIR;
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
			/* fall through */
		case XDP_ABORTED:
			trace_xdp_exception(vi->dev, xdp_prog, act);
			/* fall through */
		case XDP_DROP:
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	skb = build_skb(buf, buflen);
	if (!skb) {
		put_page(page);
		goto err;
	}
	skb_reserve(skb, headroom - delta);
	skb_put(skb, len);
	if (!delta) {
		buf += header_offset;
		memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
	} /* keep zeroed vnet hdr since packet was changed by bpf */

err:
	return skb;

err_xdp:
	rcu_read_unlock();
	dev->stats.rx_dropped++;
	put_page(page);
xdp_xmit:
	return NULL;
}

static struct sk_buff *receive_big(struct net_device *dev,
				   struct virtnet_info *vi,
				   struct receive_queue *rq,
				   void *buf,
				   unsigned int len,
				   unsigned int *rbytes)
{
	struct page *page = buf;
	struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);

	*rbytes += len - vi->hdr_len;
	if (unlikely(!skb))
		goto err;

	return skb;

err:
	dev->stats.rx_dropped++;
	give_pages(rq, page);
	return NULL;
}

static struct sk_buff *receive_mergeable(struct net_device *dev,
					 struct virtnet_info *vi,
					 struct receive_queue *rq,
					 void *buf,
					 void *ctx,
					 unsigned int len,
					 unsigned int *xdp_xmit,
					 unsigned int *rbytes)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
	u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
	struct page *page = virt_to_head_page(buf);
	int offset = buf - page_address(page);
	struct sk_buff *head_skb, *curr_skb;
	struct bpf_prog *xdp_prog;
	unsigned int truesize;
	unsigned int headroom = mergeable_ctx_to_headroom(ctx);
	int err;

	head_skb = NULL;
	*rbytes += len - vi->hdr_len;

	rcu_read_lock();
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (xdp_prog) {
		struct xdp_frame *xdpf;
		struct page *xdp_page;
		struct xdp_buff xdp;
		void *data;
		u32 act;

		/* Transient failure which in theory could occur if
		 * in-flight packets from before XDP was enabled reach
		 * the receive path after XDP is loaded.
		 */
		if (unlikely(hdr->hdr.gso_type))
			goto err_xdp;

		/* This happens when the rx buffer size is underestimated
		 * or the headroom is not enough because the buffer
		 * was refilled before XDP was set. This should only
		 * happen for the first several packets, so we don't
		 * care much about its performance.
		 */
		if (unlikely(num_buf > 1 ||
			     headroom < virtnet_get_headroom(vi))) {
			/* linearize data for XDP */
			xdp_page = xdp_linearize_page(rq, &num_buf,
						      page, offset,
						      VIRTIO_XDP_HEADROOM,
						      &len);
			if (!xdp_page)
				goto err_xdp;
			offset = VIRTIO_XDP_HEADROOM;
		} else {
			xdp_page = page;
		}

		/* Allow consuming headroom but reserve enough space to push
		 * the descriptor on if we get an XDP_TX return code.
		 */
		data = page_address(xdp_page) + offset;
		xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
		xdp.data = data + vi->hdr_len;
		xdp_set_data_meta_invalid(&xdp);
		xdp.data_end = xdp.data + (len - vi->hdr_len);
		xdp.rxq = &rq->xdp_rxq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);

		switch (act) {
		case XDP_PASS:
			/* recalculate offset to account for any header
			 * adjustments. Note other cases do not build an
			 * skb and do not change offset.
			 */
			offset = xdp.data -
					page_address(xdp_page) - vi->hdr_len;

			/* recalculate len if xdp.data or xdp.data_end were
			 * adjusted
			 */
			len = xdp.data_end - xdp.data + vi->hdr_len;
			/* We can only create an skb based on xdp_page. */
			if (unlikely(xdp_page != page)) {
				rcu_read_unlock();
				put_page(page);
				head_skb = page_to_skb(vi, rq, xdp_page,
						       offset, len, PAGE_SIZE);
				return head_skb;
			}
			break;
		case XDP_TX:
			xdpf = convert_to_xdp_frame(&xdp);
			if (unlikely(!xdpf))
				goto err_xdp;
			err = __virtnet_xdp_tx_xmit(vi, xdpf);
			if (unlikely(err)) {
				trace_xdp_exception(vi->dev, xdp_prog, act);
				if (unlikely(xdp_page != page))
					put_page(xdp_page);
				goto err_xdp;
			}
			*xdp_xmit |= VIRTIO_XDP_TX;
			if (unlikely(xdp_page != page))
				put_page(page);
			rcu_read_unlock();
			goto xdp_xmit;
		case XDP_REDIRECT:
			err = xdp_do_redirect(dev, &xdp, xdp_prog);
			if (err) {
				if (unlikely(xdp_page != page))
					put_page(xdp_page);
				goto err_xdp;
			}
			*xdp_xmit |= VIRTIO_XDP_REDIR;
			if (unlikely(xdp_page != page))
				put_page(page);
			rcu_read_unlock();
			goto xdp_xmit;
		default:
			bpf_warn_invalid_xdp_action(act);
			/* fall through */
		case XDP_ABORTED:
			trace_xdp_exception(vi->dev, xdp_prog, act);
			/* fall through */
		case XDP_DROP:
			if (unlikely(xdp_page != page))
				__free_pages(xdp_page, 0);
			goto err_xdp;
		}
	}
	rcu_read_unlock();

	truesize = mergeable_ctx_to_truesize(ctx);
	if (unlikely(len > truesize)) {
		pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
			 dev->name, len, (unsigned long)ctx);
		dev->stats.rx_length_errors++;
		goto err_skb;
	}

	head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
	curr_skb = head_skb;

	if (unlikely(!curr_skb))
		goto err_skb;
	while (--num_buf) {
		int num_skb_frags;

		buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers out of %d missing\n",
				 dev->name, num_buf,
				 virtio16_to_cpu(vi->vdev,
						 hdr->num_buffers));
			dev->stats.rx_length_errors++;
			goto err_buf;
		}

		*rbytes += len;
		page = virt_to_head_page(buf);

		truesize = mergeable_ctx_to_truesize(ctx);
		if (unlikely(len > truesize)) {
			pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
				 dev->name, len, (unsigned long)ctx);
			dev->stats.rx_length_errors++;
			goto err_skb;
		}

		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);

			if (unlikely(!nskb))
				goto err_skb;
			if (curr_skb == head_skb)
				skb_shinfo(curr_skb)->frag_list = nskb;
			else
				curr_skb->next = nskb;
			curr_skb = nskb;
			head_skb->truesize += nskb->truesize;
			num_skb_frags = 0;
		}
		if (curr_skb != head_skb) {
			head_skb->data_len += len;
			head_skb->len += len;
			head_skb->truesize += truesize;
		}
		offset = buf - page_address(page);
		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
			put_page(page);
			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
					     len, truesize);
		} else {
			skb_add_rx_frag(curr_skb, num_skb_frags, page,
					offset, len, truesize);
		}
	}

	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
	return head_skb;

err_xdp:
	rcu_read_unlock();
err_skb:
	put_page(page);
	while (num_buf-- > 1) {
		buf = virtqueue_get_buf(rq->vq, &len);
		if (unlikely(!buf)) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 dev->name, num_buf);
			dev->stats.rx_length_errors++;
			break;
		}
		*rbytes += len;
		page = virt_to_head_page(buf);
		put_page(page);
	}
err_buf:
	dev->stats.rx_dropped++;
	dev_kfree_skb(head_skb);
xdp_xmit:
	return NULL;
}

static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
			void *buf, unsigned int len, void **ctx,
			unsigned int *xdp_xmit, unsigned int *rbytes)
{
	struct net_device *dev = vi->dev;
	struct sk_buff *skb;
	struct virtio_net_hdr_mrg_rxbuf *hdr;

	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->mergeable_rx_bufs) {
			put_page(virt_to_head_page(buf));
		} else if (vi->big_packets) {
			give_pages(rq, buf);
		} else {
			put_page(virt_to_head_page(buf));
		}
		return;
	}

	if (vi->mergeable_rx_bufs)
		skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
					rbytes);
	else if (vi->big_packets)
		skb = receive_big(dev, vi, rq, buf, len, rbytes);
	else
		skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, rbytes);

	if (unlikely(!skb))
		return;

	hdr = skb_vnet_hdr(skb);

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
				  virtio_is_little_endian(vi->vdev))) {
		net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
				     dev->name, hdr->hdr.gso_type,
				     hdr->hdr.gso_size);
		goto frame_err;
	}

	skb_record_rx_queue(skb, vq2rxq(rq->vq));
	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	napi_gro_receive(&rq->napi, skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}

/* Unlike mergeable buffers, all buffers are allocated to the
 * same size, except for the headroom. For this reason we do
 * not need to use mergeable_len_to_ctx here - it is enough
 * to store the headroom as the context ignoring the truesize.
 */
static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
			     gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	char *buf;
	unsigned int xdp_headroom = virtnet_get_headroom(vi);
	void *ctx = (void *)(unsigned long)xdp_headroom;
	int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
	int err;

	len = SKB_DATA_ALIGN(len) +
	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
		return -ENOMEM;

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	get_page(alloc_frag->page);
	alloc_frag->offset += len;
	sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom,
		    vi->hdr_len + GOOD_PACKET_LEN);
	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
	if (err < 0)
		put_page(virt_to_head_page(buf));
	return err;
}

static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
			   gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);

	/* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(rq, gfp);
		if (!first) {
			if (list)
				give_pages(rq, list);
			return -ENOMEM;
		}
		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(rq, gfp);
	if (!first) {
		give_pages(rq, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* rq->sg[0], rq->sg[1] share the same page */
	/* a separated rq->sg[0] for header - required in case !any_header_sg */
	sg_set_buf(&rq->sg[0], p, vi->hdr_len);

	/* rq->sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
				  first, gfp);
	if (err < 0)
		give_pages(rq, first);

	return err;
}

static unsigned int get_mergeable_buf_len(struct receive_queue *rq,
					  struct ewma_pkt_len *avg_pkt_len,
					  unsigned int room)
{
	const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	unsigned int len;

	if (room)
		return PAGE_SIZE - room;

	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
				rq->min_buf_len, PAGE_SIZE - hdr_len);

	return ALIGN(len, L1_CACHE_BYTES);
}
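
/* Buffer sizing for mergeable receive: when XDP headroom/tailroom is in
 * play (room != 0) a fixed page-minus-room size is used; otherwise the
 * EWMA of recent packet lengths picks a size clamped between min_buf_len
 * and a page, so small packets don't waste whole pages while large ones
 * avoid spilling into extra buffers.
 */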

static int add_recvbuf_mergeable(struct virtnet_info *vi,
				 struct receive_queue *rq, gfp_t gfp)
{
	struct page_frag *alloc_frag = &rq->alloc_frag;
	unsigned int headroom = virtnet_get_headroom(vi);
	unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
	unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
	char *buf;
	void *ctx;
	int err;
	unsigned int len, hole;

	/* Extra tailroom is needed to satisfy XDP's assumption. This
	 * means rx frags coalescing won't work, but considering we've
	 * disabled GSO for XDP, it won't be a big issue.
	 */
	len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
	if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
		return -ENOMEM;

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	buf += headroom; /* advance address leaving hole at front of pkt */
	get_page(alloc_frag->page);
	alloc_frag->offset += len + room;
	hole = alloc_frag->size - alloc_frag->offset;
	if (hole < len + room) {
		/* To avoid internal fragmentation, if there is very likely not
		 * enough space for another buffer, add the remaining space to
		 * the current buffer.
		 */
		len += hole;
		alloc_frag->offset += hole;
	}

	sg_init_one(rq->sg, buf, len);
	ctx = mergeable_len_to_ctx(len, headroom);
	err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
	if (err < 0)
		put_page(virt_to_head_page(buf));

	return err;
}

/*
 * Returns false if we couldn't fill entirely (OOM).
 *
 * Normally run in the receive path, but can also be run from ndo_open
 * before we're receiving packets, or from refill_work which is
 * careful to disable receiving (using napi_disable).
 */
static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
			  gfp_t gfp)
{
	int err;
	bool oom;

	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(vi, rq, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(vi, rq, gfp);
		else
			err = add_recvbuf_small(vi, rq, gfp);

		oom = err == -ENOMEM;
		if (err)
			break;
	} while (rq->vq->num_free);
	virtqueue_kick(rq->vq);
	return !oom;
}

static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];

	virtqueue_napi_schedule(&rq->napi, rvq);
}

static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
{
	napi_enable(napi);

	/* If all buffers were filled by the other side before we napi_enabled,
	 * we won't get another interrupt, so process any outstanding packets
	 * now. Call local_bh_enable after to trigger softIRQ processing.
	 */
	local_bh_disable();
	virtqueue_napi_schedule(napi, vq);
	local_bh_enable();
}

static void virtnet_napi_tx_enable(struct virtnet_info *vi,
				   struct virtqueue *vq,
				   struct napi_struct *napi)
{
	if (!napi->weight)
		return;

	/* Tx napi touches cachelines of the cpu handling tx interrupts. Only
	 * enable the feature if this is likely affine with the transmit path.
	 */
	if (!vi->affinity_hint_set) {
		napi->weight = 0;
		return;
	}

	return virtnet_napi_enable(vq, napi);
}

static void virtnet_napi_tx_disable(struct napi_struct *napi)
{
	if (napi->weight)
		napi_disable(napi);
}

static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi =
		container_of(work, struct virtnet_info, refill.work);
	bool still_empty;
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		napi_disable(&rq->napi);
		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
		virtnet_napi_enable(rq->vq, &rq->napi);

		/* In theory, this can happen: if we don't get any buffers in
		 * we will *never* try to fill again.
		 */
		if (still_empty)
			schedule_delayed_work(&vi->refill, HZ/2);
	}
}

static int virtnet_receive(struct receive_queue *rq, int budget,
			   unsigned int *xdp_xmit)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	unsigned int len, received = 0, bytes = 0;
	void *buf;

	if (!vi->big_packets || vi->mergeable_rx_bufs) {
		void *ctx;

		while (received < budget &&
		       (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
			receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &bytes);
			received++;
		}
	} else {
		while (received < budget &&
		       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
			receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &bytes);
			received++;
		}
	}

	if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
		if (!try_fill_recv(vi, rq, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	u64_stats_update_begin(&rq->stats.syncp);
	rq->stats.bytes += bytes;
	rq->stats.packets += received;
	u64_stats_update_end(&rq->stats.syncp);

	return received;
}

static void free_old_xmit_skbs(struct send_queue *sq)
{
	unsigned int len;
	unsigned int packets = 0;
	unsigned int bytes = 0;
	void *ptr;

	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
		if (likely(!is_xdp_frame(ptr))) {
			struct sk_buff *skb = ptr;

			pr_debug("Sent skb %p\n", skb);

			bytes += skb->len;
			dev_consume_skb_any(skb);
		} else {
			struct xdp_frame *frame = ptr_to_xdp(ptr);

			bytes += frame->len;
			xdp_return_frame(frame);
		}
		packets++;
	}

	/* Avoid overhead when no packets have been processed;
	 * happens when called speculatively from start_xmit.
	 */
	if (!packets)
		return;

	u64_stats_update_begin(&sq->stats.syncp);
	sq->stats.bytes += bytes;
	sq->stats.packets += packets;
	u64_stats_update_end(&sq->stats.syncp);
}

static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
{
	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
		return false;
	else if (q < vi->curr_queue_pairs)
		return true;
	else
		return false;
}
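
/* Queue layout with XDP attached: the last xdp_queue_pairs of the
 * curr_queue_pairs are reserved for XDP_TX/ndo_xdp_xmit, indexed per CPU
 * (qp = curr_queue_pairs - xdp_queue_pairs + smp_processor_id()). Their
 * completions carry raw xdp_frame pointers rather than sk_buffs, which is
 * why the TX cleanup paths check is_xdp_raw_buffer_queue() first.
 */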

static void virtnet_poll_cleantx(struct receive_queue *rq)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	unsigned int index = vq2rxq(rq->vq);
	struct send_queue *sq = &vi->sq[index];
	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);

	if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
		return;

	if (__netif_tx_trylock(txq)) {
		free_old_xmit_skbs(sq);
		__netif_tx_unlock(txq);
	}

	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
		netif_tx_wake_queue(txq);
}

static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct receive_queue *rq =
		container_of(napi, struct receive_queue, napi);
	struct virtnet_info *vi = rq->vq->vdev->priv;
	struct send_queue *sq;
	unsigned int received, qp;
	unsigned int xdp_xmit = 0;

	virtnet_poll_cleantx(rq);

	received = virtnet_receive(rq, budget, &xdp_xmit);

	/* Out of packets? */
	if (received < budget)
		virtqueue_napi_complete(napi, rq->vq, received);

	if (xdp_xmit & VIRTIO_XDP_REDIR)
		xdp_do_flush();

	if (xdp_xmit & VIRTIO_XDP_TX) {
		qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
		     smp_processor_id();
		sq = &vi->sq[qp];
		virtqueue_kick(sq->vq);
	}

	return received;
}

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i, err;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		if (i < vi->curr_queue_pairs)
			/* Make sure we have some buffers: if oom use wq. */
			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
				schedule_delayed_work(&vi->refill, 0);

		err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i);
		if (err < 0)
			return err;

		err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err < 0) {
			xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
			return err;
		}

		virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
		virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
	}

	return 0;
}

static int virtnet_poll_tx(struct napi_struct *napi, int budget)
{
	struct send_queue *sq = container_of(napi, struct send_queue, napi);
	struct virtnet_info *vi = sq->vq->vdev->priv;
	unsigned int index = vq2txq(sq->vq);
	struct netdev_queue *txq;

	if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
		/* We don't need to enable cb for XDP */
		napi_complete_done(napi, 0);
		return 0;
	}

	txq = netdev_get_tx_queue(vi->dev, index);
	__netif_tx_lock(txq, raw_smp_processor_id());
	free_old_xmit_skbs(sq);
	__netif_tx_unlock(txq);

	virtqueue_napi_complete(napi, sq->vq, 0);

	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
		netif_tx_wake_queue(txq);

	return 0;
}

static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
{
	struct virtio_net_hdr_mrg_rxbuf *hdr;
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
	struct virtnet_info *vi = sq->vq->vdev->priv;
	int num_sg;
	unsigned hdr_len = vi->hdr_len;
	bool can_push;

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	can_push = vi->any_header_sg &&
		   !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
		   !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
	/* Even if we can, don't push here yet as this would skew
	 * csum_start offset below. */
	if (can_push)
		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
	else
		hdr = skb_vnet_hdr(skb);

	if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
				    virtio_is_little_endian(vi->vdev), false,
				    0))
		BUG();

	if (vi->mergeable_rx_bufs)
		hdr->num_buffers = 0;

	sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
	if (can_push) {
		__skb_push(skb, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
		if (unlikely(num_sg < 0))
			return num_sg;
		/* Pull header back to avoid skew in tx bytes calculations. */
		__skb_pull(skb, hdr_len);
	} else {
		sg_set_buf(sq->sg, hdr, hdr_len);
		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
		if (unlikely(num_sg < 0))
			return num_sg;
		num_sg++;
	}
	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
}

static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int qnum = skb_get_queue_mapping(skb);
	struct send_queue *sq = &vi->sq[qnum];
	int err;
	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
	bool kick = !netdev_xmit_more();
	bool use_napi = sq->napi.weight;

	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(sq);

	if (use_napi && kick)
		virtqueue_enable_cb_delayed(sq->vq);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	/* Try to transmit */
	err = xmit_skb(sq, skb);

	/* This should not happen! */
	if (unlikely(err)) {
		dev->stats.tx_fifo_errors++;
		if (net_ratelimit())
			dev_warn(&dev->dev,
				 "Unexpected TXQ (%d) queue failure: %d\n",
				 qnum, err);
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Don't wait up for transmitted skbs to be freed. */
	if (!use_napi) {
		skb_orphan(skb);
		nf_reset(skb);
	}

	/* If running out of space, stop queue to avoid getting packets that we
	 * are then unable to transmit.
	 * An alternative would be to force the queuing layer to requeue the
	 * skb by returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not
	 * be returned in a normal path of operation: it means the driver is
	 * not maintaining the TX queue stop/start state properly, and causes
	 * the stack to do a non-trivial amount of useless work.
	 * Since most packets only take 1 or 2 ring slots, stopping the queue
	 * early means 2 + MAX_SKB_FRAGS slots may occasionally go unused.
	 */
	if (sq->vq->num_free < 2 + MAX_SKB_FRAGS) {
		netif_stop_subqueue(dev, qnum);
		if (!use_napi &&
		    unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
			/* More just got used, free them then recheck. */
			free_old_xmit_skbs(sq);
			if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
				netif_start_subqueue(dev, qnum);
				virtqueue_disable_cb(sq->vq);
			}
		}
	}

	if (kick || netif_xmit_stopped(txq))
		virtqueue_kick(sq->vq);

	return NETDEV_TX_OK;
}

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *out)
{
	struct scatterlist *sgs[4], hdr, stat;
	unsigned out_num = 0, tmp;

	/* Caller should know better */
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));

	vi->ctrl->status = ~0;
	vi->ctrl->hdr.class = class;
	vi->ctrl->hdr.cmd = cmd;
	/* Add header */
	sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
	sgs[out_num++] = &hdr;

	if (out)
		sgs[out_num++] = out;

	/* Add return status. */
	sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
	sgs[out_num] = &stat;

	BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
	virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);

	if (unlikely(!virtqueue_kick(vi->cvq)))
		return vi->ctrl->status == VIRTIO_NET_OK;

	/* Spin for a response, the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
	       !virtqueue_is_broken(vi->cvq))
		cpu_relax();

	return vi->ctrl->status == VIRTIO_NET_OK;
}

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
	int ret;
	struct sockaddr *addr;
	struct scatterlist sg;

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
		return -EOPNOTSUPP;

	addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	ret = eth_prepare_mac_addr_change(dev, addr);
	if (ret)
		goto out;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		sg_init_one(&sg, addr->sa_data, dev->addr_len);
		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
			dev_warn(&vdev->dev,
				 "Failed to set mac address by vq command.\n");
			ret = -EINVAL;
			goto out;
		}
	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
		   !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
		unsigned int i;

		/* Naturally, this has an atomicity problem. */
		for (i = 0; i < dev->addr_len; i++)
			virtio_cwrite8(vdev,
				       offsetof(struct virtio_net_config, mac) +
				       i, addr->sa_data[i]);
	}

	eth_commit_mac_addr_change(dev, p);
	ret = 0;

out:
	kfree(addr);
	return ret;
}

static void virtnet_stats(struct net_device *dev,
			  struct rtnl_link_stats64 *tot)
{
	struct virtnet_info *vi = netdev_priv(dev);
	unsigned int start;
	int i;

	for (i = 0; i < vi->max_queue_pairs; i++) {
		u64 tpackets, tbytes, rpackets, rbytes;
		struct receive_queue *rq = &vi->rq[i];
		struct send_queue *sq = &vi->sq[i];

		do {
			start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
			tpackets = sq->stats.packets;
			tbytes = sq->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));

		do {
			start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
			rpackets = rq->stats.packets;
			rbytes = rq->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));

		tot->rx_packets += rpackets;
		tot->tx_packets += tpackets;
		tot->rx_bytes += rbytes;
		tot->tx_bytes += tbytes;
	}

	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->rx_dropped = dev->stats.rx_dropped;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	for (i = 0; i < vi->curr_queue_pairs; i++)
		napi_schedule(&vi->rq[i].napi);
}
#endif

static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
	rtnl_lock();
	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
	rtnl_unlock();
}

static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
	struct scatterlist sg;
	struct net_device *dev = vi->dev;

	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
		return 0;

	vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
	sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
		dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n",
			 queue_pairs);
		return -EINVAL;
	} else {
		vi->curr_queue_pairs = queue_pairs;
		/* virtnet_open() will refill when device is going to up. */
		if (dev->flags & IFF_UP)
			schedule_delayed_work(&vi->refill, 0);
	}

	return 0;
}

static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
	int err;

	rtnl_lock();
	err = _virtnet_set_queues(vi, queue_pairs);
	rtnl_unlock();
	return err;
}

static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int i;

	/* Make sure refill_work doesn't re-enable napi! */
	cancel_delayed_work_sync(&vi->refill);

	for (i = 0; i < vi->max_queue_pairs; i++) {
		xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
		napi_disable(&vi->rq[i].napi);
		virtnet_napi_tx_disable(&vi->sq[i].napi);
	}

	return 0;
}

static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg[2];
	struct virtio_net_ctrl_mac *mac_data;
	struct netdev_hw_addr *ha;
	int uc_count;
	int mc_count;
	void *buf;
	int i;

	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

	vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
	vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

	sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 vi->ctrl->promisc ? "en" : "dis");

	sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
			 vi->ctrl->allmulti ? "en" : "dis");

	uc_count = netdev_uc_count(dev);
	mc_count = netdev_mc_count(dev);
	/* MAC filter - use one buffer for both unicast and multicast lists */
	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
	mac_data = buf;
	if (!buf)
		return;

	sg_init_table(sg, 2);

	/* Store the unicast list and count in the front of the buffer */
	mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
	i = 0;
	netdev_for_each_uc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[0], mac_data,
		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));

	/* multicast list and count fill the end */
	mac_data = (void *)&mac_data->macs[uc_count][0];

	mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
	i = 0;
	netdev_for_each_mc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[1], mac_data,
		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

	kfree(buf);
}

static int virtnet_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
	return 0;
}

static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
	sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_DEL, &sg))
		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
	return 0;
}

static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
{
	int i;

	if (vi->affinity_hint_set) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
			virtqueue_set_affinity(vi->rq[i].vq, NULL);
			virtqueue_set_affinity(vi->sq[i].vq, NULL);
		}

		vi->affinity_hint_set = false;
	}
}

static void virtnet_set_affinity(struct virtnet_info *vi)
{
	cpumask_var_t mask;
	int stragglers;
	int group_size;
	int i, j, cpu;
	int num_cpu;
	int stride;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		virtnet_clean_affinity(vi, -1);
		return;
	}

	num_cpu = num_online_cpus();
	stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
	stragglers = num_cpu >= vi->curr_queue_pairs ?
			num_cpu % vi->curr_queue_pairs :
			0;
	cpu = cpumask_next(-1, cpu_online_mask);

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		group_size = stride + (i < stragglers ? 1 : 0);

		for (j = 0; j < group_size; j++) {
			cpumask_set_cpu(cpu, mask);
			cpu = cpumask_next_wrap(cpu, cpu_online_mask,
						nr_cpu_ids, false);
		}
		virtqueue_set_affinity(vi->rq[i].vq, mask);
		virtqueue_set_affinity(vi->sq[i].vq, mask);
		__netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, false);
		cpumask_clear(mask);
	}

	vi->affinity_hint_set = true;
	free_cpumask_var(mask);
}
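
/* Affinity policy implemented above: online CPUs are dealt out to the
 * curr_queue_pairs in contiguous groups of `stride` CPUs, with the first
 * `stragglers` queues taking one extra CPU so the remainder is spread
 * evenly. The same mask is used for the rx/tx virtqueue interrupt
 * affinity and for XPS.
 */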

static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
						   node);
	virtnet_set_affinity(vi);
	return 0;
}

static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
						   node_dead);
	virtnet_set_affinity(vi);
	return 0;
}

static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
{
	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
						   node);

	virtnet_clean_affinity(vi, cpu);
	return 0;
}

static enum cpuhp_state virtionet_online;

static int virtnet_cpu_notif_add(struct virtnet_info *vi)
{
	int ret;

	ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
	if (ret)
		return ret;
	ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
					       &vi->node_dead);
	if (!ret)
		return ret;
	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
	return ret;
}

static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
{
	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
	cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
					    &vi->node_dead);
}

static void virtnet_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *ring)
{
	struct virtnet_info *vi = netdev_priv(dev);

	ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
	ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
	ring->rx_pending = ring->rx_max_pending;
	ring->tx_pending = ring->tx_max_pending;
}

static void virtnet_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
}

/* TODO: Eliminate OOO packets during switching */
static int virtnet_set_channels(struct net_device *dev,
				struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);
	u16 queue_pairs = channels->combined_count;
	int err;

	/* We don't support separate rx/tx channels.
	 * We don't allow setting 'other' channels.
	 */
	if (channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
		return -EINVAL;

	/* For now we don't support modifying channels while XDP is loaded
	 * also when XDP is loaded all RX queues have XDP programs so we only
	 * need to check a single RX queue.
	 */
	if (vi->rq[0].xdp_prog)
		return -EINVAL;

	get_online_cpus();
	err = _virtnet_set_queues(vi, queue_pairs);
	if (!err) {
		netif_set_real_num_tx_queues(dev, queue_pairs);
		netif_set_real_num_rx_queues(dev, queue_pairs);

		virtnet_set_affinity(vi);
	}
	put_online_cpus();

	return err;
}

static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct virtnet_info *vi = netdev_priv(dev);
	char *p = (char *)data;
	unsigned int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < vi->curr_queue_pairs; i++) {
			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
				snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_%s",
					 i, virtnet_rq_stats_desc[j].desc);
				p += ETH_GSTRING_LEN;
			}
		}

		for (i = 0; i < vi->curr_queue_pairs; i++) {
			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
				snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_%s",
					 i, virtnet_sq_stats_desc[j].desc);
				p += ETH_GSTRING_LEN;
			}
		}
		break;
	}
}

static int virtnet_get_sset_count(struct net_device *dev, int sset)
{
	struct virtnet_info *vi = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
					       VIRTNET_SQ_STATS_LEN);
	default:
		return -EOPNOTSUPP;
	}
}

static void virtnet_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct virtnet_info *vi = netdev_priv(dev);
	unsigned int idx = 0, start, i, j;
	const u8 *stats_base;
	size_t offset;

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct receive_queue *rq = &vi->rq[i];

		stats_base = (u8 *)&rq->stats;
		do {
			start = u64_stats_fetch_begin_irq(&rq->stats.syncp);
			for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
				offset = virtnet_rq_stats_desc[j].offset;
				data[idx + j] = *(u64 *)(stats_base + offset);
			}
		} while (u64_stats_fetch_retry_irq(&rq->stats.syncp, start));
		idx += VIRTNET_RQ_STATS_LEN;
	}

	for (i = 0; i < vi->curr_queue_pairs; i++) {
		struct send_queue *sq = &vi->sq[i];

		stats_base = (u8 *)&sq->stats;
		do {
			start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
			for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
				offset = virtnet_sq_stats_desc[j].offset;
				data[idx + j] = *(u64 *)(stats_base + offset);
			}
		} while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
		idx += VIRTNET_SQ_STATS_LEN;
	}
}

static void virtnet_get_channels(struct net_device *dev,
				 struct ethtool_channels *channels)
{
	struct virtnet_info *vi = netdev_priv(dev);

	channels->combined_count = vi->curr_queue_pairs;
	channels->max_combined = vi->max_queue_pairs;
	channels->max_other = 0;
	channels->rx_count = 0;
	channels->tx_count = 0;
	channels->other_count = 0;
}

/* Check if the user is trying to change anything besides speed/duplex */
static bool
virtnet_validate_ethtool_cmd(const struct ethtool_link_ksettings *cmd)
{
	struct ethtool_link_ksettings diff1 = *cmd;
	struct ethtool_link_ksettings diff2 = {};

	/* cmd is always set so we need to clear it, validate the port type
	 * and also without autonegotiation we can ignore advertising
	 */
	diff1.base.speed = 0;
	diff2.base.port = PORT_OTHER;
	ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
	diff1.base.duplex = 0;
	diff1.base.cmd = 0;
	diff1.base.link_mode_masks_nwords = 0;

	return !memcmp(&diff1.base, &diff2.base, sizeof(diff1.base)) &&
		bitmap_empty(diff1.link_modes.supported,
			     __ETHTOOL_LINK_MODE_MASK_NBITS) &&
		bitmap_empty(diff1.link_modes.advertising,
			     __ETHTOOL_LINK_MODE_MASK_NBITS) &&
		bitmap_empty(diff1.link_modes.lp_advertising,
			     __ETHTOOL_LINK_MODE_MASK_NBITS);
}
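
/* The check above zeroes every field the user is allowed to change (speed,
 * duplex, advertising) in a copy of the request, then compares what is
 * left against the expected defaults; any remaining difference means the
 * request touches something this device cannot honour.
 */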

static int virtnet_set_link_ksettings(struct net_device *dev,
				      const struct ethtool_link_ksettings *cmd)
{
	struct virtnet_info *vi = netdev_priv(dev);
	u32 speed;

	speed = cmd->base.speed;
	/* don't allow custom speed and duplex */
	if (!ethtool_validate_speed(speed) ||
	    !ethtool_validate_duplex(cmd->base.duplex) ||
	    !virtnet_validate_ethtool_cmd(cmd))
		return -EINVAL;
	vi->speed = speed;
	vi->duplex = cmd->base.duplex;

	return 0;
}

static int virtnet_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings *cmd)
{
	struct virtnet_info *vi = netdev_priv(dev);

	cmd->base.speed = vi->speed;
	cmd->base.duplex = vi->duplex;
	cmd->base.port = PORT_OTHER;

	return 0;
}

static void virtnet_init_settings(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	vi->speed = SPEED_UNKNOWN;
	vi->duplex = DUPLEX_UNKNOWN;
}

static void virtnet_update_settings(struct virtnet_info *vi)
{
	u32 speed;
	u8 duplex;

	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
		return;

	speed = virtio_cread32(vi->vdev, offsetof(struct virtio_net_config,
						  speed));
	if (ethtool_validate_speed(speed))
		vi->speed = speed;
	duplex = virtio_cread8(vi->vdev, offsetof(struct virtio_net_config,
						  duplex));
	if (ethtool_validate_duplex(duplex))
		vi->duplex = duplex;
}

static const struct ethtool_ops virtnet_ethtool_ops = {
	.get_drvinfo = virtnet_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = virtnet_get_ringparam,
	.get_strings = virtnet_get_strings,
	.get_sset_count = virtnet_get_sset_count,
	.get_ethtool_stats = virtnet_get_ethtool_stats,
	.set_channels = virtnet_set_channels,
	.get_channels = virtnet_get_channels,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = virtnet_get_link_ksettings,
	.set_link_ksettings = virtnet_set_link_ksettings,
};

static void virtnet_freeze_down(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int i;

	/* Make sure no work handler is accessing the device */
	flush_work(&vi->config_work);

	netif_device_detach(vi->dev);
	netif_tx_disable(vi->dev);
	cancel_delayed_work_sync(&vi->refill);

	if (netif_running(vi->dev)) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
			napi_disable(&vi->rq[i].napi);
			virtnet_napi_tx_disable(&vi->sq[i].napi);
		}
	}
}

static int init_vqs(struct virtnet_info *vi);

static int virtnet_restore_up(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err, i;

	err = init_vqs(vi);
	if (err)
		return err;

	virtio_device_ready(vdev);

	if (netif_running(vi->dev)) {
		for (i = 0; i < vi->curr_queue_pairs; i++)
			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
				schedule_delayed_work(&vi->refill, 0);

		for (i = 0; i < vi->max_queue_pairs; i++) {
			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
					       &vi->sq[i].napi);
		}
	}

	netif_device_attach(vi->dev);
	return err;
}

static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
{
	struct scatterlist sg;

	vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);

	sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
				  VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
		dev_warn(&vi->dev->dev, "Failed to set guest offloads.\n");
		return -EINVAL;
	}

	return 0;
}

static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
{
	u64 offloads = 0;

	if (!vi->guest_offloads)
		return 0;

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))
		offloads = 1ULL << VIRTIO_NET_F_GUEST_CSUM;

	return virtnet_set_guest_offloads(vi, offloads);
}

static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
{
	u64 offloads = vi->guest_offloads;

	if (!vi->guest_offloads)
		return 0;
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))
		offloads |= 1ULL << VIRTIO_NET_F_GUEST_CSUM;

	return virtnet_set_guest_offloads(vi, offloads);
}
2309
2310static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
2311 struct netlink_ext_ack *extack)
2312{
 unsigned long max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
2314 struct virtnet_info *vi = netdev_priv(dev);
2315 struct bpf_prog *old_prog;
2316 u16 xdp_qp = 0, curr_qp;
2317 int i, err;
2318
2319 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
2320 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
2321 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
2322 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
2323 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO))) {
2324 NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO, disable LRO first");
2325 return -EOPNOTSUPP;
2326 }
2327
2328 if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
2329 NL_SET_ERR_MSG_MOD(extack, "XDP expects header/data in single page, any_header_sg required");
2330 return -EINVAL;
2331 }
2332
2333 if (dev->mtu > max_sz) {
2334 NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
2335 netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz);
2336 return -EINVAL;
2337 }
2338
2339 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
2340 if (prog)
2341 xdp_qp = nr_cpu_ids;
2342
2343
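 /* XDP requires extra queues for XDP_TX */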
2344 if (curr_qp + xdp_qp > vi->max_queue_pairs) {
2345 NL_SET_ERR_MSG_MOD(extack, "Too few free TX rings available");
2346 netdev_warn(dev, "request %i queues but max is %i\n",
2347 curr_qp + xdp_qp, vi->max_queue_pairs);
2348 return -ENOMEM;
2349 }
2350
2351 if (prog)
2352 bpf_prog_add(prog, vi->max_queue_pairs - 1);
2353
2354
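 /* Make sure NAPI is not using any XDP TX queues for RX. */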
2355 if (netif_running(dev)) {
2356 for (i = 0; i < vi->max_queue_pairs; i++) {
2357 napi_disable(&vi->rq[i].napi);
2358 virtnet_napi_tx_disable(&vi->sq[i].napi);
2359 }
2360 }
2361
2362 netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
2363 err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
2364 if (err)
2365 goto err;
2366 vi->xdp_queue_pairs = xdp_qp;
2367
2368 for (i = 0; i < vi->max_queue_pairs; i++) {
2369 old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
2370 rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
2371 if (i == 0) {
2372 if (!old_prog)
2373 virtnet_clear_guest_offloads(vi);
2374 if (!prog)
2375 virtnet_restore_guest_offloads(vi);
2376 }
2377 if (old_prog)
2378 bpf_prog_put(old_prog);
2379 if (netif_running(dev)) {
2380 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
2381 virtnet_napi_tx_enable(vi, vi->sq[i].vq,
2382 &vi->sq[i].napi);
2383 }
2384 }
2385
2386 return 0;
2387
2388err:
2389 if (netif_running(dev)) {
2390 for (i = 0; i < vi->max_queue_pairs; i++) {
2391 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
2392 virtnet_napi_tx_enable(vi, vi->sq[i].vq,
2393 &vi->sq[i].napi);
2394 }
2395 }
2396 if (prog)
2397 bpf_prog_sub(prog, vi->max_queue_pairs - 1);
2398 return err;
2399}
2400
2401static u32 virtnet_xdp_query(struct net_device *dev)
2402{
2403 struct virtnet_info *vi = netdev_priv(dev);
2404 const struct bpf_prog *xdp_prog;
2405 int i;
2406
2407 for (i = 0; i < vi->max_queue_pairs; i++) {
2408 xdp_prog = rtnl_dereference(vi->rq[i].xdp_prog);
2409 if (xdp_prog)
2410 return xdp_prog->aux->id;
2411 }
2412 return 0;
2413}
2414
2415static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2416{
2417 switch (xdp->command) {
2418 case XDP_SETUP_PROG:
2419 return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
2420 case XDP_QUERY_PROG:
2421 xdp->prog_id = virtnet_xdp_query(dev);
2422 return 0;
2423 default:
2424 return -EINVAL;
2425 }
2426}
2427
2428static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
2429 size_t len)
2430{
2431 struct virtnet_info *vi = netdev_priv(dev);
2432 int ret;
2433
2434 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
2435 return -EOPNOTSUPP;
2436
2437 ret = snprintf(buf, len, "sby");
2438 if (ret >= len)
2439 return -EOPNOTSUPP;
2440
2441 return 0;
2442}
2443
2444static const struct net_device_ops virtnet_netdev = {
2445 .ndo_open = virtnet_open,
2446 .ndo_stop = virtnet_close,
2447 .ndo_start_xmit = start_xmit,
2448 .ndo_validate_addr = eth_validate_addr,
2449 .ndo_set_mac_address = virtnet_set_mac_address,
2450 .ndo_set_rx_mode = virtnet_set_rx_mode,
2451 .ndo_get_stats64 = virtnet_stats,
2452 .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
2453 .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
2454#ifdef CONFIG_NET_POLL_CONTROLLER
2455 .ndo_poll_controller = virtnet_netpoll,
2456#endif
2457 .ndo_bpf = virtnet_xdp,
2458 .ndo_xdp_xmit = virtnet_xdp_xmit,
2459 .ndo_features_check = passthru_features_check,
2460 .ndo_get_phys_port_name = virtnet_get_phys_port_name,
2461};
2462
2463static void virtnet_config_changed_work(struct work_struct *work)
2464{
2465 struct virtnet_info *vi =
2466 container_of(work, struct virtnet_info, config_work);
2467 u16 v;
2468
2469 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
2470 struct virtio_net_config, status, &v) < 0)
2471 return;
2472
2473 if (v & VIRTIO_NET_S_ANNOUNCE) {
2474 netdev_notify_peers(vi->dev);
2475 virtnet_ack_link_announce(vi);
2476 }
2477
2478
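 /* Ignore unknown (future) status bits */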
2479 v &= VIRTIO_NET_S_LINK_UP;
2480
2481 if (vi->status == v)
2482 return;
2483
2484 vi->status = v;
2485
2486 if (vi->status & VIRTIO_NET_S_LINK_UP) {
2487 virtnet_update_settings(vi);
2488 netif_carrier_on(vi->dev);
2489 netif_tx_wake_all_queues(vi->dev);
2490 } else {
2491 netif_carrier_off(vi->dev);
2492 netif_tx_stop_all_queues(vi->dev);
2493 }
2494}
2495
2496static void virtnet_config_changed(struct virtio_device *vdev)
2497{
2498 struct virtnet_info *vi = vdev->priv;
2499
2500 schedule_work(&vi->config_work);
2501}
2502
2503static void virtnet_free_queues(struct virtnet_info *vi)
2504{
2505 int i;
2506
2507 for (i = 0; i < vi->max_queue_pairs; i++) {
2508 napi_hash_del(&vi->rq[i].napi);
2509 netif_napi_del(&vi->rq[i].napi);
2510 netif_napi_del(&vi->sq[i].napi);
2511 }
2512
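 /* napi_hash_del() was called before netif_napi_del(), so an RCU
 * grace period must pass before vi->rq is freed.
 */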
2516 synchronize_net();
2517
2518 kfree(vi->rq);
2519 kfree(vi->sq);
2520 kfree(vi->ctrl);
2521}
2522
2523static void _free_receive_bufs(struct virtnet_info *vi)
2524{
2525 struct bpf_prog *old_prog;
2526 int i;
2527
2528 for (i = 0; i < vi->max_queue_pairs; i++) {
2529 while (vi->rq[i].pages)
2530 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
2531
2532 old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
2533 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
2534 if (old_prog)
2535 bpf_prog_put(old_prog);
2536 }
2537}
2538
2539static void free_receive_bufs(struct virtnet_info *vi)
2540{
2541 rtnl_lock();
2542 _free_receive_bufs(vi);
2543 rtnl_unlock();
2544}
2545
2546static void free_receive_page_frags(struct virtnet_info *vi)
2547{
 int i;

2549 for (i = 0; i < vi->max_queue_pairs; i++)
2550 if (vi->rq[i].alloc_frag.page)
2551 put_page(vi->rq[i].alloc_frag.page);
2552}
2553
2554static void free_unused_bufs(struct virtnet_info *vi)
2555{
2556 void *buf;
2557 int i;
2558
2559 for (i = 0; i < vi->max_queue_pairs; i++) {
2560 struct virtqueue *vq = vi->sq[i].vq;
2561 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
2562 if (!is_xdp_frame(buf))
2563 dev_kfree_skb(buf);
2564 else
2565 xdp_return_frame(ptr_to_xdp(buf));
2566 }
2567 }
2568
2569 for (i = 0; i < vi->max_queue_pairs; i++) {
2570 struct virtqueue *vq = vi->rq[i].vq;
2571
2572 while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
2573 if (vi->mergeable_rx_bufs) {
2574 put_page(virt_to_head_page(buf));
2575 } else if (vi->big_packets) {
2576 give_pages(&vi->rq[i], buf);
2577 } else {
2578 put_page(virt_to_head_page(buf));
2579 }
2580 }
2581 }
2582}
2583
2584static void virtnet_del_vqs(struct virtnet_info *vi)
2585{
2586 struct virtio_device *vdev = vi->vdev;
2587
2588 virtnet_clean_affinity(vi, -1);
2589
2590 vdev->config->del_vqs(vdev);
2591
2592 virtnet_free_queues(vi);
2593}
2594
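/* How large should a single buffer be so a queue full of these can fit at
 * least one full-sized packet?
 * Logic below computes this buffer length.
 */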
2599static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
2600{
2601 const unsigned int hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2602 unsigned int rq_size = virtqueue_get_vring_size(vq);
2603 unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
2604 unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len;
2605 unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size);
2606
2607 return max(max(min_buf_len, hdr_len) - hdr_len,
2608 (unsigned int)GOOD_PACKET_LEN);
2609}
2610
2611static int virtnet_find_vqs(struct virtnet_info *vi)
2612{
2613 vq_callback_t **callbacks;
2614 struct virtqueue **vqs;
2615 int ret = -ENOMEM;
2616 int i, total_vqs;
2617 const char **names;
2618 bool *ctx;
2619
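 /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
 * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by
 * a possible control virtqueue.
 */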
2624 total_vqs = vi->max_queue_pairs * 2 +
2625 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
2626
2627
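 /* Allocate space for find_vqs parameters */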
2628 vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
2629 if (!vqs)
2630 goto err_vq;
2631 callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL);
2632 if (!callbacks)
2633 goto err_callback;
2634 names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL);
2635 if (!names)
2636 goto err_names;
2637 if (!vi->big_packets || vi->mergeable_rx_bufs) {
2638 ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
2639 if (!ctx)
2640 goto err_ctx;
2641 } else {
2642 ctx = NULL;
2643 }
2644
2645
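 /* Parameters for the control virtqueue, if any */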
2646 if (vi->has_cvq) {
2647 callbacks[total_vqs - 1] = NULL;
2648 names[total_vqs - 1] = "control";
2649 }
2650
2651
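 /* Allocate/initialize parameters for send/receive virtqueues */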
2652 for (i = 0; i < vi->max_queue_pairs; i++) {
2653 callbacks[rxq2vq(i)] = skb_recv_done;
2654 callbacks[txq2vq(i)] = skb_xmit_done;
2655 sprintf(vi->rq[i].name, "input.%d", i);
2656 sprintf(vi->sq[i].name, "output.%d", i);
2657 names[rxq2vq(i)] = vi->rq[i].name;
2658 names[txq2vq(i)] = vi->sq[i].name;
2659 if (ctx)
2660 ctx[rxq2vq(i)] = true;
2661 }
2662
2663 ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
2664 names, ctx, NULL);
2665 if (ret)
2666 goto err_find;
2667
2668 if (vi->has_cvq) {
2669 vi->cvq = vqs[total_vqs - 1];
2670 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
2671 vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2672 }
2673
2674 for (i = 0; i < vi->max_queue_pairs; i++) {
2675 vi->rq[i].vq = vqs[rxq2vq(i)];
2676 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
2677 vi->sq[i].vq = vqs[txq2vq(i)];
2678 }
2679
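 /* Success: fall through with ret == 0; the temporary arrays below
 * are freed on both the success and error paths.
 */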
2683err_find:
2684 kfree(ctx);
2685err_ctx:
2686 kfree(names);
2687err_names:
2688 kfree(callbacks);
2689err_callback:
2690 kfree(vqs);
2691err_vq:
2692 return ret;
2693}
2694
2695static int virtnet_alloc_queues(struct virtnet_info *vi)
2696{
2697 int i;
2698
2699 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
2700 if (!vi->ctrl)
2701 goto err_ctrl;
2702 vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
2703 if (!vi->sq)
2704 goto err_sq;
2705 vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
2706 if (!vi->rq)
2707 goto err_rq;
2708
2709 INIT_DELAYED_WORK(&vi->refill, refill_work);
2710 for (i = 0; i < vi->max_queue_pairs; i++) {
2711 vi->rq[i].pages = NULL;
2712 netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
2713 napi_weight);
2714 netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx,
2715 napi_tx ? napi_weight : 0);
2716
2717 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
2718 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
2719 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
2720
2721 u64_stats_init(&vi->rq[i].stats.syncp);
2722 u64_stats_init(&vi->sq[i].stats.syncp);
2723 }
2724
2725 return 0;
2726
2727err_rq:
2728 kfree(vi->sq);
2729err_sq:
2730 kfree(vi->ctrl);
2731err_ctrl:
2732 return -ENOMEM;
2733}
2734
2735static int init_vqs(struct virtnet_info *vi)
2736{
2737 int ret;
2738
2739
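 /* Allocate send & receive queues */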
2740 ret = virtnet_alloc_queues(vi);
2741 if (ret)
2742 goto err;
2743
2744 ret = virtnet_find_vqs(vi);
2745 if (ret)
2746 goto err_free;
2747
2748 get_online_cpus();
2749 virtnet_set_affinity(vi);
2750 put_online_cpus();
2751
2752 return 0;
2753
2754err_free:
2755 virtnet_free_queues(vi);
2756err:
2757 return ret;
2758}
2759
2760#ifdef CONFIG_SYSFS
2761static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
2762 char *buf)
2763{
2764 struct virtnet_info *vi = netdev_priv(queue->dev);
2765 unsigned int queue_index = get_netdev_rx_queue_index(queue);
2766 unsigned int headroom = virtnet_get_headroom(vi);
2767 unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
2768 struct ewma_pkt_len *avg;
2769
2770 BUG_ON(queue_index >= vi->max_queue_pairs);
2771 avg = &vi->rq[queue_index].mrg_avg_pkt_len;
2772 return sprintf(buf, "%u\n",
2773 get_mergeable_buf_len(&vi->rq[queue_index], avg,
2774 SKB_DATA_ALIGN(headroom + tailroom)));
2775}
2776
2777static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
2778 __ATTR_RO(mergeable_rx_buffer_size);
2779
2780static struct attribute *virtio_net_mrg_rx_attrs[] = {
2781 &mergeable_rx_buffer_size_attribute.attr,
2782 NULL
2783};
2784
2785static const struct attribute_group virtio_net_mrg_rx_group = {
2786 .name = "virtio_net",
2787 .attrs = virtio_net_mrg_rx_attrs
2788};
2789#endif
2790
2791static bool virtnet_fail_on_feature(struct virtio_device *vdev,
2792 unsigned int fbit,
2793 const char *fname, const char *dname)
2794{
2795 if (!virtio_has_feature(vdev, fbit))
2796 return false;
2797
 dev_err(&vdev->dev, "device advertises feature %s but not %s\n",
 fname, dname);
2800
2801 return true;
2802}
2803
2804#define VIRTNET_FAIL_ON(vdev, fbit, dbit) \
2805 virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
2806
2807static bool virtnet_validate_features(struct virtio_device *vdev)
2808{
2809 if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
2810 (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
2811 "VIRTIO_NET_F_CTRL_VQ") ||
2812 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
2813 "VIRTIO_NET_F_CTRL_VQ") ||
2814 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
2815 "VIRTIO_NET_F_CTRL_VQ") ||
2816 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
2817 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
2818 "VIRTIO_NET_F_CTRL_VQ"))) {
2819 return false;
2820 }
2821
2822 return true;
2823}
2824
2825#define MIN_MTU ETH_MIN_MTU
2826#define MAX_MTU ETH_MAX_MTU
2827
2828static int virtnet_validate(struct virtio_device *vdev)
2829{
2830 if (!vdev->config->get) {
2831 dev_err(&vdev->dev, "%s failure: config access disabled\n",
2832 __func__);
2833 return -EINVAL;
2834 }
2835
2836 if (!virtnet_validate_features(vdev))
2837 return -EINVAL;
2838
2839 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
2840 int mtu = virtio_cread16(vdev,
2841 offsetof(struct virtio_net_config,
2842 mtu));
2843 if (mtu < MIN_MTU)
2844 __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
2845 }
2846
2847 return 0;
2848}
2849
2850static int virtnet_probe(struct virtio_device *vdev)
2851{
2852 int i, err = -ENOMEM;
2853 struct net_device *dev;
2854 struct virtnet_info *vi;
2855 u16 max_queue_pairs;
2856 int mtu;
2857
2858
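 /* Find out whether the host supports a multiqueue virtio_net device */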
2859 err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
2860 struct virtio_net_config,
2861 max_virtqueue_pairs, &max_queue_pairs);
2862
2863
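 /* Fall back to a single queue pair if MQ is absent, out of range,
 * or there is no control virtqueue.
 */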
2864 if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
2865 max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
2866 !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
2867 max_queue_pairs = 1;
2868
2869
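 /* Allocate ourselves a network device with room for our info */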
2870 dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
2871 if (!dev)
2872 return -ENOMEM;
2873
2874
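 /* Set up network device as normal. */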
2875 dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
2876 dev->netdev_ops = &virtnet_netdev;
2877 dev->features = NETIF_F_HIGHDMA;
2878
2879 dev->ethtool_ops = &virtnet_ethtool_ops;
2880 SET_NETDEV_DEV(dev, &vdev->dev);
2881
2882
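 /* Do we support "hardware" checksums? */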
2883 if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
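 /* This opens up the world of extra features. */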
2885 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
2886 if (csum)
2887 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
2888
2889 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
2890 dev->hw_features |= NETIF_F_TSO
2891 | NETIF_F_TSO_ECN | NETIF_F_TSO6;
2892 }
2893
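 /* Individual feature bits: what can the host handle? */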
2894 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
2895 dev->hw_features |= NETIF_F_TSO;
2896 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
2897 dev->hw_features |= NETIF_F_TSO6;
2898 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
2899 dev->hw_features |= NETIF_F_TSO_ECN;
2900
2901 dev->features |= NETIF_F_GSO_ROBUST;
2902
2903 if (gso)
2904 dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
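 /* The (!csum && gso) case will be fixed by register_netdev() */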
2905
2906 }
2907 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
2908 dev->features |= NETIF_F_RXCSUM;
2909
2910 dev->vlan_features = dev->features;
2911
2912
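 /* MTU range: 68 - 65535 */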
2913 dev->min_mtu = MIN_MTU;
2914 dev->max_mtu = MAX_MTU;
2915
2916
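 /* Configuration may specify what MAC to use. Otherwise random. */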
2917 if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
2918 virtio_cread_bytes(vdev,
2919 offsetof(struct virtio_net_config, mac),
2920 dev->dev_addr, dev->addr_len);
2921 else
2922 eth_hw_addr_random(dev);
2923
2924
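 /* Set up our device-specific information */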
2925 vi = netdev_priv(dev);
2926 vi->dev = dev;
2927 vi->vdev = vdev;
2928 vdev->priv = vi;
2929
2930 INIT_WORK(&vi->config_work, virtnet_config_changed_work);
2931
2932
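 /* If we can receive ANY GSO packets, we must allocate large ones. */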
2933 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
2934 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
2935 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
2936 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
2937 vi->big_packets = true;
2938
2939 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
2940 vi->mergeable_rx_bufs = true;
2941
2942 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
2943 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
2944 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2945 else
2946 vi->hdr_len = sizeof(struct virtio_net_hdr);
2947
2948 if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
2949 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
2950 vi->any_header_sg = true;
2951
2952 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
2953 vi->has_cvq = true;
2954
2955 if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
2956 mtu = virtio_cread16(vdev,
2957 offsetof(struct virtio_net_config,
2958 mtu));
2959 if (mtu < dev->min_mtu) {
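 /* Should never trigger: MTU was previously validated
 * in virtnet_validate().
 */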
2963 dev_err(&vdev->dev, "device MTU appears to have changed "
2964 "it is now %d < %d", mtu, dev->min_mtu);
2965 goto free;
2966 }
2967
2968 dev->mtu = mtu;
2969 dev->max_mtu = mtu;
2970
2971
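 /* An MTU above the default Ethernet payload requires big packets */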
2972 if (dev->mtu > ETH_DATA_LEN)
2973 vi->big_packets = true;
2974 }
2975
2976 if (vi->any_header_sg)
2977 dev->needed_headroom = vi->hdr_len;
2978
2979
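 /* Enable as many queue pairs as there are online CPUs, capped at
 * the device maximum.
 */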
2980 if (num_online_cpus() >= max_queue_pairs)
2981 vi->curr_queue_pairs = max_queue_pairs;
2982 else
2983 vi->curr_queue_pairs = num_online_cpus();
2984 vi->max_queue_pairs = max_queue_pairs;
2985
2986
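 /* Allocate/initialize the rx/tx queues, and invoke find_vqs */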
2987 err = init_vqs(vi);
2988 if (err)
2989 goto free;
2990
2991#ifdef CONFIG_SYSFS
2992 if (vi->mergeable_rx_bufs)
2993 dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
2994#endif
2995 netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
2996 netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
2997
2998 virtnet_init_settings(dev);
2999
3000 if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
3001 vi->failover = net_failover_create(vi->dev);
3002 if (IS_ERR(vi->failover)) {
3003 err = PTR_ERR(vi->failover);
3004 goto free_vqs;
3005 }
3006 }
3007
3008 err = register_netdev(dev);
3009 if (err) {
3010 pr_debug("virtio_net: registering device failed\n");
3011 goto free_failover;
3012 }
3013
3014 virtio_device_ready(vdev);
3015
3016 err = virtnet_cpu_notif_add(vi);
3017 if (err) {
3018 pr_debug("virtio_net: registering cpu notifier failed\n");
3019 goto free_unregister_netdev;
3020 }
3021
3022 virtnet_set_queues(vi, vi->curr_queue_pairs);
3023
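 /* Assume link up if device can't report link status,
 * otherwise get link status from config.
 */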
3026 netif_carrier_off(dev);
3027 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
3028 schedule_work(&vi->config_work);
3029 } else {
3030 vi->status = VIRTIO_NET_S_LINK_UP;
3031 virtnet_update_settings(vi);
3032 netif_carrier_on(dev);
3033 }
3034
3035 for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
3036 if (virtio_has_feature(vi->vdev, guest_offloads[i]))
3037 set_bit(guest_offloads[i], &vi->guest_offloads);
3038
3039 pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
3040 dev->name, max_queue_pairs);
3041
3042 return 0;
3043
3044free_unregister_netdev:
3045 vi->vdev->config->reset(vdev);
3046
3047 unregister_netdev(dev);
3048free_failover:
3049 net_failover_destroy(vi->failover);
3050free_vqs:
3051 cancel_delayed_work_sync(&vi->refill);
3052 free_receive_page_frags(vi);
3053 virtnet_del_vqs(vi);
3054free:
3055 free_netdev(dev);
3056 return err;
3057}
3058
3059static void remove_vq_common(struct virtnet_info *vi)
3060{
3061 vi->vdev->config->reset(vi->vdev);
3062
3063
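 /* Free unused buffers in both send and recv, if any. */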
3064 free_unused_bufs(vi);
3065
3066 free_receive_bufs(vi);
3067
3068 free_receive_page_frags(vi);
3069
3070 virtnet_del_vqs(vi);
3071}
3072
3073static void virtnet_remove(struct virtio_device *vdev)
3074{
3075 struct virtnet_info *vi = vdev->priv;
3076
3077 virtnet_cpu_notif_remove(vi);
3078
3079
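 /* Make sure no work handler is accessing the device. */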
3080 flush_work(&vi->config_work);
3081
3082 unregister_netdev(vi->dev);
3083
3084 net_failover_destroy(vi->failover);
3085
3086 remove_vq_common(vi);
3087
3088 free_netdev(vi->dev);
3089}
3090
3091static __maybe_unused int virtnet_freeze(struct virtio_device *vdev)
3092{
3093 struct virtnet_info *vi = vdev->priv;
3094
3095 virtnet_cpu_notif_remove(vi);
3096 virtnet_freeze_down(vdev);
3097 remove_vq_common(vi);
3098
3099 return 0;
3100}
3101
3102static __maybe_unused int virtnet_restore(struct virtio_device *vdev)
3103{
3104 struct virtnet_info *vi = vdev->priv;
3105 int err;
3106
3107 err = virtnet_restore_up(vdev);
3108 if (err)
3109 return err;
3110 virtnet_set_queues(vi, vi->curr_queue_pairs);
3111
3112 err = virtnet_cpu_notif_add(vi);
3113 if (err)
3114 return err;
3115
3116 return 0;
3117}
3118
3119static struct virtio_device_id id_table[] = {
3120 { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
3121 { 0 },
3122};
3123
3124#define VIRTNET_FEATURES \
3125 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
3126 VIRTIO_NET_F_MAC, \
3127 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
3128 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
3129 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
3130 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
3131 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
3132 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
3133 VIRTIO_NET_F_CTRL_MAC_ADDR, \
3134 VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
3135 VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY
3136
3137static unsigned int features[] = {
3138 VIRTNET_FEATURES,
3139};
3140
3141static unsigned int features_legacy[] = {
3142 VIRTNET_FEATURES,
3143 VIRTIO_NET_F_GSO,
3144 VIRTIO_F_ANY_LAYOUT,
3145};
3146
3147static struct virtio_driver virtio_net_driver = {
3148 .feature_table = features,
3149 .feature_table_size = ARRAY_SIZE(features),
3150 .feature_table_legacy = features_legacy,
3151 .feature_table_size_legacy = ARRAY_SIZE(features_legacy),
3152 .driver.name = KBUILD_MODNAME,
3153 .driver.owner = THIS_MODULE,
3154 .id_table = id_table,
3155 .validate = virtnet_validate,
3156 .probe = virtnet_probe,
3157 .remove = virtnet_remove,
3158 .config_changed = virtnet_config_changed,
3159#ifdef CONFIG_PM_SLEEP
3160 .freeze = virtnet_freeze,
3161 .restore = virtnet_restore,
3162#endif
3163};
3164
3165static __init int virtio_net_driver_init(void)
3166{
3167 int ret;
3168
3169 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
3170 virtnet_cpu_online,
3171 virtnet_cpu_down_prep);
3172 if (ret < 0)
3173 goto out;
3174 virtionet_online = ret;
3175 ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
3176 NULL, virtnet_cpu_dead);
3177 if (ret)
3178 goto err_dead;
3179
3180 ret = register_virtio_driver(&virtio_net_driver);
3181 if (ret)
3182 goto err_virtio;
3183 return 0;
3184err_virtio:
3185 cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
3186err_dead:
3187 cpuhp_remove_multi_state(virtionet_online);
3188out:
3189 return ret;
3190}
3191module_init(virtio_net_driver_init);
3192
3193static __exit void virtio_net_driver_exit(void)
3194{
3195 unregister_virtio_driver(&virtio_net_driver);
3196 cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
3197 cpuhp_remove_multi_state(virtionet_online);
3198}
3199module_exit(virtio_net_driver_exit);
3200
3201MODULE_DEVICE_TABLE(virtio, id_table);
3202MODULE_DESCRIPTION("Virtio network driver");
3203MODULE_LICENSE("GPL");
3204