/*
 * Virtual network driver for conversing with remote driver backends.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, XenSource Ltd
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <net/tcp.h>
#include <linux/udp.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <linux/bpf.h>
#include <net/page_pool.h>
#include <linux/bpf_trace.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>
#include <xen/grant_table.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>

/* Module parameters */
#define MAX_QUEUES_DEFAULT 8
static unsigned int xennet_max_queues;
module_param_named(max_queues, xennet_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of queues per virtual interface");

static bool __read_mostly xennet_trusted = true;
module_param_named(trusted, xennet_trusted, bool, 0644);
MODULE_PARM_DESC(trusted, "Is the backend trusted");

#define XENNET_TIMEOUT	(5 * HZ)

static const struct ethtool_ops xennet_ethtool_ops;

struct netfront_cb {
	int pull_to;
};

#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))

#define RX_COPY_THRESHOLD 256

#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)

/* Minimum number of posted RX requests before the backend is notified. */
#define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)

/* Queue name is interface name with "-qNNN" appended */
#define QUEUE_NAME_SIZE (IFNAMSIZ + 6)

/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)

97static DECLARE_WAIT_QUEUE_HEAD(module_wq);
98
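/* Per-CPU RX/TX byte and packet counters, sampled in xennet_get_stats64(). */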
99struct netfront_stats {
100 u64 packets;
101 u64 bytes;
102 struct u64_stats_sync syncp;
103};
104
105struct netfront_info;
106
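/*
 * Per-queue state: NAPI context, event channel(s)/IRQ(s), the shared TX/RX
 * front rings, and the skbs and grant references currently posted on them.
 */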
107struct netfront_queue {
108 unsigned int id;
109 char name[QUEUE_NAME_SIZE];
110 struct netfront_info *info;
111
112 struct bpf_prog __rcu *xdp_prog;
113
114 struct napi_struct napi;
115
	/* Split event channels support, tx_* == rx_* when using
	 * single event channel.
	 */
119 unsigned int tx_evtchn, rx_evtchn;
120 unsigned int tx_irq, rx_irq;
121
122 char tx_irq_name[IRQ_NAME_SIZE];
123 char rx_irq_name[IRQ_NAME_SIZE];
124
125 spinlock_t tx_lock;
126 struct xen_netif_tx_front_ring tx;
127 int tx_ring_ref;
128
	/*
	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
	 * are linked from tx_skb_freelist through tx_link.
	 */
133 struct sk_buff *tx_skbs[NET_TX_RING_SIZE];
134 unsigned short tx_link[NET_TX_RING_SIZE];
135#define TX_LINK_NONE 0xffff
136#define TX_PENDING 0xfffe
137 grant_ref_t gref_tx_head;
138 grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
139 struct page *grant_tx_page[NET_TX_RING_SIZE];
140 unsigned tx_skb_freelist;
141 unsigned int tx_pend_queue;
142
143 spinlock_t rx_lock ____cacheline_aligned_in_smp;
144 struct xen_netif_rx_front_ring rx;
145 int rx_ring_ref;
146
147 struct timer_list rx_refill_timer;
148
149 struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
150 grant_ref_t gref_rx_head;
151 grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
152
153 unsigned int rx_rsp_unconsumed;
154 spinlock_t rx_cons_lock;
155
156 struct page_pool *page_pool;
157 struct xdp_rxq_info xdp_rxq;
158};
159
160struct netfront_info {
161 struct list_head list;
162 struct net_device *netdev;
163
164 struct xenbus_device *xbdev;
165
	/* Multi-queue support */
	struct netfront_queue *queues;

	/* Statistics */
	struct netfront_stats __percpu *rx_stats;
	struct netfront_stats __percpu *tx_stats;

	/* XDP state */
	bool netback_has_xdp_headroom;
	bool netfront_xdp_enabled;

	/* Is device behaving sane? */
	bool broken;

	/* Should skbs be bounced into a zeroed buffer? */
	bool bounce;
182
183 atomic_t rx_gso_checksum_fixup;
184};
185
186struct netfront_rx_info {
187 struct xen_netif_rx_response rx;
188 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
189};
190
191
192
193
194
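/*
 * tx_link[] chains free tx_skbs[] ids starting at tx_skb_freelist; ids taken
 * off the list sit on tx_pend_queue until xennet_mark_tx_pending() marks
 * them TX_PENDING when their requests are about to be pushed to the backend.
 */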
195static void add_id_to_list(unsigned *head, unsigned short *list,
196 unsigned short id)
197{
198 list[id] = *head;
199 *head = id;
200}
201
202static unsigned short get_id_from_list(unsigned *head, unsigned short *list)
203{
204 unsigned int id = *head;
205
206 if (id != TX_LINK_NONE) {
207 *head = list[id];
208 list[id] = TX_LINK_NONE;
209 }
210 return id;
211}
212
213static int xennet_rxidx(RING_IDX idx)
214{
215 return idx & (NET_RX_RING_SIZE - 1);
216}
217
218static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
219 RING_IDX ri)
220{
221 int i = xennet_rxidx(ri);
222 struct sk_buff *skb = queue->rx_skbs[i];
223 queue->rx_skbs[i] = NULL;
224 return skb;
225}
226
227static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
228 RING_IDX ri)
229{
230 int i = xennet_rxidx(ri);
231 grant_ref_t ref = queue->grant_rx_ref[i];
232 queue->grant_rx_ref[i] = INVALID_GRANT_REF;
233 return ref;
234}
235
236#ifdef CONFIG_SYSFS
237static const struct attribute_group xennet_dev_group;
238#endif
239
240static bool xennet_can_sg(struct net_device *dev)
241{
242 return dev->features & NETIF_F_SG;
243}
244
245
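/* Deferred RX refill: kick NAPI so xennet_poll() retries the allocation. */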
246static void rx_refill_timeout(struct timer_list *t)
247{
248 struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
249 napi_schedule(&queue->napi);
250}
251
252static int netfront_tx_slot_available(struct netfront_queue *queue)
253{
254 return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
255 (NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
256}
257
258static void xennet_maybe_wake_tx(struct netfront_queue *queue)
259{
260 struct net_device *dev = queue->info->netdev;
261 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
262
263 if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
264 netfront_tx_slot_available(queue) &&
265 likely(netif_running(dev)))
266 netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
267}
268
269
270static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
271{
272 struct sk_buff *skb;
273 struct page *page;
274
275 skb = __netdev_alloc_skb(queue->info->netdev,
276 RX_COPY_THRESHOLD + NET_IP_ALIGN,
277 GFP_ATOMIC | __GFP_NOWARN);
278 if (unlikely(!skb))
279 return NULL;
280
281 page = page_pool_alloc_pages(queue->page_pool,
282 GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO);
283 if (unlikely(!page)) {
284 kfree_skb(skb);
285 return NULL;
286 }
287 skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
	/* Align ip header to a 16 bytes boundary */
	skb_reserve(skb, NET_IP_ALIGN);
291 skb->dev = queue->info->netdev;
292
293 return skb;
294}
295
296
297static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
298{
299 RING_IDX req_prod = queue->rx.req_prod_pvt;
300 int notify;
301 int err = 0;
302
303 if (unlikely(!netif_carrier_ok(queue->info->netdev)))
304 return;
305
306 for (req_prod = queue->rx.req_prod_pvt;
307 req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
308 req_prod++) {
309 struct sk_buff *skb;
310 unsigned short id;
311 grant_ref_t ref;
312 struct page *page;
313 struct xen_netif_rx_request *req;
314
315 skb = xennet_alloc_one_rx_buffer(queue);
316 if (!skb) {
317 err = -ENOMEM;
318 break;
319 }
320
321 id = xennet_rxidx(req_prod);
322
323 BUG_ON(queue->rx_skbs[id]);
324 queue->rx_skbs[id] = skb;
325
326 ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
327 WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
328 queue->grant_rx_ref[id] = ref;
329
330 page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
331
332 req = RING_GET_REQUEST(&queue->rx, req_prod);
333 gnttab_page_grant_foreign_access_ref_one(ref,
334 queue->info->xbdev->otherend_id,
335 page,
336 0);
337 req->id = id;
338 req->gref = ref;
339 }
340
341 queue->rx.req_prod_pvt = req_prod;
342
	/* Try again later if there are not enough requests or skb allocation
	 * failed.
	 * Enough requests is quantified as the sum of newly created slots and
	 * the unconsumed slots at the backend.
	 */
348 if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
349 unlikely(err)) {
350 mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
351 return;
352 }
353
354 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
355 if (notify)
356 notify_remote_via_irq(queue->rx_irq);
357}
358
359static int xennet_open(struct net_device *dev)
360{
361 struct netfront_info *np = netdev_priv(dev);
362 unsigned int num_queues = dev->real_num_tx_queues;
363 unsigned int i = 0;
364 struct netfront_queue *queue = NULL;
365
366 if (!np->queues || np->broken)
367 return -ENODEV;
368
369 for (i = 0; i < num_queues; ++i) {
370 queue = &np->queues[i];
371 napi_enable(&queue->napi);
372
373 spin_lock_bh(&queue->rx_lock);
374 if (netif_carrier_ok(dev)) {
375 xennet_alloc_rx_buffers(queue);
376 queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
377 if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
378 napi_schedule(&queue->napi);
379 }
380 spin_unlock_bh(&queue->rx_lock);
381 }
382
383 netif_tx_start_all_queues(dev);
384
385 return 0;
386}
387
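/*
 * Reclaim TX slots for which the backend has produced responses: end foreign
 * access on the grants, free the skbs and return the ids to the free list.
 * Marks the device broken on inconsistent responses.  Returns true if any
 * response was processed.
 */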
388static bool xennet_tx_buf_gc(struct netfront_queue *queue)
389{
390 RING_IDX cons, prod;
391 unsigned short id;
392 struct sk_buff *skb;
393 bool more_to_do;
394 bool work_done = false;
395 const struct device *dev = &queue->info->netdev->dev;
396
397 BUG_ON(!netif_carrier_ok(queue->info->netdev));
398
399 do {
400 prod = queue->tx.sring->rsp_prod;
401 if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
402 dev_alert(dev, "Illegal number of responses %u\n",
403 prod - queue->tx.rsp_cons);
404 goto err;
405 }
406 rmb();
407
408 for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
409 struct xen_netif_tx_response txrsp;
410
411 work_done = true;
412
413 RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
414 if (txrsp.status == XEN_NETIF_RSP_NULL)
415 continue;
416
417 id = txrsp.id;
418 if (id >= RING_SIZE(&queue->tx)) {
419 dev_alert(dev,
420 "Response has incorrect id (%u)\n",
421 id);
422 goto err;
423 }
424 if (queue->tx_link[id] != TX_PENDING) {
425 dev_alert(dev,
426 "Response for inactive request\n");
427 goto err;
428 }
429
430 queue->tx_link[id] = TX_LINK_NONE;
431 skb = queue->tx_skbs[id];
432 queue->tx_skbs[id] = NULL;
433 if (unlikely(!gnttab_end_foreign_access_ref(
434 queue->grant_tx_ref[id]))) {
435 dev_alert(dev,
436 "Grant still in use by backend domain\n");
437 goto err;
438 }
439 gnttab_release_grant_reference(
440 &queue->gref_tx_head, queue->grant_tx_ref[id]);
441 queue->grant_tx_ref[id] = INVALID_GRANT_REF;
442 queue->grant_tx_page[id] = NULL;
443 add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
444 dev_kfree_skb_irq(skb);
445 }
446
447 queue->tx.rsp_cons = prod;
448
449 RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
450 } while (more_to_do);
451
452 xennet_maybe_wake_tx(queue);
453
454 return work_done;
455
456 err:
457 queue->info->broken = true;
458 dev_alert(dev, "Disabled for further use\n");
459
460 return work_done;
461}
462
463struct xennet_gnttab_make_txreq {
464 struct netfront_queue *queue;
465 struct sk_buff *skb;
466 struct page *page;
467 struct xen_netif_tx_request *tx;
468 struct xen_netif_tx_request tx_local;
469 unsigned int size;
470};
471
472static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
473 unsigned int len, void *data)
474{
475 struct xennet_gnttab_make_txreq *info = data;
476 unsigned int id;
477 struct xen_netif_tx_request *tx;
478 grant_ref_t ref;
479
480 struct page *page = info->page;
481 struct netfront_queue *queue = info->queue;
482 struct sk_buff *skb = info->skb;
483
484 id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
485 tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
486 ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
487 WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
488
489 gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
490 gfn, GNTMAP_readonly);
491
492 queue->tx_skbs[id] = skb;
493 queue->grant_tx_page[id] = page;
494 queue->grant_tx_ref[id] = ref;
495
496 info->tx_local.id = id;
497 info->tx_local.gref = ref;
498 info->tx_local.offset = offset;
499 info->tx_local.size = len;
500 info->tx_local.flags = 0;
501
502 *tx = info->tx_local;
503
504
505
506
507
508 add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);
509
510 info->tx = tx;
511 info->size += info->tx_local.size;
512}
513
514static struct xen_netif_tx_request *xennet_make_first_txreq(
515 struct xennet_gnttab_make_txreq *info,
516 unsigned int offset, unsigned int len)
517{
518 info->size = 0;
519
520 gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);
521
522 return info->tx;
523}
524
525static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
526 unsigned int len, void *data)
527{
528 struct xennet_gnttab_make_txreq *info = data;
529
530 info->tx->flags |= XEN_NETTXF_more_data;
531 skb_get(info->skb);
532 xennet_tx_setup_grant(gfn, offset, len, data);
533}
534
535static void xennet_make_txreqs(
536 struct xennet_gnttab_make_txreq *info,
537 struct page *page,
538 unsigned int offset, unsigned int len)
539{
540
541 page += offset >> PAGE_SHIFT;
542 offset &= ~PAGE_MASK;
543
544 while (len) {
545 info->page = page;
546 info->size = 0;
547
548 gnttab_foreach_grant_in_range(page, offset, len,
549 xennet_make_one_txreq,
550 info);
551
552 page++;
553 offset = 0;
554 len -= info->size;
555 }
556}
557
558
559
560
561
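/*
 * Count the ring slots needed for this skb: one grant per XEN_PAGE_SIZE
 * chunk of the linear area and of each (possibly compound) fragment.
 */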
562static int xennet_count_skb_slots(struct sk_buff *skb)
563{
564 int i, frags = skb_shinfo(skb)->nr_frags;
565 int slots;
566
567 slots = gnttab_count_grant(offset_in_page(skb->data),
568 skb_headlen(skb));
569
570 for (i = 0; i < frags; i++) {
571 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
572 unsigned long size = skb_frag_size(frag);
573 unsigned long offset = skb_frag_off(frag);
574
575
576 offset &= ~PAGE_MASK;
577
578 slots += gnttab_count_grant(offset, size);
579 }
580
581 return slots;
582}
583
584static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
585 struct net_device *sb_dev)
586{
587 unsigned int num_queues = dev->real_num_tx_queues;
588 u32 hash;
589 u16 queue_idx;
590
591
592 if (num_queues == 1) {
593 queue_idx = 0;
594 } else {
595 hash = skb_get_hash(skb);
596 queue_idx = hash % num_queues;
597 }
598
599 return queue_idx;
600}
601
602static void xennet_mark_tx_pending(struct netfront_queue *queue)
603{
604 unsigned int i;
605
606 while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
607 TX_LINK_NONE)
608 queue->tx_link[i] = TX_PENDING;
609}
610
611static int xennet_xdp_xmit_one(struct net_device *dev,
612 struct netfront_queue *queue,
613 struct xdp_frame *xdpf)
614{
615 struct netfront_info *np = netdev_priv(dev);
616 struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
617 struct xennet_gnttab_make_txreq info = {
618 .queue = queue,
619 .skb = NULL,
620 .page = virt_to_page(xdpf->data),
621 };
622 int notify;
623
624 xennet_make_first_txreq(&info,
625 offset_in_page(xdpf->data),
626 xdpf->len);
627
628 xennet_mark_tx_pending(queue);
629
630 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
631 if (notify)
632 notify_remote_via_irq(queue->tx_irq);
633
634 u64_stats_update_begin(&tx_stats->syncp);
635 tx_stats->bytes += xdpf->len;
636 tx_stats->packets++;
637 u64_stats_update_end(&tx_stats->syncp);
638
639 xennet_tx_buf_gc(queue);
640
641 return 0;
642}
643
644static int xennet_xdp_xmit(struct net_device *dev, int n,
645 struct xdp_frame **frames, u32 flags)
646{
647 unsigned int num_queues = dev->real_num_tx_queues;
648 struct netfront_info *np = netdev_priv(dev);
649 struct netfront_queue *queue = NULL;
650 unsigned long irq_flags;
651 int nxmit = 0;
652 int i;
653
654 if (unlikely(np->broken))
655 return -ENODEV;
656 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
657 return -EINVAL;
658
659 queue = &np->queues[smp_processor_id() % num_queues];
660
661 spin_lock_irqsave(&queue->tx_lock, irq_flags);
662 for (i = 0; i < n; i++) {
663 struct xdp_frame *xdpf = frames[i];
664
665 if (!xdpf)
666 continue;
667 if (xennet_xdp_xmit_one(dev, queue, xdpf))
668 break;
669 nxmit++;
670 }
671 spin_unlock_irqrestore(&queue->tx_lock, irq_flags);
672
673 return nxmit;
674}
675
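/*
 * Copy an skb into a freshly allocated, zeroed, page-aligned buffer so that
 * pages granted to a backend that is not trusted carry nothing but the
 * packet data itself (see the "bounce" flag and the "trusted" parameter).
 */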
static struct sk_buff *bounce_skb(const struct sk_buff *skb)
677{
678 unsigned int headerlen = skb_headroom(skb);
679
680 unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len,
681 XEN_PAGE_SIZE);
682 struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO);
683
684 if (!n)
685 return NULL;
686
687 if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) {
688 WARN_ONCE(1, "misaligned skb allocated\n");
689 kfree_skb(n);
690 return NULL;
691 }
692
693
694 skb_reserve(n, headerlen);
695
696 skb_put(n, skb->len);
697
698 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
699
700 skb_copy_header(n, skb);
701 return n;
702}
703
704#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
705
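/*
 * Main transmit path: grant the linear area and all fragments to the
 * backend, build the corresponding TX requests on the shared ring and
 * notify the backend when required.
 */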
706static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
707{
708 struct netfront_info *np = netdev_priv(dev);
709 struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
710 struct xen_netif_tx_request *first_tx;
711 unsigned int i;
712 int notify;
713 int slots;
714 struct page *page;
715 unsigned int offset;
716 unsigned int len;
717 unsigned long flags;
718 struct netfront_queue *queue = NULL;
719 struct xennet_gnttab_make_txreq info = { };
720 unsigned int num_queues = dev->real_num_tx_queues;
721 u16 queue_index;
722 struct sk_buff *nskb;
723
724
725 if (num_queues < 1)
726 goto drop;
727 if (unlikely(np->broken))
728 goto drop;
729
730 queue_index = skb_get_queue_mapping(skb);
731 queue = &np->queues[queue_index];
732
733
734
735
736 if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
737 net_alert_ratelimited(
738 "xennet: skb->len = %u, too big for wire format\n",
739 skb->len);
740 goto drop;
741 }
742
743 slots = xennet_count_skb_slots(skb);
744 if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
745 net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
746 slots, skb->len);
747 if (skb_linearize(skb))
748 goto drop;
749 }
750
751 page = virt_to_page(skb->data);
752 offset = offset_in_page(skb->data);
753
	/* The first req should be at least ETH_HLEN size or the packet will be
	 * dropped by netback.
	 *
	 * If the backend is not trusted bounce all data to zeroed pages to
	 * avoid exposing contiguous data on the granted page not belonging to
	 * the skb.
	 */
	if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
762 nskb = bounce_skb(skb);
763 if (!nskb)
764 goto drop;
765 dev_consume_skb_any(skb);
766 skb = nskb;
767 page = virt_to_page(skb->data);
768 offset = offset_in_page(skb->data);
769 }
770
771 len = skb_headlen(skb);
772
773 spin_lock_irqsave(&queue->tx_lock, flags);
774
775 if (unlikely(!netif_carrier_ok(dev) ||
776 (slots > 1 && !xennet_can_sg(dev)) ||
777 netif_needs_gso(skb, netif_skb_features(skb)))) {
778 spin_unlock_irqrestore(&queue->tx_lock, flags);
779 goto drop;
780 }
781
782
783 info.queue = queue;
784 info.skb = skb;
785 info.page = page;
786 first_tx = xennet_make_first_txreq(&info, offset, len);
787 offset += info.tx_local.size;
788 if (offset == PAGE_SIZE) {
789 page++;
790 offset = 0;
791 }
792 len -= info.tx_local.size;
793
794 if (skb->ip_summed == CHECKSUM_PARTIAL)
795
796 first_tx->flags |= XEN_NETTXF_csum_blank |
797 XEN_NETTXF_data_validated;
798 else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
799
800 first_tx->flags |= XEN_NETTXF_data_validated;
801
802
803 if (skb_shinfo(skb)->gso_size) {
804 struct xen_netif_extra_info *gso;
805
806 gso = (struct xen_netif_extra_info *)
807 RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
808
809 first_tx->flags |= XEN_NETTXF_extra_info;
810
811 gso->u.gso.size = skb_shinfo(skb)->gso_size;
812 gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
813 XEN_NETIF_GSO_TYPE_TCPV6 :
814 XEN_NETIF_GSO_TYPE_TCPV4;
815 gso->u.gso.pad = 0;
816 gso->u.gso.features = 0;
817
818 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
819 gso->flags = 0;
820 }
821
822
823 xennet_make_txreqs(&info, page, offset, len);
824
825
826 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
827 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
828 xennet_make_txreqs(&info, skb_frag_page(frag),
829 skb_frag_off(frag),
830 skb_frag_size(frag));
831 }
832
833
834 first_tx->size = skb->len;
835
836
837 skb_tx_timestamp(skb);
838
839 xennet_mark_tx_pending(queue);
840
841 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
842 if (notify)
843 notify_remote_via_irq(queue->tx_irq);
844
845 u64_stats_update_begin(&tx_stats->syncp);
846 tx_stats->bytes += skb->len;
847 tx_stats->packets++;
848 u64_stats_update_end(&tx_stats->syncp);
849
850
851 xennet_tx_buf_gc(queue);
852
853 if (!netfront_tx_slot_available(queue))
854 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
855
856 spin_unlock_irqrestore(&queue->tx_lock, flags);
857
858 return NETDEV_TX_OK;
859
860 drop:
861 dev->stats.tx_dropped++;
862 dev_kfree_skb_any(skb);
863 return NETDEV_TX_OK;
864}
865
866static int xennet_close(struct net_device *dev)
867{
868 struct netfront_info *np = netdev_priv(dev);
869 unsigned int num_queues = dev->real_num_tx_queues;
870 unsigned int i;
871 struct netfront_queue *queue;
872 netif_tx_stop_all_queues(np->netdev);
873 for (i = 0; i < num_queues; ++i) {
874 queue = &np->queues[i];
875 napi_disable(&queue->napi);
876 }
877 return 0;
878}
879
880static void xennet_destroy_queues(struct netfront_info *info)
881{
882 unsigned int i;
883
884 for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
885 struct netfront_queue *queue = &info->queues[i];
886
887 if (netif_running(info->netdev))
888 napi_disable(&queue->napi);
889 netif_napi_del(&queue->napi);
890 }
891
892 kfree(info->queues);
893 info->queues = NULL;
894}
895
896static void xennet_uninit(struct net_device *dev)
897{
898 struct netfront_info *np = netdev_priv(dev);
899 xennet_destroy_queues(np);
900}
901
902static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
903{
904 unsigned long flags;
905
906 spin_lock_irqsave(&queue->rx_cons_lock, flags);
907 queue->rx.rsp_cons = val;
908 queue->rx_rsp_unconsumed = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx);
909 spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
910}
911
912static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
913 grant_ref_t ref)
914{
915 int new = xennet_rxidx(queue->rx.req_prod_pvt);
916
917 BUG_ON(queue->rx_skbs[new]);
918 queue->rx_skbs[new] = skb;
919 queue->grant_rx_ref[new] = ref;
920 RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
921 RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
922 queue->rx.req_prod_pvt++;
923}
924
925static int xennet_get_extras(struct netfront_queue *queue,
926 struct xen_netif_extra_info *extras,
927 RING_IDX rp)
928
929{
930 struct xen_netif_extra_info extra;
931 struct device *dev = &queue->info->netdev->dev;
932 RING_IDX cons = queue->rx.rsp_cons;
933 int err = 0;
934
935 do {
936 struct sk_buff *skb;
937 grant_ref_t ref;
938
939 if (unlikely(cons + 1 == rp)) {
940 if (net_ratelimit())
941 dev_warn(dev, "Missing extra info\n");
942 err = -EBADR;
943 break;
944 }
945
946 RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
947
948 if (unlikely(!extra.type ||
949 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
950 if (net_ratelimit())
951 dev_warn(dev, "Invalid extra type: %d\n",
952 extra.type);
953 err = -EINVAL;
954 } else {
955 extras[extra.type - 1] = extra;
956 }
957
958 skb = xennet_get_rx_skb(queue, cons);
959 ref = xennet_get_rx_ref(queue, cons);
960 xennet_move_rx_slot(queue, skb, ref);
961 } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
962
963 xennet_set_rx_rsp_cons(queue, cons);
964 return err;
965}
966
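/*
 * Run the attached XDP program on a received page and act on its verdict:
 * XDP_TX retransmits the page, XDP_REDIRECT hands it to another device,
 * XDP_PASS falls through to the normal receive path.
 */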
967static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
968 struct xen_netif_rx_response *rx, struct bpf_prog *prog,
969 struct xdp_buff *xdp, bool *need_xdp_flush)
970{
971 struct xdp_frame *xdpf;
972 u32 len = rx->status;
973 u32 act;
974 int err;
975
976 xdp_init_buff(xdp, XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
977 &queue->xdp_rxq);
978 xdp_prepare_buff(xdp, page_address(pdata), XDP_PACKET_HEADROOM,
979 len, false);
980
981 act = bpf_prog_run_xdp(prog, xdp);
982 switch (act) {
983 case XDP_TX:
984 get_page(pdata);
985 xdpf = xdp_convert_buff_to_frame(xdp);
986 err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
987 if (unlikely(!err))
988 xdp_return_frame_rx_napi(xdpf);
989 else if (unlikely(err < 0))
990 trace_xdp_exception(queue->info->netdev, prog, act);
991 break;
992 case XDP_REDIRECT:
993 get_page(pdata);
994 err = xdp_do_redirect(queue->info->netdev, xdp, prog);
995 *need_xdp_flush = true;
996 if (unlikely(err))
997 trace_xdp_exception(queue->info->netdev, prog, act);
998 break;
999 case XDP_PASS:
1000 case XDP_DROP:
1001 break;
1002
1003 case XDP_ABORTED:
1004 trace_xdp_exception(queue->info->netdev, prog, act);
1005 break;
1006
1007 default:
1008 bpf_warn_invalid_xdp_action(queue->info->netdev, prog, act);
1009 }
1010
1011 return act;
1012}
1013
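/*
 * Consume the RX responses (and extra-info slots) that make up one packet,
 * validating offsets and grants, and queue the per-slot skbs on "list" for
 * xennet_fill_frags() to assemble.
 */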
1014static int xennet_get_responses(struct netfront_queue *queue,
1015 struct netfront_rx_info *rinfo, RING_IDX rp,
1016 struct sk_buff_head *list,
1017 bool *need_xdp_flush)
1018{
1019 struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
1020 int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
1021 RING_IDX cons = queue->rx.rsp_cons;
1022 struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
1023 struct xen_netif_extra_info *extras = rinfo->extras;
1024 grant_ref_t ref = xennet_get_rx_ref(queue, cons);
1025 struct device *dev = &queue->info->netdev->dev;
1026 struct bpf_prog *xdp_prog;
1027 struct xdp_buff xdp;
1028 int slots = 1;
1029 int err = 0;
1030 u32 verdict;
1031
1032 if (rx->flags & XEN_NETRXF_extra_info) {
1033 err = xennet_get_extras(queue, extras, rp);
1034 if (!err) {
1035 if (extras[XEN_NETIF_EXTRA_TYPE_XDP - 1].type) {
1036 struct xen_netif_extra_info *xdp;
1037
1038 xdp = &extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];
1039 rx->offset = xdp->u.xdp.headroom;
1040 }
1041 }
1042 cons = queue->rx.rsp_cons;
1043 }
1044
1045 for (;;) {
1046 if (unlikely(rx->status < 0 ||
1047 rx->offset + rx->status > XEN_PAGE_SIZE)) {
1048 if (net_ratelimit())
1049 dev_warn(dev, "rx->offset: %u, size: %d\n",
1050 rx->offset, rx->status);
1051 xennet_move_rx_slot(queue, skb, ref);
1052 err = -EINVAL;
1053 goto next;
1054 }
1055
		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver. In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == INVALID_GRANT_REF) {
1062 if (net_ratelimit())
1063 dev_warn(dev, "Bad rx response id %d.\n",
1064 rx->id);
1065 err = -EINVAL;
1066 goto next;
1067 }
1068
1069 if (!gnttab_end_foreign_access_ref(ref)) {
1070 dev_alert(dev,
1071 "Grant still in use by backend domain\n");
1072 queue->info->broken = true;
1073 dev_alert(dev, "Disabled for further use\n");
1074 return -EINVAL;
1075 }
1076
1077 gnttab_release_grant_reference(&queue->gref_rx_head, ref);
1078
1079 rcu_read_lock();
1080 xdp_prog = rcu_dereference(queue->xdp_prog);
1081 if (xdp_prog) {
1082 if (!(rx->flags & XEN_NETRXF_more_data)) {
1083
1084 verdict = xennet_run_xdp(queue,
1085 skb_frag_page(&skb_shinfo(skb)->frags[0]),
1086 rx, xdp_prog, &xdp, need_xdp_flush);
1087 if (verdict != XDP_PASS)
1088 err = -EINVAL;
1089 } else {
1090
1091 err = -EINVAL;
1092 }
1093 }
1094 rcu_read_unlock();
1095
1096 __skb_queue_tail(list, skb);
1097
1098next:
1099 if (!(rx->flags & XEN_NETRXF_more_data))
1100 break;
1101
1102 if (cons + slots == rp) {
1103 if (net_ratelimit())
1104 dev_warn(dev, "Need more slots\n");
1105 err = -ENOENT;
1106 break;
1107 }
1108
1109 RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
1110 rx = &rx_local;
1111 skb = xennet_get_rx_skb(queue, cons + slots);
1112 ref = xennet_get_rx_ref(queue, cons + slots);
1113 slots++;
1114 }
1115
1116 if (unlikely(slots > max)) {
1117 if (net_ratelimit())
1118 dev_warn(dev, "Too many slots\n");
1119 err = -E2BIG;
1120 }
1121
1122 if (unlikely(err))
1123 xennet_set_rx_rsp_cons(queue, cons + slots);
1124
1125 return err;
1126}
1127
1128static int xennet_set_skb_gso(struct sk_buff *skb,
1129 struct xen_netif_extra_info *gso)
1130{
1131 if (!gso->u.gso.size) {
1132 if (net_ratelimit())
1133 pr_warn("GSO size must not be zero\n");
1134 return -EINVAL;
1135 }
1136
1137 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
1138 gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
1139 if (net_ratelimit())
1140 pr_warn("Bad GSO type %d\n", gso->u.gso.type);
1141 return -EINVAL;
1142 }
1143
1144 skb_shinfo(skb)->gso_size = gso->u.gso.size;
1145 skb_shinfo(skb)->gso_type =
1146 (gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
1147 SKB_GSO_TCPV4 :
1148 SKB_GSO_TCPV6;
1149
1150
1151 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1152 skb_shinfo(skb)->gso_segs = 0;
1153
1154 return 0;
1155}
1156
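/*
 * Attach the per-slot skbs collected by xennet_get_responses() as fragments
 * of the head skb, advancing rx.rsp_cons as each slot is consumed.
 */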
1157static int xennet_fill_frags(struct netfront_queue *queue,
1158 struct sk_buff *skb,
1159 struct sk_buff_head *list)
1160{
1161 RING_IDX cons = queue->rx.rsp_cons;
1162 struct sk_buff *nskb;
1163
1164 while ((nskb = __skb_dequeue(list))) {
1165 struct xen_netif_rx_response rx;
1166 skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
1167
1168 RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
1169
1170 if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
1171 unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
1172
1173 BUG_ON(pull_to < skb_headlen(skb));
1174 __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1175 }
1176 if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
1177 xennet_set_rx_rsp_cons(queue,
1178 ++cons + skb_queue_len(list));
1179 kfree_skb(nskb);
1180 return -ENOENT;
1181 }
1182
1183 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1184 skb_frag_page(nfrag),
1185 rx.offset, rx.status, PAGE_SIZE);
1186
1187 skb_shinfo(nskb)->nr_frags = 0;
1188 kfree_skb(nskb);
1189 }
1190
1191 xennet_set_rx_rsp_cons(queue, cons);
1192
1193 return 0;
1194}
1195
1196static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
1197{
1198 bool recalculate_partial_csum = false;
1199
	/*
	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
1206 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1207 struct netfront_info *np = netdev_priv(dev);
1208 atomic_inc(&np->rx_gso_checksum_fixup);
1209 skb->ip_summed = CHECKSUM_PARTIAL;
1210 recalculate_partial_csum = true;
1211 }
1212
1213
1214 if (skb->ip_summed != CHECKSUM_PARTIAL)
1215 return 0;
1216
1217 return skb_checksum_setup(skb, recalculate_partial_csum);
1218}
1219
1220static int handle_incoming_queue(struct netfront_queue *queue,
1221 struct sk_buff_head *rxq)
1222{
1223 struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
1224 int packets_dropped = 0;
1225 struct sk_buff *skb;
1226
1227 while ((skb = __skb_dequeue(rxq)) != NULL) {
1228 int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
1229
1230 if (pull_to > skb_headlen(skb))
1231 __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1232
1233
1234 skb->protocol = eth_type_trans(skb, queue->info->netdev);
1235 skb_reset_network_header(skb);
1236
1237 if (checksum_setup(queue->info->netdev, skb)) {
1238 kfree_skb(skb);
1239 packets_dropped++;
1240 queue->info->netdev->stats.rx_errors++;
1241 continue;
1242 }
1243
1244 u64_stats_update_begin(&rx_stats->syncp);
1245 rx_stats->packets++;
1246 rx_stats->bytes += skb->len;
1247 u64_stats_update_end(&rx_stats->syncp);
1248
1249
1250 napi_gro_receive(&queue->napi, skb);
1251 }
1252
1253 return packets_dropped;
1254}
1255
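/*
 * NAPI poll handler: drain up to "budget" packets from the RX ring, fix up
 * checksums, hand the skbs to the stack via GRO and refill the ring.
 */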
1256static int xennet_poll(struct napi_struct *napi, int budget)
1257{
1258 struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
1259 struct net_device *dev = queue->info->netdev;
1260 struct sk_buff *skb;
1261 struct netfront_rx_info rinfo;
1262 struct xen_netif_rx_response *rx = &rinfo.rx;
1263 struct xen_netif_extra_info *extras = rinfo.extras;
1264 RING_IDX i, rp;
1265 int work_done;
1266 struct sk_buff_head rxq;
1267 struct sk_buff_head errq;
1268 struct sk_buff_head tmpq;
1269 int err;
1270 bool need_xdp_flush = false;
1271
1272 spin_lock(&queue->rx_lock);
1273
1274 skb_queue_head_init(&rxq);
1275 skb_queue_head_init(&errq);
1276 skb_queue_head_init(&tmpq);
1277
1278 rp = queue->rx.sring->rsp_prod;
1279 if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
1280 dev_alert(&dev->dev, "Illegal number of responses %u\n",
1281 rp - queue->rx.rsp_cons);
1282 queue->info->broken = true;
1283 spin_unlock(&queue->rx_lock);
1284 return 0;
1285 }
1286 rmb();
1287
1288 i = queue->rx.rsp_cons;
1289 work_done = 0;
1290 while ((i != rp) && (work_done < budget)) {
1291 RING_COPY_RESPONSE(&queue->rx, i, rx);
1292 memset(extras, 0, sizeof(rinfo.extras));
1293
1294 err = xennet_get_responses(queue, &rinfo, rp, &tmpq,
1295 &need_xdp_flush);
1296
1297 if (unlikely(err)) {
1298 if (queue->info->broken) {
1299 spin_unlock(&queue->rx_lock);
1300 return 0;
1301 }
1302err:
1303 while ((skb = __skb_dequeue(&tmpq)))
1304 __skb_queue_tail(&errq, skb);
1305 dev->stats.rx_errors++;
1306 i = queue->rx.rsp_cons;
1307 continue;
1308 }
1309
1310 skb = __skb_dequeue(&tmpq);
1311
1312 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1313 struct xen_netif_extra_info *gso;
1314 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1315
1316 if (unlikely(xennet_set_skb_gso(skb, gso))) {
1317 __skb_queue_head(&tmpq, skb);
1318 xennet_set_rx_rsp_cons(queue,
1319 queue->rx.rsp_cons +
1320 skb_queue_len(&tmpq));
1321 goto err;
1322 }
1323 }
1324
1325 NETFRONT_SKB_CB(skb)->pull_to = rx->status;
1326 if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
1327 NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
1328
1329 skb_frag_off_set(&skb_shinfo(skb)->frags[0], rx->offset);
1330 skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
1331 skb->data_len = rx->status;
1332 skb->len += rx->status;
1333
1334 if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
1335 goto err;
1336
1337 if (rx->flags & XEN_NETRXF_csum_blank)
1338 skb->ip_summed = CHECKSUM_PARTIAL;
1339 else if (rx->flags & XEN_NETRXF_data_validated)
1340 skb->ip_summed = CHECKSUM_UNNECESSARY;
1341
1342 __skb_queue_tail(&rxq, skb);
1343
1344 i = queue->rx.rsp_cons + 1;
1345 xennet_set_rx_rsp_cons(queue, i);
1346 work_done++;
1347 }
1348 if (need_xdp_flush)
1349 xdp_do_flush();
1350
1351 __skb_queue_purge(&errq);
1352
1353 work_done -= handle_incoming_queue(queue, &rxq);
1354
1355 xennet_alloc_rx_buffers(queue);
1356
1357 if (work_done < budget) {
1358 int more_to_do = 0;
1359
1360 napi_complete_done(napi, work_done);
1361
1362 RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
1363 if (more_to_do)
1364 napi_schedule(napi);
1365 }
1366
1367 spin_unlock(&queue->rx_lock);
1368
1369 return work_done;
1370}
1371
1372static int xennet_change_mtu(struct net_device *dev, int mtu)
1373{
1374 int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;
1375
1376 if (mtu > max)
1377 return -EINVAL;
1378 dev->mtu = mtu;
1379 return 0;
1380}
1381
1382static void xennet_get_stats64(struct net_device *dev,
1383 struct rtnl_link_stats64 *tot)
1384{
1385 struct netfront_info *np = netdev_priv(dev);
1386 int cpu;
1387
1388 for_each_possible_cpu(cpu) {
1389 struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
1390 struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
1391 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1392 unsigned int start;
1393
1394 do {
1395 start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
1396 tx_packets = tx_stats->packets;
1397 tx_bytes = tx_stats->bytes;
1398 } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
1399
1400 do {
1401 start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
1402 rx_packets = rx_stats->packets;
1403 rx_bytes = rx_stats->bytes;
1404 } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
1405
1406 tot->rx_packets += rx_packets;
1407 tot->tx_packets += tx_packets;
1408 tot->rx_bytes += rx_bytes;
1409 tot->tx_bytes += tx_bytes;
1410 }
1411
1412 tot->rx_errors = dev->stats.rx_errors;
1413 tot->tx_dropped = dev->stats.tx_dropped;
1414}
1415
1416static void xennet_release_tx_bufs(struct netfront_queue *queue)
1417{
1418 struct sk_buff *skb;
1419 int i;
1420
1421 for (i = 0; i < NET_TX_RING_SIZE; i++) {
		/* Skip over entries which are actually freelist references */
		if (!queue->tx_skbs[i])
			continue;
1425
1426 skb = queue->tx_skbs[i];
1427 queue->tx_skbs[i] = NULL;
1428 get_page(queue->grant_tx_page[i]);
1429 gnttab_end_foreign_access(queue->grant_tx_ref[i],
1430 queue->grant_tx_page[i]);
1431 queue->grant_tx_page[i] = NULL;
1432 queue->grant_tx_ref[i] = INVALID_GRANT_REF;
1433 add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
1434 dev_kfree_skb_irq(skb);
1435 }
1436}
1437
1438static void xennet_release_rx_bufs(struct netfront_queue *queue)
1439{
1440 int id, ref;
1441
1442 spin_lock_bh(&queue->rx_lock);
1443
1444 for (id = 0; id < NET_RX_RING_SIZE; id++) {
1445 struct sk_buff *skb;
1446 struct page *page;
1447
1448 skb = queue->rx_skbs[id];
1449 if (!skb)
1450 continue;
1451
1452 ref = queue->grant_rx_ref[id];
1453 if (ref == INVALID_GRANT_REF)
1454 continue;
1455
1456 page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
1457
		/* gnttab_end_foreign_access() needs a page ref until
		 * foreign access is ended (which may be deferred).
		 */
		get_page(page);
1462 gnttab_end_foreign_access(ref, page);
1463 queue->grant_rx_ref[id] = INVALID_GRANT_REF;
1464
1465 kfree_skb(skb);
1466 }
1467
1468 spin_unlock_bh(&queue->rx_lock);
1469}
1470
1471static netdev_features_t xennet_fix_features(struct net_device *dev,
1472 netdev_features_t features)
1473{
1474 struct netfront_info *np = netdev_priv(dev);
1475
1476 if (features & NETIF_F_SG &&
1477 !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
1478 features &= ~NETIF_F_SG;
1479
1480 if (features & NETIF_F_IPV6_CSUM &&
1481 !xenbus_read_unsigned(np->xbdev->otherend,
1482 "feature-ipv6-csum-offload", 0))
1483 features &= ~NETIF_F_IPV6_CSUM;
1484
1485 if (features & NETIF_F_TSO &&
1486 !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
1487 features &= ~NETIF_F_TSO;
1488
1489 if (features & NETIF_F_TSO6 &&
1490 !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
1491 features &= ~NETIF_F_TSO6;
1492
1493 return features;
1494}
1495
1496static int xennet_set_features(struct net_device *dev,
1497 netdev_features_t features)
1498{
1499 if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
1500 netdev_info(dev, "Reducing MTU because no SG offload");
1501 dev->mtu = ETH_DATA_LEN;
1502 }
1503
1504 return 0;
1505}
1506
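/*
 * Interrupt handling: TX events reclaim completed transmits, RX events
 * schedule NAPI.  The eoi flag is cleared whenever real work was found so
 * that spurious events can be reported via xen_irq_lateeoi().
 */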
1507static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi)
1508{
1509 unsigned long flags;
1510
1511 if (unlikely(queue->info->broken))
1512 return false;
1513
1514 spin_lock_irqsave(&queue->tx_lock, flags);
1515 if (xennet_tx_buf_gc(queue))
1516 *eoi = 0;
1517 spin_unlock_irqrestore(&queue->tx_lock, flags);
1518
1519 return true;
1520}
1521
1522static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1523{
1524 unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1525
1526 if (likely(xennet_handle_tx(dev_id, &eoiflag)))
1527 xen_irq_lateeoi(irq, eoiflag);
1528
1529 return IRQ_HANDLED;
1530}
1531
1532static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
1533{
1534 unsigned int work_queued;
1535 unsigned long flags;
1536
1537 if (unlikely(queue->info->broken))
1538 return false;
1539
1540 spin_lock_irqsave(&queue->rx_cons_lock, flags);
1541 work_queued = XEN_RING_NR_UNCONSUMED_RESPONSES(&queue->rx);
1542 if (work_queued > queue->rx_rsp_unconsumed) {
1543 queue->rx_rsp_unconsumed = work_queued;
1544 *eoi = 0;
1545 } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) {
1546 const struct device *dev = &queue->info->netdev->dev;
1547
1548 spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1549 dev_alert(dev, "RX producer index going backwards\n");
1550 dev_alert(dev, "Disabled for further use\n");
1551 queue->info->broken = true;
1552 return false;
1553 }
1554 spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1555
1556 if (likely(netif_carrier_ok(queue->info->netdev) && work_queued))
1557 napi_schedule(&queue->napi);
1558
1559 return true;
1560}
1561
1562static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1563{
1564 unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1565
1566 if (likely(xennet_handle_rx(dev_id, &eoiflag)))
1567 xen_irq_lateeoi(irq, eoiflag);
1568
1569 return IRQ_HANDLED;
1570}
1571
1572static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1573{
1574 unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1575
1576 if (xennet_handle_tx(dev_id, &eoiflag) &&
1577 xennet_handle_rx(dev_id, &eoiflag))
1578 xen_irq_lateeoi(irq, eoiflag);
1579
1580 return IRQ_HANDLED;
1581}
1582
1583#ifdef CONFIG_NET_POLL_CONTROLLER
1584static void xennet_poll_controller(struct net_device *dev)
1585{
1586
1587 struct netfront_info *info = netdev_priv(dev);
1588 unsigned int num_queues = dev->real_num_tx_queues;
1589 unsigned int i;
1590
1591 if (info->broken)
1592 return;
1593
1594 for (i = 0; i < num_queues; ++i)
1595 xennet_interrupt(0, &info->queues[i]);
1596}
1597#endif
1598
1599#define NETBACK_XDP_HEADROOM_DISABLE 0
1600#define NETBACK_XDP_HEADROOM_ENABLE 1
1601
1602static int talk_to_netback_xdp(struct netfront_info *np, int xdp)
1603{
1604 int err;
1605 unsigned short headroom;
1606
1607 headroom = xdp ? XDP_PACKET_HEADROOM : 0;
1608 err = xenbus_printf(XBT_NIL, np->xbdev->nodename,
1609 "xdp-headroom", "%hu",
1610 headroom);
1611 if (err)
1612 pr_warn("Error writing xdp-headroom\n");
1613
1614 return err;
1615}
1616
1617static int xennet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1618 struct netlink_ext_ack *extack)
1619{
1620 unsigned long max_mtu = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
1621 struct netfront_info *np = netdev_priv(dev);
1622 struct bpf_prog *old_prog;
1623 unsigned int i, err;
1624
1625 if (dev->mtu > max_mtu) {
1626 netdev_warn(dev, "XDP requires MTU less than %lu\n", max_mtu);
1627 return -EINVAL;
1628 }
1629
1630 if (!np->netback_has_xdp_headroom)
1631 return 0;
1632
1633 xenbus_switch_state(np->xbdev, XenbusStateReconfiguring);
1634
1635 err = talk_to_netback_xdp(np, prog ? NETBACK_XDP_HEADROOM_ENABLE :
1636 NETBACK_XDP_HEADROOM_DISABLE);
1637 if (err)
1638 return err;
1639
1640
1641 wait_event(module_wq,
1642 xenbus_read_driver_state(np->xbdev->otherend) ==
1643 XenbusStateReconfigured);
1644 np->netfront_xdp_enabled = true;
1645
1646 old_prog = rtnl_dereference(np->queues[0].xdp_prog);
1647
1648 if (prog)
1649 bpf_prog_add(prog, dev->real_num_tx_queues);
1650
1651 for (i = 0; i < dev->real_num_tx_queues; ++i)
1652 rcu_assign_pointer(np->queues[i].xdp_prog, prog);
1653
1654 if (old_prog)
1655 for (i = 0; i < dev->real_num_tx_queues; ++i)
1656 bpf_prog_put(old_prog);
1657
1658 xenbus_switch_state(np->xbdev, XenbusStateConnected);
1659
1660 return 0;
1661}
1662
1663static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1664{
1665 struct netfront_info *np = netdev_priv(dev);
1666
1667 if (np->broken)
1668 return -ENODEV;
1669
1670 switch (xdp->command) {
1671 case XDP_SETUP_PROG:
1672 return xennet_xdp_set(dev, xdp->prog, xdp->extack);
1673 default:
1674 return -EINVAL;
1675 }
1676}
1677
1678static const struct net_device_ops xennet_netdev_ops = {
1679 .ndo_uninit = xennet_uninit,
1680 .ndo_open = xennet_open,
1681 .ndo_stop = xennet_close,
1682 .ndo_start_xmit = xennet_start_xmit,
1683 .ndo_change_mtu = xennet_change_mtu,
1684 .ndo_get_stats64 = xennet_get_stats64,
1685 .ndo_set_mac_address = eth_mac_addr,
1686 .ndo_validate_addr = eth_validate_addr,
1687 .ndo_fix_features = xennet_fix_features,
1688 .ndo_set_features = xennet_set_features,
1689 .ndo_select_queue = xennet_select_queue,
1690 .ndo_bpf = xennet_xdp,
1691 .ndo_xdp_xmit = xennet_xdp_xmit,
1692#ifdef CONFIG_NET_POLL_CONTROLLER
1693 .ndo_poll_controller = xennet_poll_controller,
1694#endif
1695};
1696
1697static void xennet_free_netdev(struct net_device *netdev)
1698{
1699 struct netfront_info *np = netdev_priv(netdev);
1700
1701 free_percpu(np->rx_stats);
1702 free_percpu(np->tx_stats);
1703 free_netdev(netdev);
1704}
1705
1706static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1707{
1708 int err;
1709 struct net_device *netdev;
1710 struct netfront_info *np;
1711
1712 netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
1713 if (!netdev)
1714 return ERR_PTR(-ENOMEM);
1715
1716 np = netdev_priv(netdev);
1717 np->xbdev = dev;
1718
1719 np->queues = NULL;
1720
1721 err = -ENOMEM;
1722 np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1723 if (np->rx_stats == NULL)
1724 goto exit;
1725 np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1726 if (np->tx_stats == NULL)
1727 goto exit;
1728
1729 netdev->netdev_ops = &xennet_netdev_ops;
1730
1731 netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1732 NETIF_F_GSO_ROBUST;
1733 netdev->hw_features = NETIF_F_SG |
1734 NETIF_F_IPV6_CSUM |
1735 NETIF_F_TSO | NETIF_F_TSO6;
1736
1737
1738
1739
1740
1741
1742
1743 netdev->features |= netdev->hw_features;
1744
1745 netdev->ethtool_ops = &xennet_ethtool_ops;
1746 netdev->min_mtu = ETH_MIN_MTU;
1747 netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
1748 SET_NETDEV_DEV(netdev, &dev->dev);
1749
1750 np->netdev = netdev;
1751 np->netfront_xdp_enabled = false;
1752
1753 netif_carrier_off(netdev);
1754
1755 do {
1756 xenbus_switch_state(dev, XenbusStateInitialising);
1757 err = wait_event_timeout(module_wq,
1758 xenbus_read_driver_state(dev->otherend) !=
1759 XenbusStateClosed &&
1760 xenbus_read_driver_state(dev->otherend) !=
1761 XenbusStateUnknown, XENNET_TIMEOUT);
1762 } while (!err);
1763
1764 return netdev;
1765
1766 exit:
1767 xennet_free_netdev(netdev);
1768 return ERR_PTR(err);
1769}
1770
/*
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.
 */
1776static int netfront_probe(struct xenbus_device *dev,
1777 const struct xenbus_device_id *id)
1778{
1779 int err;
1780 struct net_device *netdev;
1781 struct netfront_info *info;
1782
1783 netdev = xennet_create_dev(dev);
1784 if (IS_ERR(netdev)) {
1785 err = PTR_ERR(netdev);
1786 xenbus_dev_fatal(dev, err, "creating netdev");
1787 return err;
1788 }
1789
1790 info = netdev_priv(netdev);
1791 dev_set_drvdata(&dev->dev, info);
1792#ifdef CONFIG_SYSFS
1793 info->netdev->sysfs_groups[0] = &xennet_dev_group;
1794#endif
1795
1796 return 0;
1797}
1798
1799static void xennet_end_access(int ref, void *page)
1800{
1801
1802 if (ref != INVALID_GRANT_REF)
1803 gnttab_end_foreign_access(ref, virt_to_page(page));
1804}
1805
1806static void xennet_disconnect_backend(struct netfront_info *info)
1807{
1808 unsigned int i = 0;
1809 unsigned int num_queues = info->netdev->real_num_tx_queues;
1810
1811 netif_carrier_off(info->netdev);
1812
1813 for (i = 0; i < num_queues && info->queues; ++i) {
1814 struct netfront_queue *queue = &info->queues[i];
1815
1816 del_timer_sync(&queue->rx_refill_timer);
1817
1818 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1819 unbind_from_irqhandler(queue->tx_irq, queue);
1820 if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
1821 unbind_from_irqhandler(queue->tx_irq, queue);
1822 unbind_from_irqhandler(queue->rx_irq, queue);
1823 }
1824 queue->tx_evtchn = queue->rx_evtchn = 0;
1825 queue->tx_irq = queue->rx_irq = 0;
1826
1827 if (netif_running(info->netdev))
1828 napi_synchronize(&queue->napi);
1829
1830 xennet_release_tx_bufs(queue);
1831 xennet_release_rx_bufs(queue);
1832 gnttab_free_grant_references(queue->gref_tx_head);
1833 gnttab_free_grant_references(queue->gref_rx_head);
1834
1835
1836 xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
1837 xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
1838
1839 queue->tx_ring_ref = INVALID_GRANT_REF;
1840 queue->rx_ring_ref = INVALID_GRANT_REF;
1841 queue->tx.sring = NULL;
1842 queue->rx.sring = NULL;
1843
1844 page_pool_destroy(queue->page_pool);
1845 }
1846}
1847
/*
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to
 * the rest of the kernel.
 */
1854static int netfront_resume(struct xenbus_device *dev)
1855{
1856 struct netfront_info *info = dev_get_drvdata(&dev->dev);
1857
1858 dev_dbg(&dev->dev, "%s\n", dev->nodename);
1859
1860 netif_tx_lock_bh(info->netdev);
1861 netif_device_detach(info->netdev);
1862 netif_tx_unlock_bh(info->netdev);
1863
1864 xennet_disconnect_backend(info);
1865 return 0;
1866}
1867
1868static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1869{
1870 char *s, *e, *macstr;
1871 int i;
1872
1873 macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1874 if (IS_ERR(macstr))
1875 return PTR_ERR(macstr);
1876
1877 for (i = 0; i < ETH_ALEN; i++) {
1878 mac[i] = simple_strtoul(s, &e, 16);
1879 if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1880 kfree(macstr);
1881 return -ENOENT;
1882 }
1883 s = e+1;
1884 }
1885
1886 kfree(macstr);
1887 return 0;
1888}
1889
1890static int setup_netfront_single(struct netfront_queue *queue)
1891{
1892 int err;
1893
1894 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1895 if (err < 0)
1896 goto fail;
1897
1898 err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1899 xennet_interrupt, 0,
1900 queue->info->netdev->name,
1901 queue);
1902 if (err < 0)
1903 goto bind_fail;
1904 queue->rx_evtchn = queue->tx_evtchn;
1905 queue->rx_irq = queue->tx_irq = err;
1906
1907 return 0;
1908
1909bind_fail:
1910 xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1911 queue->tx_evtchn = 0;
1912fail:
1913 return err;
1914}
1915
1916static int setup_netfront_split(struct netfront_queue *queue)
1917{
1918 int err;
1919
1920 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1921 if (err < 0)
1922 goto fail;
1923 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
1924 if (err < 0)
1925 goto alloc_rx_evtchn_fail;
1926
1927 snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
1928 "%s-tx", queue->name);
1929 err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1930 xennet_tx_interrupt, 0,
1931 queue->tx_irq_name, queue);
1932 if (err < 0)
1933 goto bind_tx_fail;
1934 queue->tx_irq = err;
1935
1936 snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
1937 "%s-rx", queue->name);
1938 err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn,
1939 xennet_rx_interrupt, 0,
1940 queue->rx_irq_name, queue);
1941 if (err < 0)
1942 goto bind_rx_fail;
1943 queue->rx_irq = err;
1944
1945 return 0;
1946
1947bind_rx_fail:
1948 unbind_from_irqhandler(queue->tx_irq, queue);
1949 queue->tx_irq = 0;
1950bind_tx_fail:
1951 xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
1952 queue->rx_evtchn = 0;
1953alloc_rx_evtchn_fail:
1954 xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1955 queue->tx_evtchn = 0;
1956fail:
1957 return err;
1958}
1959
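/*
 * Allocate and grant the shared TX/RX rings for one queue and bind its
 * event channel(s), preferring split TX/RX channels when the backend
 * advertises feature-split-event-channels.
 */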
1960static int setup_netfront(struct xenbus_device *dev,
1961 struct netfront_queue *queue, unsigned int feature_split_evtchn)
1962{
1963 struct xen_netif_tx_sring *txs;
1964 struct xen_netif_rx_sring *rxs;
1965 int err;
1966
1967 queue->tx_ring_ref = INVALID_GRANT_REF;
1968 queue->rx_ring_ref = INVALID_GRANT_REF;
1969 queue->rx.sring = NULL;
1970 queue->tx.sring = NULL;
1971
1972 err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&txs,
1973 1, &queue->tx_ring_ref);
1974 if (err)
1975 goto fail;
1976
1977 XEN_FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
1978
1979 err = xenbus_setup_ring(dev, GFP_NOIO | __GFP_HIGH, (void **)&rxs,
1980 1, &queue->rx_ring_ref);
1981 if (err)
1982 goto fail;
1983
1984 XEN_FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
1985
1986 if (feature_split_evtchn)
1987 err = setup_netfront_split(queue);
	/* setup single event channel if
	 *  a) feature-split-event-channels == 0
	 *  b) feature-split-event-channels == 1 but failed to setup
	 */
1992 if (!feature_split_evtchn || err)
1993 err = setup_netfront_single(queue);
1994
1995 if (err)
1996 goto fail;
1997
1998 return 0;
1999
2000 fail:
2001 xenbus_teardown_ring((void **)&queue->rx.sring, 1, &queue->rx_ring_ref);
2002 xenbus_teardown_ring((void **)&queue->tx.sring, 1, &queue->tx_ring_ref);
2003
2004 return err;
2005}
2006
2007
2008
2009
2010
2011static int xennet_init_queue(struct netfront_queue *queue)
2012{
2013 unsigned short i;
2014 int err = 0;
2015 char *devid;
2016
2017 spin_lock_init(&queue->tx_lock);
2018 spin_lock_init(&queue->rx_lock);
2019 spin_lock_init(&queue->rx_cons_lock);
2020
2021 timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
2022
2023 devid = strrchr(queue->info->xbdev->nodename, '/') + 1;
2024 snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
2025 devid, queue->id);
2026
2027
2028 queue->tx_skb_freelist = 0;
2029 queue->tx_pend_queue = TX_LINK_NONE;
2030 for (i = 0; i < NET_TX_RING_SIZE; i++) {
2031 queue->tx_link[i] = i + 1;
2032 queue->grant_tx_ref[i] = INVALID_GRANT_REF;
2033 queue->grant_tx_page[i] = NULL;
2034 }
2035 queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
2036
2037
2038 for (i = 0; i < NET_RX_RING_SIZE; i++) {
2039 queue->rx_skbs[i] = NULL;
2040 queue->grant_rx_ref[i] = INVALID_GRANT_REF;
2041 }
2042
2043
2044 if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
2045 &queue->gref_tx_head) < 0) {
2046 pr_alert("can't alloc tx grant refs\n");
2047 err = -ENOMEM;
2048 goto exit;
2049 }
2050
2051
2052 if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
2053 &queue->gref_rx_head) < 0) {
2054 pr_alert("can't alloc rx grant refs\n");
2055 err = -ENOMEM;
2056 goto exit_free_tx;
2057 }
2058
2059 return 0;
2060
2061 exit_free_tx:
2062 gnttab_free_grant_references(queue->gref_tx_head);
2063 exit:
2064 return err;
2065}
2066
2067static int write_queue_xenstore_keys(struct netfront_queue *queue,
2068 struct xenbus_transaction *xbt, int write_hierarchical)
2069{
2070
2071
2072
2073
2074 struct xenbus_device *dev = queue->info->xbdev;
2075 int err;
2076 const char *message;
2077 char *path;
2078 size_t pathsize;
2079
2080
2081 if (write_hierarchical) {
2082 pathsize = strlen(dev->nodename) + 10;
2083 path = kzalloc(pathsize, GFP_KERNEL);
2084 if (!path) {
2085 err = -ENOMEM;
2086 message = "out of memory while writing ring references";
2087 goto error;
2088 }
2089 snprintf(path, pathsize, "%s/queue-%u",
2090 dev->nodename, queue->id);
2091 } else {
2092 path = (char *)dev->nodename;
2093 }
2094
2095
2096 err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
2097 queue->tx_ring_ref);
2098 if (err) {
2099 message = "writing tx-ring-ref";
2100 goto error;
2101 }
2102
2103 err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
2104 queue->rx_ring_ref);
2105 if (err) {
2106 message = "writing rx-ring-ref";
2107 goto error;
2108 }
2109
2110
2111
2112
2113 if (queue->tx_evtchn == queue->rx_evtchn) {
2114
2115 err = xenbus_printf(*xbt, path,
2116 "event-channel", "%u", queue->tx_evtchn);
2117 if (err) {
2118 message = "writing event-channel";
2119 goto error;
2120 }
2121 } else {
2122
2123 err = xenbus_printf(*xbt, path,
2124 "event-channel-tx", "%u", queue->tx_evtchn);
2125 if (err) {
2126 message = "writing event-channel-tx";
2127 goto error;
2128 }
2129
2130 err = xenbus_printf(*xbt, path,
2131 "event-channel-rx", "%u", queue->rx_evtchn);
2132 if (err) {
2133 message = "writing event-channel-rx";
2134 goto error;
2135 }
2136 }
2137
2138 if (write_hierarchical)
2139 kfree(path);
2140 return 0;
2141
2142error:
2143 if (write_hierarchical)
2144 kfree(path);
2145 xenbus_dev_fatal(dev, err, "%s", message);
2146 return err;
2147}
2148
2149
2150
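/*
 * Create the page_pool backing this queue's RX buffers and register it as
 * the XDP memory model for the queue's xdp_rxq.
 */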
2151static int xennet_create_page_pool(struct netfront_queue *queue)
2152{
2153 int err;
2154 struct page_pool_params pp_params = {
2155 .order = 0,
2156 .flags = 0,
2157 .pool_size = NET_RX_RING_SIZE,
2158 .nid = NUMA_NO_NODE,
2159 .dev = &queue->info->netdev->dev,
2160 .offset = XDP_PACKET_HEADROOM,
2161 .max_len = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
2162 };
2163
2164 queue->page_pool = page_pool_create(&pp_params);
2165 if (IS_ERR(queue->page_pool)) {
2166 err = PTR_ERR(queue->page_pool);
2167 queue->page_pool = NULL;
2168 return err;
2169 }
2170
2171 err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev,
2172 queue->id, 0);
2173 if (err) {
2174 netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n");
2175 goto err_free_pp;
2176 }
2177
2178 err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq,
2179 MEM_TYPE_PAGE_POOL, queue->page_pool);
2180 if (err) {
2181 netdev_err(queue->info->netdev, "xdp_rxq_info_reg_mem_model failed\n");
2182 goto err_unregister_rxq;
2183 }
2184 return 0;
2185
2186err_unregister_rxq:
2187 xdp_rxq_info_unreg(&queue->xdp_rxq);
2188err_free_pp:
2189 page_pool_destroy(queue->page_pool);
2190 queue->page_pool = NULL;
2191 return err;
2192}
2193
2194static int xennet_create_queues(struct netfront_info *info,
2195 unsigned int *num_queues)
2196{
2197 unsigned int i;
2198 int ret;
2199
2200 info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
2201 GFP_KERNEL);
2202 if (!info->queues)
2203 return -ENOMEM;
2204
2205 for (i = 0; i < *num_queues; i++) {
2206 struct netfront_queue *queue = &info->queues[i];
2207
2208 queue->id = i;
2209 queue->info = info;
2210
2211 ret = xennet_init_queue(queue);
2212 if (ret < 0) {
2213 dev_warn(&info->xbdev->dev,
2214 "only created %d queues\n", i);
2215 *num_queues = i;
2216 break;
2217 }
2218
2219
2220 ret = xennet_create_page_pool(queue);
2221 if (ret < 0) {
2222 dev_err(&info->xbdev->dev, "can't allocate page pool\n");
2223 *num_queues = i;
2224 return ret;
2225 }
2226
2227 netif_napi_add(queue->info->netdev, &queue->napi,
2228 xennet_poll, 64);
2229 if (netif_running(info->netdev))
2230 napi_enable(&queue->napi);
2231 }
2232
2233 netif_set_real_num_tx_queues(info->netdev, *num_queues);
2234
2235 if (*num_queues == 0) {
2236 dev_err(&info->xbdev->dev, "no queues\n");
2237 return -EINVAL;
2238 }
2239 return 0;
2240}
2241
/* Common code used when first setting up, and when resuming. */
2243static int talk_to_netback(struct xenbus_device *dev,
2244 struct netfront_info *info)
2245{
2246 const char *message;
2247 struct xenbus_transaction xbt;
2248 int err;
2249 unsigned int feature_split_evtchn;
2250 unsigned int i = 0;
2251 unsigned int max_queues = 0;
2252 struct netfront_queue *queue = NULL;
2253 unsigned int num_queues = 1;
2254 u8 addr[ETH_ALEN];
2255
2256 info->netdev->irq = 0;
2257
2258 /* Check if the backend is trusted. */
2259 info->bounce = !xennet_trusted ||
2260 !xenbus_read_unsigned(dev->nodename, "trusted", 1);
2261
2262 /* Check if the backend supports multiple queues. */
2263 max_queues = xenbus_read_unsigned(info->xbdev->otherend,
2264 "multi-queue-max-queues", 1);
2265 num_queues = min(max_queues, xennet_max_queues);
2266
2267 /* Check feature-split-event-channels. */
2268 feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
2269 "feature-split-event-channels", 0);
2270
2271 /* Read the MAC address. */
2272 err = xen_net_read_mac(dev, addr);
2273 if (err) {
2274 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
2275 goto out_unlocked;
2276 }
2277 eth_hw_addr_set(info->netdev, addr);
2278
2279 info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend,
2280 "feature-xdp-headroom", 0);
2281 if (info->netback_has_xdp_headroom) {
2282 /* Tell the backend whether XDP headroom is currently required. */
2283 err = talk_to_netback_xdp(info, info->netfront_xdp_enabled ?
2284 NETBACK_XDP_HEADROOM_ENABLE :
2285 NETBACK_XDP_HEADROOM_DISABLE);
2286 if (err)
2287 goto out_unlocked;
2288 }
2289
2290 rtnl_lock();
2291 if (info->queues)
2292 xennet_destroy_queues(info);
2293
2294 /* In case of a reconnect, reset the "broken" indicator. */
2295 info->broken = false;
2296
2297 err = xennet_create_queues(info, &num_queues);
2298 if (err < 0) {
2299 xenbus_dev_fatal(dev, err, "creating queues");
2300 kfree(info->queues);
2301 info->queues = NULL;
2302 goto out;
2303 }
2304 rtnl_unlock();
2305
2306 /* Create shared rings and allocate event channels -- for each queue. */
2307 for (i = 0; i < num_queues; ++i) {
2308 queue = &info->queues[i];
2309 err = setup_netfront(dev, queue, feature_split_evtchn);
2310 if (err)
2311 goto destroy_ring;
2312 }
2313
2314again:
2315 err = xenbus_transaction_start(&xbt);
2316 if (err) {
2317 xenbus_dev_fatal(dev, err, "starting transaction");
2318 goto destroy_ring;
2319 }
2320
2321 if (xenbus_exists(XBT_NIL,
2322 info->xbdev->otherend, "multi-queue-max-queues")) {
2323 /* Write the number of queues. */
2324 err = xenbus_printf(xbt, dev->nodename,
2325 "multi-queue-num-queues", "%u", num_queues);
2326 if (err) {
2327 message = "writing multi-queue-num-queues";
2328 goto abort_transaction_no_dev_fatal;
2329 }
2330 }
2331
2332 if (num_queues == 1) {
2333 err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0);
2334 if (err)
2335 goto abort_transaction_no_dev_fatal;
2336 } else {
2337 /* Write the keys for each queue. */
2338 for (i = 0; i < num_queues; ++i) {
2339 queue = &info->queues[i];
2340 err = write_queue_xenstore_keys(queue, &xbt, 1);
2341 if (err)
2342 goto abort_transaction_no_dev_fatal;
2343 }
2344 }
2345
2346 /* The remaining keys are not queue-specific. */
2347 err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
2348 1);
2349 if (err) {
2350 message = "writing request-rx-copy";
2351 goto abort_transaction;
2352 }
2353
2354 err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
2355 if (err) {
2356 message = "writing feature-rx-notify";
2357 goto abort_transaction;
2358 }
2359
2360 err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
2361 if (err) {
2362 message = "writing feature-sg";
2363 goto abort_transaction;
2364 }
2365
2366 err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
2367 if (err) {
2368 message = "writing feature-gso-tcpv4";
2369 goto abort_transaction;
2370 }
2371
2372 err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
2373 if (err) {
2374 message = "writing feature-gso-tcpv6";
2375 goto abort_transaction;
2376 }
2377
2378 err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
2379 "1");
2380 if (err) {
2381 message = "writing feature-ipv6-csum-offload";
2382 goto abort_transaction;
2383 }
2384
2385 err = xenbus_transaction_end(xbt, 0);
2386 if (err) {
2387 if (err == -EAGAIN)
2388 goto again;
2389 xenbus_dev_fatal(dev, err, "completing transaction");
2390 goto destroy_ring;
2391 }
2392
2393 return 0;
2394
2395 abort_transaction:
2396 xenbus_dev_fatal(dev, err, "%s", message);
2397abort_transaction_no_dev_fatal:
2398 xenbus_transaction_end(xbt, 1);
2399 destroy_ring:
2400 xennet_disconnect_backend(info);
2401 rtnl_lock();
2402 xennet_destroy_queues(info);
2403 out:
2404 rtnl_unlock();
2405out_unlocked:
2406 device_unregister(&dev->dev);
2407 return err;
2408}
2409
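/* Connect (or reconnect) to the backend: negotiate features over xenstore,
 * set up rings and event channels, register the net device on first use,
 * then kick every queue so pending TX work is reaped and RX buffers are
 * posted.
 */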
2410static int xennet_connect(struct net_device *dev)
2411{
2412 struct netfront_info *np = netdev_priv(dev);
2413 unsigned int num_queues = 0;
2414 int err;
2415 unsigned int j = 0;
2416 struct netfront_queue *queue = NULL;
2417
2418 if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) {
2419 dev_info(&dev->dev,
2420 "backend does not support copying receive path\n");
2421 return -ENODEV;
2422 }
2423
2424 err = talk_to_netback(np->xbdev, np);
2425 if (err)
2426 return err;
2427 if (np->netback_has_xdp_headroom)
2428 pr_info("backend supports XDP headroom\n");
2429 if (np->bounce)
2430 dev_info(&np->xbdev->dev,
2431 "bouncing transmitted data to zeroed pages\n");
2432
2433 /* talk_to_netback() sets the correct number of queues. */
2434 num_queues = dev->real_num_tx_queues;
2435
2436 if (dev->reg_state == NETREG_UNINITIALIZED) {
2437 err = register_netdev(dev);
2438 if (err) {
2439 pr_warn("%s: register_netdev err=%d\n", __func__, err);
2440 device_unregister(&np->xbdev->dev);
2441 return err;
2442 }
2443 }
2444
2445 rtnl_lock();
2446 netdev_update_features(dev);
2447 rtnl_unlock();
2448
2449 /*
2450 * All public and private state should now be sane.  Get
2451 * ready to start sending and receiving packets and give the
2452 * driver domain a kick because we've probably just requeued
2453 * some packets.
2454 */
2455 netif_tx_lock_bh(np->netdev);
2456 netif_device_attach(np->netdev);
2457 netif_tx_unlock_bh(np->netdev);
2458
2459 netif_carrier_on(np->netdev);
2460 for (j = 0; j < num_queues; ++j) {
2461 queue = &np->queues[j];
2462
2463 notify_remote_via_irq(queue->tx_irq);
2464 if (queue->tx_irq != queue->rx_irq)
2465 notify_remote_via_irq(queue->rx_irq);
2466
2467 spin_lock_irq(&queue->tx_lock);
2468 xennet_tx_buf_gc(queue);
2469 spin_unlock_irq(&queue->tx_lock);
2470
2471 spin_lock_bh(&queue->rx_lock);
2472 xennet_alloc_rx_buffers(queue);
2473 spin_unlock_bh(&queue->rx_lock);
2474 }
2475
2476 return 0;
2477}
2478
2479/*
2480 * Callback received when the backend's state changes.
2481 */
2482static void netback_changed(struct xenbus_device *dev,
2483 enum xenbus_state backend_state)
2484{
2485 struct netfront_info *np = dev_get_drvdata(&dev->dev);
2486 struct net_device *netdev = np->netdev;
2487
2488 dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
2489
2490 wake_up_all(&module_wq);
2491
2492 switch (backend_state) {
2493 case XenbusStateInitialising:
2494 case XenbusStateInitialised:
2495 case XenbusStateReconfiguring:
2496 case XenbusStateReconfigured:
2497 case XenbusStateUnknown:
2498 break;
2499
2500 case XenbusStateInitWait:
2501 if (dev->state != XenbusStateInitialising)
2502 break;
2503 if (xennet_connect(netdev) != 0)
2504 break;
2505 xenbus_switch_state(dev, XenbusStateConnected);
2506 break;
2507
2508 case XenbusStateConnected:
2509 netdev_notify_peers(netdev);
2510 break;
2511
2512 case XenbusStateClosed:
2513 if (dev->state == XenbusStateClosed)
2514 break;
2515 fallthrough;
2516 case XenbusStateClosing:
2517 xenbus_frontend_closed(dev);
2518 break;
2519 }
2520}
2521
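/* ethtool statistics: each entry maps a string name to the offset of a
 * counter within struct netfront_info.
 */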
2522static const struct xennet_stat {
2523 char name[ETH_GSTRING_LEN];
2524 u16 offset;
2525} xennet_stats[] = {
2526 {
2527 "rx_gso_checksum_fixup",
2528 offsetof(struct netfront_info, rx_gso_checksum_fixup)
2529 },
2530};
2531
2532static int xennet_get_sset_count(struct net_device *dev, int string_set)
2533{
2534 switch (string_set) {
2535 case ETH_SS_STATS:
2536 return ARRAY_SIZE(xennet_stats);
2537 default:
2538 return -EINVAL;
2539 }
2540}
2541
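/* Report each counter listed in xennet_stats[], reading it from its
 * recorded offset inside the netdev private (netfront_info) area.
 */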
2542static void xennet_get_ethtool_stats(struct net_device *dev,
2543 struct ethtool_stats *stats, u64 *data)
2544{
2545 void *np = netdev_priv(dev);
2546 int i;
2547
2548 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2549 data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
2550}
2551
2552static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2553{
2554 int i;
2555
2556 switch (stringset) {
2557 case ETH_SS_STATS:
2558 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2559 memcpy(data + i * ETH_GSTRING_LEN,
2560 xennet_stats[i].name, ETH_GSTRING_LEN);
2561 break;
2562 }
2563}
2564
2565static const struct ethtool_ops xennet_ethtool_ops =
2566{
2567 .get_link = ethtool_op_get_link,
2568
2569 .get_sset_count = xennet_get_sset_count,
2570 .get_ethtool_stats = xennet_get_ethtool_stats,
2571 .get_strings = xennet_get_strings,
2572 .get_ts_info = ethtool_op_get_ts_info,
2573};
2574
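/* Legacy rxbuf_{min,max,cur} sysfs attributes.  The RX ring size is fixed,
 * so reads report NET_RX_RING_SIZE and writes only validate the input;
 * these attributes appear to be kept for backwards compatibility.
 */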
2575#ifdef CONFIG_SYSFS
2576static ssize_t show_rxbuf(struct device *dev,
2577 struct device_attribute *attr, char *buf)
2578{
2579 return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
2580}
2581
2582static ssize_t store_rxbuf(struct device *dev,
2583 struct device_attribute *attr,
2584 const char *buf, size_t len)
2585{
2586 char *endp;
2587
2588 if (!capable(CAP_NET_ADMIN))
2589 return -EPERM;
2590
2591 simple_strtoul(buf, &endp, 0);
2592 if (endp == buf)
2593 return -EBADMSG;
2594
2595
2596 /* The RX ring size is fixed; accept and ignore the written value. */
2597 return len;
2598}
2599
2600static DEVICE_ATTR(rxbuf_min, 0644, show_rxbuf, store_rxbuf);
2601static DEVICE_ATTR(rxbuf_max, 0644, show_rxbuf, store_rxbuf);
2602static DEVICE_ATTR(rxbuf_cur, 0444, show_rxbuf, NULL);
2603
2604static struct attribute *xennet_dev_attrs[] = {
2605 &dev_attr_rxbuf_min.attr,
2606 &dev_attr_rxbuf_max.attr,
2607 &dev_attr_rxbuf_cur.attr,
2608 NULL
2609};
2610
2611static const struct attribute_group xennet_dev_group = {
2612 .attrs = xennet_dev_attrs
2613};
2614#endif
2615
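/* Walk the backend through Closing and then Closed, waiting up to
 * XENNET_TIMEOUT per attempt and retrying until the expected state (or
 * XenbusStateUnknown) is observed.
 */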
2616static void xennet_bus_close(struct xenbus_device *dev)
2617{
2618 int ret;
2619
2620 if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2621 return;
2622 do {
2623 xenbus_switch_state(dev, XenbusStateClosing);
2624 ret = wait_event_timeout(module_wq,
2625 xenbus_read_driver_state(dev->otherend) ==
2626 XenbusStateClosing ||
2627 xenbus_read_driver_state(dev->otherend) ==
2628 XenbusStateClosed ||
2629 xenbus_read_driver_state(dev->otherend) ==
2630 XenbusStateUnknown,
2631 XENNET_TIMEOUT);
2632 } while (!ret);
2633
2634 if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2635 return;
2636
2637 do {
2638 xenbus_switch_state(dev, XenbusStateClosed);
2639 ret = wait_event_timeout(module_wq,
2640 xenbus_read_driver_state(dev->otherend) ==
2641 XenbusStateClosed ||
2642 xenbus_read_driver_state(dev->otherend) ==
2643 XenbusStateUnknown,
2644 XENNET_TIMEOUT);
2645 } while (!ret);
2646}
2647
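/* Device teardown: close the xenbus connection, disconnect from the
 * backend, unregister the net device and free the queues.
 */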
2648static int xennet_remove(struct xenbus_device *dev)
2649{
2650 struct netfront_info *info = dev_get_drvdata(&dev->dev);
2651
2652 xennet_bus_close(dev);
2653 xennet_disconnect_backend(info);
2654
2655 if (info->netdev->reg_state == NETREG_REGISTERED)
2656 unregister_netdev(info->netdev);
2657
2658 if (info->queues) {
2659 rtnl_lock();
2660 xennet_destroy_queues(info);
2661 rtnl_unlock();
2662 }
2663 xennet_free_netdev(info->netdev);
2664
2665 return 0;
2666}
2667
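/* xenbus glue: match "vif" devices and hook up the frontend callbacks. */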
2668static const struct xenbus_device_id netfront_ids[] = {
2669 { "vif" },
2670 { "" }
2671};
2672
2673static struct xenbus_driver netfront_driver = {
2674 .ids = netfront_ids,
2675 .probe = netfront_probe,
2676 .remove = xennet_remove,
2677 .resume = netfront_resume,
2678 .otherend_changed = netback_changed,
2679};
2680
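/* Module init: only load inside a Xen domain that can have
 * paravirtualised NIC devices, then register the xenbus frontend driver.
 */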
2681static int __init netif_init(void)
2682{
2683 if (!xen_domain())
2684 return -ENODEV;
2685
2686 if (!xen_has_pv_nic_devices())
2687 return -ENODEV;
2688
2689 pr_info("Initialising Xen virtual ethernet driver\n");
2690
2691 /* Unless the user has specified a value, allow as many queues as
2692 * there are CPUs, capped at MAX_QUEUES_DEFAULT.
2693 */
2694 if (xennet_max_queues == 0)
2695 xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
2696 num_online_cpus());
2697
2698 return xenbus_register_frontend(&netfront_driver);
2699}
2700module_init(netif_init);
2701
2702
2703static void __exit netif_exit(void)
2704{
2705 xenbus_unregister_driver(&netfront_driver);
2706}
2707module_exit(netif_exit);
2708
2709MODULE_DESCRIPTION("Xen virtual network device frontend");
2710MODULE_LICENSE("GPL");
2711MODULE_ALIAS("xen:vif");
2712MODULE_ALIAS("xennet");
2713