/*
 * Back-end of the driver for virtual network devices (xen-netback).
 * This file implements the TX/RX datapath between a frontend guest
 * and the backend driver domain.
 */
#include "common.h"

#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>

#include <net/tcp.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/memory.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>

/* Allow the TX and RX event channels to be handled by separate IRQs
 * (split event channels) when the frontend supports it.
 */
bool separate_tx_rx_irq = true;
module_param(separate_tx_rx_irq, bool, 0644);

/* A frontend that chains more than this many slots for a single packet
 * is considered malicious and the interface is disabled.
 */
#define FATAL_SKB_SLOTS_DEFAULT 20
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);

/* The maximum number of slots a valid packet may use. Packets using more
 * slots than this are dropped (see xenvif_count_requests()).
 */
#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN

/* A pending_tx_info entry whose head is not INVALID_PENDING_RING_IDX is
 * the head of one (possibly coalesced) tx request; otherwise it is a
 * continuation of the previous request.
 */
static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx)
{
	return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
}

static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
			       u8 status);

static void make_tx_response(struct xenvif *vif,
			     struct xen_netif_tx_request *txp,
			     s8 st);

static inline int tx_work_todo(struct xenvif *vif);
static inline int rx_work_todo(struct xenvif *vif);

static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
						      u16 id,
						      s8 st,
						      u16 offset,
						      u16 size,
						      u16 flags);

static inline unsigned long idx_to_pfn(struct xenvif *vif,
				       u16 idx)
{
	return page_to_pfn(vif->mmap_pages[idx]);
}

static inline unsigned long idx_to_kaddr(struct xenvif *vif,
					 u16 idx)
{
	return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
}

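/* Amount of packet data copied (rather than referenced) into the linear
 * area of the skb; headers are pulled up to at least this many bytes so
 * that protocol processing can examine them.
 */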
#define PKT_PROT_LEN 128

static u16 frag_get_pending_idx(skb_frag_t *frag)
{
	return (u16)frag->page_offset;
}

static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
	frag->page_offset = pending_idx;
}

static inline pending_ring_idx_t pending_index(unsigned i)
{
	return i & (MAX_PENDING_REQS-1);
}

static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
{
	return MAX_PENDING_REQS -
		vif->pending_prod + vif->pending_cons;
}

bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
{
	RING_IDX prod, cons;

	do {
		prod = vif->rx.sring->req_prod;
		cons = vif->rx.req_cons;

		if (prod - cons >= needed)
			return true;

		vif->rx.sring->req_event = prod + 1;

		/* Make sure the new req_event is visible before we check
		 * prod again.
		 */
		mb();
	} while (vif->rx.sring->req_prod != prod);

	return false;
}

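/* Decide whether 'size' bytes should be placed in a fresh receive buffer
 * rather than appended at 'offset' in the current one.
 */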
static bool start_new_rx_buffer(int offset, unsigned long size, int head)
{
	/* Simple case: the current buffer is already full. */
	if (offset == MAX_BUFFER_OFFSET)
		return true;

	/* Otherwise start a new buffer if the data would straddle the end
	 * of the current one, would fit entirely in a fresh buffer, and
	 * this is neither an empty buffer nor the head of the packet.
	 */
	if ((offset + size > MAX_BUFFER_OFFSET) &&
	    (size <= MAX_BUFFER_OFFSET) && offset && !head)
		return true;

	return false;
}

struct netrx_pending_operations {
	unsigned copy_prod, copy_cons;
	unsigned meta_prod, meta_cons;
	struct gnttab_copy *copy;
	struct xenvif_rx_meta *meta;
	int copy_off;
	grant_ref_t copy_gref;
};

static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
						 struct netrx_pending_operations *npo)
{
	struct xenvif_rx_meta *meta;
	struct xen_netif_rx_request *req;

	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);

	meta = npo->meta + npo->meta_prod++;
	meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
	meta->gso_size = 0;
	meta->size = 0;
	meta->id = req->id;

	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	return meta;
}

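/* Set up the grant copy operations needed to transfer one fragment of an
 * skb into the frontend's receive buffers, spilling into additional ring
 * buffers as required.
 */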
static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
				 struct netrx_pending_operations *npo,
				 struct page *page, unsigned long size,
				 unsigned long offset, int *head)
{
	struct gnttab_copy *copy_gop;
	struct xenvif_rx_meta *meta;
	unsigned long bytes;
	int gso_type = XEN_NETIF_GSO_TYPE_NONE;

	/* Data must not cross a page boundary. */
	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));

	meta = npo->meta + npo->meta_prod - 1;

	/* Skip unused frames from start of page. */
	page += offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;

	while (size > 0) {
		BUG_ON(offset >= PAGE_SIZE);
		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);

		bytes = PAGE_SIZE - offset;

		if (bytes > size)
			bytes = size;

		if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
			/* Netfront requires there to be some data in the
			 * head buffer.
			 */
			BUG_ON(*head);

			meta = get_next_rx_buffer(vif, npo);
		}

		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
			bytes = MAX_BUFFER_OFFSET - npo->copy_off;

		copy_gop = npo->copy + npo->copy_prod++;
		copy_gop->flags = GNTCOPY_dest_gref;
		copy_gop->len = bytes;

		copy_gop->source.domid = DOMID_SELF;
		copy_gop->source.u.gmfn = virt_to_mfn(page_address(page));
		copy_gop->source.offset = offset;

		copy_gop->dest.domid = vif->domid;
		copy_gop->dest.offset = npo->copy_off;
		copy_gop->dest.u.ref = npo->copy_gref;

		npo->copy_off += bytes;
		meta->size += bytes;

		offset += bytes;
		size -= bytes;

		/* Advance to the next page of a compound page. */
		if (offset == PAGE_SIZE && size) {
			BUG_ON(!PageCompound(page));
			page++;
			offset = 0;
		}

		/* Leave a gap for the GSO descriptor. */
		if (skb_is_gso(skb)) {
			if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
				gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
			else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
				gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
		}

		if (*head && ((1 << gso_type) & vif->gso_mask))
			vif->rx.req_cons++;

		*head = 0; /* There must be something in this buffer now. */

	}
}

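/* Prepare an skb for transmission to the frontend: consume receive ring
 * requests and fill in the grant copy operations and meta slots needed.
 * Returns the number of meta slots used.
 */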
static int xenvif_gop_skb(struct sk_buff *skb,
			  struct netrx_pending_operations *npo)
{
	struct xenvif *vif = netdev_priv(skb->dev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;
	struct xen_netif_rx_request *req;
	struct xenvif_rx_meta *meta;
	unsigned char *data;
	int head = 1;
	int old_meta_prod;
	int gso_type;

	old_meta_prod = npo->meta_prod;

	gso_type = XEN_NETIF_GSO_TYPE_NONE;
	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
	}

	/* Set up a GSO prefix descriptor, if necessary. */
	if ((1 << gso_type) & vif->gso_prefix_mask) {
		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
		meta = npo->meta + npo->meta_prod++;
		meta->gso_type = gso_type;
		meta->gso_size = skb_shinfo(skb)->gso_size;
		meta->size = 0;
		meta->id = req->id;
	}

	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
	meta = npo->meta + npo->meta_prod++;

	if ((1 << gso_type) & vif->gso_mask) {
		meta->gso_type = gso_type;
		meta->gso_size = skb_shinfo(skb)->gso_size;
	} else {
		meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
		meta->gso_size = 0;
	}

	meta->size = 0;
	meta->id = req->id;
	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	data = skb->data;
	while (data < skb_tail_pointer(skb)) {
		unsigned int offset = offset_in_page(data);
		unsigned int len = PAGE_SIZE - offset;

		if (data + len > skb_tail_pointer(skb))
			len = skb_tail_pointer(skb) - data;

		xenvif_gop_frag_copy(vif, skb, npo,
				     virt_to_page(data), len, offset, &head);
		data += len;
	}

	for (i = 0; i < nr_frags; i++) {
		xenvif_gop_frag_copy(vif, skb, npo,
				     skb_frag_page(&skb_shinfo(skb)->frags[i]),
				     skb_frag_size(&skb_shinfo(skb)->frags[i]),
				     skb_shinfo(skb)->frags[i].page_offset,
				     &head);
	}

	return npo->meta_prod - old_meta_prod;
}

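/* Companion to xenvif_gop_skb(): check the status of the grant copy
 * operations it queued and report an error status if any of them failed.
 */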
static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
			    struct netrx_pending_operations *npo)
{
	struct gnttab_copy *copy_op;
	int status = XEN_NETIF_RSP_OKAY;
	int i;

	for (i = 0; i < nr_meta_slots; i++) {
		copy_op = npo->copy + npo->copy_cons++;
		if (copy_op->status != GNTST_okay) {
			netdev_dbg(vif->dev,
				   "Bad status %d from copy to DOM%d.\n",
				   copy_op->status, vif->domid);
			status = XEN_NETIF_RSP_ERROR;
		}
	}

	return status;
}

static void xenvif_add_frag_responses(struct xenvif *vif, int status,
				      struct xenvif_rx_meta *meta,
				      int nr_meta_slots)
{
	int i;
	unsigned long offset;

	/* No fragments used. */
	if (nr_meta_slots <= 1)
		return;

	nr_meta_slots--;

	for (i = 0; i < nr_meta_slots; i++) {
		int flags;
		if (i == nr_meta_slots - 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		offset = 0;
		make_rx_response(vif, meta[i].id, status, offset,
				 meta[i].size, flags);
	}
}

struct skb_cb_overlay {
	int meta_slots_used;
};

void xenvif_kick_thread(struct xenvif *vif)
{
	wake_up(&vif->wq);
}

static void xenvif_rx_action(struct xenvif *vif)
{
	s8 status;
	u16 flags;
	struct xen_netif_rx_response *resp;
	struct sk_buff_head rxq;
	struct sk_buff *skb;
	LIST_HEAD(notify);
	int ret;
	unsigned long offset;
	struct skb_cb_overlay *sco;
	bool need_to_notify = false;

	struct netrx_pending_operations npo = {
		.copy = vif->grant_copy_op,
		.meta = vif->meta,
	};

	skb_queue_head_init(&rxq);

	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
		RING_IDX max_slots_needed;
		int i;

		/* We need a cheap worst-case estimate for the number of
		 * slots we'll use.
		 */
		max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
						skb_headlen(skb),
						PAGE_SIZE);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			unsigned int size;
			size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
			max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
		}
		if (skb_is_gso(skb) &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
		     skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
			max_slots_needed++;

		/* If the skb may not fit then bail out now. */
		if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
			skb_queue_head(&vif->rx_queue, skb);
			need_to_notify = true;
			vif->rx_last_skb_slots = max_slots_needed;
			break;
		} else
			vif->rx_last_skb_slots = 0;

		sco = (struct skb_cb_overlay *)skb->cb;
		sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
		BUG_ON(sco->meta_slots_used > max_slots_needed);

		__skb_queue_tail(&rxq, skb);
	}

	BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));

	if (!npo.copy_prod)
		goto done;

	BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
	gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);

	while ((skb = __skb_dequeue(&rxq)) != NULL) {
		sco = (struct skb_cb_overlay *)skb->cb;

		if ((1 << vif->meta[npo.meta_cons].gso_type) &
		    vif->gso_prefix_mask) {
			resp = RING_GET_RESPONSE(&vif->rx,
						 vif->rx.rsp_prod_pvt++);

			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;

			resp->offset = vif->meta[npo.meta_cons].gso_size;
			resp->id = vif->meta[npo.meta_cons].id;
			resp->status = sco->meta_slots_used;

			npo.meta_cons++;
			sco->meta_slots_used--;
		}

		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;

		status = xenvif_check_gop(vif, sco->meta_slots_used, &npo);

		if (sco->meta_slots_used == 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			/* remote but checksummed. */
			flags |= XEN_NETRXF_data_validated;

		offset = 0;
		resp = make_rx_response(vif, vif->meta[npo.meta_cons].id,
					status, offset,
					vif->meta[npo.meta_cons].size,
					flags);

		if ((1 << vif->meta[npo.meta_cons].gso_type) &
		    vif->gso_mask) {
			struct xen_netif_extra_info *gso =
				(struct xen_netif_extra_info *)
				RING_GET_RESPONSE(&vif->rx,
						  vif->rx.rsp_prod_pvt++);

			resp->flags |= XEN_NETRXF_extra_info;

			gso->u.gso.type = vif->meta[npo.meta_cons].gso_type;
			gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
			gso->u.gso.pad = 0;
			gso->u.gso.features = 0;

			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
			gso->flags = 0;
		}

		xenvif_add_frag_responses(vif, status,
					  vif->meta + npo.meta_cons + 1,
					  sco->meta_slots_used);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);

		need_to_notify |= !!ret;

		npo.meta_cons += sco->meta_slots_used;
		dev_kfree_skb(skb);
	}

done:
	if (need_to_notify)
		notify_remote_via_irq(vif->rx_irq);
}

void xenvif_check_rx_xenvif(struct xenvif *vif)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);

	if (more_to_do)
		napi_schedule(&vif->napi);
}

static void tx_add_credit(struct xenvif *vif)
{
	unsigned long max_burst, max_credit;

	/* Allow a burst big enough to transmit a jumbo packet of up to
	 * 128kB. Otherwise the interface only gains credit_bytes per
	 * interval.
	 */
	max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
	max_burst = min(max_burst, 131072UL);
	max_burst = max(max_burst, vif->credit_bytes);

	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
	max_credit = vif->remaining_credit + vif->credit_bytes;
	if (max_credit < vif->remaining_credit)
		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

	vif->remaining_credit = min(max_credit, max_burst);
}

static void tx_credit_callback(unsigned long data)
{
	struct xenvif *vif = (struct xenvif *)data;
	tx_add_credit(vif);
	xenvif_check_rx_xenvif(vif);
}

static void xenvif_tx_err(struct xenvif *vif,
			  struct xen_netif_tx_request *txp, RING_IDX end)
{
	RING_IDX cons = vif->tx.req_cons;

	do {
		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
		if (cons == end)
			break;
		txp = RING_GET_REQUEST(&vif->tx, cons++);
	} while (1);
	vif->tx.req_cons = cons;
}

static void xenvif_fatal_tx_err(struct xenvif *vif)
{
	netdev_err(vif->dev, "fatal error; disabling device\n");
	xenvif_carrier_off(vif);
}

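/* Walk the chain of tx requests that make up one packet, validating each
 * slot. Returns the number of extra slots consumed, or a negative error;
 * fatal errors also disable the interface.
 */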
static int xenvif_count_requests(struct xenvif *vif,
				 struct xen_netif_tx_request *first,
				 struct xen_netif_tx_request *txp,
				 int work_to_do)
{
	RING_IDX cons = vif->tx.req_cons;
	int slots = 0;
	int drop_err = 0;
	int more_data;

	if (!(first->flags & XEN_NETTXF_more_data))
		return 0;

	do {
		struct xen_netif_tx_request dropped_tx = { 0 };

		if (slots >= work_to_do) {
			netdev_err(vif->dev,
				   "Asked for %d slots but exceeds this limit\n",
				   work_to_do);
			xenvif_fatal_tx_err(vif);
			return -ENODATA;
		}

		/* This guest is really using too many slots and is
		 * considered malicious.
		 */
		if (unlikely(slots >= fatal_skb_slots)) {
			netdev_err(vif->dev,
				   "Malicious frontend using %d slots, threshold %u\n",
				   slots, fatal_skb_slots);
			xenvif_fatal_tx_err(vif);
			return -E2BIG;
		}

		/* Packets using more than XEN_NETBK_LEGACY_SLOTS_MAX slots
		 * (but fewer than fatal_skb_slots) are not fatal: consume
		 * the whole chain from the ring and then drop the packet.
		 */
		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
			if (net_ratelimit())
				netdev_dbg(vif->dev,
					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
					   slots, XEN_NETBK_LEGACY_SLOTS_MAX);
			drop_err = -E2BIG;
		}

		if (drop_err)
			txp = &dropped_tx;

		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
		       sizeof(*txp));

		/* If the guest submitted a frame >= 64 KiB then first->size
		 * overflowed and the following slots will appear to be
		 * larger than the frame. This cannot be a fatal error as
		 * there are buggy frontends that do this; consolidate the
		 * slots and drop the packet instead.
		 */
		if (!drop_err && txp->size > first->size) {
			if (net_ratelimit())
				netdev_dbg(vif->dev,
					   "Invalid tx request, slot size %u > remaining size %u\n",
					   txp->size, first->size);
			drop_err = -EIO;
		}

		first->size -= txp->size;
		slots++;

		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
			netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
				   txp->offset, txp->size);
			xenvif_fatal_tx_err(vif);
			return -EINVAL;
		}

		more_data = txp->flags & XEN_NETTXF_more_data;

		if (!drop_err)
			txp++;

	} while (more_data);

	if (drop_err) {
		xenvif_tx_err(vif, first, cons + slots);
		return drop_err;
	}

	return slots;
}

static struct page *xenvif_alloc_page(struct xenvif *vif,
				      u16 pending_idx)
{
	struct page *page;

	page = alloc_page(GFP_ATOMIC|__GFP_COLD);
	if (!page)
		return NULL;
	vif->mmap_pages[pending_idx] = page;

	return page;
}

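/* Build grant copy operations for the frag slots of a tx packet,
 * coalescing multiple frontend slots into single backend pages where
 * needed.
 */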
static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
					       struct sk_buff *skb,
					       struct xen_netif_tx_request *txp,
					       struct gnttab_copy *gop)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	skb_frag_t *frags = shinfo->frags;
	u16 pending_idx = *((u16 *)skb->data);
	u16 head_idx = 0;
	int slot, start;
	struct page *page;
	pending_ring_idx_t index, start_idx = 0;
	uint16_t dst_offset;
	unsigned int nr_slots;
	struct pending_tx_info *first = NULL;

	/* At this point shinfo->nr_frags is in fact the number of
	 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
	 */
	nr_slots = shinfo->nr_frags;

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	/* Coalesce tx requests: at this point the packet passed in should
	 * be <= 64K, anything larger was already rejected by
	 * xenvif_count_requests().
	 */
	for (shinfo->nr_frags = slot = start; slot < nr_slots;
	     shinfo->nr_frags++) {
		struct pending_tx_info *pending_tx_info =
			vif->pending_tx_info;

		page = alloc_page(GFP_ATOMIC|__GFP_COLD);
		if (!page)
			goto err;

		dst_offset = 0;
		first = NULL;
		while (dst_offset < PAGE_SIZE && slot < nr_slots) {
			gop->flags = GNTCOPY_source_gref;

			gop->source.u.ref = txp->gref;
			gop->source.domid = vif->domid;
			gop->source.offset = txp->offset;

			gop->dest.domid = DOMID_SELF;

			gop->dest.offset = dst_offset;
			gop->dest.u.gmfn = virt_to_mfn(page_address(page));

			if (dst_offset + txp->size > PAGE_SIZE) {
				/* The slot doesn't fit in the space left in
				 * this page: copy what fits and leave the
				 * remainder of the slot for the next page.
				 */
				gop->len = PAGE_SIZE - dst_offset;
				txp->offset += gop->len;
				txp->size -= gop->len;
				dst_offset += gop->len; /* quit the loop */
			} else {
				/* This slot fits entirely in the current page. */
				gop->len = txp->size;
				dst_offset += gop->len;

				index = pending_index(vif->pending_cons++);

				pending_idx = vif->pending_ring[index];

				memcpy(&pending_tx_info[pending_idx].req, txp,
				       sizeof(*txp));

				/* Poison these fields; the corresponding
				 * fields for the head slot are fixed up
				 * after the inner loop.
				 */
				vif->mmap_pages[pending_idx] = (void *)(~0UL);
				pending_tx_info[pending_idx].head =
					INVALID_PENDING_RING_IDX;

				if (!first) {
					first = &pending_tx_info[pending_idx];
					start_idx = index;
					head_idx = pending_idx;
				}

				txp++;
				slot++;
			}

			gop++;
		}

		first->req.offset = 0;
		first->req.size = dst_offset;
		first->head = start_idx;
		vif->mmap_pages[head_idx] = page;
		frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
	}

	BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);

	return gop;
err:
	/* Unwind, freeing all pages and sending error responses. */
	while (shinfo->nr_frags-- > start) {
		xenvif_idx_release(vif,
				frag_get_pending_idx(&frags[shinfo->nr_frags]),
				XEN_NETIF_RSP_ERROR);
	}
	/* The head slot too, if it was taken. */
	if (start)
		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);

	return NULL;
}

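/* Check the grant copy results for the header and fragments of a tx
 * packet, releasing pending slots as appropriate on error.
 */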
static int xenvif_tx_check_gop(struct xenvif *vif,
			       struct sk_buff *skb,
			       struct gnttab_copy **gopp)
{
	struct gnttab_copy *gop = *gopp;
	u16 pending_idx = *((u16 *)skb->data);
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	struct pending_tx_info *tx_info;
	int nr_frags = shinfo->nr_frags;
	int i, err, start;
	u16 peek; /* peek into next tx request */

	/* Check status of header. */
	err = gop->status;
	if (unlikely(err))
		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	for (i = start; i < nr_frags; i++) {
		int j, newerr;
		pending_ring_idx_t head;

		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
		tx_info = &vif->pending_tx_info[pending_idx];
		head = tx_info->head;

		/* Check the status of every copy op that made up this
		 * fragment.
		 */
		do {
			newerr = (++gop)->status;
			if (newerr)
				break;
			peek = vif->pending_ring[pending_index(++head)];
		} while (!pending_tx_is_head(vif, peek));

		if (likely(!newerr)) {
			/* Had a previous error? Invalidate this fragment. */
			if (unlikely(err))
				xenvif_idx_release(vif, pending_idx,
						   XEN_NETIF_RSP_OKAY);
			continue;
		}

		/* Error on this fragment: respond to client with an error. */
		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);

		/* Not the first error? Preceding frags already invalidated. */
		if (err)
			continue;

		/* First error: invalidate header and preceding fragments. */
		pending_idx = *((u16 *)skb->data);
		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
		for (j = start; j < i; j++) {
			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
			xenvif_idx_release(vif, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		/* Remember the error: invalidate all subsequent fragments. */
		err = newerr;
	}

	*gopp = gop + 1;
	return err;
}

static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	int i;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = shinfo->frags + i;
		struct xen_netif_tx_request *txp;
		struct page *page;
		u16 pending_idx;

		pending_idx = frag_get_pending_idx(frag);

		txp = &vif->pending_tx_info[pending_idx].req;
		page = virt_to_page(idx_to_kaddr(vif, pending_idx));
		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
		skb->len += txp->size;
		skb->data_len += txp->size;
		skb->truesize += txp->size;

		/* Take an extra reference to offset xenvif_idx_release(). */
		get_page(vif->mmap_pages[pending_idx]);
		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
	}
}

static int xenvif_get_extras(struct xenvif *vif,
			     struct xen_netif_extra_info *extras,
			     int work_to_do)
{
	struct xen_netif_extra_info extra;
	RING_IDX cons = vif->tx.req_cons;

	do {
		if (unlikely(work_to_do-- <= 0)) {
			netdev_err(vif->dev, "Missing extra info\n");
			xenvif_fatal_tx_err(vif);
			return -EBADR;
		}

		memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
		       sizeof(extra));
		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			vif->tx.req_cons = ++cons;
			netdev_err(vif->dev,
				   "Invalid extra type: %d\n", extra.type);
			xenvif_fatal_tx_err(vif);
			return -EINVAL;
		}

		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
		vif->tx.req_cons = ++cons;
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return work_to_do;
}

static int xenvif_set_skb_gso(struct xenvif *vif,
			      struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		netdev_err(vif->dev, "GSO size must not be zero.\n");
		xenvif_fatal_tx_err(vif);
		return -EINVAL;
	}

	switch (gso->u.gso.type) {
	case XEN_NETIF_GSO_TYPE_TCPV4:
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
		break;
	case XEN_NETIF_GSO_TYPE_TCPV6:
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		break;
	default:
		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
		xenvif_fatal_tx_err(vif);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	/* gso_segs is calculated later, in xenvif_tx_submit(). */

	return 0;
}

static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
{
	bool recalculate_partial_csum = false;

	/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy guests
	 * set it to CHECKSUM_UNNECESSARY or CHECKSUM_NONE, in which case
	 * force the skb to CHECKSUM_PARTIAL and recalculate the partial
	 * checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		vif->rx_gso_checksum_fixup++;
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = true;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	return skb_checksum_setup(skb, recalculate_partial_csum);
}

static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
{
	u64 now = get_jiffies_64();
	u64 next_credit = vif->credit_window_start +
		msecs_to_jiffies(vif->credit_usec / 1000);

	/* Timer could already be pending in rare cases. */
	if (timer_pending(&vif->credit_timeout))
		return true;

	/* Passed the point where we can replenish credit? */
	if (time_after_eq64(now, next_credit)) {
		vif->credit_window_start = now;
		tx_add_credit(vif);
	}

	/* Still too big to send right now? Set a callback. */
	if (size > vif->remaining_credit) {
		vif->credit_timeout.data =
			(unsigned long)vif;
		vif->credit_timeout.function =
			tx_credit_callback;
		mod_timer(&vif->credit_timeout,
			  next_credit);
		vif->credit_window_start = next_credit;

		return true;
	}

	return false;
}

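/* Consume tx requests from the ring, up to the given budget, and build
 * the corresponding grant copy operations. Returns the number of
 * operations queued in vif->tx_copy_ops.
 */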
static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
{
	struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
	struct sk_buff *skb;
	int ret;

	while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
		< MAX_PENDING_REQS) &&
	       (skb_queue_len(&vif->tx_queue) < budget)) {
		struct xen_netif_tx_request txreq;
		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
		struct page *page;
		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
		u16 pending_idx;
		RING_IDX idx;
		int work_to_do;
		unsigned int data_len;
		pending_ring_idx_t index;

		if (vif->tx.sring->req_prod - vif->tx.req_cons >
		    XEN_NETIF_TX_RING_SIZE) {
			netdev_err(vif->dev,
				   "Impossible number of requests. "
				   "req_prod %d, req_cons %d, size %ld\n",
				   vif->tx.sring->req_prod, vif->tx.req_cons,
				   XEN_NETIF_TX_RING_SIZE);
			xenvif_fatal_tx_err(vif);
			continue;
		}

		work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx);
		if (!work_to_do)
			break;

		idx = vif->tx.req_cons;
		rmb(); /* Ensure that we see the request before we copy it. */
		memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));

		/* Credit-based scheduling. */
		if (txreq.size > vif->remaining_credit &&
		    tx_credit_exceeded(vif, txreq.size))
			break;

		vif->remaining_credit -= txreq.size;

		work_to_do--;
		vif->tx.req_cons = ++idx;

		memset(extras, 0, sizeof(extras));
		if (txreq.flags & XEN_NETTXF_extra_info) {
			work_to_do = xenvif_get_extras(vif, extras,
						       work_to_do);
			idx = vif->tx.req_cons;
			if (unlikely(work_to_do < 0))
				break;
		}

		ret = xenvif_count_requests(vif, &txreq, txfrags, work_to_do);
		if (unlikely(ret < 0))
			break;

		idx += ret;

		if (unlikely(txreq.size < ETH_HLEN)) {
			netdev_dbg(vif->dev,
				   "Bad packet size: %d\n", txreq.size);
			xenvif_tx_err(vif, &txreq, idx);
			break;
		}

		/* No crossing a page boundary as the payload mustn't fragment. */
		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
			netdev_err(vif->dev,
				   "txreq.offset: %x, size: %u, end: %lu\n",
				   txreq.offset, txreq.size,
				   (txreq.offset&~PAGE_MASK) + txreq.size);
			xenvif_fatal_tx_err(vif);
			break;
		}

		index = pending_index(vif->pending_cons);
		pending_idx = vif->pending_ring[index];

		data_len = (txreq.size > PKT_PROT_LEN &&
			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
			PKT_PROT_LEN : txreq.size;

		skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
				GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(skb == NULL)) {
			netdev_dbg(vif->dev,
				   "Can't allocate a skb in start_xmit.\n");
			xenvif_tx_err(vif, &txreq, idx);
			break;
		}

		/* Packets passed up the stack must have some headroom. */
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (xenvif_set_skb_gso(vif, skb, gso)) {
				/* Failure in xenvif_set_skb_gso is fatal. */
				kfree_skb(skb);
				break;
			}
		}

		page = xenvif_alloc_page(vif, pending_idx);
		if (!page) {
			kfree_skb(skb);
			xenvif_tx_err(vif, &txreq, idx);
			break;
		}

		gop->source.u.ref = txreq.gref;
		gop->source.domid = vif->domid;
		gop->source.offset = txreq.offset;

		gop->dest.u.gmfn = virt_to_mfn(page_address(page));
		gop->dest.domid = DOMID_SELF;
		gop->dest.offset = txreq.offset;

		gop->len = txreq.size;
		gop->flags = GNTCOPY_source_gref;

		gop++;

		memcpy(&vif->pending_tx_info[pending_idx].req,
		       &txreq, sizeof(txreq));
		vif->pending_tx_info[pending_idx].head = index;
		*((u16 *)skb->data) = pending_idx;

		__skb_put(skb, data_len);

		skb_shinfo(skb)->nr_frags = ret;
		if (data_len < txreq.size) {
			skb_shinfo(skb)->nr_frags++;
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     pending_idx);
		} else {
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     INVALID_PENDING_IDX);
		}

		vif->pending_cons++;

		request_gop = xenvif_get_requests(vif, skb, txfrags, gop);
		if (request_gop == NULL) {
			kfree_skb(skb);
			xenvif_tx_err(vif, &txreq, idx);
			break;
		}
		gop = request_gop;

		__skb_queue_tail(&vif->tx_queue, skb);

		vif->tx.req_cons = idx;

		if ((gop-vif->tx_copy_ops) >= ARRAY_SIZE(vif->tx_copy_ops))
			break;
	}

	return gop - vif->tx_copy_ops;
}

static int xenvif_tx_submit(struct xenvif *vif)
{
	struct gnttab_copy *gop = vif->tx_copy_ops;
	struct sk_buff *skb;
	int work_done = 0;

	while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
		struct xen_netif_tx_request *txp;
		u16 pending_idx;
		unsigned data_len;

		pending_idx = *((u16 *)skb->data);
		txp = &vif->pending_tx_info[pending_idx].req;

		/* Check the status of the grant copy operations. */
		if (unlikely(xenvif_tx_check_gop(vif, skb, &gop))) {
			netdev_dbg(vif->dev, "netback grant failed.\n");
			skb_shinfo(skb)->nr_frags = 0;
			kfree_skb(skb);
			continue;
		}

		data_len = skb->len;
		memcpy(skb->data,
		       (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset),
		       data_len);
		if (data_len < txp->size) {
			/* The remainder of this slot is handled as a fragment. */
			txp->offset += data_len;
			txp->size -= data_len;
		} else {
			/* Schedule a response immediately. */
			xenvif_idx_release(vif, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		if (txp->flags & XEN_NETTXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (txp->flags & XEN_NETTXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		xenvif_fill_frags(vif, skb);

		if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
			int target = min_t(int, skb->len, PKT_PROT_LEN);
			__pskb_pull_tail(skb, target - skb_headlen(skb));
		}

		skb->dev = vif->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb_reset_network_header(skb);

		if (checksum_setup(vif, skb)) {
			netdev_dbg(vif->dev,
				   "Can't setup checksum in net_tx_action\n");
			kfree_skb(skb);
			continue;
		}

		skb_probe_transport_header(skb, 0);

		/* If the packet is GSO then we will have just set up the
		 * transport header offset in checksum_setup so it's now
		 * straightforward to calculate gso_segs.
		 */
		if (skb_is_gso(skb)) {
			int mss = skb_shinfo(skb)->gso_size;
			int hdrlen = skb_transport_header(skb) -
				skb_mac_header(skb) +
				tcp_hdrlen(skb);

			skb_shinfo(skb)->gso_segs =
				DIV_ROUND_UP(skb->len - hdrlen, mss);
		}

		vif->dev->stats.rx_bytes += skb->len;
		vif->dev->stats.rx_packets++;

		work_done++;

		netif_receive_skb(skb);
	}

	return work_done;
}

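/* Called from the NAPI poll handler to process packets transmitted by the
 * frontend.
 */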
int xenvif_tx_action(struct xenvif *vif, int budget)
{
	unsigned nr_gops;
	int work_done;

	if (unlikely(!tx_work_todo(vif)))
		return 0;

	nr_gops = xenvif_tx_build_gops(vif, budget);

	if (nr_gops == 0)
		return 0;

	gnttab_batch_copy(vif->tx_copy_ops, nr_gops);

	work_done = xenvif_tx_submit(vif);

	return work_done;
}

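/* Release a pending tx slot, together with any slots that were coalesced
 * into it, and send the corresponding tx responses to the frontend.
 */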
static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
			       u8 status)
{
	struct pending_tx_info *pending_tx_info;
	pending_ring_idx_t head;
	u16 peek; /* peek into next tx request */

	BUG_ON(vif->mmap_pages[pending_idx] == (void *)(~0UL));

	/* Already complete? */
	if (vif->mmap_pages[pending_idx] == NULL)
		return;

	pending_tx_info = &vif->pending_tx_info[pending_idx];

	head = pending_tx_info->head;

	BUG_ON(!pending_tx_is_head(vif, head));
	BUG_ON(vif->pending_ring[pending_index(head)] != pending_idx);

	do {
		pending_ring_idx_t index;
		pending_ring_idx_t idx = pending_index(head);
		u16 info_idx = vif->pending_ring[idx];

		pending_tx_info = &vif->pending_tx_info[info_idx];
		make_tx_response(vif, &pending_tx_info->req, status);

		/* Setting any number other than
		 * INVALID_PENDING_RING_IDX indicates this slot is
		 * starting a new packet / ending a previous packet.
		 */
		pending_tx_info->head = 0;

		index = pending_index(vif->pending_prod++);
		vif->pending_ring[index] = vif->pending_ring[info_idx];

		peek = vif->pending_ring[pending_index(++head)];

	} while (!pending_tx_is_head(vif, peek));

	put_page(vif->mmap_pages[pending_idx]);
	vif->mmap_pages[pending_idx] = NULL;
}

static void make_tx_response(struct xenvif *vif,
			     struct xen_netif_tx_request *txp,
			     s8 st)
{
	RING_IDX i = vif->tx.rsp_prod_pvt;
	struct xen_netif_tx_response *resp;
	int notify;

	resp = RING_GET_RESPONSE(&vif->tx, i);
	resp->id = txp->id;
	resp->status = st;

	if (txp->flags & XEN_NETTXF_extra_info)
		RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;

	vif->tx.rsp_prod_pvt = ++i;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
	if (notify)
		notify_remote_via_irq(vif->tx_irq);
}

static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
						      u16 id,
						      s8 st,
						      u16 offset,
						      u16 size,
						      u16 flags)
{
	RING_IDX i = vif->rx.rsp_prod_pvt;
	struct xen_netif_rx_response *resp;

	resp = RING_GET_RESPONSE(&vif->rx, i);
	resp->offset = offset;
	resp->flags = flags;
	resp->id = id;
	resp->status = (s16)size;
	if (st < 0)
		resp->status = (s16)st;

	vif->rx.rsp_prod_pvt = ++i;

	return resp;
}

static inline int rx_work_todo(struct xenvif *vif)
{
	return !skb_queue_empty(&vif->rx_queue) &&
	       xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots);
}

static inline int tx_work_todo(struct xenvif *vif)
{
	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) &&
	    (nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
	     < MAX_PENDING_REQS))
		return 1;

	return 0;
}

void xenvif_unmap_frontend_rings(struct xenvif *vif)
{
	if (vif->tx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->tx.sring);
	if (vif->rx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->rx.sring);
}

int xenvif_map_frontend_rings(struct xenvif *vif,
			      grant_ref_t tx_ring_ref,
			      grant_ref_t rx_ring_ref)
{
	void *addr;
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;

	int err = -ENOMEM;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     tx_ring_ref, &addr);
	if (err)
		goto err;

	txs = (struct xen_netif_tx_sring *)addr;
	BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     rx_ring_ref, &addr);
	if (err)
		goto err;

	rxs = (struct xen_netif_rx_sring *)addr;
	BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);

	return 0;

err:
	xenvif_unmap_frontend_rings(vif);
	return err;
}

void xenvif_stop_queue(struct xenvif *vif)
{
	if (!vif->can_queue)
		return;

	netif_stop_queue(vif->dev);
}

static void xenvif_start_queue(struct xenvif *vif)
{
	if (xenvif_schedulable(vif))
		netif_wake_queue(vif->dev);
}

int xenvif_kthread(void *data)
{
	struct xenvif *vif = data;
	struct sk_buff *skb;

	while (!kthread_should_stop()) {
		wait_event_interruptible(vif->wq,
					 rx_work_todo(vif) ||
					 kthread_should_stop());
		if (kthread_should_stop())
			break;

		if (!skb_queue_empty(&vif->rx_queue))
			xenvif_rx_action(vif);

		if (skb_queue_empty(&vif->rx_queue) &&
		    netif_queue_stopped(vif->dev))
			xenvif_start_queue(vif);

		cond_resched();
	}

	/* Bin any remaining skbs. */
	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL)
		dev_kfree_skb(skb);

	return 0;
}

static int __init netback_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
			fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
	}

	rc = xenvif_xenbus_init();
	if (rc)
		goto failed_init;

	return 0;

failed_init:
	return rc;
}

module_init(netback_init);

static void __exit netback_fini(void)
{
	xenvif_xenbus_fini();
}
module_exit(netback_fini);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");