/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_desc_state {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/* Last written value to avail->idx in guest byte order */
	u16 avail_idx_shadow;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;
	size_t queue_size_in_bytes;
	dma_addr_t queue_dma_addr;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Per-descriptor state. */
	struct vring_desc_state desc_state[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on the data path.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_iommu_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supposed
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static void vring_unmap_one(const struct vring_virtqueue *vq,
			    struct vring_desc *desc)
{
	u16 flags;

	if (!vring_use_dma_api(vq->vq.vdev))
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}

static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
					 unsigned int total_sg, gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				void *ctx,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	BUG_ON(total_sg == 0);

	head = vq->free_head;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
		desc = alloc_indirect(_vq, total_sg, gfp);
	else {
		desc = NULL;
		WARN_ON_ONCE(total_sg > vq->vring.num && !vq->indirect);
	}

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
		vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, addr);

		vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->desc_state[head].data = data;
	if (indirect)
		vq->desc_state[head].indir_desc = desc;
	else
		vq->desc_state[head].indir_desc = ctx;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->avail_idx_shadow++;
	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;
	i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_one(vq, &desc[i]);
		i = virtio16_to_cpu(_vq->vdev, vq->vring.desc[i].next);
	}

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -EIO;
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
			     data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
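
/*
 * Usage sketch (illustrative only, not part of this file): a hypothetical
 * driver queueing one request with a readable header and a writable status
 * byte, in the style virtio_blk-like drivers use virtqueue_add_sgs().  The
 * names "req", "my_vq" and the GFP choice are assumptions for the example:
 *
 *	struct scatterlist hdr, status, *sgs[2];
 *	int err;
 *
 *	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *	sgs[0] = &hdr;		(readable by the device: out_sgs == 1)
 *	sgs[1] = &status;	(writable by the device: in_sgs == 1)
 *
 *	err = virtqueue_add_sgs(my_vq, sgs, 1, 1, req, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(my_vq);
 */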

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
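
/*
 * Usage sketch (illustrative): receive-side drivers typically keep the ring
 * topped up with device-writable buffers and kick once per batch.  "my_vq",
 * "buf" and "len" are assumed names, not something this file defines:
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, len);
 *	if (virtqueue_add_inbuf(my_vq, &sg, 1, buf, GFP_KERNEL) == 0)
 *		virtqueue_kick(my_vq);
 */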

/**
 * virtqueue_add_inbuf_ctx - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @ctx: extra context for the token
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
			    struct scatterlist *sg, unsigned int num,
			    void *data,
			    void *ctx,
			    gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->avail_idx_shadow - vq->num_added;
	new = vq->avail_idx_shadow;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
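
/*
 * Usage sketch (illustrative): when buffers are added under a spinlock, the
 * split prepare/notify form above lets the potentially slow notification
 * happen outside the lock.  "my_vq", "my_lock", "sg" and "data" are assumed
 * names for the example:
 *
 *	bool kick;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	err = virtqueue_add_outbuf(my_vq, &sg, 1, data, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(my_vq);
 *	spin_unlock_irqrestore(&my_lock, flags);
 *
 *	if (kick)
 *		virtqueue_notify(my_vq);
 */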

static void detach_buf(struct vring_virtqueue *vq, unsigned int head,
		       void **ctx)
{
	unsigned int i, j;
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->vring.desc[i].flags & nextflag) {
		vring_unmap_one(vq, &vq->vring.desc[i]);
		i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
		vq->vq.num_free++;
	}

	vring_unmap_one(vq, &vq->vring.desc[i]);
	vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	if (vq->indirect) {
		struct vring_desc *indir_desc = vq->desc_state[head].indir_desc;
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		if (!indir_desc)
			return;

		len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len);

		BUG_ON(!(vq->vring.desc[head].flags &
			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one(vq, &indir_desc[j]);

		kfree(indir_desc);
		vq->desc_state[head].indir_desc = NULL;
	} else if (ctx) {
		*ctx = vq->desc_state[head].indir_desc;
	}
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}

/**
 * virtqueue_get_buf_ctx - get the next used buffer
 * @_vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 * @ctx: extra context for the token
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
			    void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been
	 * exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->desc_state[i].data;
	detach_buf(vq, i, ctx);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host by writing
	 * event index and flush out the write before the next get_buf call. */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);

void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	return virtqueue_get_buf_ctx(_vq, len, NULL);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
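
/*
 * Usage sketch (illustrative): a driver's virtqueue callback normally drains
 * every completed buffer in a loop, since one interrupt may cover many used
 * entries.  "my_vq" and "handle_completion" are assumed names:
 *
 *	unsigned int len;
 *	void *token;
 *
 *	while ((token = virtqueue_get_buf(my_vq, &len)) != NULL)
 *		handle_completion(token, len);
 */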

/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @_vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
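
/*
 * Usage sketch (illustrative): the prepare/poll pair supports busy-wait
 * polling without committing to interrupts, e.g. from a NAPI-style loop.
 * "my_vq" is an assumed name:
 *
 *	unsigned opaque = virtqueue_enable_cb_prepare(my_vq);
 *
 *	if (virtqueue_poll(my_vq, opaque))
 *		virtqueue_disable_cb(my_vq);	(more work arrived: keep polling)
 */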

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
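
/*
 * Usage sketch (illustrative): the return value closes the race where the
 * device completes a buffer between the last virtqueue_get_buf() and the
 * re-enable, so drivers loop until enable succeeds with nothing pending.
 * "my_vq" and "handle_completion" are assumed names:
 *
 *	virtqueue_disable_cb(my_vq);
 *	do {
 *		while ((token = virtqueue_get_buf(my_vq, &len)))
 *			handle_completion(token, len);
 *	} while (!virtqueue_enable_cb(my_vq));
 */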

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->desc_state[i].data)
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->desc_state[i].data;
		detach_buf(vq, i, NULL);
		vq->avail_idx_shadow--;
		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
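
/*
 * Usage sketch (illustrative): during remove(), after resetting the device
 * so the queue is quiescent, a driver reclaims the tokens it still owns
 * before deleting the queue.  "my_vq" and "free_buf" are assumed names:
 *
 *	while ((buf = virtqueue_detach_unused_buf(my_vq)) != NULL)
 *		free_buf(buf);
 */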

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool context,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name)
{
	unsigned int i;
	struct vring_virtqueue *vq;

	vq = kmalloc(sizeof(*vq) + vring.num * sizeof(struct vring_desc_state),
		     GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->vring = vring;
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = vring.num;
	vq->vq.index = index;
	vq->we_own_ring = false;
	vq->queue_dma_addr = 0;
	vq->queue_size_in_bytes = 0;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->avail_flags_shadow = 0;
	vq->avail_idx_shadow = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
		!context;
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < vring.num-1; i++)
		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
	memset(vq->desc_state, 0, vring.num * sizeof(struct vring_desc_state));

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);

static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle)
{
	if (vring_use_dma_api(vdev)) {
		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
	} else {
		free_pages_exact(queue, PAGE_ALIGN(size));
	}
}

struct virtqueue *vring_create_virtqueue(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	struct virtqueue *vq;
	void *queue = NULL;
	dma_addr_t dma_addr;
	size_t queue_size_in_bytes;
	struct vring vring;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr,
					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
		if (queue)
			break;
	}

	if (!num)
		return NULL;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
	}
	if (!queue)
		return NULL;

	queue_size_in_bytes = vring_size(num, vring_align);
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				   notify, callback, name);
	if (!vq) {
		vring_free_queue(vdev, queue_size_in_bytes, queue,
				 dma_addr);
		return NULL;
	}

	to_vvq(vq)->queue_dma_addr = dma_addr;
	to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes;
	to_vvq(vq)->we_own_ring = true;

	return vq;
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
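
/*
 * Usage sketch (illustrative): a transport would create a queue roughly like
 * this; the alignment, the feature bools and the "my_notify" callback are
 * assumptions for the example, not requirements of this API:
 *
 *	vq = vring_create_virtqueue(index, num, SMP_CACHE_BYTES, vdev,
 *				    true, true, false,
 *				    my_notify, callback, "requests");
 *	if (!vq)
 *		return NULL;
 */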

struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool context,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{
	struct vring vring;
	vring_init(&vring, num, pages, vring_align);
	return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				     notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

void vring_del_virtqueue(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->we_own_ring) {
		vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
				 vq->vring.desc, vq->queue_dma_addr);
	}
	list_del(&_vq->list);
	kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		case VIRTIO_F_IOMMU_PLATFORM:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @_vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);

dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr;
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);

dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->vring.avail - (char *)vq->vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);

dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->vring.used - (char *)vq->vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);

const struct vring *virtqueue_get_vring(struct virtqueue *vq)
{
	return &to_vvq(vq)->vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);

MODULE_LICENSE("GPL");