/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <inttypes.h>

#include "trace.h"
#include "qemu/error-report.h"
#include "hw/virtio/virtio.h"
#include "qemu/atomic.h"
#include "hw/virtio/virtio-bus.h"

/*
 * The alignment to use between consumer and producer parts of the vring.
 * This default matches the x86 page size; it is what legacy transports
 * such as virtio-pci, which cannot negotiate an alignment, assume.
 */
#define VIRTIO_PCI_VRING_ALIGN 4096

typedef struct VRingDesc
{
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} VRingDesc;

typedef struct VRingAvail
{
    uint16_t flags;
    uint16_t idx;
    uint16_t ring[0];
} VRingAvail;

typedef struct VRingUsedElem
{
    uint32_t id;
    uint32_t len;
} VRingUsedElem;

typedef struct VRingUsed
{
    uint16_t flags;
    uint16_t idx;
    VRingUsedElem ring[0];
} VRingUsed;

typedef struct VRing
{
    unsigned int num;
    unsigned int align;
    hwaddr desc;
    hwaddr avail;
    hwaddr used;
} VRing;

struct VirtQueue
{
    VRing vring;
    hwaddr pa;
    uint16_t last_avail_idx;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid or stale */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    uint16_t queue_index;

    int inuse;

    uint16_t vector;
    void (*handle_output)(VirtIODevice *vdev, VirtQueue *vq);
    VirtIODevice *vdev;
    EventNotifier guest_notifier;
    EventNotifier host_notifier;
};

/* virt queue functions */
static void virtqueue_init(VirtQueue *vq)
{
    hwaddr pa = vq->pa;

    vq->vring.desc = pa;
    vq->vring.avail = pa + vq->vring.num * sizeof(VRingDesc);
    vq->vring.used = vring_align(vq->vring.avail +
                                 offsetof(VRingAvail, ring[vq->vring.num]),
                                 vq->vring.align);
}
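
/*
 * Worked example of the legacy ("split") layout computed above, assuming a
 * 256-entry queue at guest physical address pa with 4096-byte alignment:
 *
 *     desc:  pa          (256 * sizeof(VRingDesc) = 4096 bytes)
 *     avail: pa + 4096   (offsetof(VRingAvail, ring[256]) = 4 + 256 * 2
 *                         = 516 bytes)
 *     used:  vring_align(pa + 4612, 4096) = pa + 8192
 *
 * The numbers are illustrative only; the guest picks pa and num.
 */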

static inline uint64_t vring_desc_addr(hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, addr);
    return ldq_phys(pa);
}

static inline uint32_t vring_desc_len(hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, len);
    return ldl_phys(pa);
}

static inline uint16_t vring_desc_flags(hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_desc_next(hwaddr desc_pa, int i)
{
    hwaddr pa;
    pa = desc_pa + sizeof(VRingDesc) * i + offsetof(VRingDesc, next);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, flags);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, idx);
    return lduw_phys(pa);
}

static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
    hwaddr pa;
    pa = vq->vring.avail + offsetof(VRingAvail, ring[i]);
    return lduw_phys(pa);
}

static inline uint16_t vring_used_event(VirtQueue *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}

static inline void vring_used_ring_id(VirtQueue *vq, int i, uint32_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].id);
    stl_phys(pa, val);
}

static inline void vring_used_ring_len(VirtQueue *vq, int i, uint32_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, ring[i].len);
    stl_phys(pa, val);
}

static uint16_t vring_used_idx(VirtQueue *vq)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    return lduw_phys(pa);
}

static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, idx);
    stw_phys(pa, val);
}

static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) | mask);
}

static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
    hwaddr pa;
    pa = vq->vring.used + offsetof(VRingUsed, flags);
    stw_phys(pa, lduw_phys(pa) & ~mask);
}

static inline void vring_avail_event(VirtQueue *vq, uint16_t val)
{
    hwaddr pa;
    if (!vq->notification) {
        return;
    }
    pa = vq->vring.used + offsetof(VRingUsed, ring[vq->vring.num]);
    stw_phys(pa, val);
}

void virtio_queue_set_notification(VirtQueue *vq, int enable)
{
    vq->notification = enable;
    if (vq->vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
        vring_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }
    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}

int virtio_queue_ready(VirtQueue *vq)
{
    return vq->vring.avail != 0;
}

int virtio_queue_empty(VirtQueue *vq)
{
    return vring_avail_idx(vq) == vq->last_avail_idx;
}
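
/*
 * Sketch of how a device typically combines the helpers above with
 * virtio_queue_set_notification() to batch guest kicks (illustrative only;
 * process_request() is a hypothetical per-device handler):
 *
 *     VirtQueueElement elem;
 *     do {
 *         virtio_queue_set_notification(vq, 0);
 *         while (virtqueue_pop(vq, &elem)) {
 *             process_request(&elem);
 *         }
 *         virtio_queue_set_notification(vq, 1);
 *     } while (!virtio_queue_empty(vq));
 */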

void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len, unsigned int idx)
{
    unsigned int offset;
    int i;

    trace_virtqueue_fill(vq, elem, len, idx);

    offset = 0;
    for (i = 0; i < elem->in_num; i++) {
        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);

        cpu_physical_memory_unmap(elem->in_sg[i].iov_base,
                                  elem->in_sg[i].iov_len,
                                  1, size);

        offset += size;
    }

    for (i = 0; i < elem->out_num; i++) {
        cpu_physical_memory_unmap(elem->out_sg[i].iov_base,
                                  elem->out_sg[i].iov_len,
                                  0, elem->out_sg[i].iov_len);
    }

    idx = (idx + vring_used_idx(vq)) % vq->vring.num;

    /* Fill in the next entry in the used ring. */
    vring_used_ring_id(vq, idx, elem->index);
    vring_used_ring_len(vq, idx, len);
}

void virtqueue_flush(VirtQueue *vq, unsigned int count)
{
    uint16_t old, new;

    /* Make sure buffer is written before we update index. */
    smp_wmb();
    trace_virtqueue_flush(vq, count);
    old = vring_used_idx(vq);
    new = old + count;
    vring_used_idx_set(vq, new);
    vq->inuse -= count;
    /* If the used index wrapped past signalled_used, force re-signalling. */
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
        vq->signalled_used_valid = false;
    }
}

void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
                    unsigned int len)
{
    virtqueue_fill(vq, elem, len, 0);
    virtqueue_flush(vq, 1);
}

static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        error_report("Guest moved used index from %u to %u",
                     idx, vring_avail_idx(vq));
        exit(1);
    }
    /* On success, callers read a descriptor at vq->last_avail_idx.
     * Make sure descriptor read does not bypass avail index read. */
    if (num_heads) {
        smp_rmb();
    }

    return num_heads;
}

static unsigned int virtqueue_get_head(VirtQueue *vq, unsigned int idx)
{
    unsigned int head;

    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (head >= vq->vring.num) {
        error_report("Guest says index %u is available", head);
        exit(1);
    }

    return head;
}

static unsigned virtqueue_next_desc(hwaddr desc_pa,
                                    unsigned int i, unsigned int max)
{
    unsigned int next;

    /* If this descriptor says it doesn't chain, we're done. */
    if (!(vring_desc_flags(desc_pa, i) & VRING_DESC_F_NEXT)) {
        return max;
    }

    /* Check they're not leading us off the end of the descriptors. */
    next = vring_desc_next(desc_pa, i);
    /* Make sure the compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (next >= max) {
        error_report("Desc next is %u", next);
        exit(1);
    }

    return next;
}

void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
                               unsigned int *out_bytes,
                               unsigned max_in_bytes, unsigned max_out_bytes)
{
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    while (virtqueue_num_heads(vq, idx)) {
        unsigned int max, num_bufs, indirect = 0;
        hwaddr desc_pa;
        int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        i = virtqueue_get_head(vq, idx++);
        desc_pa = vq->vring.desc;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
            if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
                error_report("Invalid size for indirect buffer table");
                exit(1);
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                error_report("Looped descriptor");
                exit(1);
            }

            /* Loop over the indirect descriptor table. */
            indirect = 1;
            max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
            desc_pa = vring_desc_addr(desc_pa, i);
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                error_report("Looped descriptor");
                exit(1);
            }

            if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
                in_total += vring_desc_len(desc_pa, i);
            } else {
                out_total += vring_desc_len(desc_pa, i);
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }
        } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

        if (!indirect) {
            total_bufs = num_bufs;
        } else {
            total_bufs++;
        }
    }
done:
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
}

int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
                          unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
    return in_bytes <= in_total && out_bytes <= out_total;
}

void virtqueue_map_sg(struct iovec *sg, hwaddr *addr,
                      size_t num_sg, int is_write)
{
    unsigned int i;
    hwaddr len;

    if (num_sg > VIRTQUEUE_MAX_SIZE) {
        error_report("virtio: map attempt out of bounds: %zd > %d",
                     num_sg, VIRTQUEUE_MAX_SIZE);
        exit(1);
    }

    /* is_write selects the mapping direction: nonzero maps guest memory the
     * device will write into (in_sg), zero maps device-readable out_sg. */
    for (i = 0; i < num_sg; i++) {
        len = sg[i].iov_len;
        sg[i].iov_base = cpu_physical_memory_map(addr[i], &len, is_write);
        if (sg[i].iov_base == NULL || len != sg[i].iov_len) {
            error_report("virtio: trying to map MMIO memory");
            exit(1);
        }
    }
}

int virtqueue_pop(VirtQueue *vq, VirtQueueElement *elem)
{
    unsigned int i, head, max;
    hwaddr desc_pa = vq->vring.desc;

    if (!virtqueue_num_heads(vq, vq->last_avail_idx)) {
        return 0;
    }

    /* When we start there are none of either input nor output. */
    elem->out_num = elem->in_num = 0;

    max = vq->vring.num;

    i = head = virtqueue_get_head(vq, vq->last_avail_idx++);
    if (vq->vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX)) {
        vring_avail_event(vq, vring_avail_idx(vq));
    }

    if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_INDIRECT) {
        if (vring_desc_len(desc_pa, i) % sizeof(VRingDesc)) {
            error_report("Invalid size for indirect buffer table");
            exit(1);
        }

        /* Loop over the indirect descriptor table. */
        max = vring_desc_len(desc_pa, i) / sizeof(VRingDesc);
        desc_pa = vring_desc_addr(desc_pa, i);
        i = 0;
    }

    /* Collect all the descriptors. */
    do {
        struct iovec *sg;

        if (vring_desc_flags(desc_pa, i) & VRING_DESC_F_WRITE) {
            if (elem->in_num >= ARRAY_SIZE(elem->in_sg)) {
                error_report("Too many write descriptors in indirect table");
                exit(1);
            }
            elem->in_addr[elem->in_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->in_sg[elem->in_num++];
        } else {
            if (elem->out_num >= ARRAY_SIZE(elem->out_sg)) {
                error_report("Too many read descriptors in indirect table");
                exit(1);
            }
            elem->out_addr[elem->out_num] = vring_desc_addr(desc_pa, i);
            sg = &elem->out_sg[elem->out_num++];
        }

        sg->iov_len = vring_desc_len(desc_pa, i);

        /* If we've got too many, that implies a descriptor loop. */
        if ((elem->in_num + elem->out_num) > max) {
            error_report("Looped descriptor");
            exit(1);
        }
    } while ((i = virtqueue_next_desc(desc_pa, i, max)) != max);

    /* Now map what we have. */
    virtqueue_map_sg(elem->in_sg, elem->in_addr, elem->in_num, 1);
    virtqueue_map_sg(elem->out_sg, elem->out_addr, elem->out_num, 0);

    elem->index = head;

    vq->inuse++;

    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
    return elem->in_num + elem->out_num;
}
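
/*
 * Lifecycle of one request, as seen from a device implementation (sketch;
 * "written" stands in for however many bytes the device produced):
 *
 *     VirtQueueElement elem;
 *     if (virtqueue_pop(vq, &elem)) {          // map guest buffers
 *         ... read elem.out_sg, fill elem.in_sg ...
 *         virtqueue_push(vq, &elem, written);  // unmap + used ring + idx
 *         virtio_notify(vdev, vq);             // interrupt if needed
 *     }
 */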

/* virtio device */
static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (k->notify) {
        k->notify(qbus->parent, vector);
    }
}

void virtio_update_irq(VirtIODevice *vdev)
{
    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}

void virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    trace_virtio_set_status(vdev, val);

    if (k->set_status) {
        k->set_status(vdev, val);
    }
    vdev->status = val;
}

void virtio_reset(void *opaque)
{
    VirtIODevice *vdev = opaque;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int i;

    virtio_set_status(vdev, 0);

    if (k->reset) {
        k->reset(vdev);
    }

    vdev->guest_features = 0;
    vdev->queue_sel = 0;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    virtio_notify_vector(vdev, vdev->config_vector);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vring.desc = 0;
        vdev->vq[i].vring.avail = 0;
        vdev->vq[i].vring.used = 0;
        vdev->vq[i].last_avail_idx = 0;
        vdev->vq[i].pa = 0;
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].signalled_used = 0;
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;
    }
}

uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldub_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = lduw_p(vdev->config + addr);
    return val;
}

uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val;

    if (addr + sizeof(val) > vdev->config_len) {
        return (uint32_t)-1;
    }

    k->get_config(vdev, vdev->config);

    val = ldl_p(vdev->config + addr);
    return val;
}

void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint8_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stb_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint16_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stw_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
{
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t val = data;

    if (addr + sizeof(val) > vdev->config_len) {
        return;
    }

    stl_p(vdev->config + addr, val);

    if (k->set_config) {
        k->set_config(vdev, vdev->config);
    }
}

void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
    vdev->vq[n].pa = addr;
    virtqueue_init(&vdev->vq[n]);
}

hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].pa;
}

void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
{
    /* Don't allow the guest to flip a queue between existent and
     * nonexistent states, or to set it to an invalid size.
     */
    if (!!num != !!vdev->vq[n].vring.num ||
        num > VIRTQUEUE_MAX_SIZE ||
        num < 0) {
        return;
    }
    vdev->vq[n].vring.num = num;
    virtqueue_init(&vdev->vq[n]);
}

int virtio_queue_get_num(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.num;
}

int virtio_queue_get_id(VirtQueue *vq)
{
    VirtIODevice *vdev = vq->vdev;
    assert(vq >= &vdev->vq[0] && vq < &vdev->vq[VIRTIO_PCI_QUEUE_MAX]);
    return vq - &vdev->vq[0];
}

void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    /* Check that the transport told us it was going to do this
     * (so a buggy transport will immediately assert rather than
     * silently failing to migrate this state).
     */
    assert(k->has_variable_vring_alignment);

    vdev->vq[n].vring.align = align;
    virtqueue_init(&vdev->vq[n]);
}

void virtio_queue_notify_vq(VirtQueue *vq)
{
    if (vq->vring.desc) {
        VirtIODevice *vdev = vq->vdev;
        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
        vq->handle_output(vdev, vq);
    }
}

void virtio_queue_notify(VirtIODevice *vdev, int n)
{
    virtio_queue_notify_vq(&vdev->vq[n]);
}

uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
{
    return n < VIRTIO_PCI_QUEUE_MAX ? vdev->vq[n].vector :
        VIRTIO_NO_VECTOR;
}

void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
{
    if (n < VIRTIO_PCI_QUEUE_MAX) {
        vdev->vq[n].vector = vector;
    }
}

VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
                            void (*handle_output)(VirtIODevice *, VirtQueue *))
{
    int i;

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
    }

    if (i == VIRTIO_PCI_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE) {
        abort();
    }

    vdev->vq[i].vring.num = queue_size;
    vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
    vdev->vq[i].handle_output = handle_output;

    return &vdev->vq[i];
}

void virtio_del_queue(VirtIODevice *vdev, int n)
{
    if (n < 0 || n >= VIRTIO_PCI_QUEUE_MAX) {
        abort();
    }

    vdev->vq[n].vring.num = 0;
}

void virtio_irq(VirtQueue *vq)
{
    trace_virtio_irq(vq);
    vq->vdev->isr |= 0x01;
    virtio_notify_vector(vq->vdev, vq->vector);
}

/* Assuming a given event_idx value from the other side, if we have just
 * incremented index from old to new, should we trigger an event?
 */
static inline int vring_need_event(uint16_t event, uint16_t new, uint16_t old)
{
    /* Note: Xen has similar logic for notification hold-off in
     * include/xen/interface/io/ring.h with req_event and req_prod
     * corresponding to event_idx + 1 and new respectively.
     * Note also that req_event and req_prod in Xen start at 1,
     * event indexes in virtio start at 0.
     */
    return (uint16_t)(new - event - 1) < (uint16_t)(new - old);
}
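
/*
 * Worked example of the check above: suppose the used index just moved
 * from old = 5 to new = 7.  If the guest set used_event = 6, then
 * (uint16_t)(7 - 6 - 1) = 0 < (uint16_t)(7 - 5) = 2, so we notify: the
 * guest asked to be woken once entry 6 was consumed.  With used_event = 8
 * the left side is 0xfffe, the comparison fails, and the interrupt is
 * suppressed.  The uint16_t casts keep the test correct across index
 * wrap-around.
 */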

static bool vring_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    uint16_t old, new;
    bool v;

    /* We need to expose used array entries before checking used event. */
    smp_mb();

    /* Always notify when queue is empty (when feature acknowledged). */
    if ((vdev->guest_features & (1 << VIRTIO_F_NOTIFY_ON_EMPTY)) &&
        !vq->inuse && vring_avail_idx(vq) == vq->last_avail_idx) {
        return true;
    }

    if (!(vdev->guest_features & (1 << VIRTIO_RING_F_EVENT_IDX))) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vring_used_idx(vq);
    return !v || vring_need_event(vring_used_event(vq), new, old);
}

void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
{
    if (!vring_notify(vdev, vq)) {
        return;
    }

    trace_virtio_notify(vdev, vq);
    vdev->isr |= 0x01;
    virtio_notify_vector(vdev, vq->vector);
}

void virtio_notify_config(VirtIODevice *vdev)
{
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    vdev->isr |= 0x03;
    virtio_notify_vector(vdev, vdev->config_vector);
}

void virtio_save(VirtIODevice *vdev, QEMUFile *f)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    int i;

    if (k->save_config) {
        k->save_config(qbus->parent, f);
    }

    qemu_put_8s(f, &vdev->status);
    qemu_put_8s(f, &vdev->isr);
    qemu_put_be16s(f, &vdev->queue_sel);
    qemu_put_be32s(f, &vdev->guest_features);
    qemu_put_be32(f, vdev->config_len);
    qemu_put_buffer(f, vdev->config, vdev->config_len);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }
    }

    qemu_put_be32(f, i);

    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        if (vdev->vq[i].vring.num == 0) {
            break;
        }

        qemu_put_be32(f, vdev->vq[i].vring.num);
        if (k->has_variable_vring_alignment) {
            qemu_put_be32(f, vdev->vq[i].vring.align);
        }
        qemu_put_be64(f, vdev->vq[i].pa);
        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
        if (k->save_queue) {
            k->save_queue(qbus->parent, i, f);
        }
    }
}

int virtio_set_features(VirtIODevice *vdev, uint32_t val)
{
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *vbusk = VIRTIO_BUS_GET_CLASS(qbus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    uint32_t supported_features = vbusk->get_features(qbus->parent);
    bool bad = (val & ~supported_features) != 0;

    val &= supported_features;
    if (k->set_features) {
        k->set_features(vdev, val);
    }
    vdev->guest_features = val;
    return bad ? -1 : 0;
}

int virtio_load(VirtIODevice *vdev, QEMUFile *f)
{
    int i, ret;
    int32_t config_len;
    uint32_t num;
    uint32_t features;
    uint32_t supported_features;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);

    if (k->load_config) {
        ret = k->load_config(qbus->parent, f);
        if (ret) {
            return ret;
        }
    }

    qemu_get_8s(f, &vdev->status);
    qemu_get_8s(f, &vdev->isr);
    qemu_get_be16s(f, &vdev->queue_sel);
    if (vdev->queue_sel >= VIRTIO_PCI_QUEUE_MAX) {
        return -1;
    }
    qemu_get_be32s(f, &features);

    if (virtio_set_features(vdev, features) < 0) {
        supported_features = k->get_features(qbus->parent);
        error_report("Features 0x%x unsupported. Allowed features: 0x%x",
                     features, supported_features);
        return -1;
    }
    config_len = qemu_get_be32(f);

    /*
     * The incoming config can be bigger or smaller than what we have;
     * load what we have space for, and skip any excess in the stream.
     */
    qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));

    while (config_len > vdev->config_len) {
        qemu_get_byte(f);
        config_len--;
    }

    num = qemu_get_be32(f);

    if (num > VIRTIO_PCI_QUEUE_MAX) {
        error_report("Invalid number of PCI queues: 0x%x", num);
        return -1;
    }

    for (i = 0; i < num; i++) {
        vdev->vq[i].vring.num = qemu_get_be32(f);
        if (k->has_variable_vring_alignment) {
            vdev->vq[i].vring.align = qemu_get_be32(f);
        }
        vdev->vq[i].pa = qemu_get_be64(f);
        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
        vdev->vq[i].signalled_used_valid = false;
        vdev->vq[i].notification = true;

        if (vdev->vq[i].pa) {
            uint16_t nheads;
            virtqueue_init(&vdev->vq[i]);
            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
            /* Check it isn't doing strange things with descriptor numbers. */
            if (nheads > vdev->vq[i].vring.num) {
                error_report("VQ %d size 0x%x Guest index 0x%x "
                             "inconsistent with Host index 0x%x: delta 0x%x",
                             i, vdev->vq[i].vring.num,
                             vring_avail_idx(&vdev->vq[i]),
                             vdev->vq[i].last_avail_idx, nheads);
                return -1;
            }
        } else if (vdev->vq[i].last_avail_idx) {
            error_report("VQ %d address 0x0 "
                         "inconsistent with Host index 0x%x",
                         i, vdev->vq[i].last_avail_idx);
            return -1;
        }
        if (k->load_queue) {
            ret = k->load_queue(qbus->parent, i, f);
            if (ret) {
                return ret;
            }
        }
    }

    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
    return 0;
}

void virtio_cleanup(VirtIODevice *vdev)
{
    qemu_del_vm_change_state_handler(vdev->vmstate);
    g_free(vdev->config);
    g_free(vdev->vq);
}

static void virtio_vmstate_change(void *opaque, int running, RunState state)
{
    VirtIODevice *vdev = opaque;
    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
    bool backend_run = running && (vdev->status & VIRTIO_CONFIG_S_DRIVER_OK);
    vdev->vm_running = running;

    if (backend_run) {
        virtio_set_status(vdev, vdev->status);
    }

    if (k->vmstate_change) {
        k->vmstate_change(qbus->parent, backend_run);
    }

    if (!backend_run) {
        virtio_set_status(vdev, vdev->status);
    }
}

void virtio_init(VirtIODevice *vdev, const char *name,
                 uint16_t device_id, size_t config_size)
{
    int i;
    vdev->device_id = device_id;
    vdev->status = 0;
    vdev->isr = 0;
    vdev->queue_sel = 0;
    vdev->config_vector = VIRTIO_NO_VECTOR;
    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_PCI_QUEUE_MAX);
    vdev->vm_running = runstate_is_running();
    for (i = 0; i < VIRTIO_PCI_QUEUE_MAX; i++) {
        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
        vdev->vq[i].vdev = vdev;
        vdev->vq[i].queue_index = i;
    }

    vdev->name = name;
    vdev->config_len = config_size;
    if (vdev->config_len) {
        vdev->config = g_malloc0(config_size);
    } else {
        vdev->config = NULL;
    }
    vdev->vmstate = qemu_add_vm_change_state_handler(virtio_vmstate_change,
                                                     vdev);
}
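
/*
 * Typical realize-time setup in a device backend (sketch; VIRTIO_ID_FOO,
 * struct foo_config and foo_handle_output are hypothetical stand-ins):
 *
 *     virtio_init(vdev, "virtio-foo", VIRTIO_ID_FOO,
 *                 sizeof(struct foo_config));
 *     vq = virtio_add_queue(vdev, 128, foo_handle_output);
 */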

hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.avail;
}

hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used;
}

hwaddr virtio_queue_get_ring_addr(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.desc;
}

hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
{
    /* Avail ring entries are 16-bit descriptor indexes, not 64-bit. */
    return offsetof(VRingAvail, ring) +
        sizeof(uint16_t) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
{
    return offsetof(VRingUsed, ring) +
        sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}

hwaddr virtio_queue_get_ring_size(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].vring.used - vdev->vq[n].vring.desc +
        virtio_queue_get_used_size(vdev, n);
}
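
/*
 * Example sizes for a 256-entry queue, from the getters above:
 *     desc:  256 * 16 = 4096 bytes
 *     avail: 4 + 256 * 2 = 516 bytes (used_event, if negotiated, follows)
 *     used:  4 + 256 * 8 = 2052 bytes
 */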

uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
    return vdev->vq[n].last_avail_idx;
}

void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx)
{
    vdev->vq[n].last_avail_idx = idx;
}

void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
{
    vdev->vq[n].signalled_used_valid = false;
}

VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
{
    return vdev->vq + n;
}

uint16_t virtio_get_queue_index(VirtQueue *vq)
{
    return vq->queue_index;
}

static void virtio_queue_guest_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_irq(vq);
    }
}

void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
                                                bool with_irqfd)
{
    if (assign && !with_irqfd) {
        event_notifier_set_handler(&vq->guest_notifier,
                                   virtio_queue_guest_notifier_read);
    } else {
        event_notifier_set_handler(&vq->guest_notifier, NULL);
    }
    if (!assign) {
        /* Test and clear notifier before closing it, in case the poll
         * callback didn't find any pending events. */
        virtio_queue_guest_notifier_read(&vq->guest_notifier);
    }
}

EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
    return &vq->guest_notifier;
}

static void virtio_queue_host_notifier_read(EventNotifier *n)
{
    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
    if (event_notifier_test_and_clear(n)) {
        virtio_queue_notify_vq(vq);
    }
}

void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
                                               bool set_handler)
{
    if (assign && set_handler) {
        event_notifier_set_handler(&vq->host_notifier,
                                   virtio_queue_host_notifier_read);
    } else {
        event_notifier_set_handler(&vq->host_notifier, NULL);
    }
    if (!assign) {
        /* Test and clear notifier after disabling the event, in case the
         * poll callback didn't find any pending events. */
        virtio_queue_host_notifier_read(&vq->host_notifier);
    }
}

EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
    return &vq->host_notifier;
}

void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
{
    if (vdev->bus_name) {
        g_free(vdev->bus_name);
        vdev->bus_name = NULL;
    }

    if (bus_name) {
        vdev->bus_name = g_strdup(bus_name);
    }
}

static int virtio_device_init(DeviceState *qdev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(qdev);
    assert(k->init != NULL);
    if (k->init(vdev) < 0) {
        return -1;
    }
    virtio_bus_device_plugged(vdev);
    return 0;
}

static int virtio_device_exit(DeviceState *qdev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(qdev);

    virtio_bus_device_unplugged(vdev);
    if (k->exit) {
        k->exit(vdev);
    }
    if (vdev->bus_name) {
        g_free(vdev->bus_name);
        vdev->bus_name = NULL;
    }
    return 0;
}

static void virtio_device_class_init(ObjectClass *klass, void *data)
{
    /* Set the default values here. */
    DeviceClass *dc = DEVICE_CLASS(klass);
    dc->init = virtio_device_init;
    dc->exit = virtio_device_exit;
    dc->bus_type = TYPE_VIRTIO_BUS;
}

static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(VirtIODevice),
    .class_init = virtio_device_class_init,
    .abstract = true,
    .class_size = sizeof(VirtioDeviceClass),
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)