/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

16#include "qemu/osdep.h"
17#include "qapi/error.h"
18#include "hw/virtio/vhost.h"
19#include "qemu/atomic.h"
20#include "qemu/range.h"
21#include "qemu/error-report.h"
22#include "qemu/memfd.h"
23#include "qemu/log.h"
24#include "standard-headers/linux/vhost_types.h"
25#include "hw/virtio/virtio-bus.h"
26#include "migration/blocker.h"
27#include "migration/qemu-file-types.h"
28#include "sysemu/dma.h"
29#include "trace.h"

/* enabled until disconnected backend stabilizes */
32#define _VHOST_DEBUG 1
33
34#ifdef _VHOST_DEBUG
35#define VHOST_OPS_DEBUG(retval, fmt, ...) \
36 do { \
37 error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
38 strerror(-retval), -retval); \
39 } while (0)
40#else
41#define VHOST_OPS_DEBUG(retval, fmt, ...) \
42 do { } while (0)
43#endif
44
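/*
 * A single dirty log can be shared by every vhost device: vhost_log is the
 * plain malloc'ed variant, vhost_log_shm is the memfd-backed variant used
 * by backends that need to mmap the log themselves (e.g. vhost-user).
 * Both are reference counted through vhost_log_get()/vhost_log_put().
 */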
45static struct vhost_log *vhost_log;
46static struct vhost_log *vhost_log_shm;
47
48static unsigned int used_memslots;
49static QLIST_HEAD(, vhost_dev) vhost_devices =
50 QLIST_HEAD_INITIALIZER(vhost_devices);
51
52bool vhost_has_free_slot(void)
53{
54 unsigned int slots_limit = ~0U;
55 struct vhost_dev *hdev;
56
57 QLIST_FOREACH(hdev, &vhost_devices, entry) {
58 unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
59 slots_limit = MIN(slots_limit, r);
60 }
61 return slots_limit > used_memslots;
62}
63
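/*
 * Sync one memory section against the dirty log: walk the log chunks that
 * cover the intersection of the section range [mfirst, mlast] and the
 * requested range [rfirst, rlast], atomically fetch-and-clear each chunk,
 * and mark one VHOST_LOG_PAGE sized page dirty for every bit that was set.
 */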
64static void vhost_dev_sync_region(struct vhost_dev *dev,
65 MemoryRegionSection *section,
66 uint64_t mfirst, uint64_t mlast,
67 uint64_t rfirst, uint64_t rlast)
68{
69 vhost_log_chunk_t *log = dev->log->log;
70
71 uint64_t start = MAX(mfirst, rfirst);
72 uint64_t end = MIN(mlast, rlast);
73 vhost_log_chunk_t *from = log + start / VHOST_LOG_CHUNK;
74 vhost_log_chunk_t *to = log + end / VHOST_LOG_CHUNK + 1;
75 uint64_t addr = QEMU_ALIGN_DOWN(start, VHOST_LOG_CHUNK);
76
77 if (end < start) {
78 return;
79 }
80 assert(end / VHOST_LOG_CHUNK < dev->log_size);
81 assert(start / VHOST_LOG_CHUNK < dev->log_size);
82
83 for (;from < to; ++from) {
84 vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
87 if (!*from) {
88 addr += VHOST_LOG_CHUNK;
89 continue;
90 }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
93 log = qatomic_xchg(from, 0);
94 while (log) {
95 int bit = ctzl(log);
96 hwaddr page_addr;
97 hwaddr section_offset;
98 hwaddr mr_offset;
99 page_addr = addr + bit * VHOST_LOG_PAGE;
100 section_offset = page_addr - section->offset_within_address_space;
101 mr_offset = section_offset + section->offset_within_region;
102 memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
103 log &= ~(0x1ull << bit);
104 }
105 addr += VHOST_LOG_CHUNK;
106 }
107}
108
109bool vhost_dev_has_iommu(struct vhost_dev *dev)
110{
111 VirtIODevice *vdev = dev->vdev;

    /*
     * For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend supports the
     * incremental memory mapping API via IOTLB. On platforms without an
     * IOMMU there is no need to enable this feature, which may cause
     * unnecessary IOTLB miss/update transactions.
     */
119 if (vdev) {
120 return virtio_bus_device_iommu_enabled(vdev) &&
121 virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
122 } else {
123 return false;
124 }
125}
126
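/*
 * Sync the dirty bitmap for one section: first against every region of the
 * vhost memory table, then against each used ring, which the backend also
 * writes to. With an IOMMU the used ring address is an IOVA, so it is
 * translated one IOMMU page at a time before syncing.
 */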
127static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
128 MemoryRegionSection *section,
129 hwaddr first,
130 hwaddr last)
131{
132 int i;
133 hwaddr start_addr;
134 hwaddr end_addr;
135
136 if (!dev->log_enabled || !dev->started) {
137 return 0;
138 }
139 start_addr = section->offset_within_address_space;
140 end_addr = range_get_last(start_addr, int128_get64(section->size));
141 start_addr = MAX(first, start_addr);
142 end_addr = MIN(last, end_addr);
143
144 for (i = 0; i < dev->mem->nregions; ++i) {
145 struct vhost_memory_region *reg = dev->mem->regions + i;
146 vhost_dev_sync_region(dev, section, start_addr, end_addr,
147 reg->guest_phys_addr,
148 range_get_last(reg->guest_phys_addr,
149 reg->memory_size));
150 }
151 for (i = 0; i < dev->nvqs; ++i) {
152 struct vhost_virtqueue *vq = dev->vqs + i;
153
154 if (!vq->used_phys && !vq->used_size) {
155 continue;
156 }
157
158 if (vhost_dev_has_iommu(dev)) {
159 IOMMUTLBEntry iotlb;
160 hwaddr used_phys = vq->used_phys, used_size = vq->used_size;
161 hwaddr phys, s, offset;
162
163 while (used_size) {
164 rcu_read_lock();
165 iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
166 used_phys,
167 true,
168 MEMTXATTRS_UNSPECIFIED);
169 rcu_read_unlock();
170
171 if (!iotlb.target_as) {
172 qemu_log_mask(LOG_GUEST_ERROR, "translation "
173 "failure for used_iova %"PRIx64"\n",
174 used_phys);
175 return -EINVAL;
176 }
177
178 offset = used_phys & iotlb.addr_mask;
179 phys = iotlb.translated_addr + offset;

                /*
                 * Distance from the start of the used ring to the last
                 * byte of this IOMMU page.
                 */
                s = iotlb.addr_mask - offset;
                /*
                 * Size of the used ring, or of the part of it that fits
                 * in this IOMMU page. To avoid a zero result, do the
                 * addition outside of MIN().
                 */
191 s = MIN(s, used_size - 1) + 1;
192
193 vhost_dev_sync_region(dev, section, start_addr, end_addr, phys,
194 range_get_last(phys, s));
195 used_size -= s;
196 used_phys += s;
197 }
198 } else {
199 vhost_dev_sync_region(dev, section, start_addr,
200 end_addr, vq->used_phys,
201 range_get_last(vq->used_phys, vq->used_size));
202 }
203 }
204 return 0;
205}
206
207static void vhost_log_sync(MemoryListener *listener,
208 MemoryRegionSection *section)
209{
210 struct vhost_dev *dev = container_of(listener, struct vhost_dev,
211 memory_listener);
212 vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
213}
214
215static void vhost_log_sync_range(struct vhost_dev *dev,
216 hwaddr first, hwaddr last)
217{
218 int i;
219
220 for (i = 0; i < dev->n_mem_sections; ++i) {
221 MemoryRegionSection *section = &dev->mem_sections[i];
222 vhost_sync_dirty_bitmap(dev, section, first, last);
223 }
224}
225
226static uint64_t vhost_get_log_size(struct vhost_dev *dev)
227{
228 uint64_t log_size = 0;
229 int i;
230 for (i = 0; i < dev->mem->nregions; ++i) {
231 struct vhost_memory_region *reg = dev->mem->regions + i;
232 uint64_t last = range_get_last(reg->guest_phys_addr,
233 reg->memory_size);
234 log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
235 }
236 return log_size;
237}
238
239static int vhost_set_backend_type(struct vhost_dev *dev,
240 VhostBackendType backend_type)
241{
242 int r = 0;
243
244 switch (backend_type) {
245#ifdef CONFIG_VHOST_KERNEL
246 case VHOST_BACKEND_TYPE_KERNEL:
247 dev->vhost_ops = &kernel_ops;
248 break;
249#endif
250#ifdef CONFIG_VHOST_USER
251 case VHOST_BACKEND_TYPE_USER:
252 dev->vhost_ops = &user_ops;
253 break;
254#endif
255#ifdef CONFIG_VHOST_VDPA
256 case VHOST_BACKEND_TYPE_VDPA:
257 dev->vhost_ops = &vdpa_ops;
258 break;
259#endif
260 default:
261 error_report("Unknown vhost backend type");
262 r = -1;
263 }
264
265 return r;
266}
267
268static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
269{
270 Error *err = NULL;
271 struct vhost_log *log;
272 uint64_t logsize = size * sizeof(*(log->log));
273 int fd = -1;
274
275 log = g_new0(struct vhost_log, 1);
276 if (share) {
277 log->log = qemu_memfd_alloc("vhost-log", logsize,
278 F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
279 &fd, &err);
280 if (err) {
281 error_report_err(err);
282 g_free(log);
283 return NULL;
284 }
285 memset(log->log, 0, logsize);
286 } else {
287 log->log = g_malloc0(logsize);
288 }
289
290 log->size = size;
291 log->refcnt = 1;
292 log->fd = fd;
293
294 return log;
295}
296
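/*
 * Return the cached global log (shared or private, see above) if it already
 * has the requested size, bumping its reference count; otherwise allocate a
 * new one and make it the cached instance.
 */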
297static struct vhost_log *vhost_log_get(uint64_t size, bool share)
298{
299 struct vhost_log *log = share ? vhost_log_shm : vhost_log;
300
301 if (!log || log->size != size) {
302 log = vhost_log_alloc(size, share);
303 if (share) {
304 vhost_log_shm = log;
305 } else {
306 vhost_log = log;
307 }
308 } else {
309 ++log->refcnt;
310 }
311
312 return log;
313}
314
315static void vhost_log_put(struct vhost_dev *dev, bool sync)
316{
317 struct vhost_log *log = dev->log;
318
319 if (!log) {
320 return;
321 }
322
323 --log->refcnt;
324 if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
326 if (dev->log_size && sync) {
327 vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
328 }
329
330 if (vhost_log == log) {
331 g_free(log->log);
332 vhost_log = NULL;
333 } else if (vhost_log_shm == log) {
334 qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
335 log->fd);
336 vhost_log_shm = NULL;
337 }
338
339 g_free(log);
340 }
341
342 dev->log = NULL;
343 dev->log_size = 0;
344}
345
346static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
347{
348 return dev->vhost_ops->vhost_requires_shm_log &&
349 dev->vhost_ops->vhost_requires_shm_log(dev);
350}
351
352static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
353{
354 struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
355 uint64_t log_base = (uintptr_t)log->log;
356 int r;
357
    /* inform backend of log switching, this must be done before
       releasing the current log, to ensure no logging is lost */
360 r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
361 if (r < 0) {
362 VHOST_OPS_DEBUG(r, "vhost_set_log_base failed");
363 }
364
365 vhost_log_put(dev, true);
366 dev->log = log;
367 dev->log_size = size;
368}
369
370static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
371 hwaddr *plen, bool is_write)
372{
373 if (!vhost_dev_has_iommu(dev)) {
374 return cpu_physical_memory_map(addr, plen, is_write);
375 } else {
376 return (void *)(uintptr_t)addr;
377 }
378}
379
380static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
381 hwaddr len, int is_write,
382 hwaddr access_len)
383{
384 if (!vhost_dev_has_iommu(dev)) {
385 cpu_physical_memory_unmap(buffer, len, is_write, access_len);
386 }
387}
388
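/*
 * Check that one vring part (descriptor table, avail or used ring) that
 * falls inside the given region is still mapped at the host address
 * recorded at setup time: returns 0 if it lies outside the region or is
 * unchanged, -ENOMEM if it no longer fits the region, -EBUSY if it moved.
 */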
389static int vhost_verify_ring_part_mapping(void *ring_hva,
390 uint64_t ring_gpa,
391 uint64_t ring_size,
392 void *reg_hva,
393 uint64_t reg_gpa,
394 uint64_t reg_size)
395{
396 uint64_t hva_ring_offset;
397 uint64_t ring_last = range_get_last(ring_gpa, ring_size);
398 uint64_t reg_last = range_get_last(reg_gpa, reg_size);
399
400 if (ring_last < reg_gpa || ring_gpa > reg_last) {
401 return 0;
402 }
403
404 if (ring_last > reg_last) {
405 return -ENOMEM;
406 }
407
408 hva_ring_offset = ring_gpa - reg_gpa;
409 if (ring_hva != reg_hva + hva_ring_offset) {
410 return -EBUSY;
411 }
412
413 return 0;
414}
415
416static int vhost_verify_ring_mappings(struct vhost_dev *dev,
417 void *reg_hva,
418 uint64_t reg_gpa,
419 uint64_t reg_size)
420{
421 int i, j;
422 int r = 0;
423 const char *part_name[] = {
424 "descriptor table",
425 "available ring",
426 "used ring"
427 };
428
429 if (vhost_dev_has_iommu(dev)) {
430 return 0;
431 }
432
433 for (i = 0; i < dev->nvqs; ++i) {
434 struct vhost_virtqueue *vq = dev->vqs + i;
435
436 if (vq->desc_phys == 0) {
437 continue;
438 }
439
440 j = 0;
441 r = vhost_verify_ring_part_mapping(
442 vq->desc, vq->desc_phys, vq->desc_size,
443 reg_hva, reg_gpa, reg_size);
444 if (r) {
445 break;
446 }
447
448 j++;
449 r = vhost_verify_ring_part_mapping(
450 vq->avail, vq->avail_phys, vq->avail_size,
451 reg_hva, reg_gpa, reg_size);
452 if (r) {
453 break;
454 }
455
456 j++;
457 r = vhost_verify_ring_part_mapping(
458 vq->used, vq->used_phys, vq->used_size,
459 reg_hva, reg_gpa, reg_size);
460 if (r) {
461 break;
462 }
463 }
464
465 if (r == -ENOMEM) {
466 error_report("Unable to map %s for ring %d", part_name[j], i);
467 } else if (r == -EBUSY) {
468 error_report("%s relocated for ring %d", part_name[j], i);
469 }
470 return r;
471}
472
/*
 * vhost_section: identify sections needed for vhost access
 *
 * We only care about RAM sections here (where virtqueue and guest
 * internals accessed by virtio might live). If we find one we still
 * allow the backend to potentially filter it out of our list.
 */
480static bool vhost_section(struct vhost_dev *dev, MemoryRegionSection *section)
481{
482 MemoryRegion *mr = section->mr;
483
484 if (memory_region_is_ram(mr) && !memory_region_is_rom(mr)) {
485 uint8_t dirty_mask = memory_region_get_dirty_log_mask(mr);
486 uint8_t handled_dirty;

        /*
         * Kernel based vhost doesn't handle any block which is doing
         * dirty-tracking other than migration for which it has
         * specific logging support. However for TCG the kernel never
         * gets involved anyway so we can also ignore its
         * self-modifying code detection flags. However a vhost-user
         * client could still confuse a TCG guest if it re-writes
         * executable memory that has already been translated.
         */
497 handled_dirty = (1 << DIRTY_MEMORY_MIGRATION) |
498 (1 << DIRTY_MEMORY_CODE);
499
500 if (dirty_mask & ~handled_dirty) {
501 trace_vhost_reject_section(mr->name, 1);
502 return false;
503 }
504
505 if (dev->vhost_ops->vhost_backend_mem_section_filter &&
506 !dev->vhost_ops->vhost_backend_mem_section_filter(dev, section)) {
507 trace_vhost_reject_section(mr->name, 2);
508 return false;
509 }
510
511 trace_vhost_section(mr->name);
512 return true;
513 } else {
514 trace_vhost_reject_section(mr->name, 3);
515 return false;
516 }
517}
518
519static void vhost_begin(MemoryListener *listener)
520{
521 struct vhost_dev *dev = container_of(listener, struct vhost_dev,
522 memory_listener);
523 dev->tmp_sections = NULL;
524 dev->n_tmp_sections = 0;
525}
526
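/*
 * Commit the section list accumulated by vhost_region_addnop() during this
 * memory transaction: if the layout changed, rebuild the vhost memory
 * table, re-verify the vring mappings, grow the dirty log if necessary and
 * push the new table to the backend before unreferencing the old sections.
 */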
527static void vhost_commit(MemoryListener *listener)
528{
529 struct vhost_dev *dev = container_of(listener, struct vhost_dev,
530 memory_listener);
531 MemoryRegionSection *old_sections;
532 int n_old_sections;
533 uint64_t log_size;
534 size_t regions_size;
535 int r;
536 int i;
537 bool changed = false;

    /* Note we can be called before the device is started, but then
     * starting the device calls set_mem_table, so we need to have a
     * built-in memory listener.
     */
543 old_sections = dev->mem_sections;
544 n_old_sections = dev->n_mem_sections;
545 dev->mem_sections = dev->tmp_sections;
546 dev->n_mem_sections = dev->n_tmp_sections;
547
548 if (dev->n_mem_sections != n_old_sections) {
549 changed = true;
550 } else {
        /* Same size, let's check the contents */
552 for (int i = 0; i < n_old_sections; i++) {
553 if (!MemoryRegionSection_eq(&old_sections[i],
554 &dev->mem_sections[i])) {
555 changed = true;
556 break;
557 }
558 }
559 }
560
561 trace_vhost_commit(dev->started, changed);
562 if (!changed) {
563 goto out;
564 }

    /* Rebuild the regions list from the new sections list */
567 regions_size = offsetof(struct vhost_memory, regions) +
568 dev->n_mem_sections * sizeof dev->mem->regions[0];
569 dev->mem = g_realloc(dev->mem, regions_size);
570 dev->mem->nregions = dev->n_mem_sections;
571 used_memslots = dev->mem->nregions;
572 for (i = 0; i < dev->n_mem_sections; i++) {
573 struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
574 struct MemoryRegionSection *mrs = dev->mem_sections + i;
575
576 cur_vmr->guest_phys_addr = mrs->offset_within_address_space;
577 cur_vmr->memory_size = int128_get64(mrs->size);
578 cur_vmr->userspace_addr =
579 (uintptr_t)memory_region_get_ram_ptr(mrs->mr) +
580 mrs->offset_within_region;
581 cur_vmr->flags_padding = 0;
582 }
583
584 if (!dev->started) {
585 goto out;
586 }
587
588 for (i = 0; i < dev->mem->nregions; i++) {
589 if (vhost_verify_ring_mappings(dev,
590 (void *)(uintptr_t)dev->mem->regions[i].userspace_addr,
591 dev->mem->regions[i].guest_phys_addr,
592 dev->mem->regions[i].memory_size)) {
593 error_report("Verify ring failure on region %d", i);
594 abort();
595 }
596 }
597
598 if (!dev->log_enabled) {
599 r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
600 if (r < 0) {
601 VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed");
602 }
603 goto out;
604 }
605 log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
608#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
609
610 if (dev->log_size < log_size) {
611 vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
612 }
613 r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
614 if (r < 0) {
615 VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed");
616 }
617
618 if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
619 vhost_dev_log_resize(dev, log_size);
620 }
621
622out:
    /* Deref the old list of sections, this must happen _after_ the
     * vhost_set_mem_table to ensure the client isn't still using the
     * section we're about to unref.
     */
627 while (n_old_sections--) {
628 memory_region_unref(old_sections[n_old_sections].mr);
629 }
630 g_free(old_sections);
631 return;
632}
633
/* Adds the section data to the tmp_section structure.
 * It relies on the listener calling us in memory address order
 * and for each region (via the _add and _nop methods) to add the
 * relevant data.
 */
639static void vhost_region_add_section(struct vhost_dev *dev,
640 MemoryRegionSection *section)
641{
642 bool need_add = true;
643 uint64_t mrs_size = int128_get64(section->size);
644 uint64_t mrs_gpa = section->offset_within_address_space;
645 uintptr_t mrs_host = (uintptr_t)memory_region_get_ram_ptr(section->mr) +
646 section->offset_within_region;
647 RAMBlock *mrs_rb = section->mr->ram_block;
648
649 trace_vhost_region_add_section(section->mr->name, mrs_gpa, mrs_size,
650 mrs_host);
651
652 if (dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER) {
        /* Round the section to its page size */
        /* First align the start down to a page boundary */
655 size_t mrs_page = qemu_ram_pagesize(mrs_rb);
656 uint64_t alignage = mrs_host & (mrs_page - 1);
657 if (alignage) {
658 mrs_host -= alignage;
659 mrs_size += alignage;
660 mrs_gpa -= alignage;
661 }
        /* Now align the size up to a page boundary */
663 alignage = mrs_size & (mrs_page - 1);
664 if (alignage) {
665 mrs_size += mrs_page - alignage;
666 }
667 trace_vhost_region_add_section_aligned(section->mr->name, mrs_gpa,
668 mrs_size, mrs_host);
669 }
670
671 if (dev->n_tmp_sections) {
        /* Since we already have at least one section, let's see if
         * this extends it; since we're scanning in order, we only
         * have to look at the last one, and the FlatView that calls
         * us shouldn't have overlaps.
         */
677 MemoryRegionSection *prev_sec = dev->tmp_sections +
678 (dev->n_tmp_sections - 1);
679 uint64_t prev_gpa_start = prev_sec->offset_within_address_space;
680 uint64_t prev_size = int128_get64(prev_sec->size);
681 uint64_t prev_gpa_end = range_get_last(prev_gpa_start, prev_size);
682 uint64_t prev_host_start =
683 (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr) +
684 prev_sec->offset_within_region;
685 uint64_t prev_host_end = range_get_last(prev_host_start, prev_size);
686
687 if (mrs_gpa <= (prev_gpa_end + 1)) {
            /* The new section starts inside or right after the previous
             * one; check whether the two can be merged into one
             * contiguous region.
             */
692 if (mrs_gpa < prev_gpa_start) {
693 error_report("%s:Section '%s' rounded to %"PRIx64
694 " prior to previous '%s' %"PRIx64,
695 __func__, section->mr->name, mrs_gpa,
696 prev_sec->mr->name, prev_gpa_start);
                /* A way to cleanly fail here would be better */
698 return;
699 }
700
701 size_t offset = mrs_gpa - prev_gpa_start;
702
703 if (prev_host_start + offset == mrs_host &&
704 section->mr == prev_sec->mr &&
705 (!dev->vhost_ops->vhost_backend_can_merge ||
706 dev->vhost_ops->vhost_backend_can_merge(dev,
707 mrs_host, mrs_size,
708 prev_host_start, prev_size))) {
709 uint64_t max_end = MAX(prev_host_end, mrs_host + mrs_size);
710 need_add = false;
711 prev_sec->offset_within_address_space =
712 MIN(prev_gpa_start, mrs_gpa);
713 prev_sec->offset_within_region =
714 MIN(prev_host_start, mrs_host) -
715 (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr);
716 prev_sec->size = int128_make64(max_end - MIN(prev_host_start,
717 mrs_host));
718 trace_vhost_region_add_section_merge(section->mr->name,
719 int128_get64(prev_sec->size),
720 prev_sec->offset_within_address_space,
721 prev_sec->offset_within_region);
722 } else {
                /* adjoining regions are fine, but overlapping ones with
                 * different blocks/offsets shouldn't happen
                 */
726 if (mrs_gpa != prev_gpa_end + 1) {
727 error_report("%s: Overlapping but not coherent sections "
728 "at %"PRIx64,
729 __func__, mrs_gpa);
730 return;
731 }
732 }
733 }
734 }
735
736 if (need_add) {
737 ++dev->n_tmp_sections;
738 dev->tmp_sections = g_renew(MemoryRegionSection, dev->tmp_sections,
739 dev->n_tmp_sections);
740 dev->tmp_sections[dev->n_tmp_sections - 1] = *section;
        /* The flatview isn't stable and we don't use it, making it NULL
         * means we can memcmp the list.
         */
744 dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
745 memory_region_ref(section->mr);
746 }
747}
748
/* Used for both add and nop callbacks */
750static void vhost_region_addnop(MemoryListener *listener,
751 MemoryRegionSection *section)
752{
753 struct vhost_dev *dev = container_of(listener, struct vhost_dev,
754 memory_listener);
755
756 if (!vhost_section(dev, section)) {
757 return;
758 }
759 vhost_region_add_section(dev, section);
760}
761
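/*
 * IOMMU notifier callback: forward (device-)IOTLB invalidations from the
 * guest IOMMU to the vhost backend so stale translations are dropped.
 */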
762static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
763{
764 struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
765 struct vhost_dev *hdev = iommu->hdev;
766 hwaddr iova = iotlb->iova + iommu->iommu_offset;
767
768 if (vhost_backend_invalidate_device_iotlb(hdev, iova,
769 iotlb->addr_mask + 1)) {
770 error_report("Fail to invalidate device iotlb");
771 }
772}
773
774static void vhost_iommu_region_add(MemoryListener *listener,
775 MemoryRegionSection *section)
776{
777 struct vhost_dev *dev = container_of(listener, struct vhost_dev,
778 iommu_listener);
779 struct vhost_iommu *iommu;
780 Int128 end;
781 int iommu_idx;
782 IOMMUMemoryRegion *iommu_mr;
783
784 if (!memory_region_is_iommu(section->mr)) {
785 return;
786 }
787
788 iommu_mr = IOMMU_MEMORY_REGION(section->mr);
789
790 iommu = g_malloc0(sizeof(*iommu));
791 end = int128_add(int128_make64(section->offset_within_region),
792 section->size);
793 end = int128_sub(end, int128_one());
794 iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
795 MEMTXATTRS_UNSPECIFIED);
796 iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
797 dev->vdev->device_iotlb_enabled ?
798 IOMMU_NOTIFIER_DEVIOTLB_UNMAP :
799 IOMMU_NOTIFIER_UNMAP,
800 section->offset_within_region,
801 int128_get64(end),
802 iommu_idx);
803 iommu->mr = section->mr;
804 iommu->iommu_offset = section->offset_within_address_space -
805 section->offset_within_region;
806 iommu->hdev = dev;
807 memory_region_register_iommu_notifier(section->mr, &iommu->n,
808 &error_fatal);
809 QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
    /* TODO: can replay help performance here? */
811}
812
813static void vhost_iommu_region_del(MemoryListener *listener,
814 MemoryRegionSection *section)
815{
816 struct vhost_dev *dev = container_of(listener, struct vhost_dev,
817 iommu_listener);
818 struct vhost_iommu *iommu;
819
820 if (!memory_region_is_iommu(section->mr)) {
821 return;
822 }
823
824 QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
825 if (iommu->mr == section->mr &&
826 iommu->n.start == section->offset_within_region) {
827 memory_region_unregister_iommu_notifier(iommu->mr,
828 &iommu->n);
829 QLIST_REMOVE(iommu, iommu_next);
830 g_free(iommu);
831 break;
832 }
833 }
834}
835
836void vhost_toggle_device_iotlb(VirtIODevice *vdev)
837{
838 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
839 struct vhost_dev *dev;
840 struct vhost_iommu *iommu;
841
842 if (vdev->vhost_started) {
843 dev = vdc->get_vhost(vdev);
844 } else {
845 return;
846 }
847
848 QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
849 memory_region_unregister_iommu_notifier(iommu->mr, &iommu->n);
850 iommu->n.notifier_flags = vdev->device_iotlb_enabled ?
851 IOMMU_NOTIFIER_DEVIOTLB_UNMAP : IOMMU_NOTIFIER_UNMAP;
852 memory_region_register_iommu_notifier(iommu->mr, &iommu->n,
853 &error_fatal);
854 }
855}
856
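/*
 * Program the backend with one virtqueue's ring addresses (host virtual
 * addresses unless the backend supplies its own translation via
 * vhost_vq_get_addr). log_guest_addr carries the guest physical address of
 * the used ring so writes to it can be logged when dirty logging is on.
 */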
857static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
858 struct vhost_virtqueue *vq,
859 unsigned idx, bool enable_log)
860{
861 struct vhost_vring_addr addr;
862 int r;
863 memset(&addr, 0, sizeof(struct vhost_vring_addr));
864
865 if (dev->vhost_ops->vhost_vq_get_addr) {
866 r = dev->vhost_ops->vhost_vq_get_addr(dev, &addr, vq);
867 if (r < 0) {
868 VHOST_OPS_DEBUG(r, "vhost_vq_get_addr failed");
869 return r;
870 }
871 } else {
872 addr.desc_user_addr = (uint64_t)(unsigned long)vq->desc;
873 addr.avail_user_addr = (uint64_t)(unsigned long)vq->avail;
874 addr.used_user_addr = (uint64_t)(unsigned long)vq->used;
875 }
876 addr.index = idx;
877 addr.log_guest_addr = vq->used_phys;
878 addr.flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0;
879 r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
880 if (r < 0) {
881 VHOST_OPS_DEBUG(r, "vhost_set_vring_addr failed");
882 }
883 return r;
884}
885
886static int vhost_dev_set_features(struct vhost_dev *dev,
887 bool enable_log)
888{
889 uint64_t features = dev->acked_features;
890 int r;
891 if (enable_log) {
892 features |= 0x1ULL << VHOST_F_LOG_ALL;
893 }
894 if (!vhost_dev_has_iommu(dev)) {
895 features &= ~(0x1ULL << VIRTIO_F_IOMMU_PLATFORM);
896 }
897 if (dev->vhost_ops->vhost_force_iommu) {
898 if (dev->vhost_ops->vhost_force_iommu(dev) == true) {
899 features |= 0x1ULL << VIRTIO_F_IOMMU_PLATFORM;
900 }
901 }
902 r = dev->vhost_ops->vhost_set_features(dev, features);
903 if (r < 0) {
904 VHOST_OPS_DEBUG(r, "vhost_set_features failed");
905 goto out;
906 }
907 if (dev->vhost_ops->vhost_set_backend_cap) {
908 r = dev->vhost_ops->vhost_set_backend_cap(dev);
909 if (r < 0) {
910 VHOST_OPS_DEBUG(r, "vhost_set_backend_cap failed");
911 goto out;
912 }
913 }
914
915out:
916 return r;
917}
918
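/*
 * Switch dirty logging on or off: renegotiate the feature set with or
 * without VHOST_F_LOG_ALL and reprogram every started virtqueue's ring
 * addresses with the matching log flag. On failure the virtqueues and
 * features are rolled back to the current dev->log_enabled setting.
 */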
919static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
920{
921 int r, i, idx;
922 hwaddr addr;
923
924 r = vhost_dev_set_features(dev, enable_log);
925 if (r < 0) {
926 goto err_features;
927 }
928 for (i = 0; i < dev->nvqs; ++i) {
929 idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
930 addr = virtio_queue_get_desc_addr(dev->vdev, idx);
931 if (!addr) {
            /*
             * The queue might not be ready for start. If this
             * is the case there is no reason to continue the process.
             * The similar logic is used by the vhost_virtqueue_start()
             * routine.
             */
938 continue;
939 }
940 r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
941 enable_log);
942 if (r < 0) {
943 goto err_vq;
944 }
945 }
946 return 0;
947err_vq:
948 for (; i >= 0; --i) {
949 idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
950 addr = virtio_queue_get_desc_addr(dev->vdev, idx);
951 if (!addr) {
952 continue;
953 }
954 vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
955 dev->log_enabled);
956 }
957 vhost_dev_set_features(dev, dev->log_enabled);
958err_features:
959 return r;
960}
961
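/*
 * Backing for the log_global_start/stop listener callbacks: enable or
 * disable dirty logging for migration. When enabling, the log is sized for
 * the current memory layout before the virtqueues are switched over; when
 * disabling, the log is released without a final sync.
 */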
962static int vhost_migration_log(MemoryListener *listener, bool enable)
963{
964 struct vhost_dev *dev = container_of(listener, struct vhost_dev,
965 memory_listener);
966 int r;
967 if (enable == dev->log_enabled) {
968 return 0;
969 }
970 if (!dev->started) {
971 dev->log_enabled = enable;
972 return 0;
973 }
974
975 r = 0;
976 if (!enable) {
977 r = vhost_dev_set_log(dev, false);
978 if (r < 0) {
979 goto check_dev_state;
980 }
981 vhost_log_put(dev, false);
982 } else {
983 vhost_dev_log_resize(dev, vhost_get_log_size(dev));
984 r = vhost_dev_set_log(dev, true);
985 if (r < 0) {
986 goto check_dev_state;
987 }
988 }
989
990check_dev_state:
991 dev->log_enabled = enable;
    /*
     * vhost-user-* devices could change their state during log
     * initialization due to disconnect. So check dev state after
     * vhost communication.
     */
    if (!dev->started) {
        /*
         * Since device is in the stopped state, it is okay for
         * migration. Return success.
         */
        r = 0;
    }
    if (r) {
        /* An error occurred. */
        dev->log_enabled = false;
1007 }
1008
1009 return r;
1010}
1011
1012static void vhost_log_global_start(MemoryListener *listener)
1013{
1014 int r;
1015
1016 r = vhost_migration_log(listener, true);
1017 if (r < 0) {
1018 abort();
1019 }
1020}
1021
1022static void vhost_log_global_stop(MemoryListener *listener)
1023{
1024 int r;
1025
1026 r = vhost_migration_log(listener, false);
1027 if (r < 0) {
1028 abort();
1029 }
1030}
1031
1032static void vhost_log_start(MemoryListener *listener,
1033 MemoryRegionSection *section,
1034 int old, int new)
1035{
    /* FIXME: implement */
1037}
1038
1039static void vhost_log_stop(MemoryListener *listener,
1040 MemoryRegionSection *section,
1041 int old, int new)
1042{
    /* FIXME: implement */
1044}
1045
/*
 * A modern (VIRTIO_F_VERSION_1) device is always little-endian. For a
 * legacy device the vring layout follows the guest's endianness, so the
 * backend has to be told explicitly whenever that differs from the host.
 */
1051static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
1052{
1053 if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
1054 return false;
1055 }
1056#if HOST_BIG_ENDIAN
1057 return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
1058#else
1059 return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
1060#endif
1061}
1062
1063static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
1064 bool is_big_endian,
1065 int vhost_vq_index)
1066{
1067 int r;
1068 struct vhost_vring_state s = {
1069 .index = vhost_vq_index,
1070 .num = is_big_endian
1071 };
1072
1073 r = dev->vhost_ops->vhost_set_vring_endian(dev, &s);
1074 if (r < 0) {
1075 VHOST_OPS_DEBUG(r, "vhost_set_vring_endian failed");
1076 }
1077 return r;
1078}
1079
1080static int vhost_memory_region_lookup(struct vhost_dev *hdev,
1081 uint64_t gpa, uint64_t *uaddr,
1082 uint64_t *len)
1083{
1084 int i;
1085
1086 for (i = 0; i < hdev->mem->nregions; i++) {
1087 struct vhost_memory_region *reg = hdev->mem->regions + i;
1088
1089 if (gpa >= reg->guest_phys_addr &&
1090 reg->guest_phys_addr + reg->memory_size > gpa) {
1091 *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
1092 *len = reg->guest_phys_addr + reg->memory_size - gpa;
1093 return 0;
1094 }
1095 }
1096
1097 return -EFAULT;
1098}
1099
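/*
 * Handle an IOTLB miss reported by the backend: translate the IOVA through
 * the virtio device's DMA address space, convert the result to a host
 * virtual address using the vhost memory table, and push the new entry to
 * the backend's device IOTLB.
 */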
1100int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
1101{
1102 IOMMUTLBEntry iotlb;
1103 uint64_t uaddr, len;
1104 int ret = -EFAULT;
1105
1106 RCU_READ_LOCK_GUARD();
1107
1108 trace_vhost_iotlb_miss(dev, 1);
1109
1110 iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
1111 iova, write,
1112 MEMTXATTRS_UNSPECIFIED);
1113 if (iotlb.target_as != NULL) {
1114 ret = vhost_memory_region_lookup(dev, iotlb.translated_addr,
1115 &uaddr, &len);
1116 if (ret) {
1117 trace_vhost_iotlb_miss(dev, 3);
1118 error_report("Fail to lookup the translated address "
1119 "%"PRIx64, iotlb.translated_addr);
1120 goto out;
1121 }
1122
1123 len = MIN(iotlb.addr_mask + 1, len);
1124 iova = iova & ~iotlb.addr_mask;
1125
1126 ret = vhost_backend_update_device_iotlb(dev, iova, uaddr,
1127 len, iotlb.perm);
1128 if (ret) {
1129 trace_vhost_iotlb_miss(dev, 4);
1130 error_report("Fail to update device iotlb");
1131 goto out;
1132 }
1133 }
1134
1135 trace_vhost_iotlb_miss(dev, 2);
1136
1137out:
1138 return ret;
1139}
1140
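/*
 * Start one virtqueue in the backend: program the ring size and base
 * index, fix up legacy cross-endian vrings, map the descriptor, avail and
 * used rings (host addresses, or guest IOVAs when an IOMMU is in use) and
 * hand the kick eventfd to the backend.
 */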
1141int vhost_virtqueue_start(struct vhost_dev *dev,
1142 struct VirtIODevice *vdev,
1143 struct vhost_virtqueue *vq,
1144 unsigned idx)
1145{
1146 BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1147 VirtioBusState *vbus = VIRTIO_BUS(qbus);
1148 VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
1149 hwaddr s, l, a;
1150 int r;
1151 int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
1152 struct vhost_vring_file file = {
1153 .index = vhost_vq_index
1154 };
1155 struct vhost_vring_state state = {
1156 .index = vhost_vq_index
1157 };
1158 struct VirtQueue *vvq = virtio_get_queue(vdev, idx);
1159
1160 a = virtio_queue_get_desc_addr(vdev, idx);
1161 if (a == 0) {
        /* Queue might not be ready for start */
1163 return 0;
1164 }
1165
1166 vq->num = state.num = virtio_queue_get_num(vdev, idx);
1167 r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
1168 if (r) {
1169 VHOST_OPS_DEBUG(r, "vhost_set_vring_num failed");
1170 return r;
1171 }
1172
1173 state.num = virtio_queue_get_last_avail_idx(vdev, idx);
1174 r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
1175 if (r) {
1176 VHOST_OPS_DEBUG(r, "vhost_set_vring_base failed");
1177 return r;
1178 }
1179
1180 if (vhost_needs_vring_endian(vdev)) {
1181 r = vhost_virtqueue_set_vring_endian_legacy(dev,
1182 virtio_is_big_endian(vdev),
1183 vhost_vq_index);
1184 if (r) {
1185 return r;
1186 }
1187 }
1188
1189 vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
1190 vq->desc_phys = a;
1191 vq->desc = vhost_memory_map(dev, a, &l, false);
1192 if (!vq->desc || l != s) {
1193 r = -ENOMEM;
1194 goto fail_alloc_desc;
1195 }
1196 vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
1197 vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
1198 vq->avail = vhost_memory_map(dev, a, &l, false);
1199 if (!vq->avail || l != s) {
1200 r = -ENOMEM;
1201 goto fail_alloc_avail;
1202 }
1203 vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
1204 vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
1205 vq->used = vhost_memory_map(dev, a, &l, true);
1206 if (!vq->used || l != s) {
1207 r = -ENOMEM;
1208 goto fail_alloc_used;
1209 }
1210
1211 r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
1212 if (r < 0) {
1213 goto fail_alloc;
1214 }
1215
1216 file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
1217 r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
1218 if (r) {
1219 VHOST_OPS_DEBUG(r, "vhost_set_vring_kick failed");
1220 goto fail_kick;
1221 }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    /*
     * If the guest never masks or unmasks notifiers itself, route call
     * events straight to the guest notifier by unmasking the queue now.
     */
    if (!vdev->use_guest_notifier_mask) {
        /* TODO: check and handle errors. */
1231 vhost_virtqueue_mask(dev, vdev, idx, false);
1232 }
1233
1234 if (k->query_guest_notifiers &&
1235 k->query_guest_notifiers(qbus->parent) &&
1236 virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
1237 file.fd = -1;
1238 r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
1239 if (r) {
1240 goto fail_vector;
1241 }
1242 }
1243
1244 return 0;
1245
1246fail_vector:
1247fail_kick:
1248fail_alloc:
1249 vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
1250 0, 0);
1251fail_alloc_used:
1252 vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
1253 0, 0);
1254fail_alloc_avail:
1255 vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
1256 0, 0);
1257fail_alloc_desc:
1258 return r;
1259}
1260
1261void vhost_virtqueue_stop(struct vhost_dev *dev,
1262 struct VirtIODevice *vdev,
1263 struct vhost_virtqueue *vq,
1264 unsigned idx)
1265{
1266 int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
1267 struct vhost_vring_state state = {
1268 .index = vhost_vq_index,
1269 };
1270 int r;
1271
1272 if (virtio_queue_get_desc_addr(vdev, idx) == 0) {
        /* Don't stop the virtqueue which might have not been started */
1274 return;
1275 }
1276
1277 r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
1278 if (r < 0) {
1279 VHOST_OPS_DEBUG(r, "vhost VQ %u ring restore failed: %d", idx, r);
        /* Connection to the backend is broken, so let's sync internal
         * last avail idx to the device used idx.
         */
1283 virtio_queue_restore_last_avail_idx(vdev, idx);
1284 } else {
1285 virtio_queue_set_last_avail_idx(vdev, idx, state.num);
1286 }
1287 virtio_queue_invalidate_signalled_used(vdev, idx);
1288 virtio_queue_update_used_idx(vdev, idx);
1289
    /* In the cross-endian case, we need to reset the vring endianness to
     * native, as legacy devices expect it by default.
     */
1293 if (vhost_needs_vring_endian(vdev)) {
1294 vhost_virtqueue_set_vring_endian_legacy(dev,
1295 !virtio_is_big_endian(vdev),
1296 vhost_vq_index);
1297 }
1298
1299 vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
1300 1, virtio_queue_get_used_size(vdev, idx));
1301 vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
1302 0, virtio_queue_get_avail_size(vdev, idx));
1303 vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
1304 0, virtio_queue_get_desc_size(vdev, idx));
1305}
1306
1307static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
1308 int n, uint32_t timeout)
1309{
1310 int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
1311 struct vhost_vring_state state = {
1312 .index = vhost_vq_index,
1313 .num = timeout,
1314 };
1315 int r;
1316
1317 if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
1318 return -EINVAL;
1319 }
1320
1321 r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
1322 if (r) {
1323 VHOST_OPS_DEBUG(r, "vhost_set_vring_busyloop_timeout failed");
1324 return r;
1325 }
1326
1327 return 0;
1328}
1329
1330static void vhost_virtqueue_error_notifier(EventNotifier *n)
1331{
1332 struct vhost_virtqueue *vq = container_of(n, struct vhost_virtqueue,
1333 error_notifier);
1334 struct vhost_dev *dev = vq->dev;
1335 int index = vq - dev->vqs;
1336
1337 if (event_notifier_test_and_clear(n) && dev->vdev) {
1338 VHOST_OPS_DEBUG(-EINVAL, "vhost vring error in virtqueue %d",
1339 dev->vq_index + index);
1340 }
1341}
1342
1343static int vhost_virtqueue_init(struct vhost_dev *dev,
1344 struct vhost_virtqueue *vq, int n)
1345{
1346 int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
1347 struct vhost_vring_file file = {
1348 .index = vhost_vq_index,
1349 };
1350 int r = event_notifier_init(&vq->masked_notifier, 0);
1351 if (r < 0) {
1352 return r;
1353 }
1354
1355 file.fd = event_notifier_get_wfd(&vq->masked_notifier);
1356 r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
1357 if (r) {
1358 VHOST_OPS_DEBUG(r, "vhost_set_vring_call failed");
1359 goto fail_call;
1360 }
1361
1362 vq->dev = dev;
1363
1364 if (dev->vhost_ops->vhost_set_vring_err) {
1365 r = event_notifier_init(&vq->error_notifier, 0);
1366 if (r < 0) {
1367 goto fail_call;
1368 }
1369
1370 file.fd = event_notifier_get_fd(&vq->error_notifier);
1371 r = dev->vhost_ops->vhost_set_vring_err(dev, &file);
1372 if (r) {
1373 VHOST_OPS_DEBUG(r, "vhost_set_vring_err failed");
1374 goto fail_err;
1375 }
1376
1377 event_notifier_set_handler(&vq->error_notifier,
1378 vhost_virtqueue_error_notifier);
1379 }
1380
1381 return 0;
1382
1383fail_err:
1384 event_notifier_cleanup(&vq->error_notifier);
1385fail_call:
1386 event_notifier_cleanup(&vq->masked_notifier);
1387 return r;
1388}
1389
1390static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
1391{
1392 event_notifier_cleanup(&vq->masked_notifier);
1393 if (vq->dev->vhost_ops->vhost_set_vring_err) {
1394 event_notifier_set_handler(&vq->error_notifier, NULL);
1395 event_notifier_cleanup(&vq->error_notifier);
1396 }
1397}
1398
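/*
 * Initialise a vhost device: select the backend ops for the requested
 * backend type, take ownership, query the supported features, set up the
 * per-virtqueue notifiers and register the memory listener. A migration
 * blocker is installed when dirty page logging cannot be provided.
 */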
1399int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
1400 VhostBackendType backend_type, uint32_t busyloop_timeout,
1401 Error **errp)
1402{
1403 uint64_t features;
1404 int i, r, n_initialized_vqs = 0;
1405
1406 hdev->vdev = NULL;
1407 hdev->migration_blocker = NULL;
1408
1409 r = vhost_set_backend_type(hdev, backend_type);
1410 assert(r >= 0);
1411
1412 r = hdev->vhost_ops->vhost_backend_init(hdev, opaque, errp);
1413 if (r < 0) {
1414 goto fail;
1415 }
1416
1417 r = hdev->vhost_ops->vhost_set_owner(hdev);
1418 if (r < 0) {
1419 error_setg_errno(errp, -r, "vhost_set_owner failed");
1420 goto fail;
1421 }
1422
1423 r = hdev->vhost_ops->vhost_get_features(hdev, &features);
1424 if (r < 0) {
1425 error_setg_errno(errp, -r, "vhost_get_features failed");
1426 goto fail;
1427 }
1428
1429 for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
1430 r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
1431 if (r < 0) {
1432 error_setg_errno(errp, -r, "Failed to initialize virtqueue %d", i);
1433 goto fail;
1434 }
1435 }
1436
1437 if (busyloop_timeout) {
1438 for (i = 0; i < hdev->nvqs; ++i) {
1439 r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
1440 busyloop_timeout);
1441 if (r < 0) {
1442 error_setg_errno(errp, -r, "Failed to set busyloop timeout");
1443 goto fail_busyloop;
1444 }
1445 }
1446 }
1447
1448 hdev->features = features;
1449
1450 hdev->memory_listener = (MemoryListener) {
1451 .name = "vhost",
1452 .begin = vhost_begin,
1453 .commit = vhost_commit,
1454 .region_add = vhost_region_addnop,
1455 .region_nop = vhost_region_addnop,
1456 .log_start = vhost_log_start,
1457 .log_stop = vhost_log_stop,
1458 .log_sync = vhost_log_sync,
1459 .log_global_start = vhost_log_global_start,
1460 .log_global_stop = vhost_log_global_stop,
1461 .priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND
1462 };
1463
1464 hdev->iommu_listener = (MemoryListener) {
1465 .name = "vhost-iommu",
1466 .region_add = vhost_iommu_region_add,
1467 .region_del = vhost_iommu_region_del,
1468 };
1469
1470 if (hdev->migration_blocker == NULL) {
1471 if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
1472 error_setg(&hdev->migration_blocker,
1473 "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
1474 } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_alloc_check()) {
1475 error_setg(&hdev->migration_blocker,
1476 "Migration disabled: failed to allocate shared memory");
1477 }
1478 }
1479
1480 if (hdev->migration_blocker != NULL) {
1481 r = migrate_add_blocker(hdev->migration_blocker, errp);
1482 if (r < 0) {
1483 error_free(hdev->migration_blocker);
1484 goto fail_busyloop;
1485 }
1486 }
1487
1488 hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
1489 hdev->n_mem_sections = 0;
1490 hdev->mem_sections = NULL;
1491 hdev->log = NULL;
1492 hdev->log_size = 0;
1493 hdev->log_enabled = false;
1494 hdev->started = false;
1495 memory_listener_register(&hdev->memory_listener, &address_space_memory);
1496 QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
1497
1498 if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) {
1499 error_setg(errp, "vhost backend memory slots limit is less"
1500 " than current number of present memory slots");
1501 r = -EINVAL;
1502 goto fail_busyloop;
1503 }
1504
1505 return 0;
1506
1507fail_busyloop:
1508 if (busyloop_timeout) {
1509 while (--i >= 0) {
1510 vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0);
1511 }
1512 }
1513fail:
1514 hdev->nvqs = n_initialized_vqs;
1515 vhost_dev_cleanup(hdev);
1516 return r;
1517}
1518
1519void vhost_dev_cleanup(struct vhost_dev *hdev)
1520{
1521 int i;
1522
1523 trace_vhost_dev_cleanup(hdev);
1524
1525 for (i = 0; i < hdev->nvqs; ++i) {
1526 vhost_virtqueue_cleanup(hdev->vqs + i);
1527 }
1528 if (hdev->mem) {
        /* those are only safe after successful init */
1530 memory_listener_unregister(&hdev->memory_listener);
1531 QLIST_REMOVE(hdev, entry);
1532 }
1533 if (hdev->migration_blocker) {
1534 migrate_del_blocker(hdev->migration_blocker);
1535 error_free(hdev->migration_blocker);
1536 }
1537 g_free(hdev->mem);
1538 g_free(hdev->mem_sections);
1539 if (hdev->vhost_ops) {
1540 hdev->vhost_ops->vhost_backend_cleanup(hdev);
1541 }
1542 assert(!hdev->log);
1543
1544 memset(hdev, 0, sizeof(struct vhost_dev));
1545}
1546
1547static void vhost_dev_disable_notifiers_nvqs(struct vhost_dev *hdev,
1548 VirtIODevice *vdev,
1549 unsigned int nvqs)
1550{
1551 BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1552 int i, r;
1553
    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
1558 memory_region_transaction_begin();
1559
1560 for (i = 0; i < nvqs; ++i) {
1561 r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1562 false);
1563 if (r < 0) {
1564 error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
1565 }
1566 assert(r >= 0);
1567 }
1568
    /*
     * The transaction expects the ioeventfds to be open when it
     * commits. Do it now, before the cleanup loop.
     */
1573 memory_region_transaction_commit();
1574
1575 for (i = 0; i < nvqs; ++i) {
1576 virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
1577 }
1578 virtio_device_release_ioeventfd(vdev);
1579}
1580
/* Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 */
1584int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1585{
1586 BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
1587 int i, r;
1588
    /* We will pass the notifiers to the kernel, make sure that QEMU
     * doesn't interfere.
     */
1592 r = virtio_device_grab_ioeventfd(vdev);
1593 if (r < 0) {
1594 error_report("binding does not support host notifiers");
1595 return r;
1596 }
1597
    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
1602 memory_region_transaction_begin();
1603
1604 for (i = 0; i < hdev->nvqs; ++i) {
1605 r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i,
1606 true);
1607 if (r < 0) {
1608 error_report("vhost VQ %d notifier binding failed: %d", i, -r);
1609 memory_region_transaction_commit();
1610 vhost_dev_disable_notifiers_nvqs(hdev, vdev, i);
1611 return r;
1612 }
1613 }
1614
1615 memory_region_transaction_commit();
1616
1617 return 0;
1618}
1619
/* Stop processing guest IO notifications in vhost.
 * Start processing them in qemu.
 * This might actually run the qemu handlers right away,
 * so virtio in qemu must be completely setup when this is called.
 */
1625void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
1626{
1627 vhost_dev_disable_notifiers_nvqs(hdev, vdev, hdev->nvqs);
1628}
1629
/* Test and clear event pending status.
 * Should be called after unmask to avoid losing events.
 */
1633bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
1634{
1635 struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index;
1636 assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs);
1637 return event_notifier_test_and_clear(&vq->masked_notifier);
1638}
1639
/* Mask/unmask events from this vq. */
1641void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
1642 bool mask)
1643{
1644 struct VirtQueue *vvq = virtio_get_queue(vdev, n);
1645 int r, index = n - hdev->vq_index;
1646 struct vhost_vring_file file;
1647
    /* should only be called after backend is connected */
1649 assert(hdev->vhost_ops);
1650
1651 if (mask) {
1652 assert(vdev->use_guest_notifier_mask);
1653 file.fd = event_notifier_get_wfd(&hdev->vqs[index].masked_notifier);
1654 } else {
1655 file.fd = event_notifier_get_wfd(virtio_queue_get_guest_notifier(vvq));
1656 }
1657
1658 file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n);
1659 r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file);
1660 if (r < 0) {
1661 error_report("vhost_set_vring_call failed %d", -r);
1662 }
1663}
1664
1665bool vhost_config_pending(struct vhost_dev *hdev)
1666{
1667 assert(hdev->vhost_ops);
1668 if ((hdev->started == false) ||
1669 (hdev->vhost_ops->vhost_set_config_call == NULL)) {
1670 return false;
1671 }
1672
1673 EventNotifier *notifier =
1674 &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier;
1675 return event_notifier_test_and_clear(notifier);
1676}
1677
1678void vhost_config_mask(struct vhost_dev *hdev, VirtIODevice *vdev, bool mask)
1679{
1680 int fd;
1681 int r;
1682 EventNotifier *notifier =
1683 &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier;
1684 EventNotifier *config_notifier = &vdev->config_notifier;
1685 assert(hdev->vhost_ops);
1686
1687 if ((hdev->started == false) ||
1688 (hdev->vhost_ops->vhost_set_config_call == NULL)) {
1689 return;
1690 }
1691 if (mask) {
1692 assert(vdev->use_guest_notifier_mask);
1693 fd = event_notifier_get_fd(notifier);
1694 } else {
1695 fd = event_notifier_get_fd(config_notifier);
1696 }
1697 r = hdev->vhost_ops->vhost_set_config_call(hdev, fd);
1698 if (r < 0) {
1699 error_report("vhost_set_config_call failed %d", -r);
1700 }
1701}
1702
1703static void vhost_stop_config_intr(struct vhost_dev *dev)
1704{
1705 int fd = -1;
1706 assert(dev->vhost_ops);
1707 if (dev->vhost_ops->vhost_set_config_call) {
1708 dev->vhost_ops->vhost_set_config_call(dev, fd);
1709 }
1710}
1711
1712static void vhost_start_config_intr(struct vhost_dev *dev)
1713{
1714 int r;
1715
1716 assert(dev->vhost_ops);
1717 int fd = event_notifier_get_fd(&dev->vdev->config_notifier);
1718 if (dev->vhost_ops->vhost_set_config_call) {
1719 r = dev->vhost_ops->vhost_set_config_call(dev, fd);
1720 if (!r) {
1721 event_notifier_set(&dev->vdev->config_notifier);
1722 }
1723 }
1724}
1725
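/*
 * vhost_get_features() clears from @features every bit listed in
 * @feature_bits that the backend does not support; vhost_ack_features()
 * records the bits from the same list that were acknowledged by the guest.
 */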
1726uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
1727 uint64_t features)
1728{
1729 const int *bit = feature_bits;
1730 while (*bit != VHOST_INVALID_FEATURE_BIT) {
1731 uint64_t bit_mask = (1ULL << *bit);
1732 if (!(hdev->features & bit_mask)) {
1733 features &= ~bit_mask;
1734 }
1735 bit++;
1736 }
1737 return features;
1738}
1739
1740void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
1741 uint64_t features)
1742{
1743 const int *bit = feature_bits;
1744 while (*bit != VHOST_INVALID_FEATURE_BIT) {
1745 uint64_t bit_mask = (1ULL << *bit);
1746 if (features & bit_mask) {
1747 hdev->acked_features |= bit_mask;
1748 }
1749 bit++;
1750 }
1751}
1752
1753int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config,
1754 uint32_t config_len, Error **errp)
1755{
1756 assert(hdev->vhost_ops);
1757
1758 if (hdev->vhost_ops->vhost_get_config) {
1759 return hdev->vhost_ops->vhost_get_config(hdev, config, config_len,
1760 errp);
1761 }
1762
1763 error_setg(errp, "vhost_get_config not implemented");
1764 return -ENOSYS;
1765}
1766
1767int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data,
1768 uint32_t offset, uint32_t size, uint32_t flags)
1769{
1770 assert(hdev->vhost_ops);
1771
1772 if (hdev->vhost_ops->vhost_set_config) {
1773 return hdev->vhost_ops->vhost_set_config(hdev, data, offset,
1774 size, flags);
1775 }
1776
1777 return -ENOSYS;
1778}
1779
1780void vhost_dev_set_config_notifier(struct vhost_dev *hdev,
1781 const VhostDevConfigOps *ops)
1782{
1783 hdev->config_ops = ops;
1784}
1785
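/*
 * Inflight I/O tracking: the backend keeps a memfd-backed region recording
 * requests that have been started but not yet completed, so they can be
 * resubmitted, e.g. after a vhost-user backend reconnect. The region is
 * saved and loaded along with the device state by the helpers below.
 */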
1786void vhost_dev_free_inflight(struct vhost_inflight *inflight)
1787{
1788 if (inflight && inflight->addr) {
1789 qemu_memfd_free(inflight->addr, inflight->size, inflight->fd);
1790 inflight->addr = NULL;
1791 inflight->fd = -1;
1792 }
1793}
1794
1795static int vhost_dev_resize_inflight(struct vhost_inflight *inflight,
1796 uint64_t new_size)
1797{
1798 Error *err = NULL;
1799 int fd = -1;
1800 void *addr = qemu_memfd_alloc("vhost-inflight", new_size,
1801 F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
1802 &fd, &err);
1803
1804 if (err) {
1805 error_report_err(err);
1806 return -ENOMEM;
1807 }
1808
1809 vhost_dev_free_inflight(inflight);
1810 inflight->offset = 0;
1811 inflight->addr = addr;
1812 inflight->fd = fd;
1813 inflight->size = new_size;
1814
1815 return 0;
1816}
1817
1818void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f)
1819{
1820 if (inflight->addr) {
1821 qemu_put_be64(f, inflight->size);
1822 qemu_put_be16(f, inflight->queue_size);
1823 qemu_put_buffer(f, inflight->addr, inflight->size);
1824 } else {
1825 qemu_put_be64(f, 0);
1826 }
1827}
1828
1829int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f)
1830{
1831 uint64_t size;
1832
1833 size = qemu_get_be64(f);
1834 if (!size) {
1835 return 0;
1836 }
1837
1838 if (inflight->size != size) {
1839 int ret = vhost_dev_resize_inflight(inflight, size);
1840 if (ret < 0) {
1841 return ret;
1842 }
1843 }
1844 inflight->queue_size = qemu_get_be16(f);
1845
1846 qemu_get_buffer(f, inflight->addr, size);
1847
1848 return 0;
1849}
1850
1851int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev)
1852{
1853 int r;
1854
1855 if (hdev->vhost_ops->vhost_get_inflight_fd == NULL ||
1856 hdev->vhost_ops->vhost_set_inflight_fd == NULL) {
1857 return 0;
1858 }
1859
1860 hdev->vdev = vdev;
1861
1862 r = vhost_dev_set_features(hdev, hdev->log_enabled);
1863 if (r < 0) {
1864 VHOST_OPS_DEBUG(r, "vhost_dev_prepare_inflight failed");
1865 return r;
1866 }
1867
1868 return 0;
1869}
1870
1871int vhost_dev_set_inflight(struct vhost_dev *dev,
1872 struct vhost_inflight *inflight)
1873{
1874 int r;
1875
1876 if (dev->vhost_ops->vhost_set_inflight_fd && inflight->addr) {
1877 r = dev->vhost_ops->vhost_set_inflight_fd(dev, inflight);
1878 if (r) {
1879 VHOST_OPS_DEBUG(r, "vhost_set_inflight_fd failed");
1880 return r;
1881 }
1882 }
1883
1884 return 0;
1885}
1886
1887int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size,
1888 struct vhost_inflight *inflight)
1889{
1890 int r;
1891
1892 if (dev->vhost_ops->vhost_get_inflight_fd) {
1893 r = dev->vhost_ops->vhost_get_inflight_fd(dev, queue_size, inflight);
1894 if (r) {
1895 VHOST_OPS_DEBUG(r, "vhost_get_inflight_fd failed");
1896 return r;
1897 }
1898 }
1899
1900 return 0;
1901}
1902
1903static int vhost_dev_set_vring_enable(struct vhost_dev *hdev, int enable)
1904{
1905 if (!hdev->vhost_ops->vhost_set_vring_enable) {
1906 return 0;
1907 }
1908
    /*
     * For vhost-user devices, if VHOST_USER_F_PROTOCOL_FEATURES has not
     * been negotiated, the rings start directly in the enabled state, and
     * the .vhost_set_vring_enable callback will fail since
     * VHOST_USER_SET_VRING_ENABLE is not supported.
     */
1915 if (hdev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER &&
1916 !virtio_has_feature(hdev->backend_features,
1917 VHOST_USER_F_PROTOCOL_FEATURES)) {
1918 return 0;
1919 }
1920
1921 return hdev->vhost_ops->vhost_set_vring_enable(hdev, enable);
1922}
1923
/* Host notifiers must be enabled at this point. */
1925int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings)
1926{
1927 int i, r;
1928
    /* should only be called after backend is connected */
1930 assert(hdev->vhost_ops);
1931
1932 trace_vhost_dev_start(hdev, vdev->name, vrings);
1933
1934 vdev->vhost_started = true;
1935 hdev->started = true;
1936 hdev->vdev = vdev;
1937
1938 r = vhost_dev_set_features(hdev, hdev->log_enabled);
1939 if (r < 0) {
1940 goto fail_features;
1941 }
1942
1943 if (vhost_dev_has_iommu(hdev)) {
1944 memory_listener_register(&hdev->iommu_listener, vdev->dma_as);
1945 }
1946
1947 r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem);
1948 if (r < 0) {
1949 VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed");
1950 goto fail_mem;
1951 }
1952 for (i = 0; i < hdev->nvqs; ++i) {
1953 r = vhost_virtqueue_start(hdev,
1954 vdev,
1955 hdev->vqs + i,
1956 hdev->vq_index + i);
1957 if (r < 0) {
1958 goto fail_vq;
1959 }
1960 }
1961
1962 r = event_notifier_init(
1963 &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier, 0);
1964 if (r < 0) {
1965 VHOST_OPS_DEBUG(r, "event_notifier_init failed");
1966 goto fail_vq;
1967 }
1968 event_notifier_test_and_clear(
1969 &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier);
1970 if (!vdev->use_guest_notifier_mask) {
1971 vhost_config_mask(hdev, vdev, true);
1972 }
1973 if (hdev->log_enabled) {
1974 uint64_t log_base;
1975
1976 hdev->log_size = vhost_get_log_size(hdev);
1977 hdev->log = vhost_log_get(hdev->log_size,
1978 vhost_dev_log_is_shared(hdev));
1979 log_base = (uintptr_t)hdev->log->log;
1980 r = hdev->vhost_ops->vhost_set_log_base(hdev,
1981 hdev->log_size ? log_base : 0,
1982 hdev->log);
1983 if (r < 0) {
1984 VHOST_OPS_DEBUG(r, "vhost_set_log_base failed");
1985 goto fail_log;
1986 }
1987 }
1988 if (vrings) {
1989 r = vhost_dev_set_vring_enable(hdev, true);
1990 if (r) {
1991 goto fail_log;
1992 }
1993 }
1994 if (hdev->vhost_ops->vhost_dev_start) {
1995 r = hdev->vhost_ops->vhost_dev_start(hdev, true);
1996 if (r) {
1997 goto fail_start;
1998 }
1999 }
2000 if (vhost_dev_has_iommu(hdev) &&
2001 hdev->vhost_ops->vhost_set_iotlb_callback) {
2002 hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true);

        /* Update used ring information for IOTLB to work correctly;
         * the vhost-kernel code requires this. */
2006 for (i = 0; i < hdev->nvqs; ++i) {
2007 struct vhost_virtqueue *vq = hdev->vqs + i;
2008 vhost_device_iotlb_miss(hdev, vq->used_phys, true);
2009 }
2010 }
2011 vhost_start_config_intr(hdev);
2012 return 0;
2013fail_start:
2014 if (vrings) {
2015 vhost_dev_set_vring_enable(hdev, false);
2016 }
2017fail_log:
2018 vhost_log_put(hdev, false);
2019fail_vq:
2020 while (--i >= 0) {
2021 vhost_virtqueue_stop(hdev,
2022 vdev,
2023 hdev->vqs + i,
2024 hdev->vq_index + i);
2025 }
2026
2027fail_mem:
2028 if (vhost_dev_has_iommu(hdev)) {
2029 memory_listener_unregister(&hdev->iommu_listener);
2030 }
2031fail_features:
2032 vdev->vhost_started = false;
2033 hdev->started = false;
2034 return r;
2035}
2036
/* Host notifiers must be disabled at this point. */
2038void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings)
2039{
2040 int i;
2041
    /* should only be called after backend is connected */
2043 assert(hdev->vhost_ops);
2044 event_notifier_test_and_clear(
2045 &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier);
2046 event_notifier_test_and_clear(&vdev->config_notifier);
2047 event_notifier_cleanup(
2048 &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier);
2049
2050 trace_vhost_dev_stop(hdev, vdev->name, vrings);
2051
2052 if (hdev->vhost_ops->vhost_dev_start) {
2053 hdev->vhost_ops->vhost_dev_start(hdev, false);
2054 }
2055 if (vrings) {
2056 vhost_dev_set_vring_enable(hdev, false);
2057 }
2058 for (i = 0; i < hdev->nvqs; ++i) {
2059 vhost_virtqueue_stop(hdev,
2060 vdev,
2061 hdev->vqs + i,
2062 hdev->vq_index + i);
2063 }
2064 if (hdev->vhost_ops->vhost_reset_status) {
2065 hdev->vhost_ops->vhost_reset_status(hdev);
2066 }
2067
2068 if (vhost_dev_has_iommu(hdev)) {
2069 if (hdev->vhost_ops->vhost_set_iotlb_callback) {
2070 hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false);
2071 }
2072 memory_listener_unregister(&hdev->iommu_listener);
2073 }
2074 vhost_stop_config_intr(hdev);
2075 vhost_log_put(hdev, true);
2076 hdev->started = false;
2077 vdev->vhost_started = false;
2078 hdev->vdev = NULL;
2079}
2080
2081int vhost_net_set_backend(struct vhost_dev *hdev,
2082 struct vhost_vring_file *file)
2083{
2084 if (hdev->vhost_ops->vhost_net_set_backend) {
2085 return hdev->vhost_ops->vhost_net_set_backend(hdev, file);
2086 }
2087
2088 return -ENOSYS;
2089}
2090