12#include "qemu/osdep.h"
13#include <linux/vhost.h>
14#include <linux/vfio.h>
15#include <sys/eventfd.h>
16#include <sys/ioctl.h>
17#include "hw/virtio/vhost.h"
18#include "hw/virtio/vhost-backend.h"
19#include "hw/virtio/virtio-net.h"
20#include "hw/virtio/vhost-vdpa.h"
21#include "exec/address-spaces.h"
22#include "qemu/main-loop.h"
23#include "cpu.h"
24#include "trace.h"
25#include "qemu-common.h"
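
/*
 * Return the end of a section (one past its last byte), aligned down to
 * the target page boundary.  Kept in Int128 so the addition cannot wrap.
 */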
static Int128 vhost_vdpa_section_end(const MemoryRegionSection *section)
{
    Int128 llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    return llend;
}

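/*
 * Returns true when the listener should ignore this section: the region is
 * neither RAM nor IOMMU-backed, is protected or a ram-device region, or
 * falls outside the device's usable IOVA range.
 */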
static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
                                                uint64_t iova_min,
                                                uint64_t iova_max)
{
    Int128 llend;

    if ((!memory_region_is_ram(section->mr) &&
         !memory_region_is_iommu(section->mr)) ||
        memory_region_is_protected(section->mr) ||
        memory_region_is_ram_device(section->mr)) {
        return true;
    }

    if (section->offset_within_address_space < iova_min) {
        error_report("RAM section out of device range (min=0x%" PRIx64
                     ", addr=0x%" HWADDR_PRIx ")",
                     iova_min, section->offset_within_address_space);
        return true;
    }

    llend = vhost_vdpa_section_end(section);
    if (int128_gt(llend, int128_make64(iova_max))) {
        error_report("RAM section out of device range (max=0x%" PRIx64
                     ", end addr=0x%" PRIx64 ")",
                     iova_max, int128_get64(llend));
        return true;
    }

    return false;
}

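/*
 * Program one IOVA->HVA mapping into the device IOTLB by writing a
 * VHOST_IOTLB_UPDATE message to the vhost-vdpa device fd.
 */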
static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
                              void *vaddr, bool readonly)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
    msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
    msg.iotlb.type = VHOST_IOTLB_UPDATE;

    trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.iotlb.iova, msg.iotlb.size,
                             msg.iotlb.uaddr, msg.iotlb.perm, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

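/* Drop an IOVA range from the device IOTLB with a VHOST_IOTLB_INVALIDATE message. */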
static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova,
                                hwaddr size)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.type = VHOST_IOTLB_INVALIDATE;

    trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.iotlb.iova,
                               msg.iotlb.size, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

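/*
 * Open an IOTLB batch: updates sent between BATCH_BEGIN and BATCH_END may
 * be applied by the kernel in one go instead of one message at a time.
 */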
static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
{
    int fd = v->device_fd;
    struct vhost_msg_v2 msg = {
        .type = v->msg_type,
        .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
    };

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }
}

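/*
 * Send BATCH_BEGIN at most once per batch.  Note the flag is set even when
 * the device lacks VHOST_BACKEND_F_IOTLB_BATCH; vhost_vdpa_listener_commit()
 * checks the capability again before emitting BATCH_END, so this is harmless.
 */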
static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v)
{
    if (v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
        !v->iotlb_batch_begin_sent) {
        vhost_vdpa_listener_begin_batch(v);
    }

    v->iotlb_batch_begin_sent = true;
}

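/* Close the current IOTLB batch, if one was opened, by sending BATCH_END. */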
static void vhost_vdpa_listener_commit(MemoryListener *listener)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    struct vhost_dev *dev = v->dev;
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;

    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
        return;
    }

    if (!v->iotlb_batch_begin_sent) {
        return;
    }

    msg.type = v->msg_type;
    msg.iotlb.type = VHOST_IOTLB_BATCH_END;

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }

    v->iotlb_batch_begin_sent = false;
}

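/*
 * Map a new RAM section into the device: page-align the IOVA, translate the
 * section to a host virtual address, and send the mapping to the device.
 */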
static void vhost_vdpa_listener_region_add(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    void *vaddr;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
                                            v->iova_range.last)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = vhost_vdpa_section_end(section);
    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    memory_region_ref(section->mr);
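
    /* Below we assume memory_region_is_ram(section->mr) holds. */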
    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend),
                                         vaddr, section->readonly);

    llsize = int128_sub(llend, int128_make64(iova));

    vhost_vdpa_iotlb_batch_begin_once(v);
    ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
                             vaddr, section->readonly);
    if (ret) {
        error_report("vhost-vdpa: DMA map failed");
        goto fail;
    }

    return;

fail:
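    /*
     * A runtime DMA mapping failure leaves the device with a stale view of
     * guest memory; there is little to do beyond reporting it.
     */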
    error_report("vhost-vdpa: DMA mapping failed, unable to continue");

    return;
}

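/* Remove a RAM section's mapping from the device when it goes away. */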
static void vhost_vdpa_listener_region_del(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
                                            v->iova_range.last)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = vhost_vdpa_section_end(section);

    trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    llsize = int128_sub(llend, int128_make64(iova));

    vhost_vdpa_iotlb_batch_begin_once(v);
    ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
    if (ret) {
        error_report("vhost-vdpa: DMA unmap failed");
    }

    memory_region_unref(section->mr);
}
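
/*
 * All guest memory updates reach the device through this listener: sections
 * are translated into IOTLB messages as they are added and removed, batched
 * between begin and commit when the backend supports it.
 */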
static const MemoryListener vhost_vdpa_memory_listener = {
    .name = "vhost-vdpa",
    .commit = vhost_vdpa_listener_commit,
    .region_add = vhost_vdpa_listener_region_add,
    .region_del = vhost_vdpa_listener_region_del,
};

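/* Thin ioctl wrapper over the vhost-vdpa device fd; returns -errno on failure. */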
static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
                           void *arg)
{
    struct vhost_vdpa *v = dev->opaque;
    int fd = v->device_fd;
    int ret;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);

    ret = ioctl(fd, request, arg);
    return ret < 0 ? -errno : ret;
}

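/* Read-modify-write of the device status byte: OR in the given bits. */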
static void vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
{
    uint8_t s;

    trace_vhost_vdpa_add_status(dev, status);
    if (vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s)) {
        return;
    }

    s |= status;

    vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
}

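/*
 * Ask the device for its usable IOVA range; fall back to the whole 64-bit
 * space when the ioctl is not supported.
 */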
static void vhost_vdpa_get_iova_range(struct vhost_vdpa *v)
{
    int ret = vhost_vdpa_call(v->dev, VHOST_VDPA_GET_IOVA_RANGE,
                              &v->iova_range);
    if (ret != 0) {
        v->iova_range.first = 0;
        v->iova_range.last = UINT64_MAX;
    }

    trace_vhost_vdpa_get_iova_range(v->dev, v->iova_range.first,
                                    v->iova_range.last);
}

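/*
 * Requests that apply to the whole device rather than to a single virtqueue
 * pair only need to be issued once; every vhost_dev other than index 0
 * skips them.
 */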
static bool vhost_vdpa_one_time_request(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;

    return v->index != 0;
}

static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
{
    struct vhost_vdpa *v;
    int ret;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    trace_vhost_vdpa_init(dev, opaque);

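    /*
     * As with VFIO, mapping guest memory for device DMA effectively pins
     * it, which is incompatible with discarding of RAM, so disable it for
     * the lifetime of the backend.
     */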
    ret = ram_block_discard_disable(true);
    if (ret) {
        error_report("Cannot disable discarding of RAM");
        return ret;
    }

    v = opaque;
    v->dev = dev;
    dev->opaque = opaque;
    v->listener = vhost_vdpa_memory_listener;
    v->msg_type = VHOST_IOTLB_MSG_V2;

    vhost_vdpa_get_iova_range(v);

    if (vhost_vdpa_one_time_request(dev)) {
        return 0;
    }

    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                               VIRTIO_CONFIG_S_DRIVER);

    return 0;
}

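/* Unmap a queue's doorbell page and drop its host-notifier memory region. */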
static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
                                            int queue_index)
{
    size_t page_size = qemu_real_host_page_size;
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;

    n = &v->notifier[queue_index];

    if (n->addr) {
        virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
        object_unparent(OBJECT(&n->mr));
        munmap(n->addr, page_size);
        n->addr = NULL;
    }
}

static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        vhost_vdpa_host_notifier_uninit(dev, i);
    }
}

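/*
 * Map the page of the device fd that backs this queue's doorbell and expose
 * it to the guest as a ram-device memory region, so guest kicks can reach
 * the device directly.
 */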
static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
{
    size_t page_size = qemu_real_host_page_size;
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;
    int fd = v->device_fd;
    void *addr;
    char *name;

    vhost_vdpa_host_notifier_uninit(dev, queue_index);

    n = &v->notifier[queue_index];

    addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
                queue_index * page_size);
    if (addr == MAP_FAILED) {
        goto err;
    }

    name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
                           v, queue_index);
    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                      page_size, addr);
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
        munmap(addr, page_size);
        goto err;
    }
    n->addr = addr;

    return 0;

err:
    return -1;
}

static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
{
    int i;

    for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
        if (vhost_vdpa_host_notifier_init(dev, i)) {
            goto err;
        }
    }

    return;

err:
    vhost_vdpa_host_notifiers_uninit(dev, i);
    return;
}

static int vhost_vdpa_cleanup(struct vhost_dev *dev)
{
    struct vhost_vdpa *v;
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    v = dev->opaque;
    trace_vhost_vdpa_cleanup(dev, v);
    vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
    memory_listener_unregister(&v->listener);

    dev->opaque = NULL;
    ram_block_discard_disable(false);

    return 0;
}

static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
{
    trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
    return INT_MAX;
}

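/*
 * The memory table is not pushed to the device here: mappings are installed
 * by the memory listener via the IOTLB API.  This hook only traces the
 * regions and rejects unexpected padding.
 */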
static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    if (vhost_vdpa_one_time_request(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
        int i;
        for (i = 0; i < mem->nregions; i++) {
            trace_vhost_vdpa_dump_regions(dev, i,
                                          mem->regions[i].guest_phys_addr,
                                          mem->regions[i].memory_size,
                                          mem->regions[i].userspace_addr,
                                          mem->regions[i].flags_padding);
        }
    }
    if (mem->padding) {
        return -EINVAL;
    }

    return 0;
}

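/*
 * Set the negotiated features, then confirm the device actually accepted
 * them by checking that it keeps FEATURES_OK in its status.
 */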
static int vhost_vdpa_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    uint8_t status = 0;
    int ret;

    if (vhost_vdpa_one_time_request(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_features(dev, features);
    ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
    if (ret) {
        return ret;
    }
    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
    vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);

    return !(status & VIRTIO_CONFIG_S_FEATURES_OK);
}

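/*
 * Negotiate the backend features we can use (IOTLB message v2 and IOTLB
 * batching).  The result is written to the device only once, on the first
 * virtqueue pair, but cached in every vhost_dev.
 */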
static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
{
    uint64_t features;
    uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
        0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH;
    int r;

    if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
        return -EFAULT;
    }

    features &= f;

    if (!vhost_vdpa_one_time_request(dev)) {
        r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
        if (r) {
            return -EFAULT;
        }
    }

    dev->backend_cap = features;

    return 0;
}

static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
                                    uint32_t *device_id)
{
    int ret;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
    trace_vhost_vdpa_get_device_id(dev, *device_id);
    return ret;
}

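/* Writing a zero status resets the device, per the virtio spec. */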
static int vhost_vdpa_reset_device(struct vhost_dev *dev)
{
    int ret;
    uint8_t status = 0;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
    trace_vhost_vdpa_reset_device(dev, status);
    return ret;
}

static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    trace_vhost_vdpa_get_vq_index(dev, idx, idx);
    return idx;
}

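/* Enable every vring owned by this vhost_dev via VHOST_VDPA_SET_VRING_ENABLE. */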
static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
{
    int i;
    trace_vhost_vdpa_set_vring_ready(dev);
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_vring_state state = {
            .index = dev->vq_index + i,
            .num = 1,
        };
        vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state);
    }
    return 0;
}

static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
                                   uint32_t config_len)
{
    int b, len;
    char line[QEMU_HEXDUMP_LINE_LEN];

    for (b = 0; b < config_len; b += 16) {
        len = config_len - b;
        qemu_hexdump_line(line, b, config, len, false);
        trace_vhost_vdpa_dump_config(dev, line);
    }
}

static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
                                 uint32_t offset, uint32_t size,
                                 uint32_t flags)
{
    struct vhost_vdpa_config *config;
    int ret;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);

    trace_vhost_vdpa_set_config(dev, offset, size, flags);
    config = g_malloc(size + config_size);
    config->off = offset;
    config->len = size;
    memcpy(config->buf, data, size);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, data, size);
    }
    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
    g_free(config);
    return ret;
}

static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
                                 uint32_t config_len, Error **errp)
{
    struct vhost_vdpa_config *v_config;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    int ret;

    trace_vhost_vdpa_get_config(dev, config, config_len);
    v_config = g_malloc(config_len + config_size);
    v_config->len = config_len;
    v_config->off = 0;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
    memcpy(config, v_config->buf, config_len);
    g_free(v_config);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, config, config_len);
    }
    return ret;
}

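/*
 * Per-virtqueue-pair start/stop.  Host notifiers and vring enable are
 * handled for every vhost_dev; the memory listener and the DRIVER_OK status
 * transition happen only once, on the last vhost_dev of the device.
 */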
static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
{
    struct vhost_vdpa *v = dev->opaque;
    trace_vhost_vdpa_dev_start(dev, started);

    if (started) {
        vhost_vdpa_host_notifiers_init(dev);
        vhost_vdpa_set_vring_ready(dev);
    } else {
        vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
    }

    if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
        return 0;
    }

    if (started) {
        uint8_t status = 0;
        memory_listener_register(&v->listener, &address_space_memory);
        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
        vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);

        return !(status & VIRTIO_CONFIG_S_DRIVER_OK);
    } else {
        vhost_vdpa_reset_device(dev);
        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                                    VIRTIO_CONFIG_S_DRIVER);
        memory_listener_unregister(&v->listener);

        return 0;
    }
}

static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
                                   struct vhost_log *log)
{
    if (vhost_vdpa_one_time_request(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
                                  log->log);
    return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
}

static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
                                     struct vhost_vring_addr *addr)
{
    trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
                                    addr->desc_user_addr, addr->used_user_addr,
                                    addr->avail_user_addr,
                                    addr->log_guest_addr);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
}

static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
}

static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
}

static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
    trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
    return ret;
}

static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
}

static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
}

static int vhost_vdpa_get_features(struct vhost_dev *dev,
                                   uint64_t *features)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
    trace_vhost_vdpa_get_features(dev, *features);
    return ret;
}

static int vhost_vdpa_set_owner(struct vhost_dev *dev)
{
    if (vhost_vdpa_one_time_request(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_owner(dev);
    return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
}

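/*
 * The listener maps IOVA == guest physical address, so vrings are handed
 * to the device by their guest physical addresses rather than by QEMU
 * virtual addresses.
 */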
static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
                                  struct vhost_vring_addr *addr,
                                  struct vhost_virtqueue *vq)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
    addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
    addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
    trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
                                 addr->avail_user_addr, addr->used_user_addr);
    return 0;
}

static bool vhost_vdpa_force_iommu(struct vhost_dev *dev)
{
    return true;
}

const VhostOps vdpa_ops = {
    .backend_type = VHOST_BACKEND_TYPE_VDPA,
    .vhost_backend_init = vhost_vdpa_init,
    .vhost_backend_cleanup = vhost_vdpa_cleanup,
    .vhost_set_log_base = vhost_vdpa_set_log_base,
    .vhost_set_vring_addr = vhost_vdpa_set_vring_addr,
    .vhost_set_vring_num = vhost_vdpa_set_vring_num,
    .vhost_set_vring_base = vhost_vdpa_set_vring_base,
    .vhost_get_vring_base = vhost_vdpa_get_vring_base,
    .vhost_set_vring_kick = vhost_vdpa_set_vring_kick,
    .vhost_set_vring_call = vhost_vdpa_set_vring_call,
    .vhost_get_features = vhost_vdpa_get_features,
    .vhost_set_backend_cap = vhost_vdpa_set_backend_cap,
    .vhost_set_owner = vhost_vdpa_set_owner,
    .vhost_set_vring_endian = NULL,
    .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit,
    .vhost_set_mem_table = vhost_vdpa_set_mem_table,
    .vhost_set_features = vhost_vdpa_set_features,
    .vhost_reset_device = vhost_vdpa_reset_device,
    .vhost_get_vq_index = vhost_vdpa_get_vq_index,
    .vhost_get_config = vhost_vdpa_get_config,
    .vhost_set_config = vhost_vdpa_set_config,
    .vhost_requires_shm_log = NULL,
    .vhost_migration_done = NULL,
    .vhost_backend_can_merge = NULL,
    .vhost_net_set_mtu = NULL,
    .vhost_set_iotlb_callback = NULL,
    .vhost_send_device_iotlb_msg = NULL,
    .vhost_dev_start = vhost_vdpa_dev_start,
    .vhost_get_device_id = vhost_vdpa_get_device_id,
    .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
    .vhost_force_iommu = vhost_vdpa_force_iommu,
};