// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA device simulator core.
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/dma-map-ops.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>

#include "vdpa_sim.h"

#define DRV_VERSION  "0.1"
#define DRV_AUTHOR   "Jason Wang <jasowang@redhat.com>"
#define DRV_DESC     "vDPA Device Simulator core"
#define DRV_LICENSE  "GPL v2"

static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping 1 - Enable; 0 - Disable");

static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
		 "Maximum number of iotlb entries. 0 means unlimited. (default: 2048)");

#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
#define VDPASIM_VENDOR_ID 0

static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct vdpasim, vdpa);
}

static struct vdpasim *dev_to_sim(struct device *dev)
{
	struct vdpa_device *vdpa = dev_to_vdpa(dev);

	return vdpa_to_sim(vdpa);
}

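/* vringh "notify" hook: forward a used-buffer notification to the
 * virtqueue callback that the bus driver registered via set_vq_cb().
 */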
static void vdpasim_vq_notify(struct vringh *vring)
{
	struct vdpasim_virtqueue *vq =
		container_of(vring, struct vdpasim_virtqueue, vring);

	if (!vq->cb)
		return;

	vq->cb(vq->private);
}

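/* Bind the vringh instance to the ring addresses cached by
 * set_vq_address(); all ring accesses are translated through the
 * simulated IOTLB.
 */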
static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
			  VDPASIM_QUEUE_MAX, false,
			  (struct vring_desc *)(uintptr_t)vq->desc_addr,
			  (struct vring_avail *)(uintptr_t)vq->driver_addr,
			  (struct vring_used *)(uintptr_t)vq->device_addr);

	vq->vring.notify = vdpasim_vq_notify;
}

static void vdpasim_vq_reset(struct vdpasim *vdpasim,
			     struct vdpasim_virtqueue *vq)
{
	vq->ready = false;
	vq->desc_addr = 0;
	vq->driver_addr = 0;
	vq->device_addr = 0;
	vq->cb = NULL;
	vq->private = NULL;
	vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
			  VDPASIM_QUEUE_MAX, false, NULL, NULL, NULL);

	vq->vring.notify = NULL;
}

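/* Device reset: return every virtqueue to its initial state, drop all
 * IOTLB mappings and negotiated features, and bump the config
 * generation so drivers can detect the change.
 */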
static void vdpasim_reset(struct vdpasim *vdpasim)
{
	int i;

	for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
		vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]);

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_reset(vdpasim->iommu);
	spin_unlock(&vdpasim->iommu_lock);

	vdpasim->features = 0;
	vdpasim->status = 0;
	++vdpasim->generation;
}

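/* Translate a DMA direction into IOTLB access permissions.  The
 * permission is from the device's point of view: DMA_FROM_DEVICE means
 * the device writes the buffer, so the mapping is write-only.
 */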
static int dir_to_perm(enum dma_data_direction dir)
{
	int perm = -EFAULT;

	switch (dir) {
	case DMA_FROM_DEVICE:
		perm = VHOST_MAP_WO;
		break;
	case DMA_TO_DEVICE:
		perm = VHOST_MAP_RO;
		break;
	case DMA_BIDIRECTIONAL:
		perm = VHOST_MAP_RW;
		break;
	default:
		break;
	}

	return perm;
}

static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;
	u64 pa = (page_to_pfn(page) << PAGE_SHIFT) + offset;
	int ret, perm = dir_to_perm(dir);

	if (perm < 0)
		return DMA_MAPPING_ERROR;

	/*
	 * For simplicity, use identity mapping to avoid e.g. an IOVA
	 * allocator.
	 */
	spin_lock(&vdpasim->iommu_lock);
	ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1, pa, perm);
	spin_unlock(&vdpasim->iommu_lock);
	if (ret)
		return DMA_MAPPING_ERROR;

	return (dma_addr_t)pa;
}

static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(iommu, (u64)dma_addr,
			      (u64)dma_addr + size - 1);
	spin_unlock(&vdpasim->iommu_lock);
}

static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
				    dma_addr_t *dma_addr, gfp_t flag,
				    unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;
	void *addr = kmalloc(size, flag);
	int ret;

	spin_lock(&vdpasim->iommu_lock);
	if (!addr) {
		*dma_addr = DMA_MAPPING_ERROR;
	} else {
		u64 pa = virt_to_phys(addr);

		ret = vhost_iotlb_add_range(iommu, (u64)pa,
					    (u64)pa + size - 1,
					    pa, VHOST_MAP_RW);
		if (ret) {
			*dma_addr = DMA_MAPPING_ERROR;
			kfree(addr);
			addr = NULL;
		} else {
			*dma_addr = (dma_addr_t)pa;
		}
	}
	spin_unlock(&vdpasim->iommu_lock);

	return addr;
}

static void vdpasim_free_coherent(struct device *dev, size_t size,
				  void *vaddr, dma_addr_t dma_addr,
				  unsigned long attrs)
{
	struct vdpasim *vdpasim = dev_to_sim(dev);
	struct vhost_iotlb *iommu = vdpasim->iommu;

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(iommu, (u64)dma_addr,
			      (u64)dma_addr + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	kfree(phys_to_virt((uintptr_t)dma_addr));
}

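/* These dma_map_ops are installed on the simulator's own struct device,
 * so DMA done on behalf of the device lands in the simulated IOTLB
 * rather than in any real IOMMU.
 */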
static const struct dma_map_ops vdpasim_dma_ops = {
	.map_page = vdpasim_map_page,
	.unmap_page = vdpasim_unmap_page,
	.alloc = vdpasim_alloc_coherent,
	.free = vdpasim_free_coherent,
};

static const struct vdpa_config_ops vdpasim_config_ops;
static const struct vdpa_config_ops vdpasim_batch_config_ops;

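/**
 * vdpasim_create - create a new vDPA device simulator instance
 * @dev_attr: device attributes filled in by a device-type module
 *	      (queue count, supported features, config space size, the
 *	      work function that services kicked queues, and so on)
 *
 * Allocates the common simulator state shared by all simulated device
 * types.  On failure an ERR_PTR() is returned and the caller does not
 * need to perform any cleanup.
 */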
struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
{
	const struct vdpa_config_ops *ops;
	struct vdpasim *vdpasim;
	struct device *dev;
	int i, ret = -ENOMEM;

	if (batch_mapping)
		ops = &vdpasim_batch_config_ops;
	else
		ops = &vdpasim_config_ops;

	vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
				    dev_attr->name);
	if (!vdpasim)
		goto err_alloc;

	vdpasim->dev_attr = *dev_attr;
	INIT_WORK(&vdpasim->work, dev_attr->work_fn);
	spin_lock_init(&vdpasim->lock);
	spin_lock_init(&vdpasim->iommu_lock);

	dev = &vdpasim->vdpa.dev;
	dev->dma_mask = &dev->coherent_dma_mask;
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		goto err_iommu;
	set_dma_ops(dev, &vdpasim_dma_ops);
	vdpasim->vdpa.mdev = dev_attr->mgmt_dev;

	vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL);
	if (!vdpasim->config)
		goto err_iommu;

	vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue),
			       GFP_KERNEL);
	if (!vdpasim->vqs)
		goto err_iommu;

	vdpasim->iommu = vhost_iotlb_alloc(max_iotlb_entries, 0);
	if (!vdpasim->iommu)
		goto err_iommu;

	vdpasim->buffer = kvmalloc(dev_attr->buffer_size, GFP_KERNEL);
	if (!vdpasim->buffer)
		goto err_iommu;

	for (i = 0; i < dev_attr->nvqs; i++)
		vringh_set_iotlb(&vdpasim->vqs[i].vring, vdpasim->iommu);

	vdpasim->vdpa.dma_dev = dev;

	return vdpasim;

err_iommu:
	put_device(dev);
err_alloc:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(vdpasim_create);

static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
				  u64 desc_area, u64 driver_area,
				  u64 device_area)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->desc_addr = desc_area;
	vq->driver_addr = driver_area;
	vq->device_addr = device_area;

	return 0;
}

static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->num = num;
}

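/* A kick from the driver just schedules the per-device work item; the
 * device-type's work_fn drains the queues in process context.
 */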
static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	if (vq->ready)
		schedule_work(&vdpasim->work);
}

static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
			      struct vdpa_callback *cb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	vq->cb = cb->callback;
	vq->private = cb->private;
}

static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	spin_lock(&vdpasim->lock);
	vq->ready = ready;
	if (vq->ready)
		vdpasim_queue_ready(vdpasim, idx);
	spin_unlock(&vdpasim->lock);
}

static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

	return vq->ready;
}

static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
				const struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	spin_lock(&vdpasim->lock);
	vrh->last_avail_idx = state->avail_index;
	spin_unlock(&vdpasim->lock);

	return 0;
}

static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
				struct vdpa_vq_state *state)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
	struct vringh *vrh = &vq->vring;

	state->avail_index = vrh->last_avail_idx;
	return 0;
}

static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_ALIGN;
}

static u64 vdpasim_get_features(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.supported_features;
}

static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	/* DMA mapping must be done by driver */
	if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
		return -EINVAL;

	vdpasim->features = features & vdpasim->dev_attr.supported_features;

	return 0;
}

static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
				  struct vdpa_callback *cb)
{
	/* We don't support config interrupt */
}

static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
{
	return VDPASIM_QUEUE_MAX;
}

static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->dev_attr.id;
}

static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
{
	return VDPASIM_VENDOR_ID;
}

static u8 vdpasim_get_status(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	u8 status;

	spin_lock(&vdpasim->lock);
	status = vdpasim->status;
	spin_unlock(&vdpasim->lock);

	return status;
}

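/* Writing a zero status is how virtio drivers request a device reset. */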
static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->lock);
	vdpasim->status = status;
	if (status == 0)
		vdpasim_reset(vdpasim);
	spin_unlock(&vdpasim->lock);
}

static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
			       void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	if (vdpasim->dev_attr.get_config)
		vdpasim->dev_attr.get_config(vdpasim, vdpasim->config);

	memcpy(buf, vdpasim->config + offset, len);
}

static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
			       const void *buf, unsigned int len)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	if (offset + len > vdpasim->dev_attr.config_size)
		return;

	memcpy(vdpasim->config + offset, buf, len);

	if (vdpasim->dev_attr.set_config)
		vdpasim->dev_attr.set_config(vdpasim, vdpasim->config);
}

static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	return vdpasim->generation;
}

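/* The simulator places no restriction on IOVAs, so advertise the whole
 * 64-bit address space as usable.
 */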
static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
{
	struct vdpa_iova_range range = {
		.first = 0ULL,
		.last = ULLONG_MAX,
	};

	return range;
}

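/* Batched mapping interface: under iommu_lock, replace the simulated
 * IOTLB with the contents of the IOTLB passed in by the bus driver.
 * On failure the IOTLB is left empty rather than half-filled.
 */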
static int vdpasim_set_map(struct vdpa_device *vdpa,
			   struct vhost_iotlb *iotlb)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	struct vhost_iotlb_map *map;
	u64 start = 0ULL, last = ULLONG_MAX;
	int ret;

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_reset(vdpasim->iommu);

	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		ret = vhost_iotlb_add_range(vdpasim->iommu, map->start,
					    map->last, map->addr, map->perm);
		if (ret)
			goto err;
	}
	spin_unlock(&vdpasim->iommu_lock);
	return 0;

err:
	vhost_iotlb_reset(vdpasim->iommu);
	spin_unlock(&vdpasim->iommu_lock);
	return ret;
}

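/* Incremental mapping interface used when batch_mapping is disabled:
 * the bus driver adds and removes one IOVA range at a time.
 */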
static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
			   u64 pa, u32 perm)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
	int ret;

	spin_lock(&vdpasim->iommu_lock);
	ret = vhost_iotlb_add_range(vdpasim->iommu, iova, iova + size - 1, pa,
				    perm);
	spin_unlock(&vdpasim->iommu_lock);

	return ret;
}

static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	spin_lock(&vdpasim->iommu_lock);
	vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1);
	spin_unlock(&vdpasim->iommu_lock);

	return 0;
}

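/* Release callback invoked by the vdpa core when the device's last
 * reference is dropped; any pending work must be flushed before the
 * backing memory goes away.
 */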
static void vdpasim_free(struct vdpa_device *vdpa)
{
	struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

	cancel_work_sync(&vdpasim->work);
	kvfree(vdpasim->buffer);
	if (vdpasim->iommu)
		vhost_iotlb_free(vdpasim->iommu);
	kfree(vdpasim->vqs);
	kfree(vdpasim->config);
}

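/* Config ops used when batch_mapping is disabled: mappings arrive one
 * range at a time through dma_map()/dma_unmap().
 */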
static const struct vdpa_config_ops vdpasim_config_ops = {
	.set_vq_address = vdpasim_set_vq_address,
	.set_vq_num = vdpasim_set_vq_num,
	.kick_vq = vdpasim_kick_vq,
	.set_vq_cb = vdpasim_set_vq_cb,
	.set_vq_ready = vdpasim_set_vq_ready,
	.get_vq_ready = vdpasim_get_vq_ready,
	.set_vq_state = vdpasim_set_vq_state,
	.get_vq_state = vdpasim_get_vq_state,
	.get_vq_align = vdpasim_get_vq_align,
	.get_features = vdpasim_get_features,
	.set_features = vdpasim_set_features,
	.set_config_cb = vdpasim_set_config_cb,
	.get_vq_num_max = vdpasim_get_vq_num_max,
	.get_device_id = vdpasim_get_device_id,
	.get_vendor_id = vdpasim_get_vendor_id,
	.get_status = vdpasim_get_status,
	.set_status = vdpasim_set_status,
	.get_config = vdpasim_get_config,
	.set_config = vdpasim_set_config,
	.get_generation = vdpasim_get_generation,
	.get_iova_range = vdpasim_get_iova_range,
	.dma_map = vdpasim_dma_map,
	.dma_unmap = vdpasim_dma_unmap,
	.free = vdpasim_free,
};

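/* Config ops used when batch_mapping is enabled: the whole IOTLB is
 * replaced in one go through set_map().
 */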
static const struct vdpa_config_ops vdpasim_batch_config_ops = {
	.set_vq_address = vdpasim_set_vq_address,
	.set_vq_num = vdpasim_set_vq_num,
	.kick_vq = vdpasim_kick_vq,
	.set_vq_cb = vdpasim_set_vq_cb,
	.set_vq_ready = vdpasim_set_vq_ready,
	.get_vq_ready = vdpasim_get_vq_ready,
	.set_vq_state = vdpasim_set_vq_state,
	.get_vq_state = vdpasim_get_vq_state,
	.get_vq_align = vdpasim_get_vq_align,
	.get_features = vdpasim_get_features,
	.set_features = vdpasim_set_features,
	.set_config_cb = vdpasim_set_config_cb,
	.get_vq_num_max = vdpasim_get_vq_num_max,
	.get_device_id = vdpasim_get_device_id,
	.get_vendor_id = vdpasim_get_vendor_id,
	.get_status = vdpasim_get_status,
	.set_status = vdpasim_set_status,
	.get_config = vdpasim_get_config,
	.set_config = vdpasim_set_config,
	.get_generation = vdpasim_get_generation,
	.get_iova_range = vdpasim_get_iova_range,
	.set_map = vdpasim_set_map,
	.free = vdpasim_free,
};

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);