/*
 * Virtio memory mapped device driver
 *
 * Copyright 2011-2014, ARM Ltd.
 *
 * This module allows virtio devices to be used over a virtual, memory mapped
 * platform device.
 *
 * The guest device(s) may be instantiated in one of three equivalent ways:
 *
 * 1. Static platform device in board's code, eg.:
 *
 *	static struct platform_device v2m_virtio_device = {
 *		.name = "virtio-mmio",
 *		.id = -1,
 *		.num_resources = 2,
 *		.resource = (struct resource []) {
 *			{
 *				.start = 0x1001e000,
 *				.end = 0x1001e0ff,
 *				.flags = IORESOURCE_MEM,
 *			}, {
 *				.start = 42 + 32,
 *				.end = 42 + 32,
 *				.flags = IORESOURCE_IRQ,
 *			},
 *		}
 *	};
 *
 * 2. Device Tree node, eg.:
 *
 *	virtio_block@1e000 {
 *		compatible = "virtio,mmio";
 *		reg = <0x1e000 0x100>;
 *		interrupts = <42>;
 *	}
 *
 * 3. Kernel module (or command line) parameter. Can be used more than once -
 *    one device will be created for each one. Syntax:
 *
 *		[virtio_mmio.]device=<size>@<baseaddr>:<irq>[:<id>]
 *    where:
 *		<size>     := size (can use standard suffixes like K, M or G)
 *		<baseaddr> := physical base address
 *		<irq>      := interrupt number (as passed to request_irq())
 *		<id>       := (optional) platform device id
 *    eg.:
 *		virtio_mmio.device=0x100@0x100b0000:48 \
 *				virtio_mmio.device=1K@0x1001e000:74
 *
 * Based on Virtio PCI driver by Anthony Liguori, among others.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#define pr_fmt(fmt) "virtio-mmio: " fmt

#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_mmio.h>
#include <linux/virtio_ring.h>

/* The alignment to use between consumer and producer parts of vring.
 * Currently hardcoded to the page size. */
#define VIRTIO_MMIO_VRING_ALIGN		PAGE_SIZE

#define to_virtio_mmio_device(_plat_dev) \
	container_of(_plat_dev, struct virtio_mmio_device, vdev)

struct virtio_mmio_device {
	struct virtio_device vdev;
	struct platform_device *pdev;

	void __iomem *base;
	unsigned long version;

	/* a list of queues so we can dispatch IRQs */
	spinlock_t lock;
	struct list_head virtqueues;
};

struct virtio_mmio_vq_info {
	/* the actual virtqueue */
	struct virtqueue *vq;

	/* the number of entries in the queue */
	unsigned int num;

	/* the virtual address of the ring queue */
	void *queue;

	/* the list node for the virtqueues list */
	struct list_head node;
};


/* Configuration interface */

static u64 vm_get_features(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* Legacy (version 1) devices expose only 32 feature bits;
	 * select bank 0 before reading them. */
	writel(0, vm_dev->base + VIRTIO_MMIO_HOST_FEATURES_SEL);

	return readl(vm_dev->base + VIRTIO_MMIO_HOST_FEATURES);
}

static int vm_finalize_features(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Make sure we don't have any features > 32 bits! */
	BUG_ON((u32)vdev->features != vdev->features);

	writel(0, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SEL);
	writel(vdev->features, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES);

	return 0;
}
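
/*
 * Feature negotiation in a nutshell (an illustrative sketch, not part of
 * the transport API): the virtio core calls vm_get_features() to read the
 * 32 feature bits the host offers, masks them against what the driver
 * supports, and then calls vm_finalize_features() to write the accepted
 * subset back. For example, a hypothetical device offering only bits 28
 * (VIRTIO_RING_F_INDIRECT_DESC) and 29 (VIRTIO_RING_F_EVENT_IDX) would
 * yield 0x30000000 from vm_get_features(); whatever survives the masking
 * is what ends up in VIRTIO_MMIO_GUEST_FEATURES.
 */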

static void vm_get(struct virtio_device *vdev, unsigned offset,
		   void *buf, unsigned len)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	u8 *ptr = buf;
	int i;

	for (i = 0; i < len; i++)
		ptr[i] = readb(vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i);
}

static void vm_set(struct virtio_device *vdev, unsigned offset,
		   const void *buf, unsigned len)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	const u8 *ptr = buf;
	int i;

	for (i = 0; i < len; i++)
		writeb(ptr[i], vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i);
}

static u8 vm_get_status(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	return readl(vm_dev->base + VIRTIO_MMIO_STATUS) & 0xff;
}

static void vm_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* We should never be setting status to 0. */
	BUG_ON(status == 0);

	writel(status, vm_dev->base + VIRTIO_MMIO_STATUS);
}

static void vm_reset(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* 0 status means a reset. */
	writel(0, vm_dev->base + VIRTIO_MMIO_STATUS);
}



/* Transport interface */

/* the notify function used when creating a virt queue */
static bool vm_notify(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);

	/* We write the queue's selector into the notification register to
	 * signal the other end */
	writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
	return true;
}

/* Notify all virtqueues on an interrupt. */
static irqreturn_t vm_interrupt(int irq, void *opaque)
{
	struct virtio_mmio_device *vm_dev = opaque;
	struct virtio_mmio_vq_info *info;
	unsigned long status;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	/* Read and acknowledge interrupts */
	status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS);
	writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK);

	if (unlikely(status & VIRTIO_MMIO_INT_CONFIG)) {
		virtio_config_changed(&vm_dev->vdev);
		ret = IRQ_HANDLED;
	}

	if (likely(status & VIRTIO_MMIO_INT_VRING)) {
		spin_lock_irqsave(&vm_dev->lock, flags);
		list_for_each_entry(info, &vm_dev->virtqueues, node)
			ret |= vring_interrupt(irq, info->vq);
		spin_unlock_irqrestore(&vm_dev->lock, flags);
	}

	return ret;
}
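
/*
 * For reference, the interrupt status bits above come from
 * <linux/virtio_mmio.h>: VIRTIO_MMIO_INT_VRING (bit 0) means "a virtqueue
 * has been used" and VIRTIO_MMIO_INT_CONFIG (bit 1) means the device
 * configuration changed. The device has a single interrupt line shared by
 * all of its queues, which is why the handler simply offers the interrupt
 * to every registered virtqueue via vring_interrupt().
 */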


static void vm_del_vq(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
	struct virtio_mmio_vq_info *info = vq->priv;
	unsigned long flags, size;
	unsigned int index = vq->index;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	vring_del_virtqueue(vq);

	/* Select and deactivate the queue */
	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
	writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);

	size = PAGE_ALIGN(vring_size(info->num, VIRTIO_MMIO_VRING_ALIGN));
	free_pages_exact(info->queue, size);
	kfree(info);
}

static void vm_del_vqs(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct virtqueue *vq, *n;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		vm_del_vq(vq);

	free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev);
}


static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
				  void (*callback)(struct virtqueue *vq),
				  const char *name)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct virtio_mmio_vq_info *info;
	struct virtqueue *vq;
	unsigned long flags, size;
	int err;

	if (!name)
		return NULL;

	/* Select the queue we're interested in */
	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);

	/* Queue shouldn't already be set up. */
	if (readl(vm_dev->base + VIRTIO_MMIO_QUEUE_PFN)) {
		err = -ENOENT;
		goto error_available;
	}

	/* Allocate and fill out our structure that represents an active
	 * queue */
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto error_kmalloc;
	}

	/* Allocate pages for the queue - start with a queue as big as
	 * possible (limited by maximum size, if any), drop down to a
	 * minimal size, just big enough to fit descriptor table
	 * and two rings (which makes it "alignment_size * 2") */
	info->num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);

	/* If the device reports a 0 entry queue, we won't be able to
	 * use it to perform I/O, and vring_new_virtqueue() can't create
	 * empty queues anyway, so don't bother to set up the device. */
	if (info->num == 0) {
		err = -ENOENT;
		goto error_alloc_pages;
	}

	while (1) {
		size = PAGE_ALIGN(vring_size(info->num,
				VIRTIO_MMIO_VRING_ALIGN));
		/* Did the last iteration shrink the queue below minimum size? */
		if (size < VIRTIO_MMIO_VRING_ALIGN * 2) {
			err = -ENOMEM;
			goto error_alloc_pages;
		}

		info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
		if (info->queue)
			break;

		/* Allocation failed - halve the queue and try again */
		info->num /= 2;
	}

	/* Activate the queue */
	writel(info->num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
	writel(VIRTIO_MMIO_VRING_ALIGN,
			vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
	writel(virt_to_phys(info->queue) >> PAGE_SHIFT,
			vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);

	/* Create the vring */
	vq = vring_new_virtqueue(index, info->num, VIRTIO_MMIO_VRING_ALIGN, vdev,
				 true, info->queue, vm_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto error_new_virtqueue;
	}

	vq->priv = info;
	info->vq = vq;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_add(&info->node, &vm_dev->virtqueues);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	return vq;

error_new_virtqueue:
	writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	free_pages_exact(info->queue, size);
error_alloc_pages:
	kfree(info);
error_kmalloc:
error_available:
	return ERR_PTR(err);
}
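
/*
 * Ring sizing, by way of a worked example (a sketch, assuming 4 KiB pages
 * and the legacy split-vring layout): for info->num = 256, vring_size()
 * needs 16 * 256 = 4096 bytes of descriptors plus a 518-byte available
 * ring, rounded up to the 4096-byte alignment boundary (8192 bytes), plus
 * a 2054-byte used ring - 10246 bytes in total, which PAGE_ALIGN() turns
 * into three pages (12288 bytes). The loop above keeps halving info->num
 * until such a contiguous allocation succeeds, or until the ring could no
 * longer hold its two aligned parts.
 */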

static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		       struct virtqueue *vqs[],
		       vq_callback_t *callbacks[],
		       const char *names[])
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	int irq = platform_get_irq(vm_dev->pdev, 0);
	int i, err;

	if (irq < 0)
		return irq;

	err = request_irq(irq, vm_interrupt, IRQF_SHARED,
			dev_name(&vdev->dev), vm_dev);
	if (err)
		return err;

	for (i = 0; i < nvqs; ++i) {
		vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i]);
		if (IS_ERR(vqs[i])) {
			vm_del_vqs(vdev);
			return PTR_ERR(vqs[i]);
		}
	}

	return 0;
}

static const char *vm_bus_name(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	return vm_dev->pdev->name;
}

static const struct virtio_config_ops virtio_mmio_config_ops = {
	.get		= vm_get,
	.set		= vm_set,
	.get_status	= vm_get_status,
	.set_status	= vm_set_status,
	.reset		= vm_reset,
	.find_vqs	= vm_find_vqs,
	.del_vqs	= vm_del_vqs,
	.get_features	= vm_get_features,
	.finalize_features = vm_finalize_features,
	.bus_name	= vm_bus_name,
};


/* Platform device */

static int virtio_mmio_probe(struct platform_device *pdev)
{
	struct virtio_mmio_device *vm_dev;
	struct resource *mem;
	unsigned long magic;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -EINVAL;

	if (!devm_request_mem_region(&pdev->dev, mem->start,
			resource_size(mem), pdev->name))
		return -EBUSY;

	vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
	if (!vm_dev)
		return -ENOMEM;

	vm_dev->vdev.dev.parent = &pdev->dev;
	vm_dev->vdev.config = &virtio_mmio_config_ops;
	vm_dev->pdev = pdev;
	INIT_LIST_HEAD(&vm_dev->virtqueues);
	spin_lock_init(&vm_dev->lock);

	vm_dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
	if (vm_dev->base == NULL)
		return -EFAULT;

	/* Check magic value */
	magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
	if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
		dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
		return -ENODEV;
	}

	/* Check device version */
	vm_dev->version = readl(vm_dev->base + VIRTIO_MMIO_VERSION);
	if (vm_dev->version != 1) {
		dev_err(&pdev->dev, "Version %ld not supported!\n",
				vm_dev->version);
		return -ENXIO;
	}

	vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
	vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);

	/* Tell the host our page size, used for QUEUE_PFN calculations. */
	writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);

	platform_set_drvdata(pdev, vm_dev);

	return register_virtio_device(&vm_dev->vdev);
}

static int virtio_mmio_remove(struct platform_device *pdev)
{
	struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);

	unregister_virtio_device(&vm_dev->vdev);

	return 0;
}


/* Devices list parameter */

#if defined(CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES)

static struct device vm_cmdline_parent = {
	.init_name = "virtio-mmio-cmdline",
};

static int vm_cmdline_parent_registered;
static int vm_cmdline_id;

static int vm_cmdline_set(const char *device,
		const struct kernel_param *kp)
{
	int err;
	struct resource resources[2] = {};
	char *str;
	long long int base, size;
	unsigned int irq;
	int processed, consumed = 0;
	struct platform_device *pdev;

	/* Consume "size" part of the command line parameter */
	size = memparse(device, &str);

	/* Get "@<base>:<irq>[:<id>]" chunks */
	processed = sscanf(str, "@%lli:%u%n:%d%n",
			&base, &irq, &consumed,
			&vm_cmdline_id, &consumed);

	/*
	 * sscanf() must process at least 2 chunks; also there
	 * must be no extra characters after the last chunk, so
	 * str[consumed] must be '\0'
	 */
	if (processed < 2 || str[consumed])
		return -EINVAL;

	resources[0].flags = IORESOURCE_MEM;
	resources[0].start = base;
	resources[0].end = base + size - 1;

	resources[1].flags = IORESOURCE_IRQ;
	resources[1].start = resources[1].end = irq;

	if (!vm_cmdline_parent_registered) {
		err = device_register(&vm_cmdline_parent);
		if (err) {
			pr_err("Failed to register parent device!\n");
			return err;
		}
		vm_cmdline_parent_registered = 1;
	}

	pr_info("Registering device virtio-mmio.%d at 0x%llx-0x%llx, IRQ %d.\n",
		       vm_cmdline_id,
		       (unsigned long long)resources[0].start,
		       (unsigned long long)resources[0].end,
		       (int)resources[1].start);

	pdev = platform_device_register_resndata(&vm_cmdline_parent,
			"virtio-mmio", vm_cmdline_id++,
			resources, ARRAY_SIZE(resources), NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	return 0;
}
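
/*
 * Parsing example (illustrative): "virtio_mmio.device=1K@0x1001e000:74:3"
 * makes memparse() return size = 1024 and leave str pointing at
 * "@0x1001e000:74:3"; sscanf() then fills base = 0x1001e000, irq = 74 and
 * the optional platform device id 3. Without the ":3" suffix, the
 * auto-incremented vm_cmdline_id is used as the device id instead.
 */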

static int vm_cmdline_get_device(struct device *dev, void *data)
{
	char *buffer = data;
	unsigned int len = strlen(buffer);
	struct platform_device *pdev = to_platform_device(dev);

	snprintf(buffer + len, PAGE_SIZE - len, "0x%llx@0x%llx:%llu:%d\n",
			pdev->resource[0].end - pdev->resource[0].start + 1ULL,
			(unsigned long long)pdev->resource[0].start,
			(unsigned long long)pdev->resource[1].start,
			pdev->id);
	return 0;
}

static int vm_cmdline_get(char *buffer, const struct kernel_param *kp)
{
	buffer[0] = '\0';
	device_for_each_child(&vm_cmdline_parent, buffer,
			vm_cmdline_get_device);
	return strlen(buffer) + 1;
}

static struct kernel_param_ops vm_cmdline_param_ops = {
	.set = vm_cmdline_set,
	.get = vm_cmdline_get,
};

device_param_cb(device, &vm_cmdline_param_ops, NULL, S_IRUSR);

static int vm_unregister_cmdline_device(struct device *dev,
		void *data)
{
	platform_device_unregister(to_platform_device(dev));

	return 0;
}

static void vm_unregister_cmdline_devices(void)
{
	if (vm_cmdline_parent_registered) {
		device_for_each_child(&vm_cmdline_parent, NULL,
				vm_unregister_cmdline_device);
		device_unregister(&vm_cmdline_parent);
		vm_cmdline_parent_registered = 0;
	}
}

#else

static void vm_unregister_cmdline_devices(void)
{
}

#endif


/* Platform driver */

static const struct of_device_id virtio_mmio_match[] = {
	{ .compatible = "virtio,mmio", },
	{},
};
MODULE_DEVICE_TABLE(of, virtio_mmio_match);

static struct platform_driver virtio_mmio_driver = {
	.probe		= virtio_mmio_probe,
	.remove		= virtio_mmio_remove,
	.driver		= {
		.name	= "virtio-mmio",
		.of_match_table	= virtio_mmio_match,
	},
};

static int __init virtio_mmio_init(void)
{
	return platform_driver_register(&virtio_mmio_driver);
}

static void __exit virtio_mmio_exit(void)
{
	platform_driver_unregister(&virtio_mmio_driver);
	vm_unregister_cmdline_devices();
}

module_init(virtio_mmio_init);
module_exit(virtio_mmio_exit);

MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
MODULE_DESCRIPTION("Platform bus driver for memory mapped virtio devices");
MODULE_LICENSE("GPL");