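// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IBM PowerPC Virtual I/O Infrastructure Support.
 */
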
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kobject.h>

#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/vio.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/page.h>
#include <asm/hvcall.h>

static struct vio_dev vio_bus_device = { /* fake "parent" device */
	.name = "vio",
	.type = "",
	.dev.init_name = "vio",
	.dev.bus = &vio_bus_type,
};

#ifdef CONFIG_PPC_SMLPAR
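/**
 * vio_cmo_pool - A pool of IO memory for CMO use
 *
 * @size: The size of the pool in bytes
 * @free: The amount of free memory in the pool
 */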
struct vio_cmo_pool {
	size_t size;
	size_t free;
};

/* How many ms to delay queued balance work */
#define VIO_CMO_BALANCE_DELAY 100

/* Portion out IO memory to CMO devices by this chunk size */
#define VIO_CMO_BALANCE_CHUNK 131072
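/**
 * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement
 *
 * @viodev: struct vio_dev pointer
 * @list: pointer to other devices on bus that are being tracked
 */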
struct vio_cmo_dev_entry {
	struct vio_dev *viodev;
	struct list_head list;
};
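/**
 * vio_cmo - VIO bus accounting structure for CMO entitlement
 *
 * @lock: spinlock for entire structure
 * @balance_q: work queue for balancing system entitlement
 * @device_list: list of CMO-enabled devices requiring entitlement
 * @entitled: total system entitlement in bytes
 * @reserve: pool of memory from which devices reserve entitlement, incl. spare
 * @excess: pool of excess entitlement not needed for device reserves
 * @spare: IO memory for device hotplug functionality
 * @min: minimum necessary for system operation
 * @desired: desired memory for system operation
 * @curr: bytes currently allocated
 * @high: high water mark for IO data usage
 */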
static struct vio_cmo {
	spinlock_t lock;
	struct delayed_work balance_q;
	struct list_head device_list;
	size_t entitled;
	struct vio_cmo_pool reserve;
	struct vio_cmo_pool excess;
	size_t spare;
	size_t min;
	size_t desired;
	size_t curr;
	size_t high;
} vio_cmo;
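/**
 * vio_cmo_num_OF_devs - Count the number of OF devices that have DMA windows
 */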
static int vio_cmo_num_OF_devs(void)
{
	struct device_node *node_vroot;
	int count = 0;

	/*
	 * Count the number of vdevice entries with an
	 * ibm,my-dma-window OF property
	 */
	node_vroot = of_find_node_by_name(NULL, "vdevice");
	if (node_vroot) {
		struct device_node *of_node;
		struct property *prop;

		for_each_child_of_node(node_vroot, of_node) {
			prop = of_find_property(of_node, "ibm,my-dma-window",
						NULL);
			if (prop)
				count++;
		}
	}
	of_node_put(node_vroot);
	return count;
}
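/**
 * vio_cmo_alloc - allocate IO memory for CMO-enabled devices
 *
 * @viodev: VIO device requesting IO memory
 * @size: size of allocation requested
 *
 * Allocations come from memory reserved for the device and any excess
 * IO memory available to all devices.  The spare pool used to service
 * hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be
 * used.
 *
 * Return codes:
 *  0 for successful allocation and -ENOMEM for a failure
 */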
static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
{
	unsigned long flags;
	size_t reserve_free = 0;
	size_t excess_free = 0;
	int ret = -ENOMEM;

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Determine the amount of free entitlement available in reserve */
	if (viodev->cmo.entitled > viodev->cmo.allocated)
		reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;

	/* If spare is not fulfilled, the excess pool can not be used. */
	if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
		excess_free = vio_cmo.excess.free;

	/* The request can be satisfied */
	if ((reserve_free + excess_free) >= size) {
		vio_cmo.curr += size;
		if (vio_cmo.curr > vio_cmo.high)
			vio_cmo.high = vio_cmo.curr;
		viodev->cmo.allocated += size;
		size -= min(reserve_free, size);
		vio_cmo.excess.free -= size;
		ret = 0;
	}

	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return ret;
}
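/**
 * vio_cmo_dealloc - deallocate IO memory from CMO-enabled devices
 *
 * @viodev: VIO device freeing IO memory
 * @size: size of deallocation
 *
 * IO memory is freed by the device back to the correct memory pools.
 * The spare pool is replenished first from either memory pool, then
 * the reserve pool is used to reduce device entitlement, and finally
 * the excess pool is used to increase the memory available to all
 * devices.  A balance operation is scheduled whenever entitlement
 * moves between pools.
 */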
static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
{
	unsigned long flags;
	size_t spare_needed = 0;
	size_t excess_freed = 0;
	size_t reserve_freed = size;
	size_t tmp;
	int balance = 0;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	vio_cmo.curr -= size;

	/* Amount of memory freed from the excess pool */
	if (viodev->cmo.allocated > viodev->cmo.entitled) {
		excess_freed = min(reserve_freed, (viodev->cmo.allocated -
						   viodev->cmo.entitled));
		reserve_freed -= excess_freed;
	}

	/* Remove allocation from device */
	viodev->cmo.allocated -= (reserve_freed + excess_freed);

	/* Spare is a subset of the reserve pool, replenish it first. */
	spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare;

	/*
	 * Replenish the spare in the reserve pool from the excess pool.
	 * This moves entitlement into the reserve pool.
	 */
	if (spare_needed && excess_freed) {
		tmp = min(excess_freed, spare_needed);
		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;
		vio_cmo.spare += tmp;
		excess_freed -= tmp;
		spare_needed -= tmp;
		balance = 1;
	}

	/*
	 * Replenish the spare in the reserve pool from the reserve pool.
	 * This removes entitlement from the device down to VIO_CMO_MIN_ENT,
	 * if needed, and gives it to the spare pool. The amount of used
	 * memory in this pool does not change.
	 */
	if (spare_needed && reserve_freed) {
		tmp = min3(spare_needed, reserve_freed,
			   (viodev->cmo.entitled - VIO_CMO_MIN_ENT));

		vio_cmo.spare += tmp;
		viodev->cmo.entitled -= tmp;
		reserve_freed -= tmp;
		spare_needed -= tmp;
		balance = 1;
	}

	/*
	 * Increase the reserve pool until the desired allocation is met.
	 * Move an allocation freed from the excess pool into the reserve
	 * pool and schedule a balance operation.
	 */
	if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) {
		tmp = min(excess_freed,
			  (vio_cmo.desired - vio_cmo.reserve.size));

		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;
		excess_freed -= tmp;
		balance = 1;
	}

	/* Return memory from the excess pool to that pool */
	if (excess_freed)
		vio_cmo.excess.free += excess_freed;

	if (balance)
		schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
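/**
 * vio_cmo_entitlement_update - Manage system entitlement changes
 *
 * @new_entitlement: new system entitlement to attempt to accommodate
 *
 * Increases in entitlement will be used to fulfill the spare entitlement
 * and the rest is given to the excess pool.  Decreases, if they are
 * possible, come from the excess pool and from unused device entitlement.
 *
 * Returns: 0 on success, -ENOMEM when change can not be made
 */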
int vio_cmo_entitlement_update(size_t new_entitlement)
{
	struct vio_dev *viodev;
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t avail, delta, tmp;

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Entitlement increases */
	if (new_entitlement > vio_cmo.entitled) {
		delta = new_entitlement - vio_cmo.entitled;

		/* Fulfill spare allocation */
		if (vio_cmo.spare < VIO_CMO_MIN_ENT) {
			tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare));
			vio_cmo.spare += tmp;
			vio_cmo.reserve.size += tmp;
			delta -= tmp;
		}

		/* Remaining new allocation goes to the excess pool */
		vio_cmo.entitled += delta;
		vio_cmo.excess.size += delta;
		vio_cmo.excess.free += delta;

		goto out;
	}

	/* Entitlement decreases */
	delta = vio_cmo.entitled - new_entitlement;
	avail = vio_cmo.excess.free;

	/*
	 * Need to check how much unused entitlement each device can
	 * sacrifice to fulfill entitlement change.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		if (avail >= delta)
			break;

		viodev = dev_ent->viodev;
		if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
		    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
			avail += viodev->cmo.entitled -
				 max_t(size_t, viodev->cmo.allocated,
				       VIO_CMO_MIN_ENT);
	}

	if (delta <= avail) {
		vio_cmo.entitled -= delta;

		/* Take entitlement from the excess pool first */
		tmp = min(vio_cmo.excess.free, delta);
		vio_cmo.excess.size -= tmp;
		vio_cmo.excess.free -= tmp;
		delta -= tmp;

		/*
		 * Remove all but VIO_CMO_MIN_ENT bytes from devices
		 * until the entitlement change is served
		 */
		list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
			if (!delta)
				break;

			viodev = dev_ent->viodev;
			tmp = 0;
			if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
			    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
				tmp = viodev->cmo.entitled -
				      max_t(size_t, viodev->cmo.allocated,
					    VIO_CMO_MIN_ENT);
			viodev->cmo.entitled -= min(tmp, delta);
			delta -= min(tmp, delta);
		}
	} else {
		spin_unlock_irqrestore(&vio_cmo.lock, flags);
		return -ENOMEM;
	}

out:
	schedule_delayed_work(&vio_cmo.balance_q, 0);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return 0;
}
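/**
 * vio_cmo_balance - Balance entitlement among devices
 *
 * @work: work queue structure for this operation
 *
 * Any system entitlement above the minimum needed for devices, or
 * already allocated to devices, can be distributed to the devices.
 * The list of devices is iterated through to recalculate the desired
 * entitlement level and to determine how much entitlement above the
 * minimum each device requires to operate.
 *
 * Small chunks of the available entitlement are given to devices until
 * their requirements are fulfilled or there is no entitlement left to
 * give.  Upon completion, the sizes of the reserve and excess pools are
 * recalculated.  The system minimum entitlement level is also
 * recalculated here; entitlement reserved at this level can not be
 * sacrificed by any device.
 */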
static void vio_cmo_balance(struct work_struct *work)
{
	struct vio_cmo *cmo;
	struct vio_dev *viodev;
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t avail = 0, level, chunk, need;
	int devcount = 0, fulfilled;

	cmo = container_of(work, struct vio_cmo, balance_q.work);

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Calculate minimum entitlement and fulfill spare */
	cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
	BUG_ON(cmo->min > cmo->entitled);
	cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min));
	cmo->min += cmo->spare;
	cmo->desired = cmo->min;

	/*
	 * Determine how much entitlement is available and reset device
	 * entitlements
	 */
	avail = cmo->entitled - cmo->spare;
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		viodev = dev_ent->viodev;
		devcount++;
		viodev->cmo.entitled = VIO_CMO_MIN_ENT;
		cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
		avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
	}

	/*
	 * Having provided each device with the minimum entitlement, loop
	 * over the devices portioning out the remaining entitlement
	 * until there is nothing left.
	 */
	level = VIO_CMO_MIN_ENT;
	while (avail) {
		fulfilled = 0;
		list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
			viodev = dev_ent->viodev;

			if (viodev->cmo.desired <= level) {
				fulfilled++;
				continue;
			}

			/*
			 * Give the device up to VIO_CMO_BALANCE_CHUNK
			 * bytes of entitlement, but do not exceed the
			 * desired level of entitlement for the device.
			 */
			chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
			chunk = min(chunk, (viodev->cmo.desired -
					    viodev->cmo.entitled));
			viodev->cmo.entitled += chunk;

			/*
			 * If the memory for this entitlement increase was
			 * already allocated to the device it does not come
			 * from the available pool being portioned out.
			 */
			need = max(viodev->cmo.allocated, viodev->cmo.entitled) -
			       max(viodev->cmo.allocated, level);
			avail -= need;
		}
		if (fulfilled == devcount)
			break;
		level += VIO_CMO_BALANCE_CHUNK;
	}

	/* Calculate new reserve and excess pool sizes */
	cmo->reserve.size = cmo->min;
	cmo->excess.free = 0;
	cmo->excess.size = 0;
	need = 0;
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		viodev = dev_ent->viodev;
		/* Calculated reserve size above the minimum entitlement */
		if (viodev->cmo.entitled)
			cmo->reserve.size += (viodev->cmo.entitled -
					      VIO_CMO_MIN_ENT);
		/* Calculated used excess entitlement */
		if (viodev->cmo.allocated > viodev->cmo.entitled)
			need += viodev->cmo.allocated - viodev->cmo.entitled;
	}
	cmo->excess.size = cmo->entitled - cmo->reserve.size;
	cmo->excess.free = cmo->excess.size - need;

	cancel_delayed_work(to_delayed_work(work));
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t flag,
					  unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	void *ret;

	if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
		atomic_inc(&viodev->cmo.allocs_failed);
		return NULL;
	}

	ret = iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
				   dma_handle, dev->coherent_dma_mask, flag,
				   dev_to_node(dev));
	if (unlikely(ret == NULL)) {
		vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
		atomic_inc(&viodev->cmo.allocs_failed);
	}

	return ret;
}

static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
					void *vaddr, dma_addr_t dma_handle,
					unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);

	iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
	vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
}

static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction direction,
					 unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct iommu_table *tbl = get_iommu_table_base(dev);
	dma_addr_t ret = DMA_MAPPING_ERROR;

	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))))
		goto out_fail;
	ret = iommu_map_page(dev, tbl, page, offset, size, dma_get_mask(dev),
			     direction, attrs);
	if (unlikely(ret == DMA_MAPPING_ERROR))
		goto out_deallocate;
	return ret;

out_deallocate:
	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
out_fail:
	atomic_inc(&viodev->cmo.allocs_failed);
	return DMA_MAPPING_ERROR;
}

static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
				     size_t size,
				     enum dma_data_direction direction,
				     unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct iommu_table *tbl = get_iommu_table_base(dev);

	iommu_unmap_page(tbl, dma_handle, size, direction, attrs);
	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
}

static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
				int nelems, enum dma_data_direction direction,
				unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct iommu_table *tbl = get_iommu_table_base(dev);
	struct scatterlist *sgl;
	int ret, count;
	size_t alloc_size = 0;

	for_each_sg(sglist, sgl, nelems, count)
		alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));

	if (vio_cmo_alloc(viodev, alloc_size))
		goto out_fail;
	ret = ppc_iommu_map_sg(dev, tbl, sglist, nelems, dma_get_mask(dev),
			       direction, attrs);
	if (unlikely(!ret))
		goto out_deallocate;

	for_each_sg(sglist, sgl, ret, count)
		alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
	if (alloc_size)
		vio_cmo_dealloc(viodev, alloc_size);
	return ret;

out_deallocate:
	vio_cmo_dealloc(viodev, alloc_size);
out_fail:
	atomic_inc(&viodev->cmo.allocs_failed);
	return 0;
}

static void vio_dma_iommu_unmap_sg(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction,
				   unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct iommu_table *tbl = get_iommu_table_base(dev);
	struct scatterlist *sgl;
	size_t alloc_size = 0;
	int count;

	for_each_sg(sglist, sgl, nelems, count)
		alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));

	ppc_iommu_unmap_sg(tbl, sglist, nelems, direction, attrs);
	vio_cmo_dealloc(viodev, alloc_size);
}

static const struct dma_map_ops vio_dma_mapping_ops = {
	.alloc             = vio_dma_iommu_alloc_coherent,
	.free              = vio_dma_iommu_free_coherent,
	.map_sg            = vio_dma_iommu_map_sg,
	.unmap_sg          = vio_dma_iommu_unmap_sg,
	.map_page          = vio_dma_iommu_map_page,
	.unmap_page        = vio_dma_iommu_unmap_page,
	.dma_supported     = dma_iommu_dma_supported,
	.get_required_mask = dma_iommu_get_required_mask,
};
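/**
 * vio_cmo_set_dev_desired - Set desired entitlement for a device
 *
 * @viodev: struct vio_dev for device to alter
 * @desired: new desired entitlement level in bytes
 *
 * For use by devices to request a change to their entitlement at runtime or
 * through sysfs.  The desired entitlement level is changed and a balancing
 * of system resources is scheduled to run in the future.
 */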
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
{
	unsigned long flags;
	struct vio_cmo_dev_entry *dev_ent;
	int found = 0;

	if (!firmware_has_feature(FW_FEATURE_CMO))
		return;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	if (desired < VIO_CMO_MIN_ENT)
		desired = VIO_CMO_MIN_ENT;

	/*
	 * Changes will not be made for devices not in the device list.
	 * If it is not in the device list, then no driver is loaded
	 * for the device and it can not receive entitlement.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
		if (viodev == dev_ent->viodev) {
			found = 1;
			break;
		}
	if (!found) {
		spin_unlock_irqrestore(&vio_cmo.lock, flags);
		return;
	}

	/* Increase/decrease in desired device entitlement */
	if (desired >= viodev->cmo.desired) {
		/* Just bump the bus and device values prior to a balance */
		vio_cmo.desired += desired - viodev->cmo.desired;
		viodev->cmo.desired = desired;
	} else {
		/* Decrease bus and device values for desired entitlement */
		vio_cmo.desired -= viodev->cmo.desired - desired;
		viodev->cmo.desired = desired;
		/*
		 * If less entitlement is desired than current entitlement,
		 * move excess entitlement to the excess pool.
		 */
		if (viodev->cmo.entitled > desired) {
			vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
			vio_cmo.excess.size += viodev->cmo.entitled - desired;
			/*
			 * If entitlement moving from the reserve pool to the
			 * excess pool is currently unused, add to the excess
			 * free counter.
			 */
			if (viodev->cmo.allocated < viodev->cmo.entitled)
				vio_cmo.excess.free += viodev->cmo.entitled -
					max(viodev->cmo.allocated, desired);
			viodev->cmo.entitled = desired;
		}
	}
	schedule_delayed_work(&vio_cmo.balance_q, 0);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
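/**
 * vio_cmo_bus_probe - Handle CMO specific bus probe activities
 *
 * @viodev: pointer to struct vio_dev for device
 *
 * Determine the device's entitlement needs.  If the device requires
 * entitlement (has a DMA window), validate that the driver supports
 * CMO, reserve entitlement for the device, and add it to the list of
 * devices that are tracked for entitlement balancing.
 *
 * Return codes:
 *  0 on success, -EINVAL when the device doesn't support CMO, and
 *  -ENOMEM when entitlement is not available for the device or device
 *  entry.
 */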
static int vio_cmo_bus_probe(struct vio_dev *viodev)
{
	struct vio_cmo_dev_entry *dev_ent;
	struct device *dev = &viodev->dev;
	struct iommu_table *tbl;
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	unsigned long flags;
	size_t size;
	bool dma_capable = false;

	tbl = get_iommu_table_base(dev);

	/* A device requires entitlement if it has a DMA window property */
	switch (viodev->family) {
	case VDEVICE:
		if (of_get_property(viodev->dev.of_node,
				    "ibm,my-dma-window", NULL))
			dma_capable = true;
		break;
	case PFO:
		dma_capable = false;
		break;
	default:
		dev_warn(dev, "unknown device family: %d\n", viodev->family);
		BUG();
		break;
	}

	/* Configure entitlement for the device. */
	if (dma_capable) {
		/* Check that the driver is CMO enabled and get desired DMA */
		if (!viodrv->get_desired_dma) {
			dev_err(dev, "%s: device driver does not support CMO\n",
				__func__);
			return -EINVAL;
		}

		viodev->cmo.desired =
			IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl);
		if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
			viodev->cmo.desired = VIO_CMO_MIN_ENT;
		size = VIO_CMO_MIN_ENT;

		dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry),
				  GFP_KERNEL);
		if (!dev_ent)
			return -ENOMEM;

		dev_ent->viodev = viodev;
		spin_lock_irqsave(&vio_cmo.lock, flags);
		list_add(&dev_ent->list, &vio_cmo.device_list);
	} else {
		viodev->cmo.desired = 0;
		size = 0;
		spin_lock_irqsave(&vio_cmo.lock, flags);
	}

	/*
	 * If the needs for vio_cmo.min have not changed since they
	 * were last set, the number of devices in the OF tree has
	 * been constant and the IO memory for this is already in
	 * the reserve pool.
	 */
	if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) *
			    VIO_CMO_MIN_ENT)) {
		/* Updated desired entitlement if device requires it */
		if (size)
			vio_cmo.desired += (viodev->cmo.desired -
					    VIO_CMO_MIN_ENT);
	} else {
		size_t tmp;

		tmp = vio_cmo.spare + vio_cmo.excess.free;
		if (tmp < size) {
			dev_err(dev, "%s: insufficient free "
				"entitlement to add device. "
				"Need %lu, have %lu\n", __func__,
				size, (vio_cmo.spare + tmp));
			spin_unlock_irqrestore(&vio_cmo.lock, flags);
			return -ENOMEM;
		}

		/* Use excess pool first to fulfill request */
		tmp = min(size, vio_cmo.excess.free);
		vio_cmo.excess.free -= tmp;
		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;

		/* Use spare if excess pool was insufficient */
		vio_cmo.spare -= size - tmp;

		/* Update bus accounting to reflect the new device */
		vio_cmo.min += size;
		vio_cmo.desired += viodev->cmo.desired;
	}
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return 0;
}
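/**
 * vio_cmo_bus_remove - Handle CMO specific bus removal activities
 *
 * @viodev: pointer to struct vio_dev for device
 *
 * Remove the device from the CMO device list.  The minimum entitlement
 * will be reserved for the device as long as it is in the system.  The
 * rest of the entitlement the device had been allocated is returned to
 * the system.
 */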
static void vio_cmo_bus_remove(struct vio_dev *viodev)
{
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t tmp;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	if (viodev->cmo.allocated) {
		dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
			"allocated after remove operation.\n",
			__func__, viodev->cmo.allocated);
		BUG();
	}

	/*
	 * Remove the device from the device list being maintained for
	 * CMO enabled devices.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
		if (viodev == dev_ent->viodev) {
			list_del(&dev_ent->list);
			kfree(dev_ent);
			break;
		}

	/*
	 * Devices may not require any entitlement and they do not need
	 * to be processed.  Otherwise, return the device's entitlement
	 * back to the pools.
	 */
	if (viodev->cmo.entitled) {
		/*
		 * This device has not yet left the OF tree, its
		 * minimum entitlement remains in vio_cmo.min and
		 * vio_cmo.desired
		 */
		vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);

		/*
		 * Save min allocation for device in reserve as long
		 * as it exists in OF tree as determined by later
		 * balance operation
		 */
		viodev->cmo.entitled -= VIO_CMO_MIN_ENT;

		/* Replenish spare from freed reserve pool */
		if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
			tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
							 vio_cmo.spare));
			vio_cmo.spare += tmp;
			viodev->cmo.entitled -= tmp;
		}

		/* Remaining reserve goes to excess pool */
		vio_cmo.excess.size += viodev->cmo.entitled;
		vio_cmo.excess.free += viodev->cmo.entitled;
		vio_cmo.reserve.size -= viodev->cmo.entitled;

		/*
		 * Until the device is removed it will keep a
		 * minimum entitlement; this will guarantee that
		 * a module unload/load will result in a success.
		 */
		viodev->cmo.entitled = VIO_CMO_MIN_ENT;
		viodev->cmo.desired = VIO_CMO_MIN_ENT;
		atomic_set(&viodev->cmo.allocs_failed, 0);
	}

	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
{
	set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
}
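/**
 * vio_cmo_bus_init - CMO entitlement initialization at bus init time
 *
 * Set up the reserve and excess entitlement pools based on available
 * system entitlement and the number of devices in the OF tree that
 * require entitlement in the reserve pool.
 */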
static void vio_cmo_bus_init(void)
{
	struct hvcall_mpp_data mpp_data;
	int err;

	memset(&vio_cmo, 0, sizeof(struct vio_cmo));
	spin_lock_init(&vio_cmo.lock);
	INIT_LIST_HEAD(&vio_cmo.device_list);
	INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance);

	/* Get current system entitlement */
	err = h_get_mpp(&mpp_data);

	/*
	 * On failure, continue with entitlement set to 0, will panic()
	 * later if spare can not be set aside.
	 */
	if (err != H_SUCCESS) {
		printk(KERN_ERR "%s: unable to determine system IO "
		       "entitlement. (%d)\n", __func__, err);
		vio_cmo.entitled = 0;
	} else {
		vio_cmo.entitled = mpp_data.entitled_mem;
	}

	/* Set reservation and check against entitlement */
	vio_cmo.spare = VIO_CMO_MIN_ENT;
	vio_cmo.reserve.size = vio_cmo.spare;
	vio_cmo.reserve.size += (vio_cmo_num_OF_devs() *
				 VIO_CMO_MIN_ENT);
	if (vio_cmo.reserve.size > vio_cmo.entitled) {
		printk(KERN_ERR "%s: insufficient system entitlement\n",
		       __func__);
		panic("%s: Insufficient system entitlement", __func__);
	}

	/* Set the remaining accounting variables */
	vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size;
	vio_cmo.excess.free = vio_cmo.excess.size;
	vio_cmo.min = vio_cmo.reserve.size;
	vio_cmo.desired = vio_cmo.reserve.size;
}
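/* sysfs device attributes for CMO-enabled devices */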
#define viodev_cmo_rd_attr(name)                                        \
static ssize_t cmo_##name##_show(struct device *dev,                   \
				 struct device_attribute *attr,         \
				 char *buf)                             \
{                                                                       \
	return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name);        \
}

static ssize_t cmo_allocs_failed_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
}

static ssize_t cmo_allocs_failed_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	atomic_set(&viodev->cmo.allocs_failed, 0);
	return count;
}

static ssize_t cmo_desired_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	size_t new_desired;
	int ret;

	ret = kstrtoul(buf, 10, &new_desired);
	if (ret)
		return ret;

	vio_cmo_set_dev_desired(viodev, new_desired);
	return count;
}

viodev_cmo_rd_attr(desired);
viodev_cmo_rd_attr(entitled);
viodev_cmo_rd_attr(allocated);

static ssize_t name_show(struct device *, struct device_attribute *, char *);
static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf);

static struct device_attribute dev_attr_name;
static struct device_attribute dev_attr_devspec;
static struct device_attribute dev_attr_modalias;

static DEVICE_ATTR_RO(cmo_entitled);
static DEVICE_ATTR_RO(cmo_allocated);
static DEVICE_ATTR_RW(cmo_desired);
static DEVICE_ATTR_RW(cmo_allocs_failed);

static struct attribute *vio_cmo_dev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_devspec.attr,
	&dev_attr_modalias.attr,
	&dev_attr_cmo_entitled.attr,
	&dev_attr_cmo_allocated.attr,
	&dev_attr_cmo_desired.attr,
	&dev_attr_cmo_allocs_failed.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vio_cmo_dev);
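/* sysfs bus attributes and functions for CMO */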
#define viobus_cmo_rd_attr(name)                                        \
static ssize_t cmo_bus_##name##_show(struct bus_type *bt, char *buf)   \
{                                                                       \
	return sprintf(buf, "%lu\n", vio_cmo.name);                     \
}                                                                       \
static struct bus_attribute bus_attr_cmo_bus_##name =                   \
	__ATTR(cmo_##name, S_IRUGO, cmo_bus_##name##_show, NULL)

#define viobus_cmo_pool_rd_attr(name, var)                              \
static ssize_t                                                          \
cmo_##name##_##var##_show(struct bus_type *bt, char *buf)               \
{                                                                       \
	return sprintf(buf, "%lu\n", vio_cmo.name.var);                 \
}                                                                       \
static BUS_ATTR_RO(cmo_##name##_##var)

viobus_cmo_rd_attr(entitled);
viobus_cmo_rd_attr(spare);
viobus_cmo_rd_attr(min);
viobus_cmo_rd_attr(desired);
viobus_cmo_rd_attr(curr);
viobus_cmo_pool_rd_attr(reserve, size);
viobus_cmo_pool_rd_attr(excess, size);
viobus_cmo_pool_rd_attr(excess, free);

static ssize_t cmo_high_show(struct bus_type *bt, char *buf)
{
	return sprintf(buf, "%lu\n", vio_cmo.high);
}

static ssize_t cmo_high_store(struct bus_type *bt, const char *buf,
			      size_t count)
{
	unsigned long flags;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	vio_cmo.high = vio_cmo.curr;
	spin_unlock_irqrestore(&vio_cmo.lock, flags);

	return count;
}
static BUS_ATTR_RW(cmo_high);

static struct attribute *vio_bus_attrs[] = {
	&bus_attr_cmo_bus_entitled.attr,
	&bus_attr_cmo_bus_spare.attr,
	&bus_attr_cmo_bus_min.attr,
	&bus_attr_cmo_bus_desired.attr,
	&bus_attr_cmo_bus_curr.attr,
	&bus_attr_cmo_high.attr,
	&bus_attr_cmo_reserve_size.attr,
	&bus_attr_cmo_excess_size.attr,
	&bus_attr_cmo_excess_free.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vio_bus);

static void vio_cmo_sysfs_init(void)
{
	vio_bus_type.dev_groups = vio_cmo_dev_groups;
	vio_bus_type.bus_groups = vio_bus_groups;
}
#else /* CONFIG_PPC_SMLPAR */
int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
static void vio_cmo_bus_init(void) {}
static void vio_cmo_sysfs_init(void) { }
#endif /* CONFIG_PPC_SMLPAR */
EXPORT_SYMBOL(vio_cmo_entitlement_update);
EXPORT_SYMBOL(vio_cmo_set_dev_desired);
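/**
 * vio_h_cop_sync - Perform a synchronous PFO co-processor operation
 *
 * @vdev: pointer to a struct vio_dev for device
 * @op: pointer to a struct vio_pfo_op for the operation parameters
 *
 * Calls the hypervisor to synchronously perform the PFO operation
 * described in @op.  In the case of a busy response from the hypervisor,
 * the operation will be re-submitted indefinitely unless a non-zero
 * timeout is specified, in which case re-submission stops once the
 * timeout has elapsed.
 *
 * op->hcall_err is set to the return code of the last H_COP hcall.
 *
 * Returns:
 *	0 on success,
 *	-EINVAL if the h_call fails due to an invalid parameter,
 *	-E2BIG if the h_call can not be performed synchronously,
 *	-EBUSY if the operation is busy, out of resources, or timed out,
 *	-EACCES if the memory area for data/status has been rescinded, or
 *	-EPERM if a hardware fault has been indicated
 */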
int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op)
{
	struct device *dev = &vdev->dev;
	unsigned long deadline = 0;
	long hret = 0;
	int ret = 0;

	if (op->timeout)
		deadline = jiffies + msecs_to_jiffies(op->timeout);

	while (true) {
		hret = plpar_hcall_norets(H_COP, op->flags,
				vdev->resource_id,
				op->in, op->inlen, op->out,
				op->outlen, op->csbcpb);

		/* stop on success, on a non-retryable return code, or
		 * once the (optional) timeout has expired */
		if (hret == H_SUCCESS ||
		    (hret != H_NOT_ENOUGH_RESOURCES &&
		     hret != H_BUSY && hret != H_RESOURCE) ||
		    (op->timeout && time_after(jiffies, deadline)))
			break;

		dev_dbg(dev, "%s: hcall ret(%ld), retrying.\n", __func__, hret);
	}

	switch (hret) {
	case H_SUCCESS:
		ret = 0;
		break;
	case H_OP_MODE:
	case H_TOO_BIG:
		ret = -E2BIG;
		break;
	case H_RESCINDED:
		ret = -EACCES;
		break;
	case H_HARDWARE:
		ret = -EPERM;
		break;
	case H_NOT_ENOUGH_RESOURCES:
	case H_RESOURCE:
	case H_BUSY:
		ret = -EBUSY;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		dev_dbg(dev, "%s: Sync h_cop_op failure (ret:%d) (hret:%ld)\n",
			__func__, ret, hret);

	op->hcall_err = hret;
	return ret;
}
EXPORT_SYMBOL(vio_h_cop_sync);

static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
{
	const __be32 *dma_window;
	struct iommu_table *tbl;
	unsigned long offset, size;

	dma_window = of_get_property(dev->dev.of_node,
				     "ibm,my-dma-window", NULL);
	if (!dma_window)
		return NULL;

	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
	if (tbl == NULL)
		return NULL;

	of_parse_dma_window(dev->dev.of_node, dma_window,
			    &tbl->it_index, &offset, &size);

	/* TCE table size - measured in TCE entries */
	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
	tbl->it_size = size >> tbl->it_page_shift;
	/* offset for VIO should always be 0 */
	tbl->it_offset = offset >> tbl->it_page_shift;
	tbl->it_busno = 0;
	tbl->it_type = TCE_VB;
	tbl->it_blocksize = 16;

	if (firmware_has_feature(FW_FEATURE_LPAR))
		tbl->it_ops = &iommu_table_lpar_multi_ops;
	else
		tbl->it_ops = &iommu_table_pseries_ops;

	return iommu_init_table(tbl, -1);
}
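/**
 * vio_match_device - Tell if a VIO device has a matching
 *			VIO device id structure.
 * @ids:	array of VIO device id structures to search in
 * @dev:	the VIO device structure to match against
 *
 * Used by a driver to check whether a VIO device present in the
 * system is in its list of supported devices.  Returns the matching
 * vio_device_id structure or NULL if there is no match.
 */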
static const struct vio_device_id *vio_match_device(
		const struct vio_device_id *ids, const struct vio_dev *dev)
{
	while (ids->type[0] != '\0') {
		if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
		    of_device_is_compatible(dev->dev.of_node,
					    ids->compat))
			return ids;
		ids++;
	}
	return NULL;
}
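/*
 * Convert from struct device to struct vio_dev and pass to driver.
 * dev->driver has already been set by generic code because vio_bus_match
 * succeeded.
 */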
static int vio_bus_probe(struct device *dev)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	const struct vio_device_id *id;
	int error = -ENODEV;

	if (!viodrv->probe)
		return error;

	id = vio_match_device(viodrv->id_table, viodev);
	if (id) {
		memset(&viodev->cmo, 0, sizeof(viodev->cmo));
		if (firmware_has_feature(FW_FEATURE_CMO)) {
			error = vio_cmo_bus_probe(viodev);
			if (error)
				return error;
		}
		error = viodrv->probe(viodev, id);
		if (error && firmware_has_feature(FW_FEATURE_CMO))
			vio_cmo_bus_remove(viodev);
	}

	return error;
}
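/* convert from struct device to struct vio_dev and pass to driver. */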
static int vio_bus_remove(struct device *dev)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	struct device *devptr;
	int ret = 1;

	/*
	 * Hold a reference to the device after the remove function is called
	 * to allow for CMO accounting cleanup for the device.
	 */
	devptr = get_device(dev);

	if (viodrv->remove)
		ret = viodrv->remove(viodev);

	if (!ret && firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_bus_remove(viodev);

	put_device(devptr);
	return ret;
}
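/**
 * vio_register_driver - Register a new vio driver
 * @viodrv:	The vio_driver structure to be registered.
 */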
int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
			  const char *mod_name)
{
	pr_debug("%s: driver %s registering\n", __func__, viodrv->name);

	/* fill in 'struct driver' fields */
	viodrv->driver.name = viodrv->name;
	viodrv->driver.pm = viodrv->pm;
	viodrv->driver.bus = &vio_bus_type;
	viodrv->driver.owner = owner;
	viodrv->driver.mod_name = mod_name;

	return driver_register(&viodrv->driver);
}
EXPORT_SYMBOL(__vio_register_driver);
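/**
 * vio_unregister_driver - Remove registration of vio driver.
 * @viodrv:	The vio_driver struct to be removed from registration
 */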
void vio_unregister_driver(struct vio_driver *viodrv)
{
	driver_unregister(&viodrv->driver);
}
EXPORT_SYMBOL(vio_unregister_driver);
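/* vio_dev refcount hit 0 */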
static void vio_dev_release(struct device *dev)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);

	if (tbl)
		iommu_tce_table_put(tbl);
	of_node_put(dev->of_node);
	kfree(to_vio_dev(dev));
}
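/**
 * vio_register_device_node - Register a new vio device.
 * @of_node:	The OF node for this device.
 *
 * Creates and initializes a vio_dev structure from the data in
 * of_node and adds it to the list of virtual devices.
 * Returns a pointer to the created vio_dev or NULL if node has
 * NULL value of compatible field.
 */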
struct vio_dev *vio_register_device_node(struct device_node *of_node)
{
	struct vio_dev *viodev;
	struct device_node *parent_node;
	const __be32 *prop;
	enum vio_dev_family family;

	/*
	 * Determine if this node is under the 'vdevice' parent or the
	 * 'ibm,platform-facilities' parent; this decides the device family.
	 */
	parent_node = of_get_parent(of_node);
	if (parent_node) {
		if (of_node_is_type(parent_node, "ibm,platform-facilities"))
			family = PFO;
		else if (of_node_is_type(parent_node, "vdevice"))
			family = VDEVICE;
		else {
			pr_warn("%s: parent(%pOF) of %pOFn not recognized.\n",
					__func__,
					parent_node,
					of_node);
			of_node_put(parent_node);
			return NULL;
		}
		of_node_put(parent_node);
	} else {
		pr_warn("%s: could not determine the parent of node %pOFn.\n",
				__func__, of_node);
		return NULL;
	}

	if (family == PFO) {
		if (of_get_property(of_node, "interrupt-controller", NULL)) {
			pr_debug("%s: Skipping the interrupt controller %pOFn.\n",
					__func__, of_node);
			return NULL;
		}
	}

	/* allocate a vio_dev for this node */
	viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
	if (viodev == NULL) {
		pr_warn("%s: allocation failure for VIO device.\n", __func__);
		return NULL;
	}

	/* vdevice nodes need 'device_type' and a unit address ('reg') */
	viodev->family = family;
	if (viodev->family == VDEVICE) {
		unsigned int unit_address;

		viodev->type = of_node_get_device_type(of_node);
		if (!viodev->type) {
			pr_warn("%s: node %pOFn is missing the 'device_type' "
					"property.\n", __func__, of_node);
			goto out;
		}

		prop = of_get_property(of_node, "reg", NULL);
		if (prop == NULL) {
			pr_warn("%s: node %pOFn missing 'reg'\n",
					__func__, of_node);
			goto out;
		}
		unit_address = of_read_number(prop, 1);
		dev_set_name(&viodev->dev, "%x", unit_address);
		viodev->irq = irq_of_parse_and_map(of_node, 0);
		viodev->unit_address = unit_address;
	} else {
		/*
		 * PFO devices need their resource_id for submitting COP_OPs.
		 * This is an optional field for devices, but is required
		 * when performing synchronous ops.
		 */
		prop = of_get_property(of_node, "ibm,resource-id", NULL);
		if (prop != NULL)
			viodev->resource_id = of_read_number(prop, 1);

		dev_set_name(&viodev->dev, "%pOFn", of_node);
		viodev->type = dev_name(&viodev->dev);
		viodev->irq = 0;
	}

	viodev->name = of_node->name;
	viodev->dev.of_node = of_node_get(of_node);

	set_dev_node(&viodev->dev, of_node_to_nid(of_node));

	/* init generic 'struct device' fields: */
	viodev->dev.parent = &vio_bus_device.dev;
	viodev->dev.bus = &vio_bus_type;
	viodev->dev.release = vio_dev_release;

	if (of_get_property(viodev->dev.of_node, "ibm,my-dma-window", NULL)) {
		if (firmware_has_feature(FW_FEATURE_CMO))
			vio_cmo_set_dma_ops(viodev);
		else
			set_dma_ops(&viodev->dev, &dma_iommu_ops);

		set_iommu_table_base(&viodev->dev,
				     vio_build_iommu_table(viodev));

		/* needed to ensure proper operation of coherent allocations
		 * later, in case driver doesn't set it explicitly */
		viodev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
		viodev->dev.dma_mask = &viodev->dev.coherent_dma_mask;
	}

	/* register with generic device framework */
	if (device_register(&viodev->dev)) {
		printk(KERN_ERR "%s: failed to register device %s\n",
				__func__, dev_name(&viodev->dev));
		put_device(&viodev->dev);
		return NULL;
	}

	return viodev;

out:	/* Use this exit point for any return prior to device_register */
	kfree(viodev);

	return NULL;
}
EXPORT_SYMBOL(vio_register_device_node);
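/*
 * vio_bus_scan_register_devices - Scan OF and register each child
 *			of root_name as a device.
 * @root_name: OF node name for the root of the subtree to search.
 *		This must be non-NULL.
 *
 * Starting from the root node provided, register the device node for
 * each child beneath the root.
 */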
static void vio_bus_scan_register_devices(char *root_name)
{
	struct device_node *node_root, *node_child;

	if (!root_name)
		return;

	node_root = of_find_node_by_name(NULL, root_name);
	if (node_root) {

		/*
		 * Create struct vio_devices for each virtual device in
		 * the device tree. Drivers will associate with them later.
		 */
		node_child = of_get_next_child(node_root, NULL);
		while (node_child) {
			vio_register_device_node(node_child);
			node_child = of_get_next_child(node_root, node_child);
		}
		of_node_put(node_root);
	}
}
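/**
 * vio_bus_init - Initialize the virtual IO bus
 */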
static int __init vio_bus_init(void)
{
	int err;

	if (firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_sysfs_init();

	err = bus_register(&vio_bus_type);
	if (err) {
		printk(KERN_ERR "failed to register VIO bus\n");
		return err;
	}

	/*
	 * The fake parent of all vio devices, just to give us
	 * a nice directory
	 */
	err = device_register(&vio_bus_device.dev);
	if (err) {
		printk(KERN_WARNING "%s: device_register returned %i\n",
				__func__, err);
		return err;
	}

	if (firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_bus_init();

	return 0;
}
postcore_initcall(vio_bus_init);

static int __init vio_device_init(void)
{
	vio_bus_scan_register_devices("vdevice");
	vio_bus_scan_register_devices("ibm,platform-facilities");

	return 0;
}
device_initcall(vio_device_init);

static ssize_t name_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
}
static DEVICE_ATTR_RO(name);

static ssize_t devspec_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct device_node *of_node = dev->of_node;

	return sprintf(buf, "%pOF\n", of_node);
}
static DEVICE_ATTR_RO(devspec);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	const struct vio_dev *vio_dev = to_vio_dev(dev);
	struct device_node *dn;
	const char *cp;

	dn = dev->of_node;
	if (!dn) {
		strcpy(buf, "\n");
		return strlen(buf);
	}
	cp = of_get_property(dn, "compatible", NULL);
	if (!cp) {
		strcpy(buf, "\n");
		return strlen(buf);
	}

	return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *vio_dev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_devspec.attr,
	&dev_attr_modalias.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vio_dev);

void vio_unregister_device(struct vio_dev *viodev)
{
	device_unregister(&viodev->dev);
	if (viodev->family == VDEVICE)
		irq_dispose_mapping(viodev->irq);
}
EXPORT_SYMBOL(vio_unregister_device);

static int vio_bus_match(struct device *dev, struct device_driver *drv)
{
	const struct vio_dev *vio_dev = to_vio_dev(dev);
	struct vio_driver *vio_drv = to_vio_driver(drv);
	const struct vio_device_id *ids = vio_drv->id_table;

	return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL);
}

static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
{
	const struct vio_dev *vio_dev = to_vio_dev(dev);
	struct device_node *dn;
	const char *cp;

	dn = dev->of_node;
	if (!dn)
		return -ENODEV;
	cp = of_get_property(dn, "compatible", NULL);
	if (!cp)
		return -ENODEV;

	add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp);
	return 0;
}

struct bus_type vio_bus_type = {
	.name = "vio",
	.dev_groups = vio_dev_groups,
	.uevent = vio_hotplug,
	.match = vio_bus_match,
	.probe = vio_bus_probe,
	.remove = vio_bus_remove,
};
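/**
 * vio_get_attribute - get attribute for virtual device
 * @vdev:	The vio device to get property.
 * @which:	The property/attribute to be extracted.
 * @length:	Pointer to length of returned data size (unused if NULL).
 *
 * Calls of_get_property() to return the value of the
 * attribute specified by @which
 */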
const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
{
	return of_get_property(vdev->dev.of_node, which, length);
}
EXPORT_SYMBOL(vio_get_attribute);

#ifdef CONFIG_PPC_PSERIES
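/* vio_find_name() - internal because only vio.c knows how we formatted the
 * kobject name
 */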
static struct vio_dev *vio_find_name(const char *name)
{
	struct device *found;

	found = bus_find_device_by_name(&vio_bus_type, NULL, name);
	if (!found)
		return NULL;

	return to_vio_dev(found);
}
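/**
 * vio_find_node - find an already-registered vio_dev
 * @vnode: device_node of the virtual device we're looking for
 *
 * Takes a reference to the embedded struct device which needs to be dropped
 * after use.
 */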
struct vio_dev *vio_find_node(struct device_node *vnode)
{
	char kobj_name[20];
	struct device_node *vnode_parent;

	vnode_parent = of_get_parent(vnode);
	if (!vnode_parent)
		return NULL;

	/* construct the kobject name from the device node */
	if (of_node_is_type(vnode_parent, "vdevice")) {
		const __be32 *prop;

		prop = of_get_property(vnode, "reg", NULL);
		if (!prop)
			goto out;
		snprintf(kobj_name, sizeof(kobj_name), "%x",
			 (uint32_t)of_read_number(prop, 1));
	} else if (of_node_is_type(vnode_parent, "ibm,platform-facilities"))
		snprintf(kobj_name, sizeof(kobj_name), "%pOFn", vnode);
	else
		goto out;

	of_node_put(vnode_parent);
	return vio_find_name(kobj_name);
out:
	of_node_put(vnode_parent);
	return NULL;
}
EXPORT_SYMBOL(vio_find_node);

int vio_enable_interrupts(struct vio_dev *dev)
{
	int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
	if (rc != H_SUCCESS)
		printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
	return rc;
}
EXPORT_SYMBOL(vio_enable_interrupts);

int vio_disable_interrupts(struct vio_dev *dev)
{
	int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
	if (rc != H_SUCCESS)
		printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
	return rc;
}
EXPORT_SYMBOL(vio_disable_interrupts);
#endif /* CONFIG_PPC_PSERIES */

static int __init vio_init(void)
{
	dma_debug_add_bus(&vio_bus_type);
	return 0;
}
fs_initcall(vio_init);