1
2
3
4
5
6
7
8
9
10
11
12
13#include <linux/cpu.h>
14#include <linux/types.h>
15#include <linux/delay.h>
16#include <linux/stat.h>
17#include <linux/device.h>
18#include <linux/init.h>
19#include <linux/slab.h>
20#include <linux/console.h>
21#include <linux/export.h>
22#include <linux/mm.h>
23#include <linux/dma-map-ops.h>
24#include <linux/kobject.h>
25#include <linux/kexec.h>
26
27#include <asm/iommu.h>
28#include <asm/dma.h>
29#include <asm/vio.h>
30#include <asm/prom.h>
31#include <asm/firmware.h>
32#include <asm/tce.h>
33#include <asm/page.h>
34#include <asm/hvcall.h>
35#include <asm/machdep.h>
36
/* "Fake" parent device for all VIO devices; appears as /sys/devices/vio. */
static struct vio_dev vio_bus_device = {
	.name = "vio",
	.type = "",
	.dev.init_name = "vio",
	.dev.bus = &vio_bus_type,
};
43
44#ifdef CONFIG_PPC_SMLPAR
45
46
47
48
49
50
/**
 * struct vio_cmo_pool - A pool of IO memory entitlement
 * @size: total bytes in the pool
 * @free: bytes in the pool not currently claimed
 */
struct vio_cmo_pool {
	size_t size;
	size_t free;
};
55
56
/* Delay (in jiffies) before a scheduled entitlement rebalance runs */
#define VIO_CMO_BALANCE_DELAY 100

/* Chunk size (bytes) used when portioning out entitlement in vio_cmo_balance() */
#define VIO_CMO_BALANCE_CHUNK 131072
61
62
63
64
65
66
67
/**
 * struct vio_cmo_dev_entry - A device that is CMO-enabled and requires
 * entitlement
 * @viodev: the CMO-enabled device
 * @list: node in vio_cmo.device_list
 */
struct vio_cmo_dev_entry {
	struct vio_dev *viodev;
	struct list_head list;
};
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
/**
 * struct vio_cmo - VIO bus accounting structure for CMO entitlement
 * @lock: protects all vio_cmo fields and each device's cmo accounting
 * @balance_q: delayed work item that runs vio_cmo_balance()
 * @device_list: list of CMO-enabled (DMA-capable) devices on the bus
 * @entitled: total system entitlement, in bytes
 * @reserve: pool from which devices draw their reserved entitlement
 *           (includes @spare)
 * @excess: pool of entitlement not needed to back device reserves
 * @spare: entitlement held back (up to VIO_CMO_MIN_ENT) for hotplug
 * @min: entitlement needed for minimal system operation
 * @desired: entitlement advised for full device operation
 * @curr: bytes currently allocated across all devices
 * @high: high water mark of @curr (reset via the cmo_high sysfs attr)
 */
static struct vio_cmo {
	spinlock_t lock;
	struct delayed_work balance_q;
	struct list_head device_list;
	size_t entitled;
	struct vio_cmo_pool reserve;
	struct vio_cmo_pool excess;
	size_t spare;
	size_t min;
	size_t desired;
	size_t curr;
	size_t high;
} vio_cmo;
101
102
103
104
105static int vio_cmo_num_OF_devs(void)
106{
107 struct device_node *node_vroot;
108 int count = 0;
109
110
111
112
113
114 node_vroot = of_find_node_by_name(NULL, "vdevice");
115 if (node_vroot) {
116 struct device_node *of_node;
117 struct property *prop;
118
119 for_each_child_of_node(node_vroot, of_node) {
120 prop = of_find_property(of_node, "ibm,my-dma-window",
121 NULL);
122 if (prop)
123 count++;
124 }
125 }
126 of_node_put(node_vroot);
127 return count;
128}
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
/**
 * vio_cmo_alloc - allocate IO memory entitlement for a device
 * @viodev: VIO device requesting IO memory
 * @size: size of the allocation, in bytes
 *
 * The request is satisfied first from the device's unconsumed reserve
 * entitlement and then from the bus-wide excess pool; the excess pool
 * may only be tapped while the spare pool is fully funded.
 *
 * Returns: 0 on success, -ENOMEM when entitlement is exhausted.
 */
static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
{
	unsigned long flags;
	size_t reserve_free = 0;
	size_t excess_free = 0;
	int ret = -ENOMEM;

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Determine the amount of free entitlement available in reserve */
	if (viodev->cmo.entitled > viodev->cmo.allocated)
		reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;

	/* If spare is not fulfilled, the excess pool can not be used. */
	if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
		excess_free = vio_cmo.excess.free;

	/* The request can be satisfied */
	if ((reserve_free + excess_free) >= size) {
		vio_cmo.curr += size;
		if (vio_cmo.curr > vio_cmo.high)
			vio_cmo.high = vio_cmo.curr;
		viodev->cmo.allocated += size;
		/* whatever reserve could not cover comes from excess */
		size -= min(reserve_free, size);
		vio_cmo.excess.free -= size;
		ret = 0;
	}

	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return ret;
}
175
176
177
178
179
180
181
182
183
184
185
186
187
/**
 * vio_cmo_dealloc - deallocate IO memory entitlement for a device
 * @viodev: VIO device freeing IO memory
 * @size: size of the deallocation, in bytes
 *
 * The freed entitlement is split between the portion that had been
 * drawn from the excess pool and the portion from the device's reserve.
 * The spare pool is replenished first (from excess, then from the
 * device's reserve above VIO_CMO_MIN_ENT), then the reserve pool is
 * grown toward the bus-wide desired level.  A rebalance is scheduled
 * whenever pool sizes were changed.
 */
static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
{
	unsigned long flags;
	size_t spare_needed = 0;
	size_t excess_freed = 0;
	size_t reserve_freed = size;
	size_t tmp;
	int balance = 0;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	vio_cmo.curr -= size;

	/* Amount of memory freed from the excess pool */
	if (viodev->cmo.allocated > viodev->cmo.entitled) {
		excess_freed = min(reserve_freed, (viodev->cmo.allocated -
		                                   viodev->cmo.entitled));
		reserve_freed -= excess_freed;
	}

	/* Remove allocation from device */
	viodev->cmo.allocated -= (reserve_freed + excess_freed);

	/* Spare is a subset of the reserve pool, replenish it first. */
	spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare;

	/*
	 * Replenish the spare in the reserve pool from the excess pool.
	 * This moves entitlement into the reserve pool.
	 */
	if (spare_needed && excess_freed) {
		tmp = min(excess_freed, spare_needed);
		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;
		vio_cmo.spare += tmp;
		excess_freed -= tmp;
		spare_needed -= tmp;
		balance = 1;
	}

	/*
	 * Replenish the spare in the reserve pool from the reserve pool.
	 * This removes entitlement from the device down to VIO_CMO_MIN_ENT,
	 * if needed, and gives it to the spare pool; the amount of used
	 * memory in this pool does not change.
	 */
	if (spare_needed && reserve_freed) {
		tmp = min3(spare_needed, reserve_freed, (viodev->cmo.entitled - VIO_CMO_MIN_ENT));

		vio_cmo.spare += tmp;
		viodev->cmo.entitled -= tmp;
		reserve_freed -= tmp;
		spare_needed -= tmp;
		balance = 1;
	}

	/*
	 * Increase the reserve pool until the desired allocation is met.
	 * Move an allocation freed from the excess pool into the reserve
	 * pool and schedule a balance operation.
	 */
	if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) {
		tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size));

		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;
		excess_freed -= tmp;
		balance = 1;
	}

	/* Return memory from the excess pool to that pool */
	if (excess_freed)
		vio_cmo.excess.free += excess_freed;

	if (balance)
		schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
265
266
267
268
269
270
271
272
273
274
275
276
/**
 * vio_cmo_entitlement_update - Manage system entitlement changes
 * @new_entitlement: new system entitlement to attempt to accommodate
 *
 * Increases in entitlement are used first to top up the spare pool and
 * the remainder is credited to the excess pool.  Decreases are funded
 * from unused excess entitlement and, if necessary, by shrinking each
 * device's entitlement down toward its allocated amount (never below
 * VIO_CMO_MIN_ENT).  On success a rebalance is scheduled immediately.
 *
 * Returns: 0 on success, -ENOMEM when the requested decrease cannot be
 * accommodated without taking allocated/minimum entitlement.
 */
int vio_cmo_entitlement_update(size_t new_entitlement)
{
	struct vio_dev *viodev;
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t avail, delta, tmp;

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Entitlement increases */
	if (new_entitlement > vio_cmo.entitled) {
		delta = new_entitlement - vio_cmo.entitled;

		/* Fulfill spare allocation */
		if (vio_cmo.spare < VIO_CMO_MIN_ENT) {
			tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare));
			vio_cmo.spare += tmp;
			vio_cmo.reserve.size += tmp;
			delta -= tmp;
		}

		/* Remaining new allocation goes to the excess pool */
		vio_cmo.entitled += delta;
		vio_cmo.excess.size += delta;
		vio_cmo.excess.free += delta;

		goto out;
	}

	/* Entitlement decreases */
	delta = vio_cmo.entitled - new_entitlement;
	avail = vio_cmo.excess.free;

	/*
	 * Need to check how much unused entitlement each device can
	 * sacrifice to fulfill entitlement change.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		if (avail >= delta)
			break;

		viodev = dev_ent->viodev;
		if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
		    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
			avail += viodev->cmo.entitled -
			         max_t(size_t, viodev->cmo.allocated,
			               VIO_CMO_MIN_ENT);
	}

	if (delta <= avail) {
		vio_cmo.entitled -= delta;

		/* Take entitlement from the excess pool first */
		tmp = min(vio_cmo.excess.free, delta);
		vio_cmo.excess.size -= tmp;
		vio_cmo.excess.free -= tmp;
		delta -= tmp;

		/*
		 * Remove all but VIO_CMO_MIN_ENT bytes from devices
		 * until the entitlement change is served
		 */
		list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
			if (!delta)
				break;

			viodev = dev_ent->viodev;
			tmp = 0;
			if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
			    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
				tmp = viodev->cmo.entitled -
				      max_t(size_t, viodev->cmo.allocated,
				            VIO_CMO_MIN_ENT);
			viodev->cmo.entitled -= min(tmp, delta);
			delta -= min(tmp, delta);
		}
	} else {
		spin_unlock_irqrestore(&vio_cmo.lock, flags);
		return -ENOMEM;
	}

out:
	schedule_delayed_work(&vio_cmo.balance_q, 0);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return 0;
}
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
/**
 * vio_cmo_balance - Balance entitlement among devices
 * @work: work queue structure for this operation
 *
 * Resets every device to the minimum entitlement, then portions out the
 * remaining system entitlement (above the minimum and the spare pool)
 * in VIO_CMO_BALANCE_CHUNK pieces, round-robin, until each device's
 * desired level is met or the available entitlement is consumed.
 * Finally recomputes the reserve and excess pool sizes from the new
 * per-device entitlements.
 */
static void vio_cmo_balance(struct work_struct *work)
{
	struct vio_cmo *cmo;
	struct vio_dev *viodev;
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t avail = 0, level, chunk, need;
	int devcount = 0, fulfilled;

	cmo = container_of(work, struct vio_cmo, balance_q.work);

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Calculate minimum entitlement and fulfill spare */
	cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
	BUG_ON(cmo->min > cmo->entitled);
	cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min));
	cmo->min += cmo->spare;
	cmo->desired = cmo->min;

	/*
	 * Determine how much entitlement is available and reset device
	 * entitlements to the minimum level.
	 */
	avail = cmo->entitled - cmo->spare;
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		viodev = dev_ent->viodev;
		devcount++;
		viodev->cmo.entitled = VIO_CMO_MIN_ENT;
		cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
		avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
	}

	/*
	 * Having provided each device with the minimum entitlement, loop
	 * over the devices portioning out the remaining entitlement
	 * until there is nothing left.
	 */
	level = VIO_CMO_MIN_ENT;
	while (avail) {
		fulfilled = 0;
		list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
			viodev = dev_ent->viodev;

			if (viodev->cmo.desired <= level) {
				fulfilled++;
				continue;
			}

			/*
			 * Give the device up to VIO_CMO_BALANCE_CHUNK
			 * bytes of entitlement, but do not exceed the
			 * desired level of entitlement for the device.
			 */
			chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
			chunk = min(chunk, (viodev->cmo.desired -
			                    viodev->cmo.entitled));
			viodev->cmo.entitled += chunk;

			/*
			 * If the memory for this entitlement increase was
			 * already allocated to the device it does not come
			 * from the available pool being portioned out.
			 */
			need = max(viodev->cmo.allocated, viodev->cmo.entitled)-
			       max(viodev->cmo.allocated, level);
			avail -= need;

		}
		if (fulfilled == devcount)
			break;
		level += VIO_CMO_BALANCE_CHUNK;
	}

	/* Calculate new reserve and excess pool sizes */
	cmo->reserve.size = cmo->min;
	cmo->excess.free = 0;
	cmo->excess.size = 0;
	need = 0;
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		viodev = dev_ent->viodev;
		/* Calculated reserve size above the minimum entitlement */
		if (viodev->cmo.entitled)
			cmo->reserve.size += (viodev->cmo.entitled -
			                      VIO_CMO_MIN_ENT);
		/* Calculated used excess entitlement */
		if (viodev->cmo.allocated > viodev->cmo.entitled)
			need += viodev->cmo.allocated - viodev->cmo.entitled;
	}
	cmo->excess.size = cmo->entitled - cmo->reserve.size;
	cmo->excess.free = cmo->excess.size - need;

	cancel_delayed_work(to_delayed_work(work));
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
480
481static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
482 dma_addr_t *dma_handle, gfp_t flag,
483 unsigned long attrs)
484{
485 struct vio_dev *viodev = to_vio_dev(dev);
486 void *ret;
487
488 if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
489 atomic_inc(&viodev->cmo.allocs_failed);
490 return NULL;
491 }
492
493 ret = iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
494 dma_handle, dev->coherent_dma_mask, flag,
495 dev_to_node(dev));
496 if (unlikely(ret == NULL)) {
497 vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
498 atomic_inc(&viodev->cmo.allocs_failed);
499 }
500
501 return ret;
502}
503
504static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
505 void *vaddr, dma_addr_t dma_handle,
506 unsigned long attrs)
507{
508 struct vio_dev *viodev = to_vio_dev(dev);
509
510 iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
511 vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
512}
513
514static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
515 unsigned long offset, size_t size,
516 enum dma_data_direction direction,
517 unsigned long attrs)
518{
519 struct vio_dev *viodev = to_vio_dev(dev);
520 struct iommu_table *tbl = get_iommu_table_base(dev);
521 dma_addr_t ret = DMA_MAPPING_ERROR;
522
523 if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))))
524 goto out_fail;
525 ret = iommu_map_page(dev, tbl, page, offset, size, dma_get_mask(dev),
526 direction, attrs);
527 if (unlikely(ret == DMA_MAPPING_ERROR))
528 goto out_deallocate;
529 return ret;
530
531out_deallocate:
532 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
533out_fail:
534 atomic_inc(&viodev->cmo.allocs_failed);
535 return DMA_MAPPING_ERROR;
536}
537
538static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
539 size_t size,
540 enum dma_data_direction direction,
541 unsigned long attrs)
542{
543 struct vio_dev *viodev = to_vio_dev(dev);
544 struct iommu_table *tbl = get_iommu_table_base(dev);
545
546 iommu_unmap_page(tbl, dma_handle, size, direction, attrs);
547 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
548}
549
/*
 * Map a scatterlist.  Entitlement is charged up front for the worst
 * case (each segment rounded to the IOMMU page size); after mapping,
 * any entitlement not consumed by the resulting DMA segments is
 * refunded, since the IOMMU may coalesce segments.
 */
static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
                                int nelems, enum dma_data_direction direction,
                                unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct iommu_table *tbl = get_iommu_table_base(dev);
	struct scatterlist *sgl;
	int ret, count;
	size_t alloc_size = 0;

	/* worst-case charge: every input segment, page-rounded */
	for_each_sg(sglist, sgl, nelems, count)
		alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));

	ret = vio_cmo_alloc(viodev, alloc_size);
	if (ret)
		goto out_fail;
	ret = ppc_iommu_map_sg(dev, tbl, sglist, nelems, dma_get_mask(dev),
			direction, attrs);
	if (unlikely(!ret))
		goto out_deallocate;

	/* refund the difference between the charge and what was mapped */
	for_each_sg(sglist, sgl, ret, count)
		alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
	if (alloc_size)
		vio_cmo_dealloc(viodev, alloc_size);
	return ret;

out_deallocate:
	vio_cmo_dealloc(viodev, alloc_size);
out_fail:
	atomic_inc(&viodev->cmo.allocs_failed);
	return ret;
}
583
584static void vio_dma_iommu_unmap_sg(struct device *dev,
585 struct scatterlist *sglist, int nelems,
586 enum dma_data_direction direction,
587 unsigned long attrs)
588{
589 struct vio_dev *viodev = to_vio_dev(dev);
590 struct iommu_table *tbl = get_iommu_table_base(dev);
591 struct scatterlist *sgl;
592 size_t alloc_size = 0;
593 int count;
594
595 for_each_sg(sglist, sgl, nelems, count)
596 alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
597
598 ppc_iommu_unmap_sg(tbl, sglist, nelems, direction, attrs);
599 vio_cmo_dealloc(viodev, alloc_size);
600}
601
/* DMA ops for CMO devices: iommu ops wrapped with entitlement accounting */
static const struct dma_map_ops vio_dma_mapping_ops = {
	.alloc             = vio_dma_iommu_alloc_coherent,
	.free              = vio_dma_iommu_free_coherent,
	.map_sg            = vio_dma_iommu_map_sg,
	.unmap_sg          = vio_dma_iommu_unmap_sg,
	.map_page          = vio_dma_iommu_map_page,
	.unmap_page        = vio_dma_iommu_unmap_page,
	.dma_supported     = dma_iommu_dma_supported,
	.get_required_mask = dma_iommu_get_required_mask,
	.mmap              = dma_common_mmap,
	.get_sgtable       = dma_common_get_sgtable,
	.alloc_pages       = dma_common_alloc_pages,
	.free_pages        = dma_common_free_pages,
};
616
617
618
619
620
621
622
623
624
625
626
/**
 * vio_cmo_set_dev_desired - Set desired entitlement for a device
 * @viodev: struct vio_dev for the device to alter
 * @desired: new desired entitlement level, in bytes (clamped up to
 *           VIO_CMO_MIN_ENT)
 *
 * For use by devices (and the cmo_desired sysfs attribute) to change
 * their entitlement at runtime.  Bus-wide desired accounting is
 * adjusted, excess entitlement is released immediately on a decrease,
 * and a rebalance is scheduled.  No-op unless CMO firmware support is
 * present or when the device is not on the CMO device list.
 */
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
{
	unsigned long flags;
	struct vio_cmo_dev_entry *dev_ent;
	int found = 0;

	if (!firmware_has_feature(FW_FEATURE_CMO))
		return;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	if (desired < VIO_CMO_MIN_ENT)
		desired = VIO_CMO_MIN_ENT;

	/*
	 * Changes will not be made for devices not in the device list.
	 * If it is not in the device list, then no driver is loaded
	 * for the device and it can not receive entitlement.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
		if (viodev == dev_ent->viodev) {
			found = 1;
			break;
		}
	if (!found) {
		spin_unlock_irqrestore(&vio_cmo.lock, flags);
		return;
	}

	/* Increase/decrease in desired device entitlement */
	if (desired >= viodev->cmo.desired) {
		/* Just bump the bus and device values prior to a balance*/
		vio_cmo.desired += desired - viodev->cmo.desired;
		viodev->cmo.desired = desired;
	} else {
		/* Decrease bus and device values for desired entitlement */
		vio_cmo.desired -= viodev->cmo.desired - desired;
		viodev->cmo.desired = desired;
		/*
		 * If less entitlement is desired than current entitlement,
		 * move excess entitlement to the excess pool.
		 */
		if (viodev->cmo.entitled > desired) {
			vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
			vio_cmo.excess.size += viodev->cmo.entitled - desired;
			/*
			 * If entitlement moving from the reserve pool to the
			 * excess pool is currently unused, add to the excess
			 * free counter.
			 */
			if (viodev->cmo.allocated < viodev->cmo.entitled)
				vio_cmo.excess.free += viodev->cmo.entitled -
				                       max(viodev->cmo.allocated, desired);
			viodev->cmo.entitled = desired;
		}
	}
	schedule_delayed_work(&vio_cmo.balance_q, 0);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700static int vio_cmo_bus_probe(struct vio_dev *viodev)
701{
702 struct vio_cmo_dev_entry *dev_ent;
703 struct device *dev = &viodev->dev;
704 struct iommu_table *tbl;
705 struct vio_driver *viodrv = to_vio_driver(dev->driver);
706 unsigned long flags;
707 size_t size;
708 bool dma_capable = false;
709
710 tbl = get_iommu_table_base(dev);
711
712
713 switch (viodev->family) {
714 case VDEVICE:
715 if (of_get_property(viodev->dev.of_node,
716 "ibm,my-dma-window", NULL))
717 dma_capable = true;
718 break;
719 case PFO:
720 dma_capable = false;
721 break;
722 default:
723 dev_warn(dev, "unknown device family: %d\n", viodev->family);
724 BUG();
725 break;
726 }
727
728
729 if (dma_capable) {
730
731 if (!viodrv->get_desired_dma) {
732 dev_err(dev, "%s: device driver does not support CMO\n",
733 __func__);
734 return -EINVAL;
735 }
736
737 viodev->cmo.desired =
738 IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl);
739 if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
740 viodev->cmo.desired = VIO_CMO_MIN_ENT;
741 size = VIO_CMO_MIN_ENT;
742
743 dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry),
744 GFP_KERNEL);
745 if (!dev_ent)
746 return -ENOMEM;
747
748 dev_ent->viodev = viodev;
749 spin_lock_irqsave(&vio_cmo.lock, flags);
750 list_add(&dev_ent->list, &vio_cmo.device_list);
751 } else {
752 viodev->cmo.desired = 0;
753 size = 0;
754 spin_lock_irqsave(&vio_cmo.lock, flags);
755 }
756
757
758
759
760
761
762
763 if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) *
764 VIO_CMO_MIN_ENT)) {
765
766 if (size)
767 vio_cmo.desired += (viodev->cmo.desired -
768 VIO_CMO_MIN_ENT);
769 } else {
770 size_t tmp;
771
772 tmp = vio_cmo.spare + vio_cmo.excess.free;
773 if (tmp < size) {
774 dev_err(dev, "%s: insufficient free "
775 "entitlement to add device. "
776 "Need %lu, have %lu\n", __func__,
777 size, (vio_cmo.spare + tmp));
778 spin_unlock_irqrestore(&vio_cmo.lock, flags);
779 return -ENOMEM;
780 }
781
782
783 tmp = min(size, vio_cmo.excess.free);
784 vio_cmo.excess.free -= tmp;
785 vio_cmo.excess.size -= tmp;
786 vio_cmo.reserve.size += tmp;
787
788
789 vio_cmo.spare -= size - tmp;
790
791
792 vio_cmo.min += size;
793 vio_cmo.desired += viodev->cmo.desired;
794 }
795 spin_unlock_irqrestore(&vio_cmo.lock, flags);
796 return 0;
797}
798
799
800
801
802
803
804
805
806
807
808
/**
 * vio_cmo_bus_remove - Handle CMO specific bus removal activities
 * @viodev: struct vio_dev being removed
 *
 * Remove the device from the CMO device list.  The minimum entitlement
 * is retained for the device as long as its node remains in the OF
 * tree; any entitlement above that is returned to the spare/excess
 * pools.  BUGs if the device still has IO memory allocated.
 */
static void vio_cmo_bus_remove(struct vio_dev *viodev)
{
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t tmp;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	if (viodev->cmo.allocated) {
		dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
		        "allocated after remove operation.\n",
		        __func__, viodev->cmo.allocated);
		BUG();
	}

	/*
	 * Remove the device from the device list being maintained for
	 * CMO enabled devices.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
		if (viodev == dev_ent->viodev) {
			list_del(&dev_ent->list);
			kfree(dev_ent);
			break;
		}

	/*
	 * Devices may not require any entitlement and they do not need
	 * to be processed.  Otherwise, return the device's entitlement
	 * back to the pools.
	 */
	if (viodev->cmo.entitled) {
		/*
		 * This device has not yet left the OF tree, its
		 * minimum entitlement remains in vio_cmo.min and
		 * vio_cmo.desired
		 */
		vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);

		/*
		 * Save min allocation for device in reserve as long
		 * as it exists in OF tree as determined by later
		 * balance operation
		 */
		viodev->cmo.entitled -= VIO_CMO_MIN_ENT;

		/* Replenish spare from freed reserve pool */
		if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
			tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
			                                 vio_cmo.spare));
			vio_cmo.spare += tmp;
			viodev->cmo.entitled -= tmp;
		}

		/* Remaining device entitlement is given to the excess pool */
		vio_cmo.excess.size += viodev->cmo.entitled;
		vio_cmo.excess.free += viodev->cmo.entitled;
		vio_cmo.reserve.size -= viodev->cmo.entitled;

		/*
		 * Until the device is removed it will keep a
		 * minimum entitlement; this will guarantee that
		 * a module unload/load will result in a success.
		 */
		viodev->cmo.entitled = VIO_CMO_MIN_ENT;
		viodev->cmo.desired = VIO_CMO_MIN_ENT;
		atomic_set(&viodev->cmo.allocs_failed, 0);
	}

	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
879
/* Install the entitlement-accounting DMA ops on a CMO device. */
static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
{
	set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
}
884
885
886
887
888
889
890
891
/**
 * vio_cmo_bus_init - CMO entitlement initialization at bus init time
 *
 * Queries the hypervisor for the system's IO entitlement (h_get_mpp),
 * sizes the initial reserve pool to cover the spare pool plus the
 * minimum entitlement of every DMA-capable OF device, and places the
 * remainder in the excess pool.  Panics if the system entitlement is
 * smaller than the required reserve.
 */
static void vio_cmo_bus_init(void)
{
	struct hvcall_mpp_data mpp_data;
	int err;

	memset(&vio_cmo, 0, sizeof(struct vio_cmo));
	spin_lock_init(&vio_cmo.lock);
	INIT_LIST_HEAD(&vio_cmo.device_list);
	INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance);

	/* Get current system entitlement */
	err = h_get_mpp(&mpp_data);

	/*
	 * On failure, continue with entitlement set to 0; device probes
	 * will then fail for lack of entitlement rather than crashing.
	 */
	if (err != H_SUCCESS) {
		printk(KERN_ERR "%s: unable to determine system IO "\
		       "entitlement. (%d)\n", __func__, err);
		vio_cmo.entitled = 0;
	} else {
		vio_cmo.entitled = mpp_data.entitled_mem;
	}

	/* Set reservation and check against entitlement */
	vio_cmo.spare = VIO_CMO_MIN_ENT;
	vio_cmo.reserve.size = vio_cmo.spare;
	vio_cmo.reserve.size += (vio_cmo_num_OF_devs() *
	                         VIO_CMO_MIN_ENT);
	if (vio_cmo.reserve.size > vio_cmo.entitled) {
		printk(KERN_ERR "%s: insufficient system entitlement\n",
		       __func__);
		panic("%s: Insufficient system entitlement", __func__);
	}

	/* Set the remaining accounting variables */
	vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size;
	vio_cmo.excess.free = vio_cmo.excess.size;
	vio_cmo.min = vio_cmo.reserve.size;
	vio_cmo.desired = vio_cmo.reserve.size;
}
934
935
936
/* sysfs helper: generate a read-only show method for a per-device cmo field */
#define viodev_cmo_rd_attr(name)                                \
static ssize_t cmo_##name##_show(struct device *dev,            \
                                 struct device_attribute *attr, \
                                  char *buf)                    \
{                                                               \
	return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name);        \
}
944
945static ssize_t cmo_allocs_failed_show(struct device *dev,
946 struct device_attribute *attr, char *buf)
947{
948 struct vio_dev *viodev = to_vio_dev(dev);
949 return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
950}
951
952static ssize_t cmo_allocs_failed_store(struct device *dev,
953 struct device_attribute *attr, const char *buf, size_t count)
954{
955 struct vio_dev *viodev = to_vio_dev(dev);
956 atomic_set(&viodev->cmo.allocs_failed, 0);
957 return count;
958}
959
960static ssize_t cmo_desired_store(struct device *dev,
961 struct device_attribute *attr, const char *buf, size_t count)
962{
963 struct vio_dev *viodev = to_vio_dev(dev);
964 size_t new_desired;
965 int ret;
966
967 ret = kstrtoul(buf, 10, &new_desired);
968 if (ret)
969 return ret;
970
971 vio_cmo_set_dev_desired(viodev, new_desired);
972 return count;
973}
974
/* Read-only per-device CMO attributes */
viodev_cmo_rd_attr(desired);
viodev_cmo_rd_attr(entitled);
viodev_cmo_rd_attr(allocated);

/* Forward declarations; the show methods are defined elsewhere in this file */
static ssize_t name_show(struct device *, struct device_attribute *, char *);
static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf);

static struct device_attribute dev_attr_name;
static struct device_attribute dev_attr_devspec;
static struct device_attribute dev_attr_modalias;

static DEVICE_ATTR_RO(cmo_entitled);
static DEVICE_ATTR_RO(cmo_allocated);
static DEVICE_ATTR_RW(cmo_desired);
static DEVICE_ATTR_RW(cmo_allocs_failed);

/* Per-device sysfs attributes when CMO is enabled */
static struct attribute *vio_cmo_dev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_devspec.attr,
	&dev_attr_modalias.attr,
	&dev_attr_cmo_entitled.attr,
	&dev_attr_cmo_allocated.attr,
	&dev_attr_cmo_desired.attr,
	&dev_attr_cmo_allocs_failed.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vio_cmo_dev);
1004
1005
1006
/* sysfs helper: read-only bus attribute exposing a vio_cmo scalar field */
#define viobus_cmo_rd_attr(name)                                        \
static ssize_t cmo_bus_##name##_show(struct bus_type *bt, char *buf)    \
{                                                                       \
	return sprintf(buf, "%lu\n", vio_cmo.name);                     \
}                                                                       \
static struct bus_attribute bus_attr_cmo_bus_##name =                   \
	__ATTR(cmo_##name, S_IRUGO, cmo_bus_##name##_show, NULL)

/* sysfs helper: read-only bus attribute exposing a vio_cmo pool member */
#define viobus_cmo_pool_rd_attr(name, var)                              \
static ssize_t                                                          \
cmo_##name##_##var##_show(struct bus_type *bt, char *buf)               \
{                                                                       \
	return sprintf(buf, "%lu\n", vio_cmo.name.var);                 \
}                                                                       \
static BUS_ATTR_RO(cmo_##name##_##var)
1022
/* Bus-wide CMO counters and pool sizes exported through sysfs */
viobus_cmo_rd_attr(entitled);
viobus_cmo_rd_attr(spare);
viobus_cmo_rd_attr(min);
viobus_cmo_rd_attr(desired);
viobus_cmo_rd_attr(curr);
viobus_cmo_pool_rd_attr(reserve, size);
viobus_cmo_pool_rd_attr(excess, size);
viobus_cmo_pool_rd_attr(excess, free);
1031
/* sysfs: show the high water mark of IO entitlement usage */
static ssize_t cmo_high_show(struct bus_type *bt, char *buf)
{
	return sprintf(buf, "%lu\n", vio_cmo.high);
}

/*
 * sysfs: any write resets the high water mark to the current usage;
 * the value written is ignored.
 */
static ssize_t cmo_high_store(struct bus_type *bt, const char *buf,
			      size_t count)
{
	unsigned long flags;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	vio_cmo.high = vio_cmo.curr;
	spin_unlock_irqrestore(&vio_cmo.lock, flags);

	return count;
}
static BUS_ATTR_RW(cmo_high);
1049
/* Bus-level sysfs attributes when CMO is enabled */
static struct attribute *vio_bus_attrs[] = {
	&bus_attr_cmo_bus_entitled.attr,
	&bus_attr_cmo_bus_spare.attr,
	&bus_attr_cmo_bus_min.attr,
	&bus_attr_cmo_bus_desired.attr,
	&bus_attr_cmo_bus_curr.attr,
	&bus_attr_cmo_high.attr,
	&bus_attr_cmo_reserve_size.attr,
	&bus_attr_cmo_excess_size.attr,
	&bus_attr_cmo_excess_free.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vio_bus);
1063
/* Hook the CMO attribute groups into the VIO bus type before registration. */
static void vio_cmo_sysfs_init(void)
{
	vio_bus_type.dev_groups = vio_cmo_dev_groups;
	vio_bus_type.bus_groups = vio_bus_groups;
}
1069#else
/* No-op CMO stubs for kernels built without CONFIG_PPC_SMLPAR */
int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
static void vio_cmo_bus_init(void) {}
static void vio_cmo_sysfs_init(void) { }
#endif
EXPORT_SYMBOL(vio_cmo_entitlement_update);
EXPORT_SYMBOL(vio_cmo_set_dev_desired);
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op)
1112{
1113 struct device *dev = &vdev->dev;
1114 unsigned long deadline = 0;
1115 long hret = 0;
1116 int ret = 0;
1117
1118 if (op->timeout)
1119 deadline = jiffies + msecs_to_jiffies(op->timeout);
1120
1121 while (true) {
1122 hret = plpar_hcall_norets(H_COP, op->flags,
1123 vdev->resource_id,
1124 op->in, op->inlen, op->out,
1125 op->outlen, op->csbcpb);
1126
1127 if (hret == H_SUCCESS ||
1128 (hret != H_NOT_ENOUGH_RESOURCES &&
1129 hret != H_BUSY && hret != H_RESOURCE) ||
1130 (op->timeout && time_after(deadline, jiffies)))
1131 break;
1132
1133 dev_dbg(dev, "%s: hcall ret(%ld), retrying.\n", __func__, hret);
1134 }
1135
1136 switch (hret) {
1137 case H_SUCCESS:
1138 ret = 0;
1139 break;
1140 case H_OP_MODE:
1141 case H_TOO_BIG:
1142 ret = -E2BIG;
1143 break;
1144 case H_RESCINDED:
1145 ret = -EACCES;
1146 break;
1147 case H_HARDWARE:
1148 ret = -EPERM;
1149 break;
1150 case H_NOT_ENOUGH_RESOURCES:
1151 case H_RESOURCE:
1152 case H_BUSY:
1153 ret = -EBUSY;
1154 break;
1155 default:
1156 ret = -EINVAL;
1157 break;
1158 }
1159
1160 if (ret)
1161 dev_dbg(dev, "%s: Sync h_cop_op failure (ret:%d) (hret:%ld)\n",
1162 __func__, ret, hret);
1163
1164 op->hcall_err = hret;
1165 return ret;
1166}
1167EXPORT_SYMBOL(vio_h_cop_sync);
1168
/*
 * Build and initialize an IOMMU table for a VIO device from its
 * "ibm,my-dma-window" OF property.  Returns NULL when the property is
 * absent or the table cannot be allocated.
 */
static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
{
	const __be32 *dma_window;
	struct iommu_table *tbl;
	unsigned long offset, size;

	dma_window = of_get_property(dev->dev.of_node,
	                             "ibm,my-dma-window", NULL);
	if (!dma_window)
		return NULL;

	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
	if (tbl == NULL)
		return NULL;

	kref_init(&tbl->it_kref);

	of_parse_dma_window(dev->dev.of_node, dma_window,
	                    &tbl->it_index, &offset, &size);

	/* TCE table size - measured in entries */
	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
	tbl->it_size = size >> tbl->it_page_shift;
	/* offset for VIO should always be 0 */
	tbl->it_offset = offset >> tbl->it_page_shift;
	tbl->it_busno = 0;
	tbl->it_type = TCE_VB;
	tbl->it_blocksize = 16;

	if (firmware_has_feature(FW_FEATURE_LPAR))
		tbl->it_ops = &iommu_table_lpar_multi_ops;
	else
		tbl->it_ops = &iommu_table_pseries_ops;

	return iommu_init_table(tbl, -1, 0, 0);
}
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216static const struct vio_device_id *vio_match_device(
1217 const struct vio_device_id *ids, const struct vio_dev *dev)
1218{
1219 while (ids->type[0] != '\0') {
1220 if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
1221 of_device_is_compatible(dev->dev.of_node,
1222 ids->compat))
1223 return ids;
1224 ids++;
1225 }
1226 return NULL;
1227}
1228
1229
1230
1231
1232
1233
1234static int vio_bus_probe(struct device *dev)
1235{
1236 struct vio_dev *viodev = to_vio_dev(dev);
1237 struct vio_driver *viodrv = to_vio_driver(dev->driver);
1238 const struct vio_device_id *id;
1239 int error = -ENODEV;
1240
1241 if (!viodrv->probe)
1242 return error;
1243
1244 id = vio_match_device(viodrv->id_table, viodev);
1245 if (id) {
1246 memset(&viodev->cmo, 0, sizeof(viodev->cmo));
1247 if (firmware_has_feature(FW_FEATURE_CMO)) {
1248 error = vio_cmo_bus_probe(viodev);
1249 if (error)
1250 return error;
1251 }
1252 error = viodrv->probe(viodev, id);
1253 if (error && firmware_has_feature(FW_FEATURE_CMO))
1254 vio_cmo_bus_remove(viodev);
1255 }
1256
1257 return error;
1258}
1259
1260
1261static void vio_bus_remove(struct device *dev)
1262{
1263 struct vio_dev *viodev = to_vio_dev(dev);
1264 struct vio_driver *viodrv = to_vio_driver(dev->driver);
1265 struct device *devptr;
1266
1267
1268
1269
1270
1271 devptr = get_device(dev);
1272
1273 if (viodrv->remove)
1274 viodrv->remove(viodev);
1275
1276 if (firmware_has_feature(FW_FEATURE_CMO))
1277 vio_cmo_bus_remove(viodev);
1278
1279 put_device(devptr);
1280}
1281
1282static void vio_bus_shutdown(struct device *dev)
1283{
1284 struct vio_dev *viodev = to_vio_dev(dev);
1285 struct vio_driver *viodrv;
1286
1287 if (dev->driver) {
1288 viodrv = to_vio_driver(dev->driver);
1289 if (viodrv->shutdown)
1290 viodrv->shutdown(viodev);
1291 else if (kexec_in_progress)
1292 vio_bus_remove(dev);
1293 }
1294}
1295
1296
1297
1298
1299
1300int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
1301 const char *mod_name)
1302{
1303
1304 if (!machine_is(pseries))
1305 return -ENODEV;
1306
1307 pr_debug("%s: driver %s registering\n", __func__, viodrv->name);
1308
1309
1310 viodrv->driver.name = viodrv->name;
1311 viodrv->driver.pm = viodrv->pm;
1312 viodrv->driver.bus = &vio_bus_type;
1313 viodrv->driver.owner = owner;
1314 viodrv->driver.mod_name = mod_name;
1315
1316 return driver_register(&viodrv->driver);
1317}
1318EXPORT_SYMBOL(__vio_register_driver);
1319
1320
1321
1322
1323
/**
 * vio_unregister_driver - Remove registration of vio driver.
 * @viodrv: The vio_driver struct to be removed from registration
 */
void vio_unregister_driver(struct vio_driver *viodrv)
{
	driver_unregister(&viodrv->driver);
}
EXPORT_SYMBOL(vio_unregister_driver);
1329
1330
1331static void vio_dev_release(struct device *dev)
1332{
1333 struct iommu_table *tbl = get_iommu_table_base(dev);
1334
1335 if (tbl)
1336 iommu_tce_table_put(tbl);
1337 of_node_put(dev->of_node);
1338 kfree(to_vio_dev(dev));
1339}
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
/**
 * vio_register_device_node: - Register a new vio device.
 * @of_node: The OF node for this device.
 *
 * Creates and initializes a vio_dev structure from the data in
 * @of_node and registers it with the driver core.  The device's family
 * is derived from its parent node (/vdevice -> VDEVICE,
 * /ibm,platform-facilities -> PFO); DMA ops and an IOMMU table are set
 * up when the node carries an "ibm,my-dma-window" property.
 *
 * Returns a pointer to the created vio_dev or NULL if the node cannot
 * be classified, lacks required properties, or registration fails.
 */
struct vio_dev *vio_register_device_node(struct device_node *of_node)
{
	struct vio_dev *viodev;
	struct device_node *parent_node;
	const __be32 *prop;
	enum vio_dev_family family;

	/*
	 * Determine if this node is a under the /vdevice node or under
	 * the /ibm,platform-facilities node.  This decides the device's
	 * family.
	 */
	parent_node = of_get_parent(of_node);
	if (parent_node) {
		if (of_node_is_type(parent_node, "ibm,platform-facilities"))
			family = PFO;
		else if (of_node_is_type(parent_node, "vdevice"))
			family = VDEVICE;
		else {
			pr_warn("%s: parent(%pOF) of %pOFn not recognized.\n",
					__func__,
					parent_node,
					of_node);
			of_node_put(parent_node);
			return NULL;
		}
		of_node_put(parent_node);
	} else {
		pr_warn("%s: could not determine the parent of node %pOFn.\n",
				__func__, of_node);
		return NULL;
	}

	if (family == PFO) {
		if (of_get_property(of_node, "interrupt-controller", NULL)) {
			pr_debug("%s: Skipping the interrupt controller %pOFn.\n",
					__func__, of_node);
			return NULL;
		}
	}

	/* allocate a vio_dev for this node */
	viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
	if (viodev == NULL) {
		pr_warn("%s: allocation failure for VIO device.\n", __func__);
		return NULL;
	}

	/* we need the 'device_type' property, in order to match with drivers */
	viodev->family = family;
	if (viodev->family == VDEVICE) {
		unsigned int unit_address;

		viodev->type = of_node_get_device_type(of_node);
		if (!viodev->type) {
			pr_warn("%s: node %pOFn is missing the 'device_type' "
					"property.\n", __func__, of_node);
			goto out;
		}

		prop = of_get_property(of_node, "reg", NULL);
		if (prop == NULL) {
			pr_warn("%s: node %pOFn missing 'reg'\n",
					__func__, of_node);
			goto out;
		}
		unit_address = of_read_number(prop, 1);
		dev_set_name(&viodev->dev, "%x", unit_address);
		viodev->irq = irq_of_parse_and_map(of_node, 0);
		viodev->unit_address = unit_address;
	} else {
		/* PFO devices need their resource_id for submitting COP_OPs
		 * This is an optional field for devices, but is required
		 * when performing synchronous ops */
		prop = of_get_property(of_node, "ibm,resource-id", NULL);
		if (prop != NULL)
			viodev->resource_id = of_read_number(prop, 1);

		dev_set_name(&viodev->dev, "%pOFn", of_node);
		viodev->type = dev_name(&viodev->dev);
		viodev->irq = 0;
	}

	viodev->name = of_node->name;
	viodev->dev.of_node = of_node_get(of_node);

	set_dev_node(&viodev->dev, of_node_to_nid(of_node));

	/* init generic 'struct device' fields: */
	viodev->dev.parent = &vio_bus_device.dev;
	viodev->dev.bus = &vio_bus_type;
	viodev->dev.release = vio_dev_release;

	if (of_get_property(viodev->dev.of_node, "ibm,my-dma-window", NULL)) {
		if (firmware_has_feature(FW_FEATURE_CMO))
			vio_cmo_set_dma_ops(viodev);
		else
			set_dma_ops(&viodev->dev, &dma_iommu_ops);

		set_iommu_table_base(&viodev->dev,
				     vio_build_iommu_table(viodev));

		/* needed to ensure proper operation of coherent allocations
		 * later, in case driver doesn't set it explicitly */
		viodev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
		viodev->dev.dma_mask = &viodev->dev.coherent_dma_mask;
	}

	/* register with generic device framework */
	if (device_register(&viodev->dev)) {
		printk(KERN_ERR "%s: failed to register device %s\n",
				__func__, dev_name(&viodev->dev));
		/* put_device invokes vio_dev_release, freeing viodev */
		put_device(&viodev->dev);
		return NULL;
	}

	return viodev;

out:	/* Use this exit point for any return prior to device_register */
	kfree(viodev);

	return NULL;
}
EXPORT_SYMBOL(vio_register_device_node);
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482static void vio_bus_scan_register_devices(char *root_name)
1483{
1484 struct device_node *node_root, *node_child;
1485
1486 if (!root_name)
1487 return;
1488
1489 node_root = of_find_node_by_name(NULL, root_name);
1490 if (node_root) {
1491
1492
1493
1494
1495
1496 node_child = of_get_next_child(node_root, NULL);
1497 while (node_child) {
1498 vio_register_device_node(node_child);
1499 node_child = of_get_next_child(node_root, node_child);
1500 }
1501 of_node_put(node_root);
1502 }
1503}
1504
1505
1506
1507
1508static int __init vio_bus_init(void)
1509{
1510 int err;
1511
1512 if (firmware_has_feature(FW_FEATURE_CMO))
1513 vio_cmo_sysfs_init();
1514
1515 err = bus_register(&vio_bus_type);
1516 if (err) {
1517 printk(KERN_ERR "failed to register VIO bus\n");
1518 return err;
1519 }
1520
1521
1522
1523
1524
1525 err = device_register(&vio_bus_device.dev);
1526 if (err) {
1527 printk(KERN_WARNING "%s: device_register returned %i\n",
1528 __func__, err);
1529 return err;
1530 }
1531
1532 if (firmware_has_feature(FW_FEATURE_CMO))
1533 vio_cmo_bus_init();
1534
1535 return 0;
1536}
1537machine_postcore_initcall(pseries, vio_bus_init);
1538
1539static int __init vio_device_init(void)
1540{
1541 vio_bus_scan_register_devices("vdevice");
1542 vio_bus_scan_register_devices("ibm,platform-facilities");
1543
1544 return 0;
1545}
1546machine_device_initcall(pseries, vio_device_init);
1547
1548static ssize_t name_show(struct device *dev,
1549 struct device_attribute *attr, char *buf)
1550{
1551 return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
1552}
1553static DEVICE_ATTR_RO(name);
1554
1555static ssize_t devspec_show(struct device *dev,
1556 struct device_attribute *attr, char *buf)
1557{
1558 struct device_node *of_node = dev->of_node;
1559
1560 return sprintf(buf, "%pOF\n", of_node);
1561}
1562static DEVICE_ATTR_RO(devspec);
1563
1564static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
1565 char *buf)
1566{
1567 const struct vio_dev *vio_dev = to_vio_dev(dev);
1568 struct device_node *dn;
1569 const char *cp;
1570
1571 dn = dev->of_node;
1572 if (!dn) {
1573 strcpy(buf, "\n");
1574 return strlen(buf);
1575 }
1576 cp = of_get_property(dn, "compatible", NULL);
1577 if (!cp) {
1578 strcpy(buf, "\n");
1579 return strlen(buf);
1580 }
1581
1582 return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
1583}
1584static DEVICE_ATTR_RO(modalias);
1585
/* Default sysfs attributes created for every device on the vio bus. */
static struct attribute *vio_dev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_devspec.attr,
	&dev_attr_modalias.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vio_dev);	/* generates vio_dev_groups for vio_bus_type */
1593
1594void vio_unregister_device(struct vio_dev *viodev)
1595{
1596 device_unregister(&viodev->dev);
1597 if (viodev->family == VDEVICE)
1598 irq_dispose_mapping(viodev->irq);
1599}
1600EXPORT_SYMBOL(vio_unregister_device);
1601
1602static int vio_bus_match(struct device *dev, struct device_driver *drv)
1603{
1604 const struct vio_dev *vio_dev = to_vio_dev(dev);
1605 struct vio_driver *vio_drv = to_vio_driver(drv);
1606 const struct vio_device_id *ids = vio_drv->id_table;
1607
1608 return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL);
1609}
1610
1611static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
1612{
1613 const struct vio_dev *vio_dev = to_vio_dev(dev);
1614 struct device_node *dn;
1615 const char *cp;
1616
1617 dn = dev->of_node;
1618 if (!dn)
1619 return -ENODEV;
1620 cp = of_get_property(dn, "compatible", NULL);
1621 if (!cp)
1622 return -ENODEV;
1623
1624 add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp);
1625 return 0;
1626}
1627
/* Bus type for the pSeries virtual I/O bus. */
struct bus_type vio_bus_type = {
	.name = "vio",
	.dev_groups = vio_dev_groups,	/* name/devspec/modalias, see above */
	.uevent = vio_hotplug,
	.match = vio_bus_match,
	.probe = vio_bus_probe,		/* defined earlier in this file */
	.remove = vio_bus_remove,
	.shutdown = vio_bus_shutdown,
};
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
/**
 * vio_get_attribute - get a device-tree property of a vio device
 * @vdev:	the vio device whose OF node is consulted
 * @which:	name of the property to look up
 * @length:	if non-NULL, set to the property length in bytes
 *
 * Thin wrapper around of_get_property() on @vdev's device node.
 * Returns the property value, or NULL if it does not exist.
 */
const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
{
	return of_get_property(vdev->dev.of_node, which, length);
}
EXPORT_SYMBOL(vio_get_attribute);
1652
1653
1654
1655
1656static struct vio_dev *vio_find_name(const char *name)
1657{
1658 struct device *found;
1659
1660 found = bus_find_device_by_name(&vio_bus_type, NULL, name);
1661 if (!found)
1662 return NULL;
1663
1664 return to_vio_dev(found);
1665}
1666
1667
1668
1669
1670
1671
1672
1673
/**
 * vio_find_node - find an already-registered vio_dev for a device node
 * @vnode: device-tree node to look up on the vio bus
 *
 * The kobject name is reconstructed from the node the same way
 * vio_register_device_node() built it: the "reg" value in hex for
 * children of "vdevice", the node name for children of
 * "ibm,platform-facilities".
 *
 * Returns the matching vio_dev with a reference held on its embedded
 * struct device (caller must put it), or NULL if the parent is not a
 * recognized vio parent or no registered device matches.
 */
struct vio_dev *vio_find_node(struct device_node *vnode)
{
	char kobj_name[20];
	struct device_node *vnode_parent;

	vnode_parent = of_get_parent(vnode);
	if (!vnode_parent)
		return NULL;

	/* construct the kobject name from the device node */
	if (of_node_is_type(vnode_parent, "vdevice")) {
		const __be32 *prop;

		prop = of_get_property(vnode, "reg", NULL);
		if (!prop)
			goto out;
		snprintf(kobj_name, sizeof(kobj_name), "%x",
			 (uint32_t)of_read_number(prop, 1));
	} else if (of_node_is_type(vnode_parent, "ibm,platform-facilities"))
		snprintf(kobj_name, sizeof(kobj_name), "%pOFn", vnode);
	else
		goto out;

	of_node_put(vnode_parent);
	return vio_find_name(kobj_name);
out:
	/* drop the parent ref taken by of_get_parent() on every exit path */
	of_node_put(vnode_parent);
	return NULL;
}
EXPORT_SYMBOL(vio_find_node);
1704
1705int vio_enable_interrupts(struct vio_dev *dev)
1706{
1707 int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
1708 if (rc != H_SUCCESS)
1709 printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
1710 return rc;
1711}
1712EXPORT_SYMBOL(vio_enable_interrupts);
1713
1714int vio_disable_interrupts(struct vio_dev *dev)
1715{
1716 int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
1717 if (rc != H_SUCCESS)
1718 printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
1719 return rc;
1720}
1721EXPORT_SYMBOL(vio_disable_interrupts);
1722
/* Register the vio bus with the DMA API debugging facility. */
static int __init vio_init(void)
{
	dma_debug_add_bus(&vio_bus_type);
	return 0;
}
machine_fs_initcall(pseries, vio_init);
1729