/*
 * IBM PowerPC Virtual I/O Infrastructure Support.
 *
 * Copyright (c) 2003,2008 IBM Corp.
 */
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kobject.h>

#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/vio.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/page.h>
#include <asm/hvcall.h>

static struct vio_dev vio_bus_device = { /* fake "parent" device */
	.name = "vio",
	.type = "",
	.dev.init_name = "vio",
	.dev.bus = &vio_bus_type,
};

#ifdef CONFIG_PPC_SMLPAR

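/**
 * vio_cmo_pool - A pool of IO memory for CMO use
 *
 * @size: The size of the pool in bytes
 * @free: The amount of free memory in the pool
 */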
struct vio_cmo_pool {
	size_t size;
	size_t free;
};

/* Delay, in jiffies, before a scheduled balance operation runs */
#define VIO_CMO_BALANCE_DELAY 100

/* Entitlement is portioned out to devices in chunks of this many bytes */
#define VIO_CMO_BALANCE_CHUNK 131072

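/**
 * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement
 *
 * @viodev: struct vio_dev pointer
 * @list: pointer to other devices on bus that are being tracked
 */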
struct vio_cmo_dev_entry {
	struct vio_dev *viodev;
	struct list_head list;
};

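/**
 * vio_cmo - VIO bus accounting structure for CMO entitlement
 *
 * @lock: spinlock for entire structure
 * @balance_q: work queue for balancing system entitlement
 * @device_list: list of CMO-enabled devices requiring entitlement
 * @entitled: total system entitlement in bytes
 * @reserve: pool of memory from which devices reserve entitlement, incl. spare
 * @excess: pool of excess entitlement not needed for device reserves or spare
 * @spare: IO memory for device hotplug functionality
 * @min: minimum necessary for system operation
 * @desired: desired memory for system operation
 * @curr: bytes currently allocated
 * @high: high water mark for IO data usage
 */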
static struct vio_cmo {
	spinlock_t lock;
	struct delayed_work balance_q;
	struct list_head device_list;
	size_t entitled;
	struct vio_cmo_pool reserve;
	struct vio_cmo_pool excess;
	size_t spare;
	size_t min;
	size_t desired;
	size_t curr;
	size_t high;
} vio_cmo;

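/**
 * vio_cmo_num_OF_devs - Count the number of vio devices with DMA windows
 *
 * Returns the number of children of the "vdevice" device tree node that
 * carry an "ibm,my-dma-window" property, i.e. the devices that require
 * CMO entitlement.
 */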
static int vio_cmo_num_OF_devs(void)
{
	struct device_node *node_vroot;
	int count = 0;

	/*
	 * Count the number of vdevice entries with an
	 * ibm,my-dma-window OF property
	 */
	node_vroot = of_find_node_by_name(NULL, "vdevice");
	if (node_vroot) {
		struct device_node *of_node;
		struct property *prop;

		for_each_child_of_node(node_vroot, of_node) {
			prop = of_find_property(of_node, "ibm,my-dma-window",
			                        NULL);
			if (prop)
				count++;
		}
	}
	of_node_put(node_vroot);
	return count;
}

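/**
 * vio_cmo_alloc - allocate IO memory for CMO-enabled devices
 *
 * @viodev: struct vio_dev for device to allocate for
 * @size: size of allocation requested
 *
 * Allocations come from memory reserved for the device and any excess
 * IO memory available to all devices.  The spare pool used to service
 * hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be
 * used.
 *
 * Returns: 0 for successful allocation and -ENOMEM for a failure
 */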
static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
{
	unsigned long flags;
	size_t reserve_free = 0;
	size_t excess_free = 0;
	int ret = -ENOMEM;

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Determine the amount of free entitlement available in reserve */
	if (viodev->cmo.entitled > viodev->cmo.allocated)
		reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;

	/* If spare is not fulfilled, the excess pool can not be used. */
	if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
		excess_free = vio_cmo.excess.free;

	/* The request can be satisfied */
	if ((reserve_free + excess_free) >= size) {
		vio_cmo.curr += size;
		if (vio_cmo.curr > vio_cmo.high)
			vio_cmo.high = vio_cmo.curr;
		viodev->cmo.allocated += size;
		size -= min(reserve_free, size);
		vio_cmo.excess.free -= size;
		ret = 0;
	}

	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return ret;
}

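/**
 * vio_cmo_dealloc - deallocate IO memory from CMO-enabled devices
 *
 * @viodev: struct vio_dev for device to deallocate from
 * @size: size of deallocation
 *
 * IO memory is freed by the device back to the correct memory pools.
 * The spare pool is replenished first from either memory pool, then
 * the reserve pool is used to reduce device entitlement, and finally
 * the excess pool is used to increase the memory available to the
 * system as a whole.
 */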
static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
{
	unsigned long flags;
	size_t spare_needed = 0;
	size_t excess_freed = 0;
	size_t reserve_freed = size;
	size_t tmp;
	int balance = 0;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	vio_cmo.curr -= size;

	/* Amount of memory freed from the excess pool */
	if (viodev->cmo.allocated > viodev->cmo.entitled) {
		excess_freed = min(reserve_freed, (viodev->cmo.allocated -
		                                   viodev->cmo.entitled));
		reserve_freed -= excess_freed;
	}

	/* Remove allocation from device */
	viodev->cmo.allocated -= (reserve_freed + excess_freed);

	/* Spare is a subset of the reserve pool, replenish it first. */
	spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare;

	/*
	 * Replenish the spare in the reserve pool from the excess pool.
	 * This moves entitlement into the reserve pool.
	 */
	if (spare_needed && excess_freed) {
		tmp = min(excess_freed, spare_needed);
		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;
		vio_cmo.spare += tmp;
		excess_freed -= tmp;
		spare_needed -= tmp;
		balance = 1;
	}

	/*
	 * Replenish the spare in the reserve pool from the reserve pool.
	 * This removes entitlement from the device down to VIO_CMO_MIN_ENT,
	 * if needed, and gives it to the spare pool.  The amount of used
	 * memory in this pool does not change.
	 */
	if (spare_needed && reserve_freed) {
		tmp = min3(spare_needed, reserve_freed,
		           (viodev->cmo.entitled - VIO_CMO_MIN_ENT));

		vio_cmo.spare += tmp;
		viodev->cmo.entitled -= tmp;
		reserve_freed -= tmp;
		spare_needed -= tmp;
		balance = 1;
	}

	/*
	 * Increase the reserve pool until the desired allocation is met.
	 * Move an allocation freed from the excess pool into the reserve
	 * pool and schedule a balance operation.
	 */
	if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) {
		tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size));

		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;
		excess_freed -= tmp;
		balance = 1;
	}

	/* Return memory from the excess pool to that pool */
	if (excess_freed)
		vio_cmo.excess.free += excess_freed;

	if (balance)
		schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

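/**
 * vio_cmo_entitlement_update - Manage system entitlement changes
 *
 * @new_entitlement: new system entitlement to attempt to accommodate
 *
 * Increases in entitlement will be used to fulfill the spare entitlement
 * and the rest is given to the excess pool.  Decreases, if they are
 * possible, come from the excess pool and from unused device entitlement.
 *
 * Returns: 0 on success, -ENOMEM when change can not be made
 */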
int vio_cmo_entitlement_update(size_t new_entitlement)
{
	struct vio_dev *viodev;
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t avail, delta, tmp;

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Entitlement increases */
	if (new_entitlement > vio_cmo.entitled) {
		delta = new_entitlement - vio_cmo.entitled;

		/* Fulfill spare allocation */
		if (vio_cmo.spare < VIO_CMO_MIN_ENT) {
			tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare));
			vio_cmo.spare += tmp;
			vio_cmo.reserve.size += tmp;
			delta -= tmp;
		}

		/* Remaining new allocation goes to the excess pool */
		vio_cmo.entitled += delta;
		vio_cmo.excess.size += delta;
		vio_cmo.excess.free += delta;

		goto out;
	}

	/* Entitlement decreases */
	delta = vio_cmo.entitled - new_entitlement;
	avail = vio_cmo.excess.free;

	/*
	 * Need to check how much entitlement is available in the
	 * devices' unused reserves to cover the entitlement decrease.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		if (avail >= delta)
			break;

		viodev = dev_ent->viodev;
		if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
		    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
			avail += viodev->cmo.entitled -
			         max_t(size_t, viodev->cmo.allocated,
			               VIO_CMO_MIN_ENT);
	}

	if (delta <= avail) {
		vio_cmo.entitled -= delta;

		/* Take entitlement from the excess pool first */
		tmp = min(vio_cmo.excess.free, delta);
		vio_cmo.excess.size -= tmp;
		vio_cmo.excess.free -= tmp;
		delta -= tmp;

		/*
		 * Remove all but VIO_CMO_MIN_ENT bytes from devices
		 * until the entitlement change is served
		 */
		list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
			if (!delta)
				break;

			viodev = dev_ent->viodev;
			tmp = 0;
			if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
			    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
				tmp = viodev->cmo.entitled -
				      max_t(size_t, viodev->cmo.allocated,
				            VIO_CMO_MIN_ENT);
			viodev->cmo.entitled -= min(tmp, delta);
			delta -= min(tmp, delta);
		}
	} else {
		spin_unlock_irqrestore(&vio_cmo.lock, flags);
		return -ENOMEM;
	}

out:
	schedule_delayed_work(&vio_cmo.balance_q, 0);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return 0;
}

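/**
 * vio_cmo_balance - Balance entitlement among devices
 *
 * @work: work queue structure for this operation
 *
 * Any system entitlement above the minimum needed for devices, or already
 * allocated to devices, can be distributed to the devices.  The list of
 * devices is iterated through to recalculate the desired entitlement level
 * and to determine how much entitlement above the minimum is allocated to
 * devices.  Small chunks of the available entitlement are given to devices
 * until their requirements are fulfilled or there is no entitlement left
 * to give.  Upon completion, the sizes of the reserve and excess pools are
 * recalculated, as is the system minimum entitlement level.
 */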
static void vio_cmo_balance(struct work_struct *work)
{
	struct vio_cmo *cmo;
	struct vio_dev *viodev;
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t avail = 0, level, chunk, need;
	int devcount = 0, fulfilled;

	cmo = container_of(work, struct vio_cmo, balance_q.work);

	spin_lock_irqsave(&vio_cmo.lock, flags);

	/* Calculate minimum entitlement and fulfill spare */
	cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
	BUG_ON(cmo->min > cmo->entitled);
	cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min));
	cmo->min += cmo->spare;
	cmo->desired = cmo->min;

	/*
	 * Determine how much entitlement is available and reset device
	 * entitlements to the minimum level.
	 */
	avail = cmo->entitled - cmo->spare;
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		viodev = dev_ent->viodev;
		devcount++;
		viodev->cmo.entitled = VIO_CMO_MIN_ENT;
		cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
		avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
	}

	/*
	 * Having provided each device with the minimum entitlement, loop
	 * over the devices portioning out the remaining entitlement
	 * until there is nothing left.
	 */
	level = VIO_CMO_MIN_ENT;
	while (avail) {
		fulfilled = 0;
		list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
			viodev = dev_ent->viodev;

			if (viodev->cmo.desired <= level) {
				fulfilled++;
				continue;
			}

			/*
			 * Give the device up to VIO_CMO_BALANCE_CHUNK
			 * bytes of entitlement, but do not exceed the
			 * desired level of entitlement for the device.
			 */
			chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
			chunk = min(chunk, (viodev->cmo.desired -
			                    viodev->cmo.entitled));
			viodev->cmo.entitled += chunk;

			/*
			 * If the memory for this entitlement increase was
			 * already allocated to the device it does not come
			 * from the available pool being portioned out.
			 */
			need = max(viodev->cmo.allocated, viodev->cmo.entitled) -
			       max(viodev->cmo.allocated, level);
			avail -= need;
		}
		if (fulfilled == devcount)
			break;
		level += VIO_CMO_BALANCE_CHUNK;
	}

	/* Calculate new reserve and excess pool sizes */
	cmo->reserve.size = cmo->min;
	cmo->excess.free = 0;
	cmo->excess.size = 0;
	need = 0;
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
		viodev = dev_ent->viodev;
		/* Calculated reserve size above the minimum entitlement */
		if (viodev->cmo.entitled)
			cmo->reserve.size += (viodev->cmo.entitled -
			                      VIO_CMO_MIN_ENT);
		/* Calculated used excess entitlement */
		if (viodev->cmo.allocated > viodev->cmo.entitled)
			need += viodev->cmo.allocated - viodev->cmo.entitled;
	}
	cmo->excess.size = cmo->entitled - cmo->reserve.size;
	cmo->excess.free = cmo->excess.size - need;

	cancel_delayed_work(to_delayed_work(work));
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

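/*
 * The DMA operations below wrap the generic dma_iommu_ops with CMO
 * accounting: each mapping first charges the rounded-up size against the
 * device's entitlement via vio_cmo_alloc(), and each unmapping returns it
 * via vio_cmo_dealloc().  Failures are counted in cmo.allocs_failed.
 */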
static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t flag,
					  unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	void *ret;

	if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
		atomic_inc(&viodev->cmo.allocs_failed);
		return NULL;
	}

	ret = dma_iommu_ops.alloc(dev, size, dma_handle, flag, attrs);
	if (unlikely(ret == NULL)) {
		vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
		atomic_inc(&viodev->cmo.allocs_failed);
	}

	return ret;
}

static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
					void *vaddr, dma_addr_t dma_handle,
					unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);

	dma_iommu_ops.free(dev, size, vaddr, dma_handle, attrs);

	vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
}

static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction direction,
					 unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct iommu_table *tbl;
	dma_addr_t ret = IOMMU_MAPPING_ERROR;

	tbl = get_iommu_table_base(dev);
	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) {
		atomic_inc(&viodev->cmo.allocs_failed);
		return ret;
	}

	ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
	if (unlikely(dma_mapping_error(dev, ret))) {
		vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
		atomic_inc(&viodev->cmo.allocs_failed);
	}

	return ret;
}

static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
				     size_t size,
				     enum dma_data_direction direction,
				     unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct iommu_table *tbl;

	tbl = get_iommu_table_base(dev);
	dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);

	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
}

static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
				int nelems, enum dma_data_direction direction,
				unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct iommu_table *tbl;
	struct scatterlist *sgl;
	int ret, count;
	size_t alloc_size = 0;

	tbl = get_iommu_table_base(dev);
	for_each_sg(sglist, sgl, nelems, count)
		alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));

	if (vio_cmo_alloc(viodev, alloc_size)) {
		atomic_inc(&viodev->cmo.allocs_failed);
		return 0;
	}

	ret = dma_iommu_ops.map_sg(dev, sglist, nelems, direction, attrs);

	if (unlikely(!ret)) {
		vio_cmo_dealloc(viodev, alloc_size);
		atomic_inc(&viodev->cmo.allocs_failed);
		return ret;
	}

	/* Return any entitlement freed up by IOMMU coalescing of entries */
	for_each_sg(sglist, sgl, ret, count)
		alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
	if (alloc_size)
		vio_cmo_dealloc(viodev, alloc_size);

	return ret;
}

static void vio_dma_iommu_unmap_sg(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction,
				   unsigned long attrs)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct iommu_table *tbl;
	struct scatterlist *sgl;
	size_t alloc_size = 0;
	int count;

	tbl = get_iommu_table_base(dev);
	for_each_sg(sglist, sgl, nelems, count)
		alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));

	dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);

	vio_cmo_dealloc(viodev, alloc_size);
}

static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	return dma_iommu_ops.dma_supported(dev, mask);
}

static u64 vio_dma_get_required_mask(struct device *dev)
{
	return dma_iommu_ops.get_required_mask(dev);
}

static const struct dma_map_ops vio_dma_mapping_ops = {
	.alloc             = vio_dma_iommu_alloc_coherent,
	.free              = vio_dma_iommu_free_coherent,
	.mmap              = dma_nommu_mmap_coherent,
	.map_sg            = vio_dma_iommu_map_sg,
	.unmap_sg          = vio_dma_iommu_unmap_sg,
	.map_page          = vio_dma_iommu_map_page,
	.unmap_page        = vio_dma_iommu_unmap_page,
	.dma_supported     = vio_dma_iommu_dma_supported,
	.get_required_mask = vio_dma_get_required_mask,
	.mapping_error     = dma_iommu_mapping_error,
};

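/**
 * vio_cmo_set_dev_desired - Set desired entitlement for a device
 *
 * @viodev: struct vio_dev for device to alter
 * @desired: new desired entitlement level in bytes
 *
 * For use by devices to request a change to their entitlement at runtime
 * or through sysfs.  The desired entitlement level is changed and a
 * balancing of system resources is scheduled to run in the future.
 */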
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
{
	unsigned long flags;
	struct vio_cmo_dev_entry *dev_ent;
	int found = 0;

	if (!firmware_has_feature(FW_FEATURE_CMO))
		return;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	if (desired < VIO_CMO_MIN_ENT)
		desired = VIO_CMO_MIN_ENT;

	/*
	 * Changes will not be made for devices not in the device list.
	 * If it is in the device list, then changes can be made to the
	 * desired entitlement here, and a balance operation scheduled
	 * to adjust the entitlement to the new desired level.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
		if (viodev == dev_ent->viodev) {
			found = 1;
			break;
		}
	if (!found) {
		spin_unlock_irqrestore(&vio_cmo.lock, flags);
		return;
	}

	/* Increase/decrease in desired device entitlement */
	if (desired >= viodev->cmo.desired) {
		/* Just bump the bus and device values prior to a balance */
		vio_cmo.desired += desired - viodev->cmo.desired;
		viodev->cmo.desired = desired;
	} else {
		/* Decrease bus and device values for desired entitlement */
		vio_cmo.desired -= viodev->cmo.desired - desired;
		viodev->cmo.desired = desired;
		/*
		 * If less entitlement is desired than current entitlement,
		 * move excess entitlement to the excess pool.
		 */
		if (viodev->cmo.entitled > desired) {
			vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
			vio_cmo.excess.size += viodev->cmo.entitled - desired;
			/*
			 * If entitlement moving from the reserve pool to the
			 * excess pool is currently unused, add to the excess
			 * free counter.
			 */
			if (viodev->cmo.allocated < viodev->cmo.entitled)
				vio_cmo.excess.free += viodev->cmo.entitled -
				                       max(viodev->cmo.allocated, desired);
			viodev->cmo.entitled = desired;
		}
	}
	schedule_delayed_work(&vio_cmo.balance_q, 0);
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

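/**
 * vio_cmo_bus_probe - Handle CMO specific bus probe activities
 *
 * @viodev: struct vio_dev being probed
 *
 * Determine the device's IO memory entitlement needs, attempting
 * to satisfy the system minimum entitlement at first and scheduling
 * a balance operation to take care of the rest at a later time.
 *
 * Returns: 0 on success, -EINVAL when device doesn't support CMO, and
 *          -ENOMEM when entitlement is not available for the device or
 *          device entry.
 */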
static int vio_cmo_bus_probe(struct vio_dev *viodev)
{
	struct vio_cmo_dev_entry *dev_ent;
	struct device *dev = &viodev->dev;
	struct iommu_table *tbl;
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	unsigned long flags;
	size_t size;
	bool dma_capable = false;

	tbl = get_iommu_table_base(dev);

	/* A device requires entitlement if it has a DMA window property */
	switch (viodev->family) {
	case VDEVICE:
		if (of_get_property(viodev->dev.of_node,
		                    "ibm,my-dma-window", NULL))
			dma_capable = true;
		break;
	case PFO:
		dma_capable = false;
		break;
	default:
		dev_warn(dev, "unknown device family: %d\n", viodev->family);
		BUG();
		break;
	}

	/* Configure entitlement for the device. */
	if (dma_capable) {
		/* Check that the driver is CMO enabled and get desired DMA */
		if (!viodrv->get_desired_dma) {
			dev_err(dev, "%s: device driver does not support CMO\n",
			        __func__);
			return -EINVAL;
		}

		viodev->cmo.desired =
			IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl);
		if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
			viodev->cmo.desired = VIO_CMO_MIN_ENT;
		size = VIO_CMO_MIN_ENT;

		dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry),
		                  GFP_KERNEL);
		if (!dev_ent)
			return -ENOMEM;

		dev_ent->viodev = viodev;
		spin_lock_irqsave(&vio_cmo.lock, flags);
		list_add(&dev_ent->list, &vio_cmo.device_list);
	} else {
		viodev->cmo.desired = 0;
		size = 0;
		spin_lock_irqsave(&vio_cmo.lock, flags);
	}

	/*
	 * If the needs for vio_cmo.min have not changed since they
	 * were last set, the number of devices in the OF tree has
	 * been constant and the IO memory for this is already in
	 * the reserve pool.
	 */
	if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) *
	                    VIO_CMO_MIN_ENT)) {
		/* Updated desired entitlement if device requires it */
		if (size)
			vio_cmo.desired += (viodev->cmo.desired -
			                    VIO_CMO_MIN_ENT);
	} else {
		size_t tmp;

		tmp = vio_cmo.spare + vio_cmo.excess.free;
		if (tmp < size) {
			dev_err(dev, "%s: insufficient free "
			        "entitlement to add device. "
			        "Need %lu, have %lu\n", __func__,
			        size, tmp);
			spin_unlock_irqrestore(&vio_cmo.lock, flags);
			return -ENOMEM;
		}

		/* Use excess pool first to fulfill request */
		tmp = min(size, vio_cmo.excess.free);
		vio_cmo.excess.free -= tmp;
		vio_cmo.excess.size -= tmp;
		vio_cmo.reserve.size += tmp;

		/* Use spare if excess pool was insufficient */
		vio_cmo.spare -= size - tmp;

		/* Update bus accounting */
		vio_cmo.min += size;
		vio_cmo.desired += viodev->cmo.desired;
	}
	spin_unlock_irqrestore(&vio_cmo.lock, flags);
	return 0;
}

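/**
 * vio_cmo_bus_remove - Handle CMO specific bus removal activities
 *
 * @viodev: struct vio_dev being removed
 *
 * Remove the device from the cmo device list.  The minimum entitlement
 * will be reserved for the device as long as it is in the system.  The
 * rest of the entitlement the device had been allocated is returned to
 * the system.
 */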
static void vio_cmo_bus_remove(struct vio_dev *viodev)
{
	struct vio_cmo_dev_entry *dev_ent;
	unsigned long flags;
	size_t tmp;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	if (viodev->cmo.allocated) {
		dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
		        "allocated after remove operation.\n",
		        __func__, viodev->cmo.allocated);
		BUG();
	}

	/*
	 * Remove the device from the device list being maintained for
	 * CMO enabled devices.
	 */
	list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
		if (viodev == dev_ent->viodev) {
			list_del(&dev_ent->list);
			kfree(dev_ent);
			break;
		}

	/*
	 * Devices may not require any entitlement and they do not need
	 * to be processed.  Otherwise, return the device's entitlement
	 * back to the pools.
	 */
	if (viodev->cmo.entitled) {
		/*
		 * This device has not yet left the OF tree, its
		 * minimum entitlement remains in vio_cmo.min and
		 * vio_cmo.desired
		 */
		vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);

		/*
		 * Save min allocation for device in reserve as long
		 * as it exists in OF tree as determined by later
		 * balance operation
		 */
		viodev->cmo.entitled -= VIO_CMO_MIN_ENT;

		/* Replenish spare from freed reserve pool */
		if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
			tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
			                                 vio_cmo.spare));
			vio_cmo.spare += tmp;
			viodev->cmo.entitled -= tmp;
		}

		/* Remaining reserve goes to excess pool */
		vio_cmo.excess.size += viodev->cmo.entitled;
		vio_cmo.excess.free += viodev->cmo.entitled;
		vio_cmo.reserve.size -= viodev->cmo.entitled;

		/*
		 * Until the device is removed it will keep a
		 * minimum entitlement; this will guarantee that
		 * a module unload/load will result in a success.
		 */
		viodev->cmo.entitled = VIO_CMO_MIN_ENT;
		viodev->cmo.desired = VIO_CMO_MIN_ENT;
		atomic_set(&viodev->cmo.allocs_failed, 0);
	}

	spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
{
	set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
}

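/**
 * vio_cmo_bus_init - CMO entitlement initialization at bus init time
 *
 * Set up the reserve and excess entitlement pools based on available
 * system entitlement and the number of devices in the OF tree that
 * require entitlement in the reserve pool.
 */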
static void vio_cmo_bus_init(void)
{
	struct hvcall_mpp_data mpp_data;
	int err;

	memset(&vio_cmo, 0, sizeof(struct vio_cmo));
	spin_lock_init(&vio_cmo.lock);
	INIT_LIST_HEAD(&vio_cmo.device_list);
	INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance);

	/* Get current system entitlement */
	err = h_get_mpp(&mpp_data);

	/*
	 * On failure, continue with entitlement set to 0, will panic()
	 * later when spare is reserved
	 */
	if (err != H_SUCCESS) {
		printk(KERN_ERR "%s: unable to determine system IO "
		       "entitlement. (%d)\n", __func__, err);
		vio_cmo.entitled = 0;
	} else {
		vio_cmo.entitled = mpp_data.entitled_mem;
	}

	/* Set reservation and check against entitlement */
	vio_cmo.spare = VIO_CMO_MIN_ENT;
	vio_cmo.reserve.size = vio_cmo.spare;
	vio_cmo.reserve.size += (vio_cmo_num_OF_devs() *
	                         VIO_CMO_MIN_ENT);
	if (vio_cmo.reserve.size > vio_cmo.entitled) {
		printk(KERN_ERR "%s: insufficient system entitlement\n",
		       __func__);
		panic("%s: Insufficient system entitlement", __func__);
	}

	/* Set the remaining accounting variables */
	vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size;
	vio_cmo.excess.free = vio_cmo.excess.size;
	vio_cmo.min = vio_cmo.reserve.size;
	vio_cmo.desired = vio_cmo.reserve.size;
}

/* sysfs device functions and data structures for CMO */

#define viodev_cmo_rd_attr(name)					\
static ssize_t cmo_##name##_show(struct device *dev,			\
				 struct device_attribute *attr,		\
				 char *buf)				\
{									\
	return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name);	\
}

static ssize_t cmo_allocs_failed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
}

static ssize_t cmo_allocs_failed_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	atomic_set(&viodev->cmo.allocs_failed, 0);
	return count;
}

static ssize_t cmo_desired_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	size_t new_desired;
	int ret;

	ret = kstrtoul(buf, 10, &new_desired);
	if (ret)
		return ret;

	vio_cmo_set_dev_desired(viodev, new_desired);
	return count;
}

viodev_cmo_rd_attr(desired);
viodev_cmo_rd_attr(entitled);
viodev_cmo_rd_attr(allocated);

static ssize_t name_show(struct device *, struct device_attribute *, char *);
static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf);

static struct device_attribute dev_attr_name;
static struct device_attribute dev_attr_devspec;
static struct device_attribute dev_attr_modalias;

static DEVICE_ATTR_RO(cmo_entitled);
static DEVICE_ATTR_RO(cmo_allocated);
static DEVICE_ATTR_RW(cmo_desired);
static DEVICE_ATTR_RW(cmo_allocs_failed);

static struct attribute *vio_cmo_dev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_devspec.attr,
	&dev_attr_modalias.attr,
	&dev_attr_cmo_entitled.attr,
	&dev_attr_cmo_allocated.attr,
	&dev_attr_cmo_desired.attr,
	&dev_attr_cmo_allocs_failed.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vio_cmo_dev);

/* sysfs bus functions and data structures for CMO */

#define viobus_cmo_rd_attr(name)					\
static ssize_t cmo_bus_##name##_show(struct bus_type *bt, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", vio_cmo.name);			\
}									\
static struct bus_attribute bus_attr_cmo_bus_##name =			\
	__ATTR(cmo_##name, S_IRUGO, cmo_bus_##name##_show, NULL)

#define viobus_cmo_pool_rd_attr(name, var)				\
static ssize_t								\
cmo_##name##_##var##_show(struct bus_type *bt, char *buf)		\
{									\
	return sprintf(buf, "%lu\n", vio_cmo.name.var);			\
}									\
static BUS_ATTR_RO(cmo_##name##_##var)

viobus_cmo_rd_attr(entitled);
viobus_cmo_rd_attr(spare);
viobus_cmo_rd_attr(min);
viobus_cmo_rd_attr(desired);
viobus_cmo_rd_attr(curr);
viobus_cmo_pool_rd_attr(reserve, size);
viobus_cmo_pool_rd_attr(excess, size);
viobus_cmo_pool_rd_attr(excess, free);

static ssize_t cmo_high_show(struct bus_type *bt, char *buf)
{
	return sprintf(buf, "%lu\n", vio_cmo.high);
}

static ssize_t cmo_high_store(struct bus_type *bt, const char *buf,
			      size_t count)
{
	unsigned long flags;

	spin_lock_irqsave(&vio_cmo.lock, flags);
	vio_cmo.high = vio_cmo.curr;
	spin_unlock_irqrestore(&vio_cmo.lock, flags);

	return count;
}
static BUS_ATTR_RW(cmo_high);

static struct attribute *vio_bus_attrs[] = {
	&bus_attr_cmo_bus_entitled.attr,
	&bus_attr_cmo_bus_spare.attr,
	&bus_attr_cmo_bus_min.attr,
	&bus_attr_cmo_bus_desired.attr,
	&bus_attr_cmo_bus_curr.attr,
	&bus_attr_cmo_high.attr,
	&bus_attr_cmo_reserve_size.attr,
	&bus_attr_cmo_excess_size.attr,
	&bus_attr_cmo_excess_free.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vio_bus);

static void vio_cmo_sysfs_init(void)
{
	vio_bus_type.dev_groups = vio_cmo_dev_groups;
	vio_bus_type.bus_groups = vio_bus_groups;
}
#else /* CONFIG_PPC_SMLPAR */
int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
static void vio_cmo_bus_init(void) {}
static void vio_cmo_sysfs_init(void) { }
#endif /* CONFIG_PPC_SMLPAR */
EXPORT_SYMBOL(vio_cmo_entitlement_update);
EXPORT_SYMBOL(vio_cmo_set_dev_desired);

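/**
 * vio_h_cop_sync - Perform a synchronous PFO co-processor operation
 *
 * @vdev: pointer to a struct vio_dev for the device
 * @op: pointer to a struct vio_pfo_op for the operation parameters
 *
 * Calls the hypervisor to synchronously perform the PFO operation
 * described in @op.  On a busy response the operation is re-submitted
 * indefinitely unless a non-zero timeout is specified, in which case
 * re-submission stops once the timeout expires.  The raw hcall return
 * value is stored in op->hcall_err.
 *
 * Returns: 0 on success; -EINVAL, -E2BIG, -EACCES, -EPERM or -EBUSY
 *          mapped from the failing hcall return code
 */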
int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op)
{
	struct device *dev = &vdev->dev;
	unsigned long deadline = 0;
	long hret = 0;
	int ret = 0;

	if (op->timeout)
		deadline = jiffies + msecs_to_jiffies(op->timeout);

	while (true) {
		hret = plpar_hcall_norets(H_COP, op->flags,
				vdev->resource_id,
				op->in, op->inlen, op->out,
				op->outlen, op->csbcpb);

		/*
		 * Stop on success or a non-transient error; retry busy
		 * responses until the caller's deadline has passed.
		 */
		if (hret == H_SUCCESS ||
		    (hret != H_NOT_ENOUGH_RESOURCES &&
		     hret != H_BUSY && hret != H_RESOURCE) ||
		    (op->timeout && time_after(jiffies, deadline)))
			break;

		dev_dbg(dev, "%s: hcall ret(%ld), retrying.\n", __func__, hret);
	}

	switch (hret) {
	case H_SUCCESS:
		ret = 0;
		break;
	case H_OP_MODE:
	case H_TOO_BIG:
		ret = -E2BIG;
		break;
	case H_RESCINDED:
		ret = -EACCES;
		break;
	case H_HARDWARE:
		ret = -EPERM;
		break;
	case H_NOT_ENOUGH_RESOURCES:
	case H_RESOURCE:
	case H_BUSY:
		ret = -EBUSY;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		dev_dbg(dev, "%s: Sync h_cop_op failure (ret:%d) (hret:%ld)\n",
			__func__, ret, hret);

	op->hcall_err = hret;
	return ret;
}
EXPORT_SYMBOL(vio_h_cop_sync);

static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
{
	const __be32 *dma_window;
	struct iommu_table *tbl;
	unsigned long offset, size;

	dma_window = of_get_property(dev->dev.of_node,
	                             "ibm,my-dma-window", NULL);
	if (!dma_window)
		return NULL;

	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
	if (tbl == NULL)
		return NULL;

	of_parse_dma_window(dev->dev.of_node, dma_window,
			    &tbl->it_index, &offset, &size);

	/* TCE table size - measured in entries */
	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
	tbl->it_size = size >> tbl->it_page_shift;
	/* offset for VIO should always be 0 */
	tbl->it_offset = offset >> tbl->it_page_shift;
	tbl->it_busno = 0;
	tbl->it_type = TCE_VB;
	tbl->it_blocksize = 16;

	if (firmware_has_feature(FW_FEATURE_LPAR))
		tbl->it_ops = &iommu_table_lpar_multi_ops;
	else
		tbl->it_ops = &iommu_table_pseries_ops;

	return iommu_init_table(tbl, -1);
}

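/**
 * vio_match_device - Tell if a VIO device has a matching VIO device id.
 *
 * @ids: array of VIO device id structures to search in
 * @dev: the VIO device structure to match against
 *
 * Used by a driver to check whether a VIO device present in the
 * system is in its list of supported devices.  Returns the matching
 * vio_device_id structure or NULL if there is no match.
 */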
static const struct vio_device_id *vio_match_device(
		const struct vio_device_id *ids, const struct vio_dev *dev)
{
	while (ids->type[0] != '\0') {
		if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
		    of_device_is_compatible(dev->dev.of_node,
					    ids->compat))
			return ids;
		ids++;
	}
	return NULL;
}

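/*
 * Convert from struct device to struct vio_dev and pass to driver.
 * dev->driver has already been set by generic code because vio_bus_match
 * succeeded.
 */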
static int vio_bus_probe(struct device *dev)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	const struct vio_device_id *id;
	int error = -ENODEV;

	if (!viodrv->probe)
		return error;

	id = vio_match_device(viodrv->id_table, viodev);
	if (id) {
		memset(&viodev->cmo, 0, sizeof(viodev->cmo));
		if (firmware_has_feature(FW_FEATURE_CMO)) {
			error = vio_cmo_bus_probe(viodev);
			if (error)
				return error;
		}
		error = viodrv->probe(viodev, id);
		if (error && firmware_has_feature(FW_FEATURE_CMO))
			vio_cmo_bus_remove(viodev);
	}

	return error;
}

/* convert from struct device to struct vio_dev and pass to driver. */
static int vio_bus_remove(struct device *dev)
{
	struct vio_dev *viodev = to_vio_dev(dev);
	struct vio_driver *viodrv = to_vio_driver(dev->driver);
	struct device *devptr;
	int ret = 1;

	/*
	 * Hold a reference to the device after the remove function is called
	 * to allow for CMO accounting cleanup for the device.
	 */
	devptr = get_device(dev);

	if (viodrv->remove)
		ret = viodrv->remove(viodev);

	if (!ret && firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_bus_remove(viodev);

	put_device(devptr);
	return ret;
}
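/**
 * __vio_register_driver: - Register a new vio driver
 * @viodrv: the vio_driver structure to be registered
 * @owner: owning module
 * @mod_name: name of the registering module
 */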
int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
			  const char *mod_name)
{
	pr_debug("%s: driver %s registering\n", __func__, viodrv->name);

	/* fill in 'struct driver' fields */
	viodrv->driver.name = viodrv->name;
	viodrv->driver.pm = viodrv->pm;
	viodrv->driver.bus = &vio_bus_type;
	viodrv->driver.owner = owner;
	viodrv->driver.mod_name = mod_name;

	return driver_register(&viodrv->driver);
}
EXPORT_SYMBOL(__vio_register_driver);

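/**
 * vio_unregister_driver - Remove registration of vio driver.
 * @viodrv: the driver to unregister
 */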
void vio_unregister_driver(struct vio_driver *viodrv)
{
	driver_unregister(&viodrv->driver);
}
EXPORT_SYMBOL(vio_unregister_driver);

/* vio_dev refcount hit 0 */
static void vio_dev_release(struct device *dev)
{
	struct iommu_table *tbl = get_iommu_table_base(dev);

	if (tbl)
		iommu_tce_table_put(tbl);
	of_node_put(dev->of_node);
	kfree(to_vio_dev(dev));
}

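/**
 * vio_register_device_node: - Register a new vio device.
 * @of_node: the OF node for this device.
 *
 * Creates and initializes a vio_dev structure from the data in
 * @of_node and adds it to the list of virtual devices.
 * Returns a pointer to the created vio_dev or NULL if node has
 * NULL device_type or compatible fields.
 */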
struct vio_dev *vio_register_device_node(struct device_node *of_node)
{
	struct vio_dev *viodev;
	struct device_node *parent_node;
	const __be32 *prop;
	enum vio_dev_family family;
	const char *of_node_name = of_node->name ? of_node->name : "<unknown>";

	/*
	 * Determine if this node is under the 'vdevice' parent or the
	 * 'ibm,platform-facilities' parent.
	 */
	parent_node = of_get_parent(of_node);
	if (parent_node) {
		if (!strcmp(parent_node->type, "ibm,platform-facilities"))
			family = PFO;
		else if (!strcmp(parent_node->type, "vdevice"))
			family = VDEVICE;
		else {
			pr_warn("%s: parent(%pOF) of %s not recognized.\n",
					__func__,
					parent_node,
					of_node_name);
			of_node_put(parent_node);
			return NULL;
		}
		of_node_put(parent_node);
	} else {
		pr_warn("%s: could not determine the parent of node %s.\n",
				__func__, of_node_name);
		return NULL;
	}

	if (family == PFO) {
		if (of_get_property(of_node, "interrupt-controller", NULL)) {
			pr_debug("%s: Skipping the interrupt controller %s.\n",
					__func__, of_node_name);
			return NULL;
		}
	}

	/* allocate a vio_dev for this node */
	viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
	if (viodev == NULL) {
		pr_warn("%s: allocation failure for VIO device.\n", __func__);
		return NULL;
	}

	/* we need the 'device_type' property, in order to match with drivers */
	viodev->family = family;
	if (viodev->family == VDEVICE) {
		unsigned int unit_address;

		if (of_node->type != NULL)
			viodev->type = of_node->type;
		else {
			pr_warn("%s: node %s is missing the 'device_type' "
					"property.\n", __func__, of_node_name);
			goto out;
		}

		prop = of_get_property(of_node, "reg", NULL);
		if (prop == NULL) {
			pr_warn("%s: node %s missing 'reg'\n",
					__func__, of_node_name);
			goto out;
		}
		unit_address = of_read_number(prop, 1);
		dev_set_name(&viodev->dev, "%x", unit_address);
		viodev->irq = irq_of_parse_and_map(of_node, 0);
		viodev->unit_address = unit_address;
	} else {
		/*
		 * PFO devices need their resource_id for submitting COP_OPs.
		 * This is an optional field for devices, but is required
		 * when performing synchronous ops.
		 */
		prop = of_get_property(of_node, "ibm,resource-id", NULL);
		if (prop != NULL)
			viodev->resource_id = of_read_number(prop, 1);

		dev_set_name(&viodev->dev, "%s", of_node_name);
		viodev->type = of_node_name;
		viodev->irq = 0;
	}

	viodev->name = of_node->name;
	viodev->dev.of_node = of_node_get(of_node);

	set_dev_node(&viodev->dev, of_node_to_nid(of_node));

	/* init generic 'struct device' fields: */
	viodev->dev.parent = &vio_bus_device.dev;
	viodev->dev.bus = &vio_bus_type;
	viodev->dev.release = vio_dev_release;

	if (of_get_property(viodev->dev.of_node, "ibm,my-dma-window", NULL)) {
		if (firmware_has_feature(FW_FEATURE_CMO))
			vio_cmo_set_dma_ops(viodev);
		else
			set_dma_ops(&viodev->dev, &dma_iommu_ops);

		set_iommu_table_base(&viodev->dev,
				     vio_build_iommu_table(viodev));

		/*
		 * Needed to ensure proper operation of coherent allocations
		 * later, in case the driver doesn't set it explicitly.
		 */
		viodev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
		viodev->dev.dma_mask = &viodev->dev.coherent_dma_mask;
	}

	/* register with generic device framework */
	if (device_register(&viodev->dev)) {
		printk(KERN_ERR "%s: failed to register device %s\n",
				__func__, dev_name(&viodev->dev));
		put_device(&viodev->dev);
		return NULL;
	}

	return viodev;

out:	/* error exit after viodev allocation */
	kfree(viodev);

	return NULL;
}
EXPORT_SYMBOL(vio_register_device_node);

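/*
 * vio_bus_scan_register_devices - Scan the OF tree and register the virtual
 * devices found there with the VIO bus.
 *
 * @root_name: OF node name for the root of the subtree to search.
 *	       This must be either "vdevice" or "ibm,platform-facilities".
 */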
static void vio_bus_scan_register_devices(char *root_name)
{
	struct device_node *node_root, *node_child;

	if (!root_name)
		return;

	node_root = of_find_node_by_name(NULL, root_name);
	if (node_root) {
		/*
		 * Create struct vio_devices for each virtual device in
		 * the device tree.  Drivers will associate with them later.
		 */
		node_child = of_get_next_child(node_root, NULL);
		while (node_child) {
			vio_register_device_node(node_child);
			node_child = of_get_next_child(node_root, node_child);
		}
		of_node_put(node_root);
	}
}

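/**
 * vio_bus_init - Initialize the virtual IO bus
 */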
static int __init vio_bus_init(void)
{
	int err;

	if (firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_sysfs_init();

	err = bus_register(&vio_bus_type);
	if (err) {
		printk(KERN_ERR "failed to register VIO bus\n");
		return err;
	}

	/*
	 * The fake parent of all vio devices, just to give us
	 * a nice directory
	 */
	err = device_register(&vio_bus_device.dev);
	if (err) {
		printk(KERN_WARNING "%s: device_register returned %i\n",
				__func__, err);
		return err;
	}

	if (firmware_has_feature(FW_FEATURE_CMO))
		vio_cmo_bus_init();

	return 0;
}
postcore_initcall(vio_bus_init);

static int __init vio_device_init(void)
{
	vio_bus_scan_register_devices("vdevice");
	vio_bus_scan_register_devices("ibm,platform-facilities");

	return 0;
}
device_initcall(vio_device_init);

static ssize_t name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
}
static DEVICE_ATTR_RO(name);

static ssize_t devspec_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct device_node *of_node = dev->of_node;

	return sprintf(buf, "%pOF\n", of_node);
}
static DEVICE_ATTR_RO(devspec);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	const struct vio_dev *vio_dev = to_vio_dev(dev);
	struct device_node *dn;
	const char *cp;

	dn = dev->of_node;
	if (!dn) {
		strcpy(buf, "\n");
		return strlen(buf);
	}
	cp = of_get_property(dn, "compatible", NULL);
	if (!cp) {
		strcpy(buf, "\n");
		return strlen(buf);
	}

	return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *vio_dev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_devspec.attr,
	&dev_attr_modalias.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vio_dev);

void vio_unregister_device(struct vio_dev *viodev)
{
	device_unregister(&viodev->dev);
	if (viodev->family == VDEVICE)
		irq_dispose_mapping(viodev->irq);
}
EXPORT_SYMBOL(vio_unregister_device);

static int vio_bus_match(struct device *dev, struct device_driver *drv)
{
	const struct vio_dev *vio_dev = to_vio_dev(dev);
	struct vio_driver *vio_drv = to_vio_driver(drv);
	const struct vio_device_id *ids = vio_drv->id_table;

	return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL);
}

static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
{
	const struct vio_dev *vio_dev = to_vio_dev(dev);
	struct device_node *dn;
	const char *cp;

	dn = dev->of_node;
	if (!dn)
		return -ENODEV;
	cp = of_get_property(dn, "compatible", NULL);
	if (!cp)
		return -ENODEV;

	add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp);
	return 0;
}

struct bus_type vio_bus_type = {
	.name = "vio",
	.dev_groups = vio_dev_groups,
	.uevent = vio_hotplug,
	.match = vio_bus_match,
	.probe = vio_bus_probe,
	.remove = vio_bus_remove,
};

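/**
 * vio_get_attribute: - get attribute for virtual device
 * @vdev: the vio device to get property.
 * @which: the property/attribute to be extracted.
 * @length: pointer to length of returned data size (unused if NULL).
 *
 * Calls prom.c's of_get_property() to return the value of the
 * attribute specified by @which
 */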
const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
{
	return of_get_property(vdev->dev.of_node, which, length);
}
EXPORT_SYMBOL(vio_get_attribute);

#ifdef CONFIG_PPC_PSERIES
/*
 * vio_find_name() - internal because only vio.c knows how we formatted the
 * kobject name
 */
static struct vio_dev *vio_find_name(const char *name)
{
	struct device *found;

	found = bus_find_device_by_name(&vio_bus_type, NULL, name);
	if (!found)
		return NULL;

	return to_vio_dev(found);
}

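/**
 * vio_find_node - find an already-registered vio_dev matching a device node
 * @vnode: device_node of the virtual device we're looking for
 *
 * Takes a reference to the embedded struct device which the caller must
 * drop with put_device() after use.
 */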
struct vio_dev *vio_find_node(struct device_node *vnode)
{
	char kobj_name[20];
	struct device_node *vnode_parent;
	const char *dev_type;

	vnode_parent = of_get_parent(vnode);
	if (!vnode_parent)
		return NULL;

	dev_type = of_get_property(vnode_parent, "device_type", NULL);
	of_node_put(vnode_parent);
	if (!dev_type)
		return NULL;

	/* construct the kobject name from the device node */
	if (!strcmp(dev_type, "vdevice")) {
		const __be32 *prop;

		prop = of_get_property(vnode, "reg", NULL);
		if (!prop)
			return NULL;
		snprintf(kobj_name, sizeof(kobj_name), "%x",
			 (uint32_t)of_read_number(prop, 1));
	} else if (!strcmp(dev_type, "ibm,platform-facilities"))
		snprintf(kobj_name, sizeof(kobj_name), "%s", vnode->name);
	else
		return NULL;

	return vio_find_name(kobj_name);
}
EXPORT_SYMBOL(vio_find_node);

int vio_enable_interrupts(struct vio_dev *dev)
{
	int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
	if (rc != H_SUCCESS)
		printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
	return rc;
}
EXPORT_SYMBOL(vio_enable_interrupts);

int vio_disable_interrupts(struct vio_dev *dev)
{
	int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
	if (rc != H_SUCCESS)
		printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
	return rc;
}
EXPORT_SYMBOL(vio_disable_interrupts);
#endif /* CONFIG_PPC_PSERIES */