/*
 * IBM PowerPC Virtual I/O Infrastructure Support.
 *
 * Copyright (c) 2003,2008 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/console.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kobject.h>

#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/vio.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/page.h>
#include <asm/hvcall.h>

static struct vio_dev vio_bus_device = { /* fake "parent" device */
        .name = "vio",
        .type = "",
        .dev.init_name = "vio",
        .dev.bus = &vio_bus_type,
};

#ifdef CONFIG_PPC_SMLPAR
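
/**
 * vio_cmo_pool - A pool of IO memory for CMO use
 *
 * @size: The size of the pool in bytes
 * @free: The amount of free memory in the pool
 */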
struct vio_cmo_pool {
        size_t size;
        size_t free;
};

/* Delay, in jiffies, before a queued balance operation is run */
#define VIO_CMO_BALANCE_DELAY 100

/* Entitlement is portioned out to devices in chunks of this many bytes */
#define VIO_CMO_BALANCE_CHUNK 131072
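
/**
 * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement
 *
 * @viodev: struct vio_dev pointer
 * @list: pointer to other devices on bus that are being tracked
 */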
struct vio_cmo_dev_entry {
        struct vio_dev *viodev;
        struct list_head list;
};
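
/**
 * vio_cmo - VIO bus accounting structure for CMO entitlement
 *
 * @lock: spinlock for entire structure
 * @balance_q: work queue for balancing system entitlement
 * @device_list: list of CMO-enabled devices requiring entitlement
 * @entitled: total system entitlement in bytes
 * @reserve: pool of memory from which devices reserve entitlement, incl. spare
 * @excess: pool of excess entitlement not needed for device reserves or spare
 * @spare: IO memory for device hotplug functionality
 * @min: minimum necessary for system operation
 * @desired: desired memory for system operation
 * @curr: bytes currently allocated
 * @high: high water mark for IO data usage
 */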
struct vio_cmo {
        spinlock_t lock;
        struct delayed_work balance_q;
        struct list_head device_list;
        size_t entitled;
        struct vio_cmo_pool reserve;
        struct vio_cmo_pool excess;
        size_t spare;
        size_t min;
        size_t desired;
        size_t curr;
        size_t high;
} vio_cmo;
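
/**
 * vio_cmo_num_OF_devs - Count the number of OF devices that have DMA windows
 */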
static int vio_cmo_num_OF_devs(void)
{
        struct device_node *node_vroot;
        int count = 0;

        /*
         * Count the number of vdevice entries with an
         * ibm,my-dma-window OF property
         */
        node_vroot = of_find_node_by_name(NULL, "vdevice");
        if (node_vroot) {
                struct device_node *of_node;
                struct property *prop;

                for_each_child_of_node(node_vroot, of_node) {
                        prop = of_find_property(of_node, "ibm,my-dma-window",
                                                NULL);
                        if (prop)
                                count++;
                }
        }
        of_node_put(node_vroot);
        return count;
}
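
/**
 * vio_cmo_alloc - allocate IO memory for CMO-enable devices
 *
 * @viodev: VIO device requesting IO memory
 * @size: size of allocation requested
 *
 * Allocations come from memory reserved for the devices and any excess
 * IO memory available to all devices.  The spare pool used to service
 * hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be
 * used.
 *
 * Return codes:
 *  0 for successful allocation and -ENOMEM for a failure
 */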
static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
{
        unsigned long flags;
        size_t reserve_free = 0;
        size_t excess_free = 0;
        int ret = -ENOMEM;

        spin_lock_irqsave(&vio_cmo.lock, flags);

        /* Determine the amount of free entitlement available in reserve */
        if (viodev->cmo.entitled > viodev->cmo.allocated)
                reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;

        /* If spare is not fulfilled, the excess pool can not be used. */
        if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
                excess_free = vio_cmo.excess.free;

        /* The request can be satisfied */
        if ((reserve_free + excess_free) >= size) {
                vio_cmo.curr += size;
                if (vio_cmo.curr > vio_cmo.high)
                        vio_cmo.high = vio_cmo.curr;
                viodev->cmo.allocated += size;
                size -= min(reserve_free, size);
                vio_cmo.excess.free -= size;
                ret = 0;
        }

        spin_unlock_irqrestore(&vio_cmo.lock, flags);
        return ret;
}
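
/**
 * vio_cmo_dealloc - deallocate IO memory from CMO-enable devices
 *
 * @viodev: VIO device freeing IO memory
 * @size: size of deallocation
 *
 * IO memory is freed by the device back to the correct memory pools.
 * The spare pool is replenished first from either memory pool, then
 * the reserve pool is used to reduce device entitlement, and finally
 * the excess pool is used to increase the reserve pool toward the
 * desired entitlement target.
 */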
static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
{
        unsigned long flags;
        size_t spare_needed = 0;
        size_t excess_freed = 0;
        size_t reserve_freed = size;
        size_t tmp;
        int balance = 0;

        spin_lock_irqsave(&vio_cmo.lock, flags);
        vio_cmo.curr -= size;

        /* Amount of memory freed from the excess pool */
        if (viodev->cmo.allocated > viodev->cmo.entitled) {
                excess_freed = min(reserve_freed, (viodev->cmo.allocated -
                                                   viodev->cmo.entitled));
                reserve_freed -= excess_freed;
        }

        /* Remove allocation from device */
        viodev->cmo.allocated -= (reserve_freed + excess_freed);

        /* Spare is a subset of the reserve pool, replenish it first. */
        spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare;

        /*
         * Replenish the spare in the reserve pool from the excess pool.
         * This moves entitlement into the reserve pool.
         */
        if (spare_needed && excess_freed) {
                tmp = min(excess_freed, spare_needed);
                vio_cmo.excess.size -= tmp;
                vio_cmo.reserve.size += tmp;
                vio_cmo.spare += tmp;
                excess_freed -= tmp;
                spare_needed -= tmp;
                balance = 1;
        }

        /*
         * Replenish the spare in the reserve pool from the reserve pool.
         * This removes entitlement from the device down to VIO_CMO_MIN_ENT,
         * if needed, and gives it to the spare pool. The amount of used
         * memory in this pool does not change.
         */
        if (spare_needed && reserve_freed) {
                tmp = min3(spare_needed, reserve_freed,
                           (viodev->cmo.entitled - VIO_CMO_MIN_ENT));

                vio_cmo.spare += tmp;
                viodev->cmo.entitled -= tmp;
                reserve_freed -= tmp;
                spare_needed -= tmp;
                balance = 1;
        }

        /*
         * Increase the reserve pool until the desired allocation is met.
         * Move an allocation freed from the excess pool into the reserve
         * pool and schedule a balance operation.
         */
        if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) {
                tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size));

                vio_cmo.excess.size -= tmp;
                vio_cmo.reserve.size += tmp;
                excess_freed -= tmp;
                balance = 1;
        }

        /* Return memory from the excess pool to that pool */
        if (excess_freed)
                vio_cmo.excess.free += excess_freed;

        if (balance)
                schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY);
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
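
/**
 * vio_cmo_entitlement_update - Manage system entitlement changes
 *
 * @new_entitlement: new system entitlement to attempt to accommodate
 *
 * Increases in entitlement will be used to fulfill the spare entitlement
 * and the rest is given to the excess pool.  Decreases, if they are
 * possible, come from the excess pool and from unused device entitlement.
 *
 * Returns: 0 on success, -ENOMEM when change can not be made
 */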
int vio_cmo_entitlement_update(size_t new_entitlement)
{
        struct vio_dev *viodev;
        struct vio_cmo_dev_entry *dev_ent;
        unsigned long flags;
        size_t avail, delta, tmp;

        spin_lock_irqsave(&vio_cmo.lock, flags);

        /* Entitlement increases */
        if (new_entitlement > vio_cmo.entitled) {
                delta = new_entitlement - vio_cmo.entitled;

                /* Fulfill spare allocation */
                if (vio_cmo.spare < VIO_CMO_MIN_ENT) {
                        tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare));
                        vio_cmo.spare += tmp;
                        vio_cmo.reserve.size += tmp;
                        delta -= tmp;
                }

                /* Remaining new allocation goes to the excess pool */
                vio_cmo.entitled += delta;
                vio_cmo.excess.size += delta;
                vio_cmo.excess.free += delta;

                goto out;
        }

        /* Entitlement decreases */
        delta = vio_cmo.entitled - new_entitlement;
        avail = vio_cmo.excess.free;

        /*
         * Need to check how much entitlement is available: devices can
         * give up any entitlement above the greater of their allocated
         * amount and the minimum entitlement.
         */
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                if (avail >= delta)
                        break;

                viodev = dev_ent->viodev;
                if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
                    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
                        avail += viodev->cmo.entitled -
                                 max_t(size_t, viodev->cmo.allocated,
                                       VIO_CMO_MIN_ENT);
        }

        if (delta <= avail) {
                vio_cmo.entitled -= delta;

                /* Take entitlement from the excess pool first */
                tmp = min(vio_cmo.excess.free, delta);
                vio_cmo.excess.size -= tmp;
                vio_cmo.excess.free -= tmp;
                delta -= tmp;

                /*
                 * Remove all but VIO_CMO_MIN_ENT bytes from devices
                 * until the entitlement change is served
                 */
                list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                        if (!delta)
                                break;

                        viodev = dev_ent->viodev;
                        tmp = 0;
                        if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
                            (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
                                tmp = viodev->cmo.entitled -
                                      max_t(size_t, viodev->cmo.allocated,
                                            VIO_CMO_MIN_ENT);
                        viodev->cmo.entitled -= min(tmp, delta);
                        delta -= min(tmp, delta);
                }
        } else {
                spin_unlock_irqrestore(&vio_cmo.lock, flags);
                return -ENOMEM;
        }

out:
        schedule_delayed_work(&vio_cmo.balance_q, 0);
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
        return 0;
}
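
/**
 * vio_cmo_balance - Balance entitlement among devices
 *
 * @work: work queue structure for this operation
 *
 * Any system entitlement above the minimum needed for devices, or
 * already allocated to devices, can be distributed to the devices.
 * The list of devices is iterated through to recalculate the desired
 * entitlement level and to determine how much entitlement above the
 * minimum entitlement is allocated to devices.
 *
 * Small chunks of the available entitlement are given to devices until
 * their requirements are fulfilled or there is no entitlement left to
 * give.  Upon completion, sizes of the reserve and excess pools are
 * recalculated.
 *
 * The system minimum entitlement level is also recalculated here.
 * Entitlement will be reserved for devices even after vio_bus_remove to
 * accommodate reallocation of the device entitlement within a bus_probe()
 * (hot plug add) if the device is re-added.
 */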
static void vio_cmo_balance(struct work_struct *work)
{
        struct vio_cmo *cmo;
        struct vio_dev *viodev;
        struct vio_cmo_dev_entry *dev_ent;
        unsigned long flags;
        size_t avail = 0, level, chunk, need;
        int devcount = 0, fulfilled;

        cmo = container_of(work, struct vio_cmo, balance_q.work);

        spin_lock_irqsave(&vio_cmo.lock, flags);

        /* Calculate minimum entitlement and fulfill spare */
        cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
        BUG_ON(cmo->min > cmo->entitled);
        cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min));
        cmo->min += cmo->spare;
        cmo->desired = cmo->min;

        /*
         * Determine how much entitlement is available and reset device
         * entitlements
         */
        avail = cmo->entitled - cmo->spare;
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                viodev = dev_ent->viodev;
                devcount++;
                viodev->cmo.entitled = VIO_CMO_MIN_ENT;
                cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
                avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
        }

        /*
         * Having provided each device with the minimum entitlement, loop
         * over the devices portioning out the remaining entitlement
         * until there is nothing left.
         */
        level = VIO_CMO_MIN_ENT;
        while (avail) {
                fulfilled = 0;
                list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                        viodev = dev_ent->viodev;

                        if (viodev->cmo.desired <= level) {
                                fulfilled++;
                                continue;
                        }

                        /*
                         * Give the device up to VIO_CMO_BALANCE_CHUNK
                         * bytes of entitlement, but do not exceed the
                         * desired level of entitlement for the device.
                         */
                        chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
                        chunk = min(chunk, (viodev->cmo.desired -
                                            viodev->cmo.entitled));
                        viodev->cmo.entitled += chunk;

                        /*
                         * If the memory for this entitlement increase was
                         * already allocated to the device it does not come
                         * from the available pool being portioned out.
                         */
                        need = max(viodev->cmo.allocated, viodev->cmo.entitled) -
                               max(viodev->cmo.allocated, level);
                        avail -= need;
                }
                if (fulfilled == devcount)
                        break;
                level += VIO_CMO_BALANCE_CHUNK;
        }

        /* Calculate new reserve and excess pool sizes */
        cmo->reserve.size = cmo->min;
        cmo->excess.free = 0;
        cmo->excess.size = 0;
        need = 0;
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                viodev = dev_ent->viodev;
                /* Calculated reserve size above the minimum entitlement */
                if (viodev->cmo.entitled)
                        cmo->reserve.size += (viodev->cmo.entitled -
                                              VIO_CMO_MIN_ENT);
                /* Calculated used excess entitlement */
                if (viodev->cmo.allocated > viodev->cmo.entitled)
                        need += viodev->cmo.allocated - viodev->cmo.entitled;
        }
        cmo->excess.size = cmo->entitled - cmo->reserve.size;
        cmo->excess.free = cmo->excess.size - need;

        cancel_delayed_work(to_delayed_work(work));
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
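
/*
 * The vio_dma_iommu_* ops wrap the generic dma_iommu_ops, charging each
 * mapping against the device's CMO entitlement via vio_cmo_alloc() before
 * handing off to the IOMMU, and refunding the entitlement on unmap/free
 * or when the underlying operation fails.
 */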
static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
                                          dma_addr_t *dma_handle, gfp_t flag,
                                          struct dma_attrs *attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        void *ret;

        if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
                atomic_inc(&viodev->cmo.allocs_failed);
                return NULL;
        }

        ret = dma_iommu_ops.alloc(dev, size, dma_handle, flag, attrs);
        if (unlikely(ret == NULL)) {
                vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
                atomic_inc(&viodev->cmo.allocs_failed);
        }

        return ret;
}

static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
                                        void *vaddr, dma_addr_t dma_handle,
                                        struct dma_attrs *attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);

        dma_iommu_ops.free(dev, size, vaddr, dma_handle, attrs);

        vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
}

static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
                                         unsigned long offset, size_t size,
                                         enum dma_data_direction direction,
                                         struct dma_attrs *attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct iommu_table *tbl;
        dma_addr_t ret = DMA_ERROR_CODE;

        tbl = get_iommu_table_base(dev);
        if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) {
                atomic_inc(&viodev->cmo.allocs_failed);
                return ret;
        }

        ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
        if (unlikely(dma_mapping_error(dev, ret))) {
                vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
                atomic_inc(&viodev->cmo.allocs_failed);
        }

        return ret;
}

static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
                                     size_t size,
                                     enum dma_data_direction direction,
                                     struct dma_attrs *attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct iommu_table *tbl;

        tbl = get_iommu_table_base(dev);
        dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);

        vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
}

static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
                                int nelems, enum dma_data_direction direction,
                                struct dma_attrs *attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct iommu_table *tbl;
        struct scatterlist *sgl;
        int ret, count;
        size_t alloc_size = 0;

        tbl = get_iommu_table_base(dev);
        for_each_sg(sglist, sgl, nelems, count)
                alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));

        if (vio_cmo_alloc(viodev, alloc_size)) {
                atomic_inc(&viodev->cmo.allocs_failed);
                return 0;
        }

        ret = dma_iommu_ops.map_sg(dev, sglist, nelems, direction, attrs);

        if (unlikely(!ret)) {
                vio_cmo_dealloc(viodev, alloc_size);
                atomic_inc(&viodev->cmo.allocs_failed);
                return ret;
        }

        /* Refund entitlement that the mapped entries did not end up using */
        for_each_sg(sglist, sgl, ret, count)
                alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
        if (alloc_size)
                vio_cmo_dealloc(viodev, alloc_size);

        return ret;
}

static void vio_dma_iommu_unmap_sg(struct device *dev,
                                   struct scatterlist *sglist, int nelems,
                                   enum dma_data_direction direction,
                                   struct dma_attrs *attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct iommu_table *tbl;
        struct scatterlist *sgl;
        size_t alloc_size = 0;
        int count;

        tbl = get_iommu_table_base(dev);
        for_each_sg(sglist, sgl, nelems, count)
                alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));

        dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);

        vio_cmo_dealloc(viodev, alloc_size);
}

static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask)
{
        return dma_iommu_ops.dma_supported(dev, mask);
}

static u64 vio_dma_get_required_mask(struct device *dev)
{
        return dma_iommu_ops.get_required_mask(dev);
}

struct dma_map_ops vio_dma_mapping_ops = {
        .alloc             = vio_dma_iommu_alloc_coherent,
        .free              = vio_dma_iommu_free_coherent,
        .mmap              = dma_direct_mmap_coherent,
        .map_sg            = vio_dma_iommu_map_sg,
        .unmap_sg          = vio_dma_iommu_unmap_sg,
        .map_page          = vio_dma_iommu_map_page,
        .unmap_page        = vio_dma_iommu_unmap_page,
        .dma_supported     = vio_dma_iommu_dma_supported,
        .get_required_mask = vio_dma_get_required_mask,
};
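
/**
 * vio_cmo_set_dev_desired - Set desired entitlement for a device
 *
 * @viodev: struct vio_dev for device to alter
 * @desired: new desired entitlement level in bytes
 *
 * For use by devices to request a change to their entitlement at runtime or
 * through sysfs.  The desired entitlement level is changed and a balancing
 * of system resources is scheduled.
 */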
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
{
        unsigned long flags;
        struct vio_cmo_dev_entry *dev_ent;
        int found = 0;

        if (!firmware_has_feature(FW_FEATURE_CMO))
                return;

        spin_lock_irqsave(&vio_cmo.lock, flags);
        if (desired < VIO_CMO_MIN_ENT)
                desired = VIO_CMO_MIN_ENT;

        /*
         * Changes will not be made for devices not in the device list.
         * If it is not in the device list, then no driver is loaded
         * for the device and it can not receive entitlement.
         */
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
                if (viodev == dev_ent->viodev) {
                        found = 1;
                        break;
                }
        if (!found) {
                spin_unlock_irqrestore(&vio_cmo.lock, flags);
                return;
        }

        /* Increase/decrease in desired device entitlement */
        if (desired >= viodev->cmo.desired) {
                /* Just bump the bus and device values prior to a balance */
                vio_cmo.desired += desired - viodev->cmo.desired;
                viodev->cmo.desired = desired;
        } else {
                /* Decrease bus and device values for desired entitlement */
                vio_cmo.desired -= viodev->cmo.desired - desired;
                viodev->cmo.desired = desired;
                /*
                 * If less entitlement is desired than current entitlement,
                 * move any entitlement in the change region from the
                 * reserve pool to the excess pool.
                 */
                if (viodev->cmo.entitled > desired) {
                        vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
                        vio_cmo.excess.size += viodev->cmo.entitled - desired;
                        /*
                         * If entitlement moving from the reserve pool to the
                         * excess pool is currently unused, add to the excess
                         * free counter.
                         */
                        if (viodev->cmo.allocated < viodev->cmo.entitled)
                                vio_cmo.excess.free += viodev->cmo.entitled -
                                        max(viodev->cmo.allocated, desired);
                        viodev->cmo.entitled = desired;
                }
        }
        schedule_delayed_work(&vio_cmo.balance_q, 0);
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
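
/**
 * vio_cmo_bus_probe - Handle CMO specific bus probe activities
 *
 * @viodev: Pointer to struct vio_dev for device
 *
 * Determine the devices IO memory entitlement needs, attempting
 * to satisfy the system minimum entitlement at first and scheduling
 * a balance operation to take care of the rest at a later time.
 *
 * Returns: 0 on success, -EINVAL when device doesn't support CMO, and
 *          -ENOMEM when entitlement is not available for device or
 *          device entry.
 */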
static int vio_cmo_bus_probe(struct vio_dev *viodev)
{
        struct vio_cmo_dev_entry *dev_ent;
        struct device *dev = &viodev->dev;
        struct iommu_table *tbl;
        struct vio_driver *viodrv = to_vio_driver(dev->driver);
        unsigned long flags;
        size_t size;
        bool dma_capable = false;

        tbl = get_iommu_table_base(dev);

        /* A device requires entitlement if it has a DMA window property */
        switch (viodev->family) {
        case VDEVICE:
                if (of_get_property(viodev->dev.of_node,
                                    "ibm,my-dma-window", NULL))
                        dma_capable = true;
                break;
        case PFO:
                dma_capable = false;
                break;
        default:
                dev_warn(dev, "unknown device family: %d\n", viodev->family);
                BUG();
                break;
        }

        /* Configure entitlement for the device. */
        if (dma_capable) {
                /* Check that the driver is CMO enabled and get desired DMA */
                if (!viodrv->get_desired_dma) {
                        dev_err(dev, "%s: device driver does not support CMO\n",
                                __func__);
                        return -EINVAL;
                }

                viodev->cmo.desired =
                        IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl);
                if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
                        viodev->cmo.desired = VIO_CMO_MIN_ENT;
                size = VIO_CMO_MIN_ENT;

                dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry),
                                  GFP_KERNEL);
                if (!dev_ent)
                        return -ENOMEM;

                dev_ent->viodev = viodev;
                spin_lock_irqsave(&vio_cmo.lock, flags);
                list_add(&dev_ent->list, &vio_cmo.device_list);
        } else {
                viodev->cmo.desired = 0;
                size = 0;
                spin_lock_irqsave(&vio_cmo.lock, flags);
        }

        /*
         * If the needs for vio_cmo.min have not changed since they
         * were last set, the number of devices in the OF tree has
         * been constant and the IO memory for this is already in
         * the reserve pool.
         */
        if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) *
                            VIO_CMO_MIN_ENT)) {
                /* Updated desired entitlement if device requires it */
                if (size)
                        vio_cmo.desired += (viodev->cmo.desired -
                                            VIO_CMO_MIN_ENT);
        } else {
                size_t tmp;

                tmp = vio_cmo.spare + vio_cmo.excess.free;
                if (tmp < size) {
                        dev_err(dev, "%s: insufficient free "
                                "entitlement to add device. "
                                "Need %lu, have %lu\n", __func__,
                                size, tmp);
                        spin_unlock_irqrestore(&vio_cmo.lock, flags);
                        return -ENOMEM;
                }

                /* Use excess pool first to fulfill request */
                tmp = min(size, vio_cmo.excess.free);
                vio_cmo.excess.free -= tmp;
                vio_cmo.excess.size -= tmp;
                vio_cmo.reserve.size += tmp;

                /* Use spare if excess pool was insufficient */
                vio_cmo.spare -= size - tmp;

                /* Update bus accounting */
                vio_cmo.min += size;
                vio_cmo.desired += viodev->cmo.desired;
        }
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
        return 0;
}
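
/**
 * vio_cmo_bus_remove - Handle CMO specific bus removal activities
 *
 * @viodev: Pointer to struct vio_dev for device
 *
 * Remove the device from the cmo device list.  The minimum entitlement
 * will be reserved for the device as long as it exists in the system.
 * The rest of the entitlement the device had been allocated is returned
 * to the system.
 */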
static void vio_cmo_bus_remove(struct vio_dev *viodev)
{
        struct vio_cmo_dev_entry *dev_ent;
        unsigned long flags;
        size_t tmp;

        spin_lock_irqsave(&vio_cmo.lock, flags);
        if (viodev->cmo.allocated) {
                dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
                        "allocated after remove operation.\n",
                        __func__, viodev->cmo.allocated);
                BUG();
        }

        /*
         * Remove the device from the device list being maintained for
         * CMO enabled devices.
         */
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
                if (viodev == dev_ent->viodev) {
                        list_del(&dev_ent->list);
                        kfree(dev_ent);
                        break;
                }

        /*
         * Devices may not require any entitlement and they do not need
         * to be processed.  Otherwise, return the device's entitlement
         * back to the pools.
         */
        if (viodev->cmo.entitled) {
                /*
                 * This device has not yet left the OF tree, its
                 * minimum entitlement remains in vio_cmo.min and
                 * vio_cmo.desired
                 */
                vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);

                /*
                 * Save min allocation for device in reserve as long
                 * as it exists in OF tree as determined by later
                 * balance operation
                 */
                viodev->cmo.entitled -= VIO_CMO_MIN_ENT;

                /* Replenish spare from freed reserve pool */
                if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
                        tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
                                                         vio_cmo.spare));
                        vio_cmo.spare += tmp;
                        viodev->cmo.entitled -= tmp;
                }

                /* Remaining reserve goes to excess pool */
                vio_cmo.excess.size += viodev->cmo.entitled;
                vio_cmo.excess.free += viodev->cmo.entitled;
                vio_cmo.reserve.size -= viodev->cmo.entitled;

                /*
                 * Until the device is removed it will keep a
                 * minimum entitlement; this will guarantee that
                 * a module unload/load will result in a success.
                 */
                viodev->cmo.entitled = VIO_CMO_MIN_ENT;
                viodev->cmo.desired = VIO_CMO_MIN_ENT;
                atomic_set(&viodev->cmo.allocs_failed, 0);
        }

        spin_unlock_irqrestore(&vio_cmo.lock, flags);
}

static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
{
        set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
}
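
/**
 * vio_cmo_bus_init - CMO entitlement initialization at bus init time
 *
 * Set up the reserve and excess entitlement pools based on available
 * system entitlement and the number of devices in the OF tree that
 * require entitlement in the reserve pool.
 */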
static void vio_cmo_bus_init(void)
{
        struct hvcall_mpp_data mpp_data;
        int err;

        memset(&vio_cmo, 0, sizeof(struct vio_cmo));
        spin_lock_init(&vio_cmo.lock);
        INIT_LIST_HEAD(&vio_cmo.device_list);
        INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance);

        /* Get current system entitlement */
        err = h_get_mpp(&mpp_data);

        /*
         * On failure, continue with entitlement set to 0, will panic()
         * later if spare entitlement can not be reserved.
         */
        if (err != H_SUCCESS) {
                printk(KERN_ERR "%s: unable to determine system IO "
                       "entitlement. (%d)\n", __func__, err);
                vio_cmo.entitled = 0;
        } else {
                vio_cmo.entitled = mpp_data.entitled_mem;
        }

        /* Set reservation and check against entitlement */
        vio_cmo.spare = VIO_CMO_MIN_ENT;
        vio_cmo.reserve.size = vio_cmo.spare;
        vio_cmo.reserve.size += (vio_cmo_num_OF_devs() *
                                 VIO_CMO_MIN_ENT);
        if (vio_cmo.reserve.size > vio_cmo.entitled) {
                printk(KERN_ERR "%s: insufficient system entitlement\n",
                       __func__);
                panic("%s: Insufficient system entitlement", __func__);
        }

        /* Set the remaining accounting variables */
        vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size;
        vio_cmo.excess.free = vio_cmo.excess.size;
        vio_cmo.min = vio_cmo.reserve.size;
        vio_cmo.desired = vio_cmo.reserve.size;
}

/* sysfs device attributes for CMO-enabled devices */
#define viodev_cmo_rd_attr(name)                                        \
static ssize_t viodev_cmo_##name##_show(struct device *dev,             \
                                        struct device_attribute *attr,  \
                                        char *buf)                      \
{                                                                       \
        return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name);        \
}

static ssize_t viodev_cmo_allocs_failed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
}

static ssize_t viodev_cmo_allocs_failed_reset(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        atomic_set(&viodev->cmo.allocs_failed, 0);
        return count;
}

static ssize_t viodev_cmo_desired_set(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        size_t new_desired;
        int ret;

        ret = kstrtoul(buf, 10, &new_desired);
        if (ret)
                return ret;

        vio_cmo_set_dev_desired(viodev, new_desired);
        return count;
}

viodev_cmo_rd_attr(desired);
viodev_cmo_rd_attr(entitled);
viodev_cmo_rd_attr(allocated);

static ssize_t name_show(struct device *, struct device_attribute *, char *);
static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
                             char *buf);
static struct device_attribute vio_cmo_dev_attrs[] = {
        __ATTR_RO(name),
        __ATTR_RO(devspec),
        __ATTR_RO(modalias),
        __ATTR(cmo_desired,       S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
               viodev_cmo_desired_show, viodev_cmo_desired_set),
        __ATTR(cmo_entitled,      S_IRUGO, viodev_cmo_entitled_show,  NULL),
        __ATTR(cmo_allocated,     S_IRUGO, viodev_cmo_allocated_show, NULL),
        __ATTR(cmo_allocs_failed, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
               viodev_cmo_allocs_failed_show, viodev_cmo_allocs_failed_reset),
        __ATTR_NULL
};

/* sysfs bus attributes and methods for CMO-enabled busses */
#define viobus_cmo_rd_attr(name)                                        \
static ssize_t cmo_##name##_show(struct bus_type *bt, char *buf)        \
{                                                                       \
        return sprintf(buf, "%lu\n", vio_cmo.name);                     \
}                                                                       \
static BUS_ATTR_RO(cmo_##name)

#define viobus_cmo_pool_rd_attr(name, var)                              \
static ssize_t                                                          \
cmo_##name##_##var##_show(struct bus_type *bt, char *buf)               \
{                                                                       \
        return sprintf(buf, "%lu\n", vio_cmo.name.var);                 \
}                                                                       \
static BUS_ATTR_RO(cmo_##name##_##var)

viobus_cmo_rd_attr(entitled);
viobus_cmo_rd_attr(spare);
viobus_cmo_rd_attr(min);
viobus_cmo_rd_attr(desired);
viobus_cmo_rd_attr(curr);
viobus_cmo_pool_rd_attr(reserve, size);
viobus_cmo_pool_rd_attr(excess, size);
viobus_cmo_pool_rd_attr(excess, free);

static ssize_t cmo_high_show(struct bus_type *bt, char *buf)
{
        return sprintf(buf, "%lu\n", vio_cmo.high);
}

/* Writing anything to cmo_high resets the high water mark to the
 * current allocation level. */
static ssize_t cmo_high_store(struct bus_type *bt, const char *buf,
                              size_t count)
{
        unsigned long flags;

        spin_lock_irqsave(&vio_cmo.lock, flags);
        vio_cmo.high = vio_cmo.curr;
        spin_unlock_irqrestore(&vio_cmo.lock, flags);

        return count;
}
static BUS_ATTR_RW(cmo_high);

static struct attribute *vio_bus_attrs[] = {
        &bus_attr_cmo_entitled.attr,
        &bus_attr_cmo_spare.attr,
        &bus_attr_cmo_min.attr,
        &bus_attr_cmo_desired.attr,
        &bus_attr_cmo_curr.attr,
        &bus_attr_cmo_high.attr,
        &bus_attr_cmo_reserve_size.attr,
        &bus_attr_cmo_excess_size.attr,
        &bus_attr_cmo_excess_free.attr,
        NULL,
};
ATTRIBUTE_GROUPS(vio_bus);

static void vio_cmo_sysfs_init(void)
{
        vio_bus_type.dev_attrs = vio_cmo_dev_attrs;
        vio_bus_type.bus_groups = vio_bus_groups;
}
#else /* CONFIG_PPC_SMLPAR */
int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
static void vio_cmo_bus_init(void) {}
static void vio_cmo_sysfs_init(void) {}
#endif /* CONFIG_PPC_SMLPAR */
EXPORT_SYMBOL(vio_cmo_entitlement_update);
EXPORT_SYMBOL(vio_cmo_set_dev_desired);
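
/**
 * vio_h_cop_sync - Perform a synchronous PFO co-processor operation
 *
 * @vdev: pointer to a struct vio_dev for the device
 * @op: pointer to a struct vio_pfo_op for the operation parameters
 *
 * Calls the hypervisor to synchronously perform the PFO operation
 * described in @op.  On a busy response (H_BUSY, H_RESOURCE or
 * H_NOT_ENOUGH_RESOURCES) the operation is re-submitted until it
 * completes or, if @op->timeout is non-zero, until the timeout
 * expires.  The raw hcall status is stored in @op->hcall_err.
 *
 * Returns: 0 on success, or a negative errno translated from the
 * hcall return code.
 */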
int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op)
{
        struct device *dev = &vdev->dev;
        unsigned long deadline = 0;
        long hret = 0;
        int ret = 0;

        if (op->timeout)
                deadline = jiffies + msecs_to_jiffies(op->timeout);

        while (true) {
                hret = plpar_hcall_norets(H_COP, op->flags,
                                          vdev->resource_id,
                                          op->in, op->inlen, op->out,
                                          op->outlen, op->csbcpb);

                if (hret == H_SUCCESS ||
                    (hret != H_NOT_ENOUGH_RESOURCES &&
                     hret != H_BUSY && hret != H_RESOURCE) ||
                    (op->timeout && time_after(jiffies, deadline)))
                        break;

                dev_dbg(dev, "%s: hcall ret(%ld), retrying.\n", __func__, hret);
        }

        switch (hret) {
        case H_SUCCESS:
                ret = 0;
                break;
        case H_OP_MODE:
        case H_TOO_BIG:
                ret = -E2BIG;
                break;
        case H_RESCINDED:
                ret = -EACCES;
                break;
        case H_HARDWARE:
                ret = -EPERM;
                break;
        case H_NOT_ENOUGH_RESOURCES:
        case H_RESOURCE:
        case H_BUSY:
                ret = -EBUSY;
                break;
        default:
                ret = -EINVAL;
                break;
        }

        if (ret)
                dev_dbg(dev, "%s: Sync h_cop_op failure (ret:%d) (hret:%ld)\n",
                        __func__, ret, hret);

        op->hcall_err = hret;
        return ret;
}
EXPORT_SYMBOL(vio_h_cop_sync);
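
/*
 * Build and initialize an iommu_table for the device's TCE-based DMA
 * window, as described by its "ibm,my-dma-window" OF property.  Returns
 * NULL when the device has no DMA window.
 */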
static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
{
        const __be32 *dma_window;
        struct iommu_table *tbl;
        unsigned long offset, size;

        dma_window = of_get_property(dev->dev.of_node,
                                     "ibm,my-dma-window", NULL);
        if (!dma_window)
                return NULL;

        tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
        if (tbl == NULL)
                return NULL;

        of_parse_dma_window(dev->dev.of_node, dma_window,
                            &tbl->it_index, &offset, &size);

        /* TCE table size - measured in entries */
        tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
        tbl->it_size = size >> tbl->it_page_shift;
        /* offset for VIO should always be 0 */
        tbl->it_offset = offset >> tbl->it_page_shift;
        tbl->it_busno = 0;
        tbl->it_type = TCE_VB;
        tbl->it_blocksize = 16;

        if (firmware_has_feature(FW_FEATURE_LPAR))
                tbl->it_ops = &iommu_table_lpar_multi_ops;
        else
                tbl->it_ops = &iommu_table_pseries_ops;

        return iommu_init_table(tbl, -1);
}
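
/**
 * vio_match_device - Tell if a VIO device has a matching VIO device id structure.
 *
 * @ids: array of VIO device id structures to search in
 * @dev: the VIO device structure to match against
 *
 * Used by a driver to check whether a VIO device present in the
 * system is in its list of supported devices. Returns the matching
 * vio_device_id structure or NULL if there is no match.
 */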
static const struct vio_device_id *vio_match_device(
                const struct vio_device_id *ids, const struct vio_dev *dev)
{
        while (ids->type[0] != '\0') {
                if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
                    of_device_is_compatible(dev->dev.of_node,
                                            ids->compat))
                        return ids;
                ids++;
        }
        return NULL;
}
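
/*
 * Convert from struct device to struct vio_dev and pass to driver.
 * dev->driver has already been set by generic code because vio_bus_match
 * succeeded.
 */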
static int vio_bus_probe(struct device *dev)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct vio_driver *viodrv = to_vio_driver(dev->driver);
        const struct vio_device_id *id;
        int error = -ENODEV;

        if (!viodrv->probe)
                return error;

        id = vio_match_device(viodrv->id_table, viodev);
        if (id) {
                memset(&viodev->cmo, 0, sizeof(viodev->cmo));
                if (firmware_has_feature(FW_FEATURE_CMO)) {
                        error = vio_cmo_bus_probe(viodev);
                        if (error)
                                return error;
                }
                error = viodrv->probe(viodev, id);
                if (error && firmware_has_feature(FW_FEATURE_CMO))
                        vio_cmo_bus_remove(viodev);
        }

        return error;
}
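
/* convert from struct device to struct vio_dev and pass to driver. */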
static int vio_bus_remove(struct device *dev)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct vio_driver *viodrv = to_vio_driver(dev->driver);
        struct device *devptr;
        int ret = 1;

        /*
         * Hold a reference to the device after the remove function is called
         * to allow for CMO accounting cleanup for the device.
         */
        devptr = get_device(dev);

        if (viodrv->remove)
                ret = viodrv->remove(viodev);

        if (!ret && firmware_has_feature(FW_FEATURE_CMO))
                vio_cmo_bus_remove(viodev);

        put_device(devptr);
        return ret;
}
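
/**
 * __vio_register_driver - Register a new vio driver
 *
 * @viodrv: The vio_driver structure to be registered.
 * @owner: owning module
 * @mod_name: name of the owning module
 */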
int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
                          const char *mod_name)
{
        pr_debug("%s: driver %s registering\n", __func__, viodrv->name);

        /* fill in 'struct driver' fields */
        viodrv->driver.name = viodrv->name;
        viodrv->driver.pm = viodrv->pm;
        viodrv->driver.bus = &vio_bus_type;
        viodrv->driver.owner = owner;
        viodrv->driver.mod_name = mod_name;

        return driver_register(&viodrv->driver);
}
EXPORT_SYMBOL(__vio_register_driver);
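
/**
 * vio_unregister_driver - Remove registration of vio driver.
 *
 * @viodrv: The vio_driver struct to be removed from registration
 */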
void vio_unregister_driver(struct vio_driver *viodrv)
{
        driver_unregister(&viodrv->driver);
}
EXPORT_SYMBOL(vio_unregister_driver);

/* vio_dev refcount hit 0 */
static void vio_dev_release(struct device *dev)
{
        struct iommu_table *tbl = get_iommu_table_base(dev);

        if (tbl)
                iommu_free_table(tbl, of_node_full_name(dev->of_node));
        of_node_put(dev->of_node);
        kfree(to_vio_dev(dev));
}
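
/**
 * vio_register_device_node - Register a new vio device.
 *
 * @of_node: The OF node for this device.
 *
 * Creates and initializes a vio_dev structure from the data in
 * of_node and adds it to the list of virtual devices.
 * Returns a pointer to the created vio_dev or NULL if node has
 * NULL value of compatible property.
 */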
struct vio_dev *vio_register_device_node(struct device_node *of_node)
{
        struct vio_dev *viodev;
        struct device_node *parent_node;
        const __be32 *prop;
        enum vio_dev_family family;
        const char *of_node_name = of_node->name ? of_node->name : "<unknown>";

        /*
         * Determine if this node is under the /vdevice node or under the
         * /ibm,platform-facilities node.  This decides the device's family.
         */
        parent_node = of_get_parent(of_node);
        if (parent_node) {
                if (!strcmp(parent_node->full_name, "/ibm,platform-facilities"))
                        family = PFO;
                else if (!strcmp(parent_node->full_name, "/vdevice"))
                        family = VDEVICE;
                else {
                        pr_warn("%s: parent(%s) of %s not recognized.\n",
                                __func__,
                                parent_node->full_name,
                                of_node_name);
                        of_node_put(parent_node);
                        return NULL;
                }
                of_node_put(parent_node);
        } else {
                pr_warn("%s: could not determine the parent of node %s.\n",
                        __func__, of_node_name);
                return NULL;
        }

        if (family == PFO) {
                if (of_get_property(of_node, "interrupt-controller", NULL)) {
                        pr_debug("%s: Skipping the interrupt controller %s.\n",
                                 __func__, of_node_name);
                        return NULL;
                }
        }

        /* allocate a vio_dev for this node */
        viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
        if (viodev == NULL) {
                pr_warn("%s: allocation failure for VIO device.\n", __func__);
                return NULL;
        }

        /* we need the 'device_type' property, in addition to 'compatible' */
        viodev->family = family;
        if (viodev->family == VDEVICE) {
                unsigned int unit_address;

                if (of_node->type != NULL)
                        viodev->type = of_node->type;
                else {
                        pr_warn("%s: node %s is missing the 'device_type' "
                                "property.\n", __func__, of_node_name);
                        goto out;
                }

                prop = of_get_property(of_node, "reg", NULL);
                if (prop == NULL) {
                        pr_warn("%s: node %s missing 'reg'\n",
                                __func__, of_node_name);
                        goto out;
                }
                unit_address = of_read_number(prop, 1);
                dev_set_name(&viodev->dev, "%x", unit_address);
                viodev->irq = irq_of_parse_and_map(of_node, 0);
                viodev->unit_address = unit_address;
        } else {
                /* PFO devices need their resource_id for submitting COP_OPs
                 * This is an optional field for devices, but is required when
                 * performing synchronous ops */
                prop = of_get_property(of_node, "ibm,resource-id", NULL);
                if (prop != NULL)
                        viodev->resource_id = of_read_number(prop, 1);

                dev_set_name(&viodev->dev, "%s", of_node_name);
                viodev->type = of_node_name;
                viodev->irq = 0;
        }

        viodev->name = of_node->name;
        viodev->dev.of_node = of_node_get(of_node);

        set_dev_node(&viodev->dev, of_node_to_nid(of_node));

        /* init generic 'struct device' fields: */
        viodev->dev.parent = &vio_bus_device.dev;
        viodev->dev.bus = &vio_bus_type;
        viodev->dev.release = vio_dev_release;

        if (of_get_property(viodev->dev.of_node, "ibm,my-dma-window", NULL)) {
                if (firmware_has_feature(FW_FEATURE_CMO))
                        vio_cmo_set_dma_ops(viodev);
                else
                        set_dma_ops(&viodev->dev, &dma_iommu_ops);

                set_iommu_table_base(&viodev->dev,
                                     vio_build_iommu_table(viodev));

                /* needed to ensure proper operation of coherent allocations
                 * later, in case driver doesn't set it explicitly */
                viodev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
                viodev->dev.dma_mask = &viodev->dev.coherent_dma_mask;
        }

        /* register with generic device framework */
        if (device_register(&viodev->dev)) {
                printk(KERN_ERR "%s: failed to register device %s\n",
                       __func__, dev_name(&viodev->dev));
                put_device(&viodev->dev);
                return NULL;
        }

        return viodev;

out:    /* Use this exit point for any return prior to device_register */
        kfree(viodev);

        return NULL;
}
EXPORT_SYMBOL(vio_register_device_node);
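
/**
 * vio_bus_scan_register_devices - Scan OF and register each child device
 *
 * @root_name: OF node name for the root of the subtree to search.
 *             This must be non-NULL
 *
 * Starting from the root node provided, register the device node for
 * each child beneath the root.
 */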
static void vio_bus_scan_register_devices(char *root_name)
{
        struct device_node *node_root, *node_child;

        if (!root_name)
                return;

        node_root = of_find_node_by_name(NULL, root_name);
        if (node_root) {
                /*
                 * Create struct vio_devices for each virtual device in
                 * the device tree. Drivers will associate with them later.
                 */
                node_child = of_get_next_child(node_root, NULL);
                while (node_child) {
                        vio_register_device_node(node_child);
                        node_child = of_get_next_child(node_root, node_child);
                }
                of_node_put(node_root);
        }
}
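
/**
 * vio_bus_init - Initialize the virtual IO bus
 */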
static int __init vio_bus_init(void)
{
        int err;

        if (firmware_has_feature(FW_FEATURE_CMO))
                vio_cmo_sysfs_init();

        err = bus_register(&vio_bus_type);
        if (err) {
                printk(KERN_ERR "failed to register VIO bus\n");
                return err;
        }

        /*
         * The fake parent of all vio devices, just to give us
         * a nice directory
         */
        err = device_register(&vio_bus_device.dev);
        if (err) {
                printk(KERN_WARNING "%s: device_register returned %i\n",
                       __func__, err);
                return err;
        }

        if (firmware_has_feature(FW_FEATURE_CMO))
                vio_cmo_bus_init();

        return 0;
}
postcore_initcall(vio_bus_init);
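
/**
 * vio_device_init - Scan the device tree and register the virtual devices
 */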
static int __init vio_device_init(void)
{
        vio_bus_scan_register_devices("vdevice");
        vio_bus_scan_register_devices("ibm,platform-facilities");

        return 0;
}
device_initcall(vio_device_init);

static ssize_t name_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
}

static ssize_t devspec_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct device_node *of_node = dev->of_node;

        return sprintf(buf, "%s\n", of_node_full_name(of_node));
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        const struct vio_dev *vio_dev = to_vio_dev(dev);
        struct device_node *dn;
        const char *cp;

        dn = dev->of_node;
        if (!dn) {
                strcpy(buf, "\n");
                return strlen(buf);
        }
        cp = of_get_property(dn, "compatible", NULL);
        if (!cp) {
                strcpy(buf, "\n");
                return strlen(buf);
        }

        return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
}

static struct device_attribute vio_dev_attrs[] = {
        __ATTR_RO(name),
        __ATTR_RO(devspec),
        __ATTR_RO(modalias),
        __ATTR_NULL
};

void vio_unregister_device(struct vio_dev *viodev)
{
        device_unregister(&viodev->dev);
}
EXPORT_SYMBOL(vio_unregister_device);
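
/*
 * A driver matches a device when one of the entries in its id table
 * matches the device's type string and "compatible" OF property.
 */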
static int vio_bus_match(struct device *dev, struct device_driver *drv)
{
        const struct vio_dev *vio_dev = to_vio_dev(dev);
        struct vio_driver *vio_drv = to_vio_driver(drv);
        const struct vio_device_id *ids = vio_drv->id_table;

        return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL);
}

static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
{
        const struct vio_dev *vio_dev = to_vio_dev(dev);
        struct device_node *dn;
        const char *cp;

        dn = dev->of_node;
        if (!dn)
                return -ENODEV;
        cp = of_get_property(dn, "compatible", NULL);
        if (!cp)
                return -ENODEV;

        add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp);
        return 0;
}

struct bus_type vio_bus_type = {
        .name = "vio",
        .dev_attrs = vio_dev_attrs,
        .uevent = vio_hotplug,
        .match = vio_bus_match,
        .probe = vio_bus_probe,
        .remove = vio_bus_remove,
};
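
/**
 * vio_get_attribute - get attribute for virtual device
 *
 * @vdev: The vio device to get property.
 * @which: The property/attribute to be extracted.
 * @length: Pointer to length of returned data size (unused if NULL).
 *
 * Calls prom.c's of_get_property() to return the value of the
 * attribute specified by @which
 */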
const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
{
        return of_get_property(vdev->dev.of_node, which, length);
}
EXPORT_SYMBOL(vio_get_attribute);

#ifdef CONFIG_PPC_PSERIES
/* vio_find_name() - internal because only vio.c knows how we formatted the
 * kobject name
 */
static struct vio_dev *vio_find_name(const char *name)
{
        struct device *found;

        found = bus_find_device_by_name(&vio_bus_type, NULL, name);
        if (!found)
                return NULL;

        return to_vio_dev(found);
}
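
/**
 * vio_find_node - find an already-registered vio_dev
 *
 * @vnode: device_node of the virtual device we're looking for
 *
 * Takes a reference to the embedded struct device which needs to be
 * dropped after use.
 */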
struct vio_dev *vio_find_node(struct device_node *vnode)
{
        char kobj_name[20];
        struct device_node *vnode_parent;
        const char *dev_type;

        vnode_parent = of_get_parent(vnode);
        if (!vnode_parent)
                return NULL;

        dev_type = of_get_property(vnode_parent, "device_type", NULL);
        of_node_put(vnode_parent);
        if (!dev_type)
                return NULL;

        /* construct the kobject name from the device node */
        if (!strcmp(dev_type, "vdevice")) {
                const __be32 *prop;

                prop = of_get_property(vnode, "reg", NULL);
                if (!prop)
                        return NULL;
                snprintf(kobj_name, sizeof(kobj_name), "%x",
                         (uint32_t)of_read_number(prop, 1));
        } else if (!strcmp(dev_type, "ibm,platform-facilities"))
                snprintf(kobj_name, sizeof(kobj_name), "%s", vnode->name);
        else
                return NULL;

        return vio_find_name(kobj_name);
}
EXPORT_SYMBOL(vio_find_node);

int vio_enable_interrupts(struct vio_dev *dev)
{
        int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
        if (rc != H_SUCCESS)
                printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
        return rc;
}
EXPORT_SYMBOL(vio_enable_interrupts);

int vio_disable_interrupts(struct vio_dev *dev)
{
        int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
        if (rc != H_SUCCESS)
                printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
        return rc;
}
EXPORT_SYMBOL(vio_disable_interrupts);
#endif /* CONFIG_PPC_PSERIES */
1703