#define pr_fmt(fmt)  "irq: " fmt

#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/topology.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/fs.h>

#define IRQ_DOMAIN_MAP_LEGACY 0 /* driver allocated fixed range of irqs.
				 * ie. legacy 8259, gets irqs 1..15 */
#define IRQ_DOMAIN_MAP_NOMAP 1 /* no fast reverse mapping */
#define IRQ_DOMAIN_MAP_LINEAR 2 /* linear map of interrupts */
#define IRQ_DOMAIN_MAP_TREE 3 /* radix tree */

static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);

static DEFINE_MUTEX(revmap_trees_mutex);
static struct irq_domain *irq_default_domain;

/**
 * irq_domain_alloc() - Allocate a new irq_domain data structure
 * @of_node: optional device-tree node of the interrupt controller
 * @revmap_type: type of reverse mapping to use
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates and initializes an irq_domain structure. Returns a pointer to
 * the new domain, or NULL on failure. The caller is expected to register
 * the domain with irq_domain_add().
 */
static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
					   unsigned int revmap_type,
					   const struct irq_domain_ops *ops,
					   void *host_data)
{
	struct irq_domain *domain;

	domain = kzalloc_node(sizeof(*domain), GFP_KERNEL,
			      of_node_to_nid(of_node));
	if (WARN_ON(!domain))
		return NULL;

	/* Fill structure */
	domain->revmap_type = revmap_type;
	domain->ops = ops;
	domain->host_data = host_data;
	domain->of_node = of_node_get(of_node);

	return domain;
}

static void irq_domain_free(struct irq_domain *domain)
{
	of_node_put(domain->of_node);
	kfree(domain);
}

static void irq_domain_add(struct irq_domain *domain)
{
	mutex_lock(&irq_domain_mutex);
	list_add(&domain->link, &irq_domain_list);
	mutex_unlock(&irq_domain_mutex);
	pr_debug("Allocated domain of type %d @0x%p\n",
		 domain->revmap_type, domain);
}

/**
 * irq_domain_remove() - Remove an irq domain.
 * @domain: domain to remove
 *
 * This routine is used to remove an irq domain. The caller must ensure
 * that all mappings within the domain have been disposed of prior to
 * use, depending on the revmap type.
 */
void irq_domain_remove(struct irq_domain *domain)
{
	mutex_lock(&irq_domain_mutex);

	switch (domain->revmap_type) {
	case IRQ_DOMAIN_MAP_LEGACY:
		/*
		 * Legacy domains keep their irq_descs associated for their
		 * entire lifetime, so there is no reverse-map state to
		 * release here.
		 */
		break;
	case IRQ_DOMAIN_MAP_TREE:
		/*
		 * radix_tree_delete() takes care of destroying the root
		 * node when all entries are removed. Shout if there are
		 * any mappings left.
		 */
		WARN_ON(!radix_tree_empty(&domain->revmap_data.tree));
		break;
	case IRQ_DOMAIN_MAP_LINEAR:
		kfree(domain->revmap_data.linear.revmap);
		domain->revmap_data.linear.size = 0;
		break;
	case IRQ_DOMAIN_MAP_NOMAP:
		break;
	}

	list_del(&domain->link);

	/*
	 * If the going away domain is the default one, reset it.
	 */
	if (unlikely(irq_default_domain == domain))
		irq_set_default_host(NULL);

	mutex_unlock(&irq_domain_mutex);

	pr_debug("Removed domain of type %d @0x%p\n",
		 domain->revmap_type, domain);

	irq_domain_free(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove);

static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain,
					     irq_hw_number_t hwirq)
{
	irq_hw_number_t first_hwirq = domain->revmap_data.legacy.first_hwirq;
	int size = domain->revmap_data.legacy.size;

	if (WARN_ON(hwirq < first_hwirq || hwirq >= first_hwirq + size))
		return 0;
	return hwirq - first_hwirq + domain->revmap_data.legacy.first_irq;
}

/**
 * irq_domain_add_simple() - Allocate and register a simple irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in mapping
 * @first_irq: first number of irq block assigned to the domain,
 *	pass zero to assign irqs on-the-fly. This will result in a
 *	legacy domain if first_irq is specified, otherwise a linear
 *	domain is created.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates a legacy irq_domain if first_irq is positive, or a linear
 * domain otherwise. For the legacy domain, IRQ descriptors will also
 * be allocated.
 *
 * This is intended to implement the expected behaviour for most
 * interrupt controllers: a linear mapping is used unless the system
 * requires a legacy mapping in order to support supplying interrupt
 * numbers during non-DT registration of devices.
 */
struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	if (first_irq > 0) {
		int irq_base;

		if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
			/*
			 * Set the descriptor allocator to search for a
			 * 1-to-1 mapping, such as irq_alloc_desc_at().
			 * Use of_node_to_nid() which is defined to
			 * numa_node_id() on platforms that have no custom
			 * implementation.
			 */
			irq_base = irq_alloc_descs(first_irq, first_irq, size,
						   of_node_to_nid(of_node));
			if (irq_base < 0) {
				pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
					first_irq);
				irq_base = first_irq;
			}
		} else
			irq_base = first_irq;

		return irq_domain_add_legacy(of_node, size, irq_base, 0,
					     ops, host_data);
	}

	/* A linear domain is the default */
	return irq_domain_add_linear(of_node, size, ops, host_data);
}
EXPORT_SYMBOL_GPL(irq_domain_add_simple);
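
/*
 * Usage sketch (illustrative only, not part of the original file; the
 * "foo_*" names are hypothetical): a driver for a 16-line controller
 * with no pre-allocated irq range lets the simple variant fall back to
 * a linear domain:
 *
 *	static const struct irq_domain_ops foo_irq_ops = {
 *		.xlate = irq_domain_xlate_onecell,
 *	};
 *
 *	domain = irq_domain_add_simple(np, 16, 0, &foo_irq_ops, NULL);
 *	if (!domain)
 *		return -ENOMEM;
 *
 * Passing a positive first_irq instead would take the legacy path above.
 */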

/**
 * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in legacy mapping
 * @first_irq: first number of irq block assigned to the domain
 * @first_hwirq: first hwirq number to use for the translation. Should normally
 *	be '0', but a positive integer can be used if the effective
 *	hwirqs numbering does not begin at zero.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Note: the map() callback will be called before this function returns
 * for all legacy interrupts except 0 (which is always the invalid irq for
 * a legacy controller).
 */
struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
					 unsigned int size,
					 unsigned int first_irq,
					 irq_hw_number_t first_hwirq,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;
	unsigned int i;

	domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LEGACY, ops, host_data);
	if (!domain)
		return NULL;

	domain->revmap_data.legacy.first_irq = first_irq;
	domain->revmap_data.legacy.first_hwirq = first_hwirq;
	domain->revmap_data.legacy.size = size;

	mutex_lock(&irq_domain_mutex);
	/* Verify that all the irqs are available */
	for (i = 0; i < size; i++) {
		int irq = first_irq + i;
		struct irq_data *irq_data = irq_get_irq_data(irq);

		if (WARN_ON(!irq_data || irq_data->domain)) {
			mutex_unlock(&irq_domain_mutex);
			irq_domain_free(domain);
			return NULL;
		}
	}

	/* Claim all of the irqs before registering a legacy domain */
	for (i = 0; i < size; i++) {
		struct irq_data *irq_data = irq_get_irq_data(first_irq + i);
		irq_data->hwirq = first_hwirq + i;
		irq_data->domain = domain;
	}
	mutex_unlock(&irq_domain_mutex);

	for (i = 0; i < size; i++) {
		int irq = first_irq + i;
		int hwirq = first_hwirq + i;

		/* IRQ0 gets special treatment */
		if (!irq)
			continue;

		/* Legacy flags are left to default at this point,
		 * one can then use irq_create_mapping() to
		 * explicitly change them
		 */
		if (ops->map)
			ops->map(domain, irq, hwirq);

		/* Clear norequest flags */
		irq_clear_status_flags(irq, IRQ_NOREQUEST);
	}

	irq_domain_add(domain);
	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
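
/*
 * Usage sketch (illustrative; "foo_irq_ops" is hypothetical): a classic
 * PC-style controller whose 16 hwirqs must land on the pre-allocated
 * Linux irqs 16..31 would register:
 *
 *	domain = irq_domain_add_legacy(np, 16, 16, 0, &foo_irq_ops, NULL);
 *
 * Note that ops->map() runs for every irq (except irq 0) before this
 * returns, so the ops structure must be fully usable at call time.
 */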

/**
 * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: Number of interrupts in the domain.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 */
struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
					 unsigned int size,
					 const struct irq_domain_ops *ops,
					 void *host_data)
{
	struct irq_domain *domain;
	unsigned int *revmap;

	revmap = kzalloc_node(sizeof(*revmap) * size, GFP_KERNEL,
			      of_node_to_nid(of_node));
	if (WARN_ON(!revmap))
		return NULL;

	domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LINEAR, ops, host_data);
	if (!domain) {
		kfree(revmap);
		return NULL;
	}
	domain->revmap_data.linear.size = size;
	domain->revmap_data.linear.revmap = revmap;
	irq_domain_add(domain);
	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_linear);
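
/*
 * Usage sketch (illustrative; the "foo_*" names are hypothetical): most
 * device-tree based controllers want a linear domain plus a map() hook
 * that installs the chip and handler when a hwirq is first mapped:
 *
 *	static int foo_irq_map(struct irq_domain *d, unsigned int virq,
 *			       irq_hw_number_t hw)
 *	{
 *		irq_set_chip_and_handler(virq, &foo_irq_chip,
 *					 handle_level_irq);
 *		return 0;
 *	}
 *
 *	static const struct irq_domain_ops foo_irq_ops = {
 *		.map = foo_irq_map,
 *		.xlate = irq_domain_xlate_onecell,
 *	};
 *
 *	domain = irq_domain_add_linear(np, 32, &foo_irq_ops, NULL);
 */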

struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
					unsigned int max_irq,
					const struct irq_domain_ops *ops,
					void *host_data)
{
	struct irq_domain *domain = irq_domain_alloc(of_node,
					IRQ_DOMAIN_MAP_NOMAP, ops, host_data);
	if (domain) {
		domain->revmap_data.nomap.max_irq = max_irq ? max_irq : ~0;
		irq_domain_add(domain);
	}
	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_nomap);
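
/*
 * Usage sketch (illustrative): nomap domains suit controllers that can
 * program the hwirq numbers they raise, so the Linux irq number doubles
 * as the hwirq (see irq_create_direct_mapping() below):
 *
 *	domain = irq_domain_add_nomap(np, 0, &foo_irq_ops, NULL);
 *	virq = irq_create_direct_mapping(domain);
 *
 * A max_irq of 0 here means "no limit" (~0).
 */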

/**
 * irq_domain_add_tree() - Allocate and register a radix tree irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 */
struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
				       const struct irq_domain_ops *ops,
				       void *host_data)
{
	struct irq_domain *domain = irq_domain_alloc(of_node,
					IRQ_DOMAIN_MAP_TREE, ops, host_data);
	if (domain) {
		INIT_RADIX_TREE(&domain->revmap_data.tree, GFP_KERNEL);
		irq_domain_add(domain);
	}
	return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_tree);
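
/*
 * Usage sketch (illustrative): a tree domain fits controllers with a
 * large, sparse hwirq space where a linear table would waste memory:
 *
 *	domain = irq_domain_add_tree(np, &foo_irq_ops, NULL);
 *	virq = irq_create_mapping(domain, 0x10000);
 *
 * Reverse lookups then go through the radix tree in irq_find_mapping().
 */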

/**
 * irq_find_host() - Locates a domain for a given device node
 * @node: device-tree node of the interrupt controller
 */
struct irq_domain *irq_find_host(struct device_node *node)
{
	struct irq_domain *h, *found = NULL;
	int rc;

	/* We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far
	 * yet though...
	 */
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(h, &irq_domain_list, link) {
		if (h->ops->match)
			rc = h->ops->match(h, node);
		else
			rc = (h->of_node != NULL) && (h->of_node == node);

		if (rc) {
			found = h;
			break;
		}
	}
	mutex_unlock(&irq_domain_mutex);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);

/**
 * irq_set_default_host() - Set a "default" irq domain
 * @domain: default domain pointer
 *
 * For convenience, it's possible to set a "default" domain that will be used
 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
 * platforms that want to manipulate a few hard coded interrupt numbers that
 * aren't properly represented in the device-tree.
 */
void irq_set_default_host(struct irq_domain *domain)
{
	pr_debug("Default domain set to @0x%p\n", domain);

	irq_default_domain = domain;
}
EXPORT_SYMBOL_GPL(irq_set_default_host);

static void irq_domain_disassociate_many(struct irq_domain *domain,
					 unsigned int irq_base, int count)
{
	/*
	 * disassociate in reverse order;
	 * not strictly necessary, but nice for unwinding
	 */
	while (count--) {
		int irq = irq_base + count;
		struct irq_data *irq_data = irq_get_irq_data(irq);
		irq_hw_number_t hwirq;

		if (WARN_ON(!irq_data || irq_data->domain != domain))
			continue;

		hwirq = irq_data->hwirq;
		irq_set_status_flags(irq, IRQ_NOREQUEST);

		/* remove chip and handler */
		irq_set_chip_and_handler(irq, NULL, NULL);

		/* Make sure it's completed */
		synchronize_irq(irq);

		/* Tell the PIC about it */
		if (domain->ops->unmap)
			domain->ops->unmap(domain, irq);
		smp_mb();

		irq_data->domain = NULL;
		irq_data->hwirq = 0;

		/* Clear reverse map */
		switch (domain->revmap_type) {
		case IRQ_DOMAIN_MAP_LINEAR:
			if (hwirq < domain->revmap_data.linear.size)
				domain->revmap_data.linear.revmap[hwirq] = 0;
			break;
		case IRQ_DOMAIN_MAP_TREE:
			mutex_lock(&revmap_trees_mutex);
			radix_tree_delete(&domain->revmap_data.tree, hwirq);
			mutex_unlock(&revmap_trees_mutex);
			break;
		}
	}
}

int irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
			      irq_hw_number_t hwirq_base, int count)
{
	unsigned int virq = irq_base;
	irq_hw_number_t hwirq = hwirq_base;
	int i, ret;

	pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
		of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count);

	for (i = 0; i < count; i++) {
		struct irq_data *irq_data = irq_get_irq_data(virq + i);

		if (WARN(!irq_data, "error: irq_desc not allocated; "
			 "irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
			return -EINVAL;
		if (WARN(irq_data->domain, "error: irq_desc already associated; "
			 "irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
			return -EINVAL;
	}

	for (i = 0; i < count; i++, virq++, hwirq++) {
		struct irq_data *irq_data = irq_get_irq_data(virq);

		irq_data->hwirq = hwirq;
		irq_data->domain = domain;
		if (domain->ops->map) {
			ret = domain->ops->map(domain, virq, hwirq);
			if (ret != 0) {
				/*
				 * If map() returns -EPERM, this interrupt is protected
				 * by the firmware or some other service and shall not
				 * be mapped. Don't bother telling the user about it.
				 */
				if (ret != -EPERM) {
					pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
						of_node_full_name(domain->of_node), hwirq, virq, ret);
				}
				irq_data->domain = NULL;
				irq_data->hwirq = 0;
				continue;
			}
		}

		switch (domain->revmap_type) {
		case IRQ_DOMAIN_MAP_LINEAR:
			if (hwirq < domain->revmap_data.linear.size)
				domain->revmap_data.linear.revmap[hwirq] = virq;
			break;
		case IRQ_DOMAIN_MAP_TREE:
			mutex_lock(&revmap_trees_mutex);
			radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
			mutex_unlock(&revmap_trees_mutex);
			break;
		}

		irq_clear_status_flags(virq, IRQ_NOREQUEST);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_associate_many);

/**
 * irq_create_direct_mapping() - Allocate an irq for direct mapping
 * @domain: domain to allocate the irq for or NULL for default domain
 *
 * This routine is used for irq controllers which can choose the hardware
 * interrupt numbers they generate. In such a case it's simplest to use
 * the linux irq as the hardware interrupt number.
 */
unsigned int irq_create_direct_mapping(struct irq_domain *domain)
{
	unsigned int virq;

	if (domain == NULL)
		domain = irq_default_domain;

	if (WARN_ON(!domain || domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP))
		return 0;

	virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
	if (!virq) {
		pr_debug("create_direct virq allocation failed\n");
		return 0;
	}
	if (virq >= domain->revmap_data.nomap.max_irq) {
		pr_err("ERROR: no free irqs available below %i maximum\n",
			domain->revmap_data.nomap.max_irq);
		irq_free_desc(virq);
		return 0;
	}
	pr_debug("create_direct obtained virq %d\n", virq);

	if (irq_domain_associate(domain, virq, virq)) {
		irq_free_desc(virq);
		return 0;
	}

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
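
/*
 * Usage sketch (illustrative): with a nomap domain the returned virq is
 * also the hwirq, which the driver then programs into the hardware
 * ("foo_hw_set_irq_number" is a hypothetical helper):
 *
 *	virq = irq_create_direct_mapping(domain);
 *	if (!virq)
 *		return -ENOSPC;
 *	foo_hw_set_irq_number(dev, virq);
 *
 * A return value of 0 always means failure, never a valid mapping.
 */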

/**
 * irq_create_mapping() - Map a hardware interrupt into linux irq space
 * @domain: domain owning this hardware interrupt or NULL for default domain
 * @hwirq: hardware irq number in that domain space
 *
 * Only one mapping per hardware interrupt is permitted. Returns a linux
 * irq number.
 * If the sense/trigger is to be specified, set_irq_type() should be called
 * on the number returned from that call.
 */
unsigned int irq_create_mapping(struct irq_domain *domain,
				irq_hw_number_t hwirq)
{
	unsigned int hint;
	int virq;

	pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL) {
		pr_warning("irq_create_mapping called for NULL domain, hwirq=%lx\n",
			   hwirq);
		WARN_ON(1);
		return 0;
	}
	pr_debug("-> using domain @%p\n", domain);

	/* Check if mapping already exists */
	virq = irq_find_mapping(domain, hwirq);
	if (virq) {
		pr_debug("-> existing mapping on virq %d\n", virq);
		return virq;
	}

	/* Get a virtual interrupt number */
	if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
		return irq_domain_legacy_revmap(domain, hwirq);

	/* Allocate a virtual interrupt number */
	hint = hwirq % nr_irqs;
	if (hint == 0)
		hint++;
	virq = irq_alloc_desc_from(hint, of_node_to_nid(domain->of_node));
	if (virq <= 0)
		virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
	if (virq <= 0) {
		pr_debug("-> virq allocation failed\n");
		return 0;
	}

	if (irq_domain_associate(domain, virq, hwirq)) {
		irq_free_desc(virq);
		return 0;
	}

	pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
		hwirq, of_node_full_name(domain->of_node), virq);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
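
/*
 * Usage sketch (illustrative; "foo_handler" is hypothetical): a driver
 * that knows its hwirq but not its Linux irq number creates (or reuses)
 * the mapping, then claims the resulting virq as usual:
 *
 *	virq = irq_create_mapping(domain, hwirq);
 *	if (!virq)
 *		return -EINVAL;
 *	ret = request_irq(virq, foo_handler, 0, "foo", dev);
 *
 * Calling this twice for the same hwirq returns the same virq.
 */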

/**
 * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
 * @domain: domain owning the interrupt range
 * @irq_base: beginning of linux IRQ range
 * @hwirq_base: beginning of hardware IRQ range
 * @count: Number of interrupts to map
 *
 * This routine is used for allocating and mapping a range of hardware
 * irqs to linux irqs where the linux irq numbers are at pre-defined
 * locations. For use by controllers that already have static mappings
 * to insert in to the domain.
 *
 * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
 * domain insertion.
 *
 * 0 is returned upon success, while any failure to establish a static
 * mapping is treated as an error.
 */
int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
			       irq_hw_number_t hwirq_base, int count)
{
	int ret;

	ret = irq_alloc_descs(irq_base, irq_base, count,
			      of_node_to_nid(domain->of_node));
	if (unlikely(ret < 0))
		return ret;

	ret = irq_domain_associate_many(domain, irq_base, hwirq_base, count);
	if (unlikely(ret < 0)) {
		irq_free_descs(irq_base, count);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
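
/*
 * Usage sketch (illustrative): a controller with a fixed layout, say
 * hwirqs 32..47 that must appear as Linux irqs 64..79, can populate an
 * existing domain in one call:
 *
 *	ret = irq_create_strict_mappings(domain, 64, 32, 16);
 *	if (ret)
 *		return ret;
 */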

unsigned int irq_create_of_mapping(struct device_node *controller,
				   const u32 *intspec, unsigned int intsize)
{
	struct irq_domain *domain;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	unsigned int virq;

	domain = controller ? irq_find_host(controller) : irq_default_domain;
	if (!domain) {
#ifdef CONFIG_MIPS
		/*
		 * Workaround to avoid breaking interrupt controller drivers
		 * that don't yet register an irq_domain. This is temporary
		 * code and is scheduled for removal.
		 */
		if (intsize > 0)
			return intspec[0];
#endif
		pr_warning("no irq domain found for %s !\n",
			   of_node_full_name(controller));
		return 0;
	}

	/* If domain has no translation, then we assume interrupt line */
	if (domain->ops->xlate == NULL)
		hwirq = intspec[0];
	else {
		if (domain->ops->xlate(domain, controller, intspec, intsize,
				       &hwirq, &type))
			return 0;
	}

	/* Create mapping */
	virq = irq_create_mapping(domain, hwirq);
	if (!virq)
		return virq;

	/* Set type if specified and different than the current one */
	if (type != IRQ_TYPE_NONE &&
	    type != irqd_get_trigger_type(irq_get_irq_data(virq)))
		irq_set_irq_type(virq, type);
	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);

/**
 * irq_dispose_mapping() - Unmap an interrupt
 * @virq: linux irq number of the interrupt to unmap
 */
void irq_dispose_mapping(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	struct irq_domain *domain;

	if (!virq || !irq_data)
		return;

	domain = irq_data->domain;
	if (WARN_ON(domain == NULL))
		return;

	/* Never unmap legacy interrupts */
	if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
		return;

	irq_domain_disassociate_many(domain, virq, 1);
	irq_free_desc(virq);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);
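
/*
 * Usage sketch (illustrative): teardown mirrors creation; a driver that
 * obtained a virq via irq_create_mapping() releases it on remove:
 *
 *	free_irq(virq, dev);
 *	irq_dispose_mapping(virq);
 *
 * Legacy mappings are deliberately left alone, as seen above.
 */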

/**
 * irq_find_mapping() - Find a linux irq from an hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 */
unsigned int irq_find_mapping(struct irq_domain *domain,
			      irq_hw_number_t hwirq)
{
	struct irq_data *data;

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL)
		return 0;

	switch (domain->revmap_type) {
	case IRQ_DOMAIN_MAP_LEGACY:
		return irq_domain_legacy_revmap(domain, hwirq);
	case IRQ_DOMAIN_MAP_LINEAR:
		return irq_linear_revmap(domain, hwirq);
	case IRQ_DOMAIN_MAP_TREE:
		rcu_read_lock();
		data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
		rcu_read_unlock();
		if (data)
			return data->irq;
		break;
	case IRQ_DOMAIN_MAP_NOMAP:
		data = irq_get_irq_data(hwirq);
		if (data && (data->domain == domain) && (data->hwirq == hwirq))
			return hwirq;
		break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);

/**
 * irq_linear_revmap() - Find a linux irq from a hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 *
 * This is a fast path that can be called directly by irq controller code to
 * save a handful of instructions. It is always safe to call, but won't find
 * irqs mapped using the radix tree.
 */
unsigned int irq_linear_revmap(struct irq_domain *domain,
			       irq_hw_number_t hwirq)
{
	BUG_ON(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR);

	/* Check revmap bounds; complain if exceeded */
	if (WARN_ON(hwirq >= domain->revmap_data.linear.size))
		return 0;

	return domain->revmap_data.linear.revmap[hwirq];
}
EXPORT_SYMBOL_GPL(irq_linear_revmap);
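
/*
 * Usage sketch (illustrative; the "foo_*" and FOO_* names are
 * hypothetical): a chained flow handler typically reads a hardware
 * status register and uses the fast linear lookup per pending bit:
 *
 *	status = readl(foo->base + FOO_IRQ_STATUS);
 *	while (status) {
 *		int bit = __ffs(status);
 *
 *		generic_handle_irq(irq_linear_revmap(domain, bit));
 *		status &= ~BIT(bit);
 *	}
 */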

#ifdef CONFIG_IRQ_DOMAIN_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
	unsigned long flags;
	struct irq_desc *desc;
	const char *p;
	static const char none[] = "none";
	void *data;
	int i;

	seq_printf(m, "%-5s %-7s %-15s %-*s %s\n", "irq", "hwirq",
		   "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
		   "domain name");

	for (i = 1; i < nr_irqs; i++) {
		desc = irq_to_desc(i);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		if (desc->action && desc->action->handler) {
			struct irq_chip *chip;

			seq_printf(m, "%5d ", i);
			seq_printf(m, "0x%05lx ", desc->irq_data.hwirq);

			chip = irq_desc_get_chip(desc);
			if (chip && chip->name)
				p = chip->name;
			else
				p = none;
			seq_printf(m, "%-15s ", p);

			data = irq_desc_get_chip_data(desc);
			seq_printf(m, data ? "0x%p " : " %p ", data);

			if (desc->irq_data.domain)
				p = of_node_full_name(desc->irq_data.domain->of_node);
			else
				p = none;
			seq_printf(m, "%s\n", p);
		}

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	return 0;
}

static int virq_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
	.open = virq_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init irq_debugfs_init(void)
{
	if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL,
				NULL, &virq_debug_fops) == NULL)
		return -ENOMEM;

	return 0;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_IRQ_DOMAIN_DEBUG */

/**
 * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
 *
 * Device Tree IRQ specifier translation function which works with one cell
 * bindings where the cell value maps directly to the hwirq number.
 */
int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
			     const u32 *intspec, unsigned int intsize,
			     unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = IRQ_TYPE_NONE;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);

/**
 * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with two cell
 * bindings where the cell values map directly to the hwirq number and
 * linux irq flags.
 */
int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
			     const u32 *intspec, unsigned int intsize,
			     irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 2))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);
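
/*
 * Usage sketch (illustrative): with twocell translation a device tree
 * interrupt specifier such as
 *
 *	interrupts = <5 4>;	(hwirq 5, IRQ_TYPE_LEVEL_HIGH)
 *
 * yields *out_hwirq = 5 and *out_type = 4 & IRQ_TYPE_SENSE_MASK.
 */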

/**
 * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with either one
 * or two cell bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 *
 * Note: don't use this function unless your interrupt controller explicitly
 * supports both one and two cell bindings. For the majority of controllers
 * the _onecell() or _twocell() variants above should be used.
 */
int irq_domain_xlate_onetwocell(struct irq_domain *d,
				struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = (intsize > 1) ? intspec[1] : IRQ_TYPE_NONE;
	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);

const struct irq_domain_ops irq_domain_simple_ops = {
	.xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
#ifdef CONFIG_OF_IRQ
void irq_domain_generate_simple(const struct of_device_id *match,
				u64 phys_base, unsigned int irq_start)
{
	struct device_node *node;
	pr_debug("looking for phys_base=%llx, irq_start=%i\n",
		(unsigned long long) phys_base, (int) irq_start);
	node = of_find_matching_node_by_address(NULL, match, phys_base);
	if (node)
		irq_domain_add_legacy(node, 32, irq_start, 0,
				      &irq_domain_simple_ops, NULL);
}
EXPORT_SYMBOL_GPL(irq_domain_generate_simple);
#endif /* CONFIG_OF_IRQ */