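// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code. Detailed
 * information is available in Documentation/core-api/genericirq.rst
 *
 */
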
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>
#include <linux/irqdomain.h>
#include <linux/sysfs.h>

#include "internals.h"

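/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */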
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static int __init irq_affinity_setup(char *str)
{
	alloc_bootmem_cpumask_var(&irq_default_affinity);
	cpulist_parse(str, irq_default_affinity);
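
	/*
	 * Set at least the boot cpu. We don't want to end up with
	 * bootup crashes caused by allocation failures in the
	 * interrupt code.
	 */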
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
	return 1;
}
__setup("irqaffinity=", irq_affinity_setup);

static void __init init_irq_default_affinity(void)
{
	if (!cpumask_available(irq_default_affinity))
		zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	if (cpumask_empty(irq_default_affinity))
		cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
				     GFP_KERNEL, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity,
				     GFP_KERNEL, node)) {
		free_cpumask_var(desc->irq_common_data.affinity);
		return -ENOMEM;
	}
#endif

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
		free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
		free_cpumask_var(desc->irq_common_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node,
			  const struct cpumask *affinity)
{
	if (!affinity)
		affinity = irq_default_affinity;
	cpumask_copy(desc->irq_common_data.affinity, affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
#ifdef CONFIG_NUMA
	desc->irq_common_data.node = node;
#endif
}

#else
static inline int
alloc_masks(struct irq_desc *desc, int node) { return 0; }
static inline void
desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
			      const struct cpumask *affinity, struct module *owner)
{
	int cpu;

	desc->irq_common_data.handler_data = NULL;
	desc->irq_common_data.msi_desc = NULL;

	desc->irq_data.common = &desc->irq_common_data;
	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->tot_count = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node, affinity);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static void irq_kobj_release(struct kobject *kobj);

#ifdef CONFIG_SYSFS
static struct kobject *irq_kobj_base;

#define IRQ_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

static ssize_t per_cpu_count_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	int cpu, irq = desc->irq_data.irq;
	ssize_t ret = 0;
	char *p = "";

	for_each_possible_cpu(cpu) {
		unsigned int c = kstat_irqs_cpu(irq, cpu);

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c);
		p = ",";
	}

	ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
	return ret;
}
IRQ_ATTR_RO(per_cpu_count);

static ssize_t chip_name_show(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->irq_data.chip && desc->irq_data.chip->name) {
		ret = scnprintf(buf, PAGE_SIZE, "%s\n",
				desc->irq_data.chip->name);
	}
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(chip_name);

static ssize_t hwirq_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->irq_data.domain)
		ret = sprintf(buf, "%d\n", (int)desc->irq_data.hwirq);
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(hwirq);

static ssize_t type_show(struct kobject *kobj,
			 struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	ret = sprintf(buf, "%s\n",
		      irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(type);

static ssize_t wakeup_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	ret = sprintf(buf, "%s\n",
		      irqd_is_wakeup_set(&desc->irq_data) ? "enabled" : "disabled");
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(wakeup);

static ssize_t name_show(struct kobject *kobj,
			 struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->name)
		ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(name);

static ssize_t actions_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	struct irqaction *action;
	ssize_t ret = 0;
	char *p = "";

	raw_spin_lock_irq(&desc->lock);
	for (action = desc->action; action != NULL; action = action->next) {
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
				 p, action->name);
		p = ",";
	}
	raw_spin_unlock_irq(&desc->lock);

	if (ret)
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");

	return ret;
}
IRQ_ATTR_RO(actions);

static struct attribute *irq_attrs[] = {
	&per_cpu_count_attr.attr,
	&chip_name_attr.attr,
	&hwirq_attr.attr,
	&type_attr.attr,
	&wakeup_attr.attr,
	&name_attr.attr,
	&actions_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(irq);

static struct kobj_type irq_kobj_type = {
	.release	= irq_kobj_release,
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_groups = irq_groups,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc)
{
	if (irq_kobj_base) {
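		/*
		 * Continue even in case of failure as this is nothing
		 * crucial.
		 */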
		if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
			pr_warn("Failed to add kobject for irq %d\n", irq);
	}
}

static void irq_sysfs_del(struct irq_desc *desc)
{
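	/*
	 * If irq_sysfs_init() has not yet been invoked (early boot), then
	 * irq_kobj_base is NULL and the descriptor was never added.
	 * kobject_del() complains about an object with no parent, so make
	 * it conditional.
	 */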
	if (irq_kobj_base)
		kobject_del(&desc->kobj);
}

static int __init irq_sysfs_init(void)
{
	struct irq_desc *desc;
	int irq;
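
	/* Prevent concurrent irq alloc/free */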
	irq_lock_sparse();

	irq_kobj_base = kobject_create_and_add("irq", kernel_kobj);
	if (!irq_kobj_base) {
		irq_unlock_sparse();
		return -ENOMEM;
	}
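
	/* Add the already allocated interrupts */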
	for_each_irq_desc(irq, desc)
		irq_sysfs_add(irq, desc);
	irq_unlock_sparse();

	return 0;
}
postcore_initcall(irq_sysfs_init);

#else /* !CONFIG_SYSFS */

static struct kobj_type irq_kobj_type = {
	.release	= irq_kobj_release,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
static void irq_sysfs_del(struct irq_desc *desc) {}

#endif /* CONFIG_SYSFS */

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_common_data.affinity);
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

void irq_lock_sparse(void)
{
	mutex_lock(&sparse_irq_lock);
}

void irq_unlock_sparse(void)
{
	mutex_unlock(&sparse_irq_lock);
}

static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
				   const struct cpumask *affinity,
				   struct module *owner)
{
	struct irq_desc *desc;

	desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
	if (!desc)
		return NULL;

	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	mutex_init(&desc->request_mutex);
	init_rcu_head(&desc->rcu);

	desc_set_defaults(irq, desc, node, affinity, owner);
	irqd_set(&desc->irq_data, flags);
	kobject_init(&desc->kobj, &irq_kobj_type);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void irq_kobj_release(struct kobject *kobj)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static void delayed_free_desc(struct rcu_head *rhp)
{
	struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);

	kobject_put(&desc->kobj);
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	irq_remove_debugfs_entry(desc);
	unregister_irq_proc(irq, desc);
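
	/*
	 * sparse_irq_lock protects also show_interrupts() and
	 * kstat_irqs_usr(). Once we deleted the descriptor from the
	 * sparse tree we can free it. Access in proc will fail to
	 * lookup the descriptor.
	 *
	 * The sysfs entry must be serialized against a concurrent
	 * irq_sysfs_init() as well.
	 */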
	irq_sysfs_del(desc);
	delete_irq_desc(irq);
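
	/*
	 * The descriptor, masks and stat fields are freed via RCU (see
	 * delayed_free_desc()), so that lockless irq_to_desc() lookups
	 * still in flight observe a grace period before the memory is
	 * released.
	 */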
	call_rcu(&desc->rcu, delayed_free_desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       const struct irq_affinity_desc *affinity,
		       struct module *owner)
{
	struct irq_desc *desc;
	int i;
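
	/* Validate affinity mask(s) */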
	if (affinity) {
		for (i = 0; i < cnt; i++) {
			if (cpumask_empty(&affinity[i].mask))
				return -EINVAL;
		}
	}

	for (i = 0; i < cnt; i++) {
		const struct cpumask *mask = NULL;
		unsigned int flags = 0;

		if (affinity) {
			if (affinity->is_managed) {
				flags = IRQD_AFFINITY_MANAGED |
					IRQD_MANAGED_SHUTDOWN;
			}
			mask = &affinity->mask;
			node = cpu_to_node(cpumask_first(mask));
			affinity++;
		}

		desc = alloc_desc(start + i, node, flags, mask, owner);
		if (!desc)
			goto err;
		irq_insert_desc(start + i, desc);
		irq_sysfs_add(start + i, desc);
		irq_add_debugfs_entry(start + i, desc);
	}
	bitmap_set(allocated_irqs, start, cnt);
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);
	return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > IRQ_BITMAP_BITS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();
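
	/* Let arch update nr_irqs and return the nr of preallocated irqs */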
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS: %d, nr_irqs: %d, preallocated irqs: %d\n",
	       NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;

	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node, 0, NULL, NULL);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		mutex_init(&desc[i].request_mutex);
		desc_set_defaults(i, &desc[i], node, NULL, NULL);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
			      const struct irq_affinity_desc *affinity,
			      struct module *owner)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		struct irq_desc *desc = irq_to_desc(start + i);

		desc->owner = owner;
	}
	bitmap_set(allocated_irqs, start, cnt);
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

void irq_mark_irq(unsigned int irq)
{
	mutex_lock(&sparse_irq_lock);
	bitmap_set(allocated_irqs, irq, 1);
	mutex_unlock(&sparse_irq_lock);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq)
{
	free_desc(irq);
}
#endif

#endif /* !CONFIG_SPARSE_IRQ */
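
/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 *
 */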
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(desc);
	return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);

#ifdef CONFIG_HANDLE_DOMAIN_IRQ
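/**
 * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain
 * @domain:	The domain where to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 * @lookup:	Whether to perform the domain lookup or not
 * @regs:	Register file coming from the low-level handling code
 *
 * Returns:	0 on success, or -EINVAL if conversion has failed
 */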
int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
			bool lookup, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq = hwirq;
	int ret = 0;

	irq_enter();

#ifdef CONFIG_IRQ_DOMAIN
	if (lookup)
		irq = irq_find_mapping(domain, hwirq);
#endif
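
	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */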
	if (unlikely(!irq || irq >= nr_irqs)) {
		ack_bad_irq(irq);
		ret = -EINVAL;
	} else {
		generic_handle_irq(irq);
	}

	irq_exit();
	set_irq_regs(old_regs);
	return ret;
}

#ifdef CONFIG_IRQ_DOMAIN
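/**
 * handle_domain_nmi - Invoke the handler for a HW irq belonging to a domain
 * @domain:	The domain where to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 * @regs:	Register file coming from the low-level handling code
 *
 * Returns:	0 on success, or -EINVAL if conversion has failed
 *
 * This function must be called from an NMI context.
 */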
int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq,
		      struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq;
	int ret = 0;
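
	/*
	 * NMI context needs to be setup earlier in order to deal with tracing.
	 */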
	WARN_ON(!in_nmi());

	irq = irq_find_mapping(domain, hwirq);
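
	/*
	 * ack_bad_irq is not NMI-safe, just report
	 * an invalid interrupt.
	 */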
	if (likely(irq))
		generic_handle_irq(irq);
	else
		ret = -EINVAL;

	set_irq_regs(old_regs);
	return ret;
}
#endif /* CONFIG_IRQ_DOMAIN */
#endif /* CONFIG_HANDLE_DOMAIN_IRQ */
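
/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */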
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	mutex_lock(&sparse_irq_lock);
	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);
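
/**
 * __irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 * @owner:	Owning module (can be NULL)
 * @affinity:	Optional pointer to an affinity mask array of size @cnt which
 *		hints where the irq descriptors should be allocated and which
 *		default affinities to use
 *
 * Returns the first irq number or error code
 */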
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner, const struct irq_affinity_desc *affinity)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	} else {
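		/*
		 * For interrupts which are freely allocated the
		 * architecture can force a lower bound to the @from
		 * argument. x86 uses this to exclude the GSI space.
		 */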
		from = arch_dynirq_lower_bound(from);
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto unlock;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto unlock;
	}
	ret = alloc_descs(start, cnt, node, affinity, owner);
unlock:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);

#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
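/**
 * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware
 * @cnt:	number of interrupts to allocate
 * @node:	node on which to allocate
 *
 * Returns an interrupt number > 0 or 0, if the allocation fails.
 */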
unsigned int irq_alloc_hwirqs(int cnt, int node)
{
	int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL, NULL);

	if (irq < 0)
		return 0;

	for (i = irq; cnt > 0; i++, cnt--) {
		if (arch_setup_hwirq(i, node))
			goto err;
		irq_clear_status_flags(i, _IRQ_NOREQUEST);
	}
	return irq;

err:
	for (i--; i >= irq; i--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(irq, cnt);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);
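
/**
 * irq_free_hwirqs - Free irq descriptor and cleanup the hardware
 * @from:	Free from irq number
 * @cnt:	number of interrupts to free
 *
 */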
void irq_free_hwirqs(unsigned int from, int cnt)
{
	int i, j;

	for (i = from, j = cnt; j > 0; i++, j--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(from, cnt);
}
EXPORT_SYMBOL_GPL(irq_free_hwirqs);
#endif /* CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ */
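
/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */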
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (check & _IRQ_DESC_CHECK) {
			if ((check & _IRQ_DESC_PERCPU) &&
			    !irq_settings_is_per_cpu_devid(desc))
				return NULL;

			if (!(check & _IRQ_DESC_PERCPU) &&
			    irq_settings_is_per_cpu_devid(desc))
				return NULL;
		}

		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}

int irq_set_percpu_devid_partition(unsigned int irq,
				   const struct cpumask *affinity)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;

	if (desc->percpu_enabled)
		return -EINVAL;

	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);
	if (!desc->percpu_enabled)
		return -ENOMEM;

	if (affinity)
		desc->percpu_affinity = affinity;
	else
		desc->percpu_affinity = cpu_possible_mask;

	irq_set_percpu_devid_flags(irq);
	return 0;
}

int irq_set_percpu_devid(unsigned int irq)
{
	return irq_set_percpu_devid_partition(irq, NULL);
}

int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !desc->percpu_enabled)
		return -EINVAL;

	if (affinity)
		cpumask_copy(affinity, desc->percpu_affinity);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_get_percpu_devid_partition);

void kstat_incr_irq_this_cpu(unsigned int irq)
{
	kstat_incr_irqs_this_cpu(irq_to_desc(irq));
}
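
/**
 * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
 * @irq:	The interrupt number
 * @cpu:	The cpu number
 *
 * Returns the sum of interrupt counts on @cpu since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */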
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

static bool irq_is_nmi(struct irq_desc *desc)
{
	return desc->istate & IRQS_NMI;
}
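
/**
 * kstat_irqs - Get the statistics for an interrupt
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */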
unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned int sum = 0;
	int cpu;

	if (!desc || !desc->kstat_irqs)
		return 0;
	if (!irq_settings_is_per_cpu_devid(desc) &&
	    !irq_settings_is_per_cpu(desc) &&
	    !irq_is_nmi(desc))
		return desc->tot_count;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}
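
/**
 * kstat_irqs_usr - Get the statistics for an interrupt
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for @irq.
 * Contrary to kstat_irqs() this can be called from any context.
 * It uses rcu since a concurrent removal of an interrupt descriptor is
 * observing an rcu grace period before delayed_free_desc()/irq_kobj_release().
 */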
unsigned int kstat_irqs_usr(unsigned int irq)
{
	unsigned int sum;

	rcu_read_lock();
	sum = kstat_irqs(irq);
	rcu_read_unlock();
	return sum;
}