// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/sched/isolation.h>
#include <uapi/linux/sched/types.h>
#include <linux/task_work.h>

#include "internals.h"
26
#if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
__read_mostly bool force_irqthreads;
EXPORT_SYMBOL_GPL(force_irqthreads);

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif
38
static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
{
	struct irq_data *irqd = irq_desc_get_irq_data(desc);
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section.  This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);

		/*
		 * If requested and supported, check at the chip whether it
		 * is in flight at the hardware level, i.e. already pending
		 * in a CPU and waiting for service and acknowledge.
		 */
		if (!inprogress && sync_chip) {
			/*
			 * Ignore the return code. inprogress is only updated
			 * when the chip supports it.
			 */
			__irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
						&inprogress);
		}
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 *	synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending hard IRQ handlers for this
 *	interrupt to complete before returning. If you use this
 *	function while holding a resource the IRQ handler may need you
 *	will deadlock. It does not take associated threaded handlers
 *	into account.
 *
 *	Do not use this for shutdown scenarios where you must be sure
 *	that all parts (hardirq and threaded handler) have completed.
 *
 *	Returns: false if a threaded handler is active.
 *
 *	This function may be called - with care - from IRQ context. It
 *	does not check whether there is an interrupt in flight at the
 *	hardware level, but not serviced yet, as this might deadlock
 *	when called with interrupts disabled and the target CPU of the
 *	interrupt is the current CPU.
 */
bool synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc, false);
		return !atomic_read(&desc->threads_active);
	}

	return true;
}
EXPORT_SYMBOL(synchronize_hardirq);

/**
 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	Can only be called from preemptible code as it might sleep when
 *	an interrupt thread is associated to @irq.
 *
 *	It optionally makes sure (when the irq chip supports that method)
 *	that the interrupt is not pending in any CPU and waiting for
 *	service.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc, true);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);
143
144#ifdef CONFIG_SMP
145cpumask_var_t irq_default_affinity;
146
147static bool __irq_can_set_affinity(struct irq_desc *desc)
148{
149 if (!desc || !irqd_can_balance(&desc->irq_data) ||
150 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
151 return false;
152 return true;
153}

/**
 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 *	@irq:		Interrupt to check
 *
 */
160int irq_can_set_affinity(unsigned int irq)
161{
162 return __irq_can_set_affinity(irq_to_desc(irq));
163}

/**
 * irq_can_set_affinity_usr - Check if affinity of a irq can be set from user space
 * @irq:	Interrupt to check
 *
 * Like irq_can_set_affinity() above, but additionally checks for the
 * AFFINITY_MANAGED flag.
 */
172bool irq_can_set_affinity_usr(unsigned int irq)
173{
174 struct irq_desc *desc = irq_to_desc(irq);
175
176 return __irq_can_set_affinity(desc) &&
177 !irqd_affinity_is_managed(&desc->irq_data);
178}

/**
 *	irq_set_thread_affinity - Notify irq threads to adjust affinity
 *	@desc:		irq descriptor which has affinity changed
 *
 *	We just set IRQTF_AFFINITY and delegate the affinity setting
 *	to the interrupt thread itself. We can not call
 *	set_cpus_allowed_ptr() here as we hold desc->lock and this
 *	code can be called from hard interrupt context.
 */
189void irq_set_thread_affinity(struct irq_desc *desc)
190{
191 struct irqaction *action;
192
193 for_each_action_of_desc(desc, action)
194 if (action->thread)
195 set_bit(IRQTF_AFFINITY, &action->thread_flags);
196}
197
198#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
199static void irq_validate_effective_affinity(struct irq_data *data)
200{
201 const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
202 struct irq_chip *chip = irq_data_get_irq_chip(data);
203
204 if (!cpumask_empty(m))
205 return;
206 pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
207 chip->name, data->irq);
208}
209
210static inline void irq_init_effective_affinity(struct irq_data *data,
211 const struct cpumask *mask)
212{
213 cpumask_copy(irq_data_get_effective_affinity_mask(data), mask);
214}
215#else
216static inline void irq_validate_effective_affinity(struct irq_data *data) { }
217static inline void irq_init_effective_affinity(struct irq_data *data,
218 const struct cpumask *mask) { }
219#endif
220
221int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
222 bool force)
223{
224 struct irq_desc *desc = irq_data_to_desc(data);
225 struct irq_chip *chip = irq_data_get_irq_chip(data);
226 int ret;
227
228 if (!chip || !chip->irq_set_affinity)
229 return -EINVAL;

	/*
	 * If this is a managed interrupt and housekeeping is enabled on
	 * it check whether the requested affinity mask intersects with
	 * a housekeeping CPU. If so, then remove the isolated CPUs from
	 * the mask and just keep the housekeeping CPU(s). This prevents
	 * the affinity setter from routing the interrupt to an isolated
	 * CPU to avoid that I/O submitted from a housekeeping CPU causes
	 * interrupts on an isolated one.
	 *
	 * If the masks do not intersect or include online CPU(s) then
	 * keep the requested mask. The isolated target CPUs are only
	 * receiving interrupts when the I/O operation was submitted
	 * directly from them.
	 *
	 * If all housekeeping CPUs in the affinity mask are offline, the
	 * interrupt will be migrated by the CPU hotplug code once a
	 * housekeeping CPU which belongs to the affinity mask comes
	 * online.
	 */
250 if (irqd_affinity_is_managed(data) &&
251 housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) {
252 const struct cpumask *hk_mask, *prog_mask;
253
254 static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
255 static struct cpumask tmp_mask;
256
257 hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);
258
259 raw_spin_lock(&tmp_mask_lock);
260 cpumask_and(&tmp_mask, mask, hk_mask);
261 if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
262 prog_mask = mask;
263 else
264 prog_mask = &tmp_mask;
265 ret = chip->irq_set_affinity(data, prog_mask, force);
266 raw_spin_unlock(&tmp_mask_lock);
267 } else {
268 ret = chip->irq_set_affinity(data, mask, force);
269 }
270 switch (ret) {
271 case IRQ_SET_MASK_OK:
272 case IRQ_SET_MASK_OK_DONE:
273 cpumask_copy(desc->irq_common_data.affinity, mask);
		/* fall through */
275 case IRQ_SET_MASK_OK_NOCOPY:
276 irq_validate_effective_affinity(data);
277 irq_set_thread_affinity(desc);
278 ret = 0;
279 }
280
281 return ret;
282}
283
284#ifdef CONFIG_GENERIC_PENDING_IRQ
285static inline int irq_set_affinity_pending(struct irq_data *data,
286 const struct cpumask *dest)
287{
288 struct irq_desc *desc = irq_data_to_desc(data);
289
290 irqd_set_move_pending(data);
291 irq_copy_pending(desc, dest);
292 return 0;
293}
294#else
295static inline int irq_set_affinity_pending(struct irq_data *data,
296 const struct cpumask *dest)
297{
298 return -EBUSY;
299}
300#endif
301
302static int irq_try_set_affinity(struct irq_data *data,
303 const struct cpumask *dest, bool force)
304{
305 int ret = irq_do_set_affinity(data, dest, force);
306
307
308
309
310
311
312 if (ret == -EBUSY && !force)
313 ret = irq_set_affinity_pending(data, dest);
314 return ret;
315}
316
317static bool irq_set_affinity_deactivated(struct irq_data *data,
318 const struct cpumask *mask, bool force)
319{
320 struct irq_desc *desc = irq_data_to_desc(data);
321
322
323
324
325
326
327
328 if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) || irqd_is_activated(data))
329 return false;
330
331 cpumask_copy(desc->irq_common_data.affinity, mask);
332 irq_init_effective_affinity(data, mask);
333 irqd_set(data, IRQD_AFFINITY_SET);
334 return true;
335}
336
337int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
338 bool force)
339{
340 struct irq_chip *chip = irq_data_get_irq_chip(data);
341 struct irq_desc *desc = irq_data_to_desc(data);
342 int ret = 0;
343
344 if (!chip || !chip->irq_set_affinity)
345 return -EINVAL;
346
347 if (irq_set_affinity_deactivated(data, mask, force))
348 return 0;
349
350 if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
351 ret = irq_try_set_affinity(data, mask, force);
352 } else {
353 irqd_set_move_pending(data);
354 irq_copy_pending(desc, mask);
355 }
356
357 if (desc->affinity_notify) {
358 kref_get(&desc->affinity_notify->kref);
359 if (!schedule_work(&desc->affinity_notify->work)) {
360
361 kref_put(&desc->affinity_notify->kref,
362 desc->affinity_notify->release);
363 }
364 }
365 irqd_set(data, IRQD_AFFINITY_SET);
366
367 return ret;
368}
369
370int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
371{
372 struct irq_desc *desc = irq_to_desc(irq);
373 unsigned long flags;
374 int ret;
375
376 if (!desc)
377 return -EINVAL;
378
379 raw_spin_lock_irqsave(&desc->lock, flags);
380 ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
381 raw_spin_unlock_irqrestore(&desc->lock, flags);
382 return ret;
383}
384
385int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
386{
387 unsigned long flags;
388 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
389
390 if (!desc)
391 return -EINVAL;
392 desc->affinity_hint = m;
393 irq_put_desc_unlock(desc, flags);
394
395 if (m)
396 __irq_set_affinity(irq, m, false);
397 return 0;
398}
399EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
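
/*
 * Usage sketch (illustrative only, not part of this file): a multi-queue
 * driver can use irq_set_affinity_hint() to spread its per-queue vectors
 * over the online CPUs. The foo_* names, irq array and queue count are
 * hypothetical; only the calling pattern is the point.
 *
 *	static void foo_spread_queue_irqs(int *queue_irq, unsigned int nr_queues)
 *	{
 *		unsigned int q, cpu = cpumask_first(cpu_online_mask);
 *
 *		for (q = 0; q < nr_queues; q++) {
 *			irq_set_affinity_hint(queue_irq[q], cpumask_of(cpu));
 *			cpu = cpumask_next(cpu, cpu_online_mask);
 *			if (cpu >= nr_cpu_ids)
 *				cpu = cpumask_first(cpu_online_mask);
 *		}
 *	}
 *
 * The hint has to be cleared with irq_set_affinity_hint(irq, NULL) before
 * the interrupt is freed; __free_irq() warns about a stale hint.
 */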
400
401static void irq_affinity_notify(struct work_struct *work)
402{
403 struct irq_affinity_notify *notify =
404 container_of(work, struct irq_affinity_notify, work);
405 struct irq_desc *desc = irq_to_desc(notify->irq);
406 cpumask_var_t cpumask;
407 unsigned long flags;
408
409 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
410 goto out;
411
412 raw_spin_lock_irqsave(&desc->lock, flags);
413 if (irq_move_pending(&desc->irq_data))
414 irq_get_pending(cpumask, desc);
415 else
416 cpumask_copy(cpumask, desc->irq_common_data.affinity);
417 raw_spin_unlock_irqrestore(&desc->lock, flags);
418
419 notify->notify(notify, cpumask);
420
421 free_cpumask_var(cpumask);
422out:
	kref_put(&notify->kref, notify->release);
424}

/**
 *	irq_set_affinity_notifier - control notification of IRQ affinity changes
 *	@irq:		Interrupt for which to enable/disable notification
 *	@notify:	Context for notification, or %NULL to disable
 *			notification.  Function pointers must be initialised;
 *			the other fields will be initialised by this function.
 *
 *	Must be called in process context.  Notification may only be enabled
 *	after the IRQ is allocated and must be disabled before the IRQ is
 *	freed using free_irq().
 */
437int
438irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
439{
440 struct irq_desc *desc = irq_to_desc(irq);
441 struct irq_affinity_notify *old_notify;
442 unsigned long flags;
443
444
445 might_sleep();
446
447 if (!desc || desc->istate & IRQS_NMI)
448 return -EINVAL;
449
450
451 if (notify) {
452 notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
455 }
456
457 raw_spin_lock_irqsave(&desc->lock, flags);
458 old_notify = desc->affinity_notify;
459 desc->affinity_notify = notify;
460 raw_spin_unlock_irqrestore(&desc->lock, flags);
461
462 if (old_notify) {
463 if (cancel_work_sync(&old_notify->work)) {
464
465 kref_put(&old_notify->kref, old_notify->release);
466 }
467 kref_put(&old_notify->kref, old_notify->release);
468 }
469
470 return 0;
471}
472EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
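
/*
 * Usage sketch (illustrative only, not part of this file): a driver that
 * caches the CPU its interrupt is routed to can register an affinity
 * notifier. The foo_* structure and helpers are hypothetical; only the
 * notify/release callbacks and the registration calls are the real API.
 *
 *	struct foo_dev {
 *		struct irq_affinity_notify affinity_notify;
 *		int irq;
 *		int cached_cpu;
 *	};
 *
 *	static void foo_affinity_changed(struct irq_affinity_notify *notify,
 *					 const cpumask_t *mask)
 *	{
 *		struct foo_dev *fd = container_of(notify, struct foo_dev,
 *						  affinity_notify);
 *
 *		fd->cached_cpu = cpumask_first(mask);
 *	}
 *
 *	static void foo_affinity_release(struct kref *ref)
 *	{
 *		// Last reference dropped; nothing dynamic to free here.
 *	}
 *
 * After request_irq(), set fd->affinity_notify.notify and .release to the
 * callbacks above and call irq_set_affinity_notifier(fd->irq,
 * &fd->affinity_notify). Unregister with a NULL notifier before free_irq().
 */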
473
474#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
478int irq_setup_affinity(struct irq_desc *desc)
479{
480 struct cpumask *set = irq_default_affinity;
481 int ret, node = irq_desc_get_node(desc);
482 static DEFINE_RAW_SPINLOCK(mask_lock);
483 static struct cpumask mask;
484
485
486 if (!__irq_can_set_affinity(desc))
487 return 0;
488
489 raw_spin_lock(&mask_lock);
490
491
492
493
494 if (irqd_affinity_is_managed(&desc->irq_data) ||
495 irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
496 if (cpumask_intersects(desc->irq_common_data.affinity,
497 cpu_online_mask))
498 set = desc->irq_common_data.affinity;
499 else
500 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
501 }
502
503 cpumask_and(&mask, cpu_online_mask, set);
504 if (cpumask_empty(&mask))
505 cpumask_copy(&mask, cpu_online_mask);
506
507 if (node != NUMA_NO_NODE) {
508 const struct cpumask *nodemask = cpumask_of_node(node);
509
510
511 if (cpumask_intersects(&mask, nodemask))
512 cpumask_and(&mask, &mask, nodemask);
513 }
514 ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
515 raw_spin_unlock(&mask_lock);
516 return ret;
517}
518#else
519
520int irq_setup_affinity(struct irq_desc *desc)
521{
522 return irq_select_affinity(irq_desc_get_irq(desc));
523}
524#endif
525#endif

/**
 *	irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 *	@irq: interrupt number to set affinity
 *	@vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
 *	            specific data for percpu_devid interrupts
 *
 *	This function uses the vCPU specific data to set the vCPU
 *	affinity for an irq. The vCPU specific data is passed from
 *	outside, such as KVM. One example code path is as below:
 *	KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
539int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
540{
541 unsigned long flags;
542 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
543 struct irq_data *data;
544 struct irq_chip *chip;
545 int ret = -ENOSYS;
546
547 if (!desc)
548 return -EINVAL;
549
550 data = irq_desc_get_irq_data(desc);
551 do {
552 chip = irq_data_get_irq_chip(data);
553 if (chip && chip->irq_set_vcpu_affinity)
554 break;
555#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
556 data = data->parent_data;
557#else
558 data = NULL;
559#endif
560 } while (data);
561
562 if (data)
563 ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
564 irq_put_desc_unlock(desc, flags);
565
566 return ret;
567}
568EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
569
570void __disable_irq(struct irq_desc *desc)
571{
572 if (!desc->depth++)
573 irq_disable(desc);
574}
575
576static int __disable_irq_nosync(unsigned int irq)
577{
578 unsigned long flags;
579 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
580
581 if (!desc)
582 return -EINVAL;
583 __disable_irq(desc);
584 irq_put_desc_busunlock(desc, flags);
585 return 0;
586}

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and Enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
599void disable_irq_nosync(unsigned int irq)
600{
601 __disable_irq_nosync(irq);
602}
603EXPORT_SYMBOL(disable_irq_nosync);

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
617void disable_irq(unsigned int irq)
618{
619 if (!__disable_irq_nosync(irq))
620 synchronize_irq(irq);
621}
622EXPORT_SYMBOL(disable_irq);
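
/*
 * Usage sketch (illustrative only, not part of this file): disable_irq()
 * typically brackets a reconfiguration that the handler must not observe
 * half-done. The foo_* device and register helper are hypothetical.
 *
 *	static void foo_reprogram(struct foo_dev *fd)
 *	{
 *		disable_irq(fd->irq);	// waits for running handlers to finish
 *		foo_write_config(fd);	// handler cannot run concurrently here
 *		enable_irq(fd->irq);
 *	}
 *
 * Because disable_irq() may sleep while waiting for threaded handlers, it
 * must not be used from the handler itself or from atomic context; use
 * disable_irq_nosync() or disable_hardirq() there instead.
 */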

/**
 *	disable_hardirq - disables an irq and waits for hardirq completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending hard IRQ handlers for this
 *	interrupt to complete before returning. If you use this function while
 *	holding a resource the hard IRQ handler may need you will deadlock.
 *
 *	When used to optimistically disable an interrupt from atomic context
 *	the return value must be checked.
 *
 *	Returns: false if a threaded handler is active.
 *
 *	This function may be called - with care - from IRQ context.
 */
641bool disable_hardirq(unsigned int irq)
642{
643 if (!__disable_irq_nosync(irq))
644 return synchronize_hardirq(irq);
645
646 return false;
647}
648EXPORT_SYMBOL_GPL(disable_hardirq);

/**
 *	disable_nmi_nosync - disable an nmi without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line. Disables and enables are
 *	nested.
 *	The interrupt to disable must have been requested through request_nmi.
 *	Unlike disable_nmi(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 */
660void disable_nmi_nosync(unsigned int irq)
661{
662 disable_irq_nosync(irq);
663}
664
665void __enable_irq(struct irq_desc *desc)
666{
667 switch (desc->depth) {
668 case 0:
669 err_out:
670 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
671 irq_desc_get_irq(desc));
672 break;
673 case 1: {
674 if (desc->istate & IRQS_SUSPENDED)
675 goto err_out;
676
677 irq_settings_set_noprobe(desc);
		/*
		 * Call irq_startup() not irq_enable() here because the
		 * interrupt might be marked NOAUTOEN. So irq_startup()
		 * needs to be invoked when it gets enabled the first
		 * time. If it was already started up, then irq_startup()
		 * will invoke irq_enable() under the hood.
		 */
685 irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
686 break;
687 }
688 default:
689 desc->depth--;
690 }
691}

/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq().  If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context only when
 *	desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
704void enable_irq(unsigned int irq)
705{
706 unsigned long flags;
707 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
708
709 if (!desc)
710 return;
711 if (WARN(!desc->irq_data.chip,
712 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
713 goto out;
714
715 __enable_irq(desc);
716out:
717 irq_put_desc_busunlock(desc, flags);
718}
719EXPORT_SYMBOL(enable_irq);

/**
 *	enable_nmi - enable handling of an nmi
 *	@irq: Interrupt to enable
 *
 *	The interrupt to enable must have been requested through request_nmi.
 *	Undoes the effect of one call to disable_nmi(). If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 */
730void enable_nmi(unsigned int irq)
731{
732 enable_irq(irq);
733}
734
735static int set_irq_wake_real(unsigned int irq, unsigned int on)
736{
737 struct irq_desc *desc = irq_to_desc(irq);
738 int ret = -ENXIO;
739
740 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
741 return 0;
742
743 if (desc->irq_data.chip->irq_set_wake)
744 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
745
746 return ret;
747}

/**
 *	irq_set_irq_wake - control irq power management wakeup
 *	@irq:	interrupt to control
 *	@on:	enable/disable power management wakeup
 *
 *	Enable/disable power management wakeup mode, which is
 *	disabled by default.  Enables and disables must match,
 *	just as they match for non-wakeup mode support.
 *
 *	Wakeup mode lets this IRQ wake the system from sleep
 *	states like "suspend to RAM".
 *
 *	Note: irq enable/disable state is completely orthogonal
 *	to the enable/disable state of irq wake. An irq can be
 *	disabled with disable_irq() and still wake the system as
 *	long as the irq has wake enabled. If this does not hold,
 *	then the underlying irq chip and the related driver need
 *	to be investigated.
 */
768int irq_set_irq_wake(unsigned int irq, unsigned int on)
769{
770 unsigned long flags;
771 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
772 int ret = 0;
773
774 if (!desc)
775 return -EINVAL;
776
777
778 if (desc->istate & IRQS_NMI) {
779 ret = -EINVAL;
780 goto out_unlock;
781 }
782
783
784
785
786 if (on) {
787 if (desc->wake_depth++ == 0) {
788 ret = set_irq_wake_real(irq, on);
789 if (ret)
790 desc->wake_depth = 0;
791 else
792 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
793 }
794 } else {
795 if (desc->wake_depth == 0) {
796 WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
797 } else if (--desc->wake_depth == 0) {
798 ret = set_irq_wake_real(irq, on);
799 if (ret)
800 desc->wake_depth = 1;
801 else
802 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
803 }
804 }
805
806out_unlock:
807 irq_put_desc_busunlock(desc, flags);
808 return ret;
809}
810EXPORT_SYMBOL(irq_set_irq_wake);
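
/*
 * Usage sketch (illustrative only, not part of this file): drivers usually
 * toggle wake capability from their suspend/resume callbacks, keyed off
 * device_may_wakeup(). The foo_* names are hypothetical; enable_irq_wake()
 * and disable_irq_wake() are the <linux/interrupt.h> wrappers around
 * irq_set_irq_wake(irq, 1/0).
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_dev *fd = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(fd->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_dev *fd = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(fd->irq);
 *		return 0;
 *	}
 *
 * Enables and disables must be balanced, matching the wake_depth
 * refcounting above.
 */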

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
817int can_request_irq(unsigned int irq, unsigned long irqflags)
818{
819 unsigned long flags;
820 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
821 int canrequest = 0;
822
823 if (!desc)
824 return 0;
825
826 if (irq_settings_can_request(desc)) {
827 if (!desc->action ||
828 irqflags & desc->action->flags & IRQF_SHARED)
829 canrequest = 1;
830 }
831 irq_put_desc_unlock(desc, flags);
832 return canrequest;
833}
834
835int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
836{
837 struct irq_chip *chip = desc->irq_data.chip;
838 int ret, unmask = 0;
839
840 if (!chip || !chip->irq_set_type) {
841
842
843
844
845 pr_debug("No set_type function for IRQ %d (%s)\n",
846 irq_desc_get_irq(desc),
847 chip ? (chip->name ? : "unknown") : "unknown");
848 return 0;
849 }
850
851 if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
852 if (!irqd_irq_masked(&desc->irq_data))
853 mask_irq(desc);
854 if (!irqd_irq_disabled(&desc->irq_data))
855 unmask = 1;
856 }
857
858
859 flags &= IRQ_TYPE_SENSE_MASK;
860 ret = chip->irq_set_type(&desc->irq_data, flags);
861
862 switch (ret) {
863 case IRQ_SET_MASK_OK:
864 case IRQ_SET_MASK_OK_DONE:
865 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
866 irqd_set(&desc->irq_data, flags);
		/* fall through */

869 case IRQ_SET_MASK_OK_NOCOPY:
870 flags = irqd_get_trigger_type(&desc->irq_data);
871 irq_settings_set_trigger_mask(desc, flags);
872 irqd_clear(&desc->irq_data, IRQD_LEVEL);
873 irq_settings_clr_level(desc);
874 if (flags & IRQ_TYPE_LEVEL_MASK) {
875 irq_settings_set_level(desc);
876 irqd_set(&desc->irq_data, IRQD_LEVEL);
877 }
878
879 ret = 0;
880 break;
881 default:
882 pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
883 flags, irq_desc_get_irq(desc), chip->irq_set_type);
884 }
885 if (unmask)
886 unmask_irq(desc);
887 return ret;
888}
889
890#ifdef CONFIG_HARDIRQS_SW_RESEND
891int irq_set_parent(int irq, int parent_irq)
892{
893 unsigned long flags;
894 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
895
896 if (!desc)
897 return -EINVAL;
898
899 desc->parent_irq = parent_irq;
900
901 irq_put_desc_unlock(desc, flags);
902 return 0;
903}
904EXPORT_SYMBOL_GPL(irq_set_parent);
905#endif

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
912static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
913{
914 return IRQ_WAKE_THREAD;
915}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
921static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
922{
923 WARN(1, "Primary handler called for nested irq %d\n", irq);
924 return IRQ_NONE;
925}
926
927static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
928{
929 WARN(1, "Secondary action handler called for irq %d\n", irq);
930 return IRQ_NONE;
931}
932
933static int irq_wait_for_interrupt(struct irqaction *action)
934{
935 for (;;) {
936 set_current_state(TASK_INTERRUPTIBLE);
937
938 if (kthread_should_stop()) {
939
940 if (test_and_clear_bit(IRQTF_RUNTHREAD,
941 &action->thread_flags)) {
942 __set_current_state(TASK_RUNNING);
943 return 0;
944 }
945 __set_current_state(TASK_RUNNING);
946 return -1;
947 }
948
949 if (test_and_clear_bit(IRQTF_RUNTHREAD,
950 &action->thread_flags)) {
951 __set_current_state(TASK_RUNNING);
952 return 0;
953 }
954 schedule();
955 }
956}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler finished. unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */
963static void irq_finalize_oneshot(struct irq_desc *desc,
964 struct irqaction *action)
965{
966 if (!(desc->istate & IRQS_ONESHOT) ||
967 action->handler == irq_forced_secondary_handler)
968 return;
969again:
970 chip_bus_lock(desc);
971 raw_spin_lock_irq(&desc->lock);
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
988 raw_spin_unlock_irq(&desc->lock);
989 chip_bus_sync_unlock(desc);
990 cpu_relax();
991 goto again;
992 }
993
994
995
996
997
998
999 if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
1000 goto out_unlock;
1001
1002 desc->threads_oneshot &= ~action->thread_mask;
1003
1004 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
1005 irqd_irq_masked(&desc->irq_data))
1006 unmask_threaded_irq(desc);
1007
1008out_unlock:
1009 raw_spin_unlock_irq(&desc->lock);
1010 chip_bus_sync_unlock(desc);
1011}
1012
1013#ifdef CONFIG_SMP
1014
1015
1016
1017static void
1018irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
1019{
1020 cpumask_var_t mask;
1021 bool valid = true;
1022
1023 if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
1024 return;
1025
1026
1027
1028
1029
1030 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
1031 set_bit(IRQTF_AFFINITY, &action->thread_flags);
1032 return;
1033 }
1034
1035 raw_spin_lock_irq(&desc->lock);
1036
1037
1038
1039
1040 if (cpumask_available(desc->irq_common_data.affinity)) {
1041 const struct cpumask *m;
1042
1043 m = irq_data_get_effective_affinity_mask(&desc->irq_data);
1044 cpumask_copy(mask, m);
1045 } else {
1046 valid = false;
1047 }
1048 raw_spin_unlock_irq(&desc->lock);
1049
1050 if (valid)
1051 set_cpus_allowed_ptr(current, mask);
1052 free_cpumask_var(mask);
1053}
1054#else
1055static inline void
1056irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
1057#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * handlers. So we keep that behaviour for forced threaded handlers by
 * running the threaded handler with bottom halves disabled.
 */
1065static irqreturn_t
1066irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
1067{
1068 irqreturn_t ret;
1069
1070 local_bh_disable();
1071 ret = action->thread_fn(action->irq, action->dev_id);
1072 if (ret == IRQ_HANDLED)
1073 atomic_inc(&desc->threads_handled);
1074
1075 irq_finalize_oneshot(desc, action);
1076 local_bh_enable();
1077 return ret;
1078}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */
1085static irqreturn_t irq_thread_fn(struct irq_desc *desc,
1086 struct irqaction *action)
1087{
1088 irqreturn_t ret;
1089
1090 ret = action->thread_fn(action->irq, action->dev_id);
1091 if (ret == IRQ_HANDLED)
1092 atomic_inc(&desc->threads_handled);
1093
1094 irq_finalize_oneshot(desc, action);
1095 return ret;
1096}
1097
1098static void wake_threads_waitq(struct irq_desc *desc)
1099{
1100 if (atomic_dec_and_test(&desc->threads_active))
1101 wake_up(&desc->wait_for_threads);
1102}
1103
1104static void irq_thread_dtor(struct callback_head *unused)
1105{
1106 struct task_struct *tsk = current;
1107 struct irq_desc *desc;
1108 struct irqaction *action;
1109
1110 if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
1111 return;
1112
1113 action = kthread_data(tsk);
1114
1115 pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
1116 tsk->comm, tsk->pid, action->irq);
1117
1118
1119 desc = irq_to_desc(action->irq);
1120
1121
1122
1123
1124 if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
1125 wake_threads_waitq(desc);
1126
1127
1128 irq_finalize_oneshot(desc, action);
1129}
1130
1131static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
1132{
1133 struct irqaction *secondary = action->secondary;
1134
1135 if (WARN_ON_ONCE(!secondary))
1136 return;
1137
1138 raw_spin_lock_irq(&desc->lock);
1139 __irq_wake_thread(desc, secondary);
1140 raw_spin_unlock_irq(&desc->lock);
1141}

/*
 * Interrupt handler thread
 */
1146static int irq_thread(void *data)
1147{
1148 struct callback_head on_exit_work;
1149 struct irqaction *action = data;
1150 struct irq_desc *desc = irq_to_desc(action->irq);
1151 irqreturn_t (*handler_fn)(struct irq_desc *desc,
1152 struct irqaction *action);
1153
1154 if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
1155 &action->thread_flags))
1156 handler_fn = irq_forced_thread_fn;
1157 else
1158 handler_fn = irq_thread_fn;
1159
1160 init_task_work(&on_exit_work, irq_thread_dtor);
1161 task_work_add(current, &on_exit_work, false);
1162
1163 irq_thread_check_affinity(desc, action);
1164
1165 while (!irq_wait_for_interrupt(action)) {
1166 irqreturn_t action_ret;
1167
1168 irq_thread_check_affinity(desc, action);
1169
1170 action_ret = handler_fn(desc, action);
1171 if (action_ret == IRQ_WAKE_THREAD)
1172 irq_wake_secondary(desc, action);
1173
1174 wake_threads_waitq(desc);
1175 }

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set.
	 */
1183 task_work_cancel(current, irq_thread_dtor);
1184 return 0;
1185}

/**
 *	irq_wake_thread - wake the irq thread for the action identified by dev_id
 *	@irq:		Interrupt line
 *	@dev_id:	Device identity for which the thread should be woken
 *
 */
1193void irq_wake_thread(unsigned int irq, void *dev_id)
1194{
1195 struct irq_desc *desc = irq_to_desc(irq);
1196 struct irqaction *action;
1197 unsigned long flags;
1198
1199 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1200 return;
1201
1202 raw_spin_lock_irqsave(&desc->lock, flags);
1203 for_each_action_of_desc(desc, action) {
1204 if (action->dev_id == dev_id) {
1205 if (action->thread)
1206 __irq_wake_thread(desc, action);
1207 break;
1208 }
1209 }
1210 raw_spin_unlock_irqrestore(&desc->lock, flags);
1211}
1212EXPORT_SYMBOL_GPL(irq_wake_thread);
1213
1214static int irq_setup_forced_threading(struct irqaction *new)
1215{
1216 if (!force_irqthreads)
1217 return 0;
1218 if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
1219 return 0;
1220
1221
1222
1223
1224
1225 if (new->handler == irq_default_primary_handler)
1226 return 0;
1227
1228 new->flags |= IRQF_ONESHOT;
1229
1230
1231
1232
1233
1234
1235 if (new->handler && new->thread_fn) {
1236
1237 new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1238 if (!new->secondary)
1239 return -ENOMEM;
1240 new->secondary->handler = irq_forced_secondary_handler;
1241 new->secondary->thread_fn = new->thread_fn;
1242 new->secondary->dev_id = new->dev_id;
1243 new->secondary->irq = new->irq;
1244 new->secondary->name = new->name;
1245 }
1246
1247 set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
1248 new->thread_fn = new->handler;
1249 new->handler = irq_default_primary_handler;
1250 return 0;
1251}
1252
1253static int irq_request_resources(struct irq_desc *desc)
1254{
1255 struct irq_data *d = &desc->irq_data;
1256 struct irq_chip *c = d->chip;
1257
1258 return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1259}
1260
1261static void irq_release_resources(struct irq_desc *desc)
1262{
1263 struct irq_data *d = &desc->irq_data;
1264 struct irq_chip *c = d->chip;
1265
1266 if (c->irq_release_resources)
1267 c->irq_release_resources(d);
1268}
1269
1270static bool irq_supports_nmi(struct irq_desc *desc)
1271{
1272 struct irq_data *d = irq_desc_get_irq_data(desc);
1273
1274#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1275
1276 if (d->parent_data)
1277 return false;
1278#endif
1279
1280 if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
1281 return false;
1282
1283 return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
1284}
1285
1286static int irq_nmi_setup(struct irq_desc *desc)
1287{
1288 struct irq_data *d = irq_desc_get_irq_data(desc);
1289 struct irq_chip *c = d->chip;
1290
1291 return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
1292}
1293
1294static void irq_nmi_teardown(struct irq_desc *desc)
1295{
1296 struct irq_data *d = irq_desc_get_irq_data(desc);
1297 struct irq_chip *c = d->chip;
1298
1299 if (c->irq_nmi_teardown)
1300 c->irq_nmi_teardown(d);
1301}
1302
1303static int
1304setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1305{
1306 struct task_struct *t;
1307 struct sched_param param = {
1308 .sched_priority = MAX_USER_RT_PRIO/2,
1309 };
1310
1311 if (!secondary) {
1312 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1313 new->name);
1314 } else {
1315 t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1316 new->name);
1317 param.sched_priority -= 1;
1318 }
1319
1320 if (IS_ERR(t))
1321 return PTR_ERR(t);
1322
	sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
1324
1325
1326
1327
1328
1329
1330 new->thread = get_task_struct(t);
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340 set_bit(IRQTF_AFFINITY, &new->thread_flags);
1341 return 0;
1342}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 *
 * Locking rules:
 *
 * desc->request_mutex	Provides serialization against a concurrent free_irq()
 *   chip_bus_lock	Provides serialization for slow bus operations
 *     desc->lock	Provides serialization against hard interrupts
 *
 * chip_bus_lock and desc->lock are sufficient for all other management and
 * interrupt related functions. desc->request_mutex solely serializes
 * request/free_irq().
 */
1358static int
1359__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1360{
1361 struct irqaction *old, **old_ptr;
1362 unsigned long flags, thread_mask = 0;
1363 int ret, nested, shared = 0;
1364
1365 if (!desc)
1366 return -EINVAL;
1367
1368 if (desc->irq_data.chip == &no_irq_chip)
1369 return -ENOSYS;
1370 if (!try_module_get(desc->owner))
1371 return -ENODEV;
1372
1373 new->irq = irq;
1374
1375
1376
1377
1378
1379 if (!(new->flags & IRQF_TRIGGER_MASK))
1380 new->flags |= irqd_get_trigger_type(&desc->irq_data);
1381
1382
1383
1384
1385
1386 nested = irq_settings_is_nested_thread(desc);
1387 if (nested) {
1388 if (!new->thread_fn) {
1389 ret = -EINVAL;
1390 goto out_mput;
1391 }
1392
1393
1394
1395
1396
1397 new->handler = irq_nested_primary_handler;
1398 } else {
1399 if (irq_settings_can_thread(desc)) {
1400 ret = irq_setup_forced_threading(new);
1401 if (ret)
1402 goto out_mput;
1403 }
1404 }
1405
1406
1407
1408
1409
1410
1411 if (new->thread_fn && !nested) {
1412 ret = setup_irq_thread(new, irq, false);
1413 if (ret)
1414 goto out_mput;
1415 if (new->secondary) {
1416 ret = setup_irq_thread(new->secondary, irq, true);
1417 if (ret)
1418 goto out_thread;
1419 }
1420 }
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1432 new->flags &= ~IRQF_ONESHOT;
1433
1434
1435
1436
1437
1438
1439
1440
1441 mutex_lock(&desc->request_mutex);
1442
1443
1444
1445
1446
1447
1448 chip_bus_lock(desc);
1449
1450
1451 if (!desc->action) {
1452 ret = irq_request_resources(desc);
1453 if (ret) {
1454 pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1455 new->name, irq, desc->irq_data.chip->name);
1456 goto out_bus_unlock;
1457 }
1458 }
1459
1460
1461
1462
1463
1464
1465
1466 raw_spin_lock_irqsave(&desc->lock, flags);
1467 old_ptr = &desc->action;
1468 old = *old_ptr;
1469 if (old) {
1470
1471
1472
1473
1474
1475
1476
1477
1478 unsigned int oldtype;
1479
1480 if (desc->istate & IRQS_NMI) {
1481 pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
1482 new->name, irq, desc->irq_data.chip->name);
1483 ret = -EINVAL;
1484 goto out_unlock;
1485 }
1486
1487
1488
1489
1490
1491 if (irqd_trigger_type_was_set(&desc->irq_data)) {
1492 oldtype = irqd_get_trigger_type(&desc->irq_data);
1493 } else {
1494 oldtype = new->flags & IRQF_TRIGGER_MASK;
1495 irqd_set_trigger_type(&desc->irq_data, oldtype);
1496 }
1497
1498 if (!((old->flags & new->flags) & IRQF_SHARED) ||
1499 (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
1500 ((old->flags ^ new->flags) & IRQF_ONESHOT))
1501 goto mismatch;
1502
1503
1504 if ((old->flags & IRQF_PERCPU) !=
1505 (new->flags & IRQF_PERCPU))
1506 goto mismatch;
1507
1508
1509 do {
1510
1511
1512
1513
1514
1515 thread_mask |= old->thread_mask;
1516 old_ptr = &old->next;
1517 old = *old_ptr;
1518 } while (old);
1519 shared = 1;
1520 }
1521
1522
1523
1524
1525
1526
1527 if (new->flags & IRQF_ONESHOT) {
1528
1529
1530
1531
1532 if (thread_mask == ~0UL) {
1533 ret = -EBUSY;
1534 goto out_unlock;
1535 }
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556 new->thread_mask = 1UL << ffz(thread_mask);
1557
1558 } else if (new->handler == irq_default_primary_handler &&
1559 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
1576 new->name, irq);
1577 ret = -EINVAL;
1578 goto out_unlock;
1579 }
1580
1581 if (!shared) {
1582 init_waitqueue_head(&desc->wait_for_threads);
1583
1584
1585 if (new->flags & IRQF_TRIGGER_MASK) {
1586 ret = __irq_set_trigger(desc,
1587 new->flags & IRQF_TRIGGER_MASK);
1588
1589 if (ret)
1590 goto out_unlock;
1591 }
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604 ret = irq_activate(desc);
1605 if (ret)
1606 goto out_unlock;
1607
1608 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1609 IRQS_ONESHOT | IRQS_WAITING);
1610 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1611
1612 if (new->flags & IRQF_PERCPU) {
1613 irqd_set(&desc->irq_data, IRQD_PER_CPU);
1614 irq_settings_set_per_cpu(desc);
1615 }
1616
1617 if (new->flags & IRQF_ONESHOT)
1618 desc->istate |= IRQS_ONESHOT;
1619
1620
1621 if (new->flags & IRQF_NOBALANCING) {
1622 irq_settings_set_no_balancing(desc);
1623 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1624 }
1625
1626 if (irq_settings_can_autoenable(desc)) {
1627 irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
1628 } else {
1629
1630
1631
1632
1633
1634
1635 WARN_ON_ONCE(new->flags & IRQF_SHARED);
1636
1637 desc->depth = 1;
1638 }
1639
1640 } else if (new->flags & IRQF_TRIGGER_MASK) {
1641 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1642 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1643
1644 if (nmsk != omsk)
1645
1646 pr_warn("irq %d uses trigger mode %u; requested %u\n",
1647 irq, omsk, nmsk);
1648 }
1649
1650 *old_ptr = new;
1651
1652 irq_pm_install_action(desc, new);
1653
1654
1655 desc->irq_count = 0;
1656 desc->irqs_unhandled = 0;
1657
1658
1659
1660
1661
1662 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1663 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1664 __enable_irq(desc);
1665 }
1666
1667 raw_spin_unlock_irqrestore(&desc->lock, flags);
1668 chip_bus_sync_unlock(desc);
1669 mutex_unlock(&desc->request_mutex);
1670
1671 irq_setup_timings(desc, new);
1672
1673
1674
1675
1676
1677 if (new->thread)
1678 wake_up_process(new->thread);
1679 if (new->secondary)
1680 wake_up_process(new->secondary->thread);
1681
1682 register_irq_proc(irq, desc);
1683 new->dir = NULL;
1684 register_handler_proc(irq, new);
1685 return 0;
1686
1687mismatch:
1688 if (!(new->flags & IRQF_PROBE_SHARED)) {
1689 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1690 irq, new->flags, new->name, old->flags, old->name);
1691#ifdef CONFIG_DEBUG_SHIRQ
1692 dump_stack();
1693#endif
1694 }
1695 ret = -EBUSY;
1696
1697out_unlock:
1698 raw_spin_unlock_irqrestore(&desc->lock, flags);
1699
1700 if (!desc->action)
1701 irq_release_resources(desc);
1702out_bus_unlock:
1703 chip_bus_sync_unlock(desc);
1704 mutex_unlock(&desc->request_mutex);
1705
1706out_thread:
1707 if (new->thread) {
1708 struct task_struct *t = new->thread;
1709
1710 new->thread = NULL;
1711 kthread_stop(t);
1712 put_task_struct(t);
1713 }
1714 if (new->secondary && new->secondary->thread) {
1715 struct task_struct *t = new->secondary->thread;
1716
1717 new->secondary->thread = NULL;
1718 kthread_stop(t);
1719 put_task_struct(t);
1720 }
1721out_mput:
1722 module_put(desc->owner);
1723 return ret;
1724}

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
1730static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
1731{
1732 unsigned irq = desc->irq_data.irq;
1733 struct irqaction *action, **action_ptr;
1734 unsigned long flags;
1735
1736 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1737
1738 mutex_lock(&desc->request_mutex);
1739 chip_bus_lock(desc);
1740 raw_spin_lock_irqsave(&desc->lock, flags);
1741
1742
1743
1744
1745
1746 action_ptr = &desc->action;
1747 for (;;) {
1748 action = *action_ptr;
1749
1750 if (!action) {
1751 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1752 raw_spin_unlock_irqrestore(&desc->lock, flags);
1753 chip_bus_sync_unlock(desc);
1754 mutex_unlock(&desc->request_mutex);
1755 return NULL;
1756 }
1757
1758 if (action->dev_id == dev_id)
1759 break;
1760 action_ptr = &action->next;
1761 }
1762
1763
1764 *action_ptr = action->next;
1765
1766 irq_pm_remove_action(desc, action);
1767
1768
1769 if (!desc->action) {
1770 irq_settings_clr_disable_unlazy(desc);
1771
1772 irq_shutdown(desc);
1773 }
1774
1775#ifdef CONFIG_SMP
1776
1777 if (WARN_ON_ONCE(desc->affinity_hint))
1778 desc->affinity_hint = NULL;
1779#endif
1780
1781 raw_spin_unlock_irqrestore(&desc->lock, flags);
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796 chip_bus_sync_unlock(desc);
1797
1798 unregister_handler_proc(irq, action);
1799
1800
1801
1802
1803
1804
1805 __synchronize_hardirq(desc, true);
1806
1807#ifdef CONFIG_DEBUG_SHIRQ
1808
1809
1810
1811
1812
1813
1814
1815
1816 if (action->flags & IRQF_SHARED) {
1817 local_irq_save(flags);
1818 action->handler(irq, dev_id);
1819 local_irq_restore(flags);
1820 }
1821#endif
1822
1823
1824
1825
1826
1827
1828
1829 if (action->thread) {
1830 kthread_stop(action->thread);
1831 put_task_struct(action->thread);
1832 if (action->secondary && action->secondary->thread) {
1833 kthread_stop(action->secondary->thread);
1834 put_task_struct(action->secondary->thread);
1835 }
1836 }
1837
1838
1839 if (!desc->action) {
1840
1841
1842
1843
1844 chip_bus_lock(desc);
1845
1846
1847
1848
1849 raw_spin_lock_irqsave(&desc->lock, flags);
1850 irq_domain_deactivate_irq(&desc->irq_data);
1851 raw_spin_unlock_irqrestore(&desc->lock, flags);
1852
1853 irq_release_resources(desc);
1854 chip_bus_sync_unlock(desc);
1855 irq_remove_timings(desc);
1856 }
1857
1858 mutex_unlock(&desc->request_mutex);
1859
1860 irq_chip_pm_put(&desc->irq_data);
1861 module_put(desc->owner);
1862 kfree(action->secondary);
1863 return action;
1864}

/**
 *	free_irq - free an interrupt allocated with request_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 *
 *	Returns the devname argument passed to request_irq.
 */
1882const void *free_irq(unsigned int irq, void *dev_id)
1883{
1884 struct irq_desc *desc = irq_to_desc(irq);
1885 struct irqaction *action;
1886 const char *devname;
1887
1888 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1889 return NULL;
1890
1891#ifdef CONFIG_SMP
1892 if (WARN_ON(desc->affinity_notify))
1893 desc->affinity_notify = NULL;
1894#endif
1895
1896 action = __free_irq(desc, dev_id);
1897
1898 if (!action)
1899 return NULL;
1900
1901 devname = action->name;
1902 kfree(action);
1903 return devname;
1904}
1905EXPORT_SYMBOL(free_irq);
1906
1907
1908static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
1909{
1910 const char *devname = NULL;
1911
1912 desc->istate &= ~IRQS_NMI;
1913
1914 if (!WARN_ON(desc->action == NULL)) {
1915 irq_pm_remove_action(desc, desc->action);
1916 devname = desc->action->name;
1917 unregister_handler_proc(irq, desc->action);
1918
1919 kfree(desc->action);
1920 desc->action = NULL;
1921 }
1922
1923 irq_settings_clr_disable_unlazy(desc);
1924 irq_shutdown_and_deactivate(desc);
1925
1926 irq_release_resources(desc);
1927
1928 irq_chip_pm_put(&desc->irq_data);
1929 module_put(desc->owner);
1930
1931 return devname;
1932}
1933
1934const void *free_nmi(unsigned int irq, void *dev_id)
1935{
1936 struct irq_desc *desc = irq_to_desc(irq);
1937 unsigned long flags;
1938 const void *devname;
1939
1940 if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
1941 return NULL;
1942
1943 if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1944 return NULL;
1945
1946
1947 if (WARN_ON(desc->depth == 0))
1948 disable_nmi_nosync(irq);
1949
1950 raw_spin_lock_irqsave(&desc->lock, flags);
1951
1952 irq_nmi_teardown(desc);
1953 devname = __cleanup_nmi(irq, desc);
1954
1955 raw_spin_unlock_irqrestore(&desc->lock, flags);
1956
1957 return devname;
1958}

/**
 *	request_threaded_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Primary handler for threaded interrupts.
 *		  If NULL and thread_fn != NULL the default
 *		  primary handler is installed.
 *	@thread_fn: Function called from the irq handler thread
 *		    If NULL, no irq thread is created
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	If you want to set up a threaded irq handler for your device
 *	then you need to supply @handler and @thread_fn. @handler is
 *	still called in hard interrupt context and has to check
 *	whether the interrupt originates from the device. If yes it
 *	needs to disable the interrupt on the device and return
 *	IRQ_WAKE_THREAD which will wake up the handler thread and run
 *	@thread_fn. This split handler design is necessary to support
 *	shared interrupts.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 */
2002int request_threaded_irq(unsigned int irq, irq_handler_t handler,
2003 irq_handler_t thread_fn, unsigned long irqflags,
2004 const char *devname, void *dev_id)
2005{
2006 struct irqaction *action;
2007 struct irq_desc *desc;
2008 int retval;
2009
2010 if (irq == IRQ_NOTCONNECTED)
2011 return -ENOTCONN;
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022 if (((irqflags & IRQF_SHARED) && !dev_id) ||
2023 (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
2024 ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
2025 return -EINVAL;
2026
2027 desc = irq_to_desc(irq);
2028 if (!desc)
2029 return -EINVAL;
2030
2031 if (!irq_settings_can_request(desc) ||
2032 WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2033 return -EINVAL;
2034
2035 if (!handler) {
2036 if (!thread_fn)
2037 return -EINVAL;
2038 handler = irq_default_primary_handler;
2039 }
2040
2041 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2042 if (!action)
2043 return -ENOMEM;
2044
2045 action->handler = handler;
2046 action->thread_fn = thread_fn;
2047 action->flags = irqflags;
2048 action->name = devname;
2049 action->dev_id = dev_id;
2050
2051 retval = irq_chip_pm_get(&desc->irq_data);
2052 if (retval < 0) {
2053 kfree(action);
2054 return retval;
2055 }
2056
2057 retval = __setup_irq(irq, desc, action);
2058
2059 if (retval) {
2060 irq_chip_pm_put(&desc->irq_data);
2061 kfree(action->secondary);
2062 kfree(action);
2063 }
2064
2065#ifdef CONFIG_DEBUG_SHIRQ_FIXME
2066 if (!retval && (irqflags & IRQF_SHARED)) {
2067
2068
2069
2070
2071
2072
2073 unsigned long flags;
2074
2075 disable_irq(irq);
2076 local_irq_save(flags);
2077
2078 handler(irq, dev_id);
2079
2080 local_irq_restore(flags);
2081 enable_irq(irq);
2082 }
2083#endif
2084 return retval;
2085}
2086EXPORT_SYMBOL(request_threaded_irq);
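
/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * primary/threaded handler pair for a device behind a slow bus. The foo_*
 * device and its register helpers are hypothetical.
 *
 *	static irqreturn_t foo_hardirq(int irq, void *dev_id)
 *	{
 *		struct foo_dev *fd = dev_id;
 *
 *		if (!foo_irq_is_mine(fd))	// shared line: check the source
 *			return IRQ_NONE;
 *		foo_mask_device_irq(fd);	// quiesce the device
 *		return IRQ_WAKE_THREAD;		// run foo_thread_fn in the irq thread
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		struct foo_dev *fd = dev_id;
 *
 *		foo_handle_slow_work(fd);	// may sleep, e.g. I2C transfers
 *		foo_unmask_device_irq(fd);
 *		return IRQ_HANDLED;
 *	}
 *
 * Request at probe time with
 *	request_threaded_irq(fd->irq, foo_hardirq, foo_thread_fn, IRQF_SHARED,
 *			     "foo", fd);
 * and release with free_irq(fd->irq, fd) using the same dev_id cookie.
 */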

/**
 *	request_any_context_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Threaded handler for threaded interrupts.
 *	@flags: Interrupt type flags
 *	@name: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. It selects either a
 *	hardirq or threaded handling method depending on the
 *	context.
 *
 *	On failure, it returns a negative value. On success,
 *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
2105int request_any_context_irq(unsigned int irq, irq_handler_t handler,
2106 unsigned long flags, const char *name, void *dev_id)
2107{
2108 struct irq_desc *desc;
2109 int ret;
2110
2111 if (irq == IRQ_NOTCONNECTED)
2112 return -ENOTCONN;
2113
2114 desc = irq_to_desc(irq);
2115 if (!desc)
2116 return -EINVAL;
2117
2118 if (irq_settings_is_nested_thread(desc)) {
2119 ret = request_threaded_irq(irq, NULL, handler,
2120 flags, name, dev_id);
2121 return !ret ? IRQC_IS_NESTED : ret;
2122 }
2123
2124 ret = request_irq(irq, handler, flags, name, dev_id);
2125 return !ret ? IRQC_IS_HARDIRQ : ret;
2126}
2127EXPORT_SYMBOL_GPL(request_any_context_irq);

/**
 *	request_nmi - allocate an interrupt line for NMI delivery
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Threaded handler for threaded interrupts.
 *	@irqflags: Interrupt type flags
 *	@name: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. It sets up the IRQ line
 *	to be handled as an NMI.
 *
 *	An interrupt line delivering NMIs cannot be shared and IRQ handling
 *	cannot be threaded.
 *
 *	Interrupt lines requested for NMI delivering must produce per cpu
 *	interrupts and have auto enabling setting disabled.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If the interrupt line cannot be used to deliver NMIs, function
 *	will fail and return a negative value.
 */
2155int request_nmi(unsigned int irq, irq_handler_t handler,
2156 unsigned long irqflags, const char *name, void *dev_id)
2157{
2158 struct irqaction *action;
2159 struct irq_desc *desc;
2160 unsigned long flags;
2161 int retval;
2162
2163 if (irq == IRQ_NOTCONNECTED)
2164 return -ENOTCONN;
2165
2166
2167 if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
2168 return -EINVAL;
2169
2170 if (!(irqflags & IRQF_PERCPU))
2171 return -EINVAL;
2172
2173 if (!handler)
2174 return -EINVAL;
2175
2176 desc = irq_to_desc(irq);
2177
2178 if (!desc || irq_settings_can_autoenable(desc) ||
2179 !irq_settings_can_request(desc) ||
2180 WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
2181 !irq_supports_nmi(desc))
2182 return -EINVAL;
2183
2184 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2185 if (!action)
2186 return -ENOMEM;
2187
2188 action->handler = handler;
2189 action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
2190 action->name = name;
2191 action->dev_id = dev_id;
2192
2193 retval = irq_chip_pm_get(&desc->irq_data);
2194 if (retval < 0)
2195 goto err_out;
2196
2197 retval = __setup_irq(irq, desc, action);
2198 if (retval)
2199 goto err_irq_setup;
2200
2201 raw_spin_lock_irqsave(&desc->lock, flags);
2202
2203
2204 desc->istate |= IRQS_NMI;
2205 retval = irq_nmi_setup(desc);
2206 if (retval) {
2207 __cleanup_nmi(irq, desc);
2208 raw_spin_unlock_irqrestore(&desc->lock, flags);
2209 return -EINVAL;
2210 }
2211
2212 raw_spin_unlock_irqrestore(&desc->lock, flags);
2213
2214 return 0;
2215
2216err_irq_setup:
2217 irq_chip_pm_put(&desc->irq_data);
2218err_out:
2219 kfree(action);
2220
2221 return retval;
2222}
2223
2224void enable_percpu_irq(unsigned int irq, unsigned int type)
2225{
2226 unsigned int cpu = smp_processor_id();
2227 unsigned long flags;
2228 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2229
2230 if (!desc)
2231 return;
2232
2233
2234
2235
2236
2237 type &= IRQ_TYPE_SENSE_MASK;
2238 if (type == IRQ_TYPE_NONE)
2239 type = irqd_get_trigger_type(&desc->irq_data);
2240
2241 if (type != IRQ_TYPE_NONE) {
2242 int ret;
2243
2244 ret = __irq_set_trigger(desc, type);
2245
2246 if (ret) {
2247 WARN(1, "failed to set type for IRQ%d\n", irq);
2248 goto out;
2249 }
2250 }
2251
2252 irq_percpu_enable(desc, cpu);
2253out:
2254 irq_put_desc_unlock(desc, flags);
2255}
2256EXPORT_SYMBOL_GPL(enable_percpu_irq);
2257
2258void enable_percpu_nmi(unsigned int irq, unsigned int type)
2259{
2260 enable_percpu_irq(irq, type);
2261}

/**
 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
 * @irq:	Linux irq number to check for
 *
 * Must be called from a non migratable context. Returns the enable
 * state of a per cpu interrupt on the current cpu.
 */
2270bool irq_percpu_is_enabled(unsigned int irq)
2271{
2272 unsigned int cpu = smp_processor_id();
2273 struct irq_desc *desc;
2274 unsigned long flags;
2275 bool is_enabled;
2276
2277 desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2278 if (!desc)
2279 return false;
2280
2281 is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
2282 irq_put_desc_unlock(desc, flags);
2283
2284 return is_enabled;
2285}
2286EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
2287
2288void disable_percpu_irq(unsigned int irq)
2289{
2290 unsigned int cpu = smp_processor_id();
2291 unsigned long flags;
2292 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2293
2294 if (!desc)
2295 return;
2296
2297 irq_percpu_disable(desc, cpu);
2298 irq_put_desc_unlock(desc, flags);
2299}
2300EXPORT_SYMBOL_GPL(disable_percpu_irq);
2301
2302void disable_percpu_nmi(unsigned int irq)
2303{
2304 disable_percpu_irq(irq);
2305}

/*
 * Internal function to unregister a percpu irqaction.
 */
2310static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2311{
2312 struct irq_desc *desc = irq_to_desc(irq);
2313 struct irqaction *action;
2314 unsigned long flags;
2315
2316 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
2317
2318 if (!desc)
2319 return NULL;
2320
2321 raw_spin_lock_irqsave(&desc->lock, flags);
2322
2323 action = desc->action;
2324 if (!action || action->percpu_dev_id != dev_id) {
2325 WARN(1, "Trying to free already-free IRQ %d\n", irq);
2326 goto bad;
2327 }
2328
2329 if (!cpumask_empty(desc->percpu_enabled)) {
2330 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
2331 irq, cpumask_first(desc->percpu_enabled));
2332 goto bad;
2333 }
2334
2335
2336 desc->action = NULL;
2337
2338 desc->istate &= ~IRQS_NMI;
2339
2340 raw_spin_unlock_irqrestore(&desc->lock, flags);
2341
2342 unregister_handler_proc(irq, action);
2343
2344 irq_chip_pm_put(&desc->irq_data);
2345 module_put(desc->owner);
2346 return action;
2347
2348bad:
2349 raw_spin_unlock_irqrestore(&desc->lock, flags);
2350 return NULL;
2351}

/**
 *	remove_percpu_irq - free a per-cpu interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
2360void remove_percpu_irq(unsigned int irq, struct irqaction *act)
2361{
2362 struct irq_desc *desc = irq_to_desc(irq);
2363
2364 if (desc && irq_settings_is_per_cpu_devid(desc))
2365 __free_percpu_irq(irq, act->percpu_dev_id);
2366}

/**
 *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove a percpu interrupt handler. The handler is removed, but
 *	the interrupt line is not disabled. This must be done on each
 *	CPU before calling this function. The function does not return
 *	until any executing interrupts for this IRQ have completed.
 *
 *	This function must not be called from interrupt context.
 */
2380void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2381{
2382 struct irq_desc *desc = irq_to_desc(irq);
2383
2384 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2385 return;
2386
2387 chip_bus_lock(desc);
2388 kfree(__free_percpu_irq(irq, dev_id));
2389 chip_bus_sync_unlock(desc);
2390}
2391EXPORT_SYMBOL_GPL(free_percpu_irq);
2392
2393void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
2394{
2395 struct irq_desc *desc = irq_to_desc(irq);
2396
2397 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2398 return;
2399
2400 if (WARN_ON(!(desc->istate & IRQS_NMI)))
2401 return;
2402
2403 kfree(__free_percpu_irq(irq, dev_id));
2404}

/**
 *	setup_percpu_irq - setup a per-cpu interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 * Used to statically setup per-cpu interrupts in the early boot process.
 */
2413int setup_percpu_irq(unsigned int irq, struct irqaction *act)
2414{
2415 struct irq_desc *desc = irq_to_desc(irq);
2416 int retval;
2417
2418 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2419 return -EINVAL;
2420
2421 retval = irq_chip_pm_get(&desc->irq_data);
2422 if (retval < 0)
2423 return retval;
2424
2425 retval = __setup_irq(irq, desc, act);
2426
2427 if (retval)
2428 irq_chip_pm_put(&desc->irq_data);
2429
2430 return retval;
2431}

/**
 *	__request_percpu_irq - allocate a percpu interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *	@flags: Interrupt type flags (IRQF_TIMER only)
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A percpu cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt on the local CPU. If the interrupt is supposed to be
 *	enabled on other CPUs, it has to be done on each CPU calling
 *	enable_percpu_irq().
 *
 *	Dev_id must be globally unique. It is a per-cpu variable, and
 *	the handler gets called with the interrupted CPU's instance of
 *	that variable.
 */
2450int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2451 unsigned long flags, const char *devname,
2452 void __percpu *dev_id)
2453{
2454 struct irqaction *action;
2455 struct irq_desc *desc;
2456 int retval;
2457
2458 if (!dev_id)
2459 return -EINVAL;
2460
2461 desc = irq_to_desc(irq);
2462 if (!desc || !irq_settings_can_request(desc) ||
2463 !irq_settings_is_per_cpu_devid(desc))
2464 return -EINVAL;
2465
2466 if (flags && flags != IRQF_TIMER)
2467 return -EINVAL;
2468
2469 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2470 if (!action)
2471 return -ENOMEM;
2472
2473 action->handler = handler;
2474 action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
2475 action->name = devname;
2476 action->percpu_dev_id = dev_id;
2477
2478 retval = irq_chip_pm_get(&desc->irq_data);
2479 if (retval < 0) {
2480 kfree(action);
2481 return retval;
2482 }
2483
2484 retval = __setup_irq(irq, desc, action);
2485
2486 if (retval) {
2487 irq_chip_pm_put(&desc->irq_data);
2488 kfree(action);
2489 }
2490
2491 return retval;
2492}
2493EXPORT_SYMBOL_GPL(__request_percpu_irq);
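
/*
 * Usage sketch (illustrative only, not part of this file): per-CPU
 * interrupts such as a per-CPU timer are requested once with a percpu
 * cookie and then enabled on each CPU, typically from hotplug callbacks.
 * The foo_* names are hypothetical; request_percpu_irq() is the
 * <linux/interrupt.h> wrapper around __request_percpu_irq() with flags == 0.
 *
 *	static DEFINE_PER_CPU(struct foo_percpu, foo_events);
 *
 *	static irqreturn_t foo_percpu_handler(int irq, void *dev_id)
 *	{
 *		struct foo_percpu *p = dev_id;	// this CPU's instance
 *
 *		foo_handle_event(p);
 *		return IRQ_HANDLED;
 *	}
 *
 * At init time:
 *	request_percpu_irq(irq, foo_percpu_handler, "foo", &foo_events);
 * In the CPU online callback:	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 * In the CPU offline callback:	disable_percpu_irq(irq);
 */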

/**
 *	request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
 *	@irq: Interrupt to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *	@name: An ascii name for the claiming device
 *	@dev_id: A percpu cookie passed back to the handler function
 *
 *	This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs
 *	have to be setup on each CPU by calling prepare_percpu_nmi() before
 *	being enabled on the same CPU by using enable_percpu_nmi().
 *
 *	Dev_id must be globally unique. It is a per-cpu variable, and the
 *	handler gets called with the interrupted CPU's instance of that
 *	variable.
 *
 *	Interrupt lines requested for NMI delivering should have auto enabling
 *	setting disabled.
 *
 *	If the interrupt line cannot be used to deliver NMIs, function
 *	will fail returning a negative value.
 */
2516int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
2517 const char *name, void __percpu *dev_id)
2518{
2519 struct irqaction *action;
2520 struct irq_desc *desc;
2521 unsigned long flags;
2522 int retval;
2523
2524 if (!handler)
2525 return -EINVAL;
2526
2527 desc = irq_to_desc(irq);
2528
2529 if (!desc || !irq_settings_can_request(desc) ||
2530 !irq_settings_is_per_cpu_devid(desc) ||
2531 irq_settings_can_autoenable(desc) ||
2532 !irq_supports_nmi(desc))
2533 return -EINVAL;
2534
2535
2536 if (desc->istate & IRQS_NMI)
2537 return -EINVAL;
2538
2539 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2540 if (!action)
2541 return -ENOMEM;
2542
2543 action->handler = handler;
2544 action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
2545 | IRQF_NOBALANCING;
2546 action->name = name;
2547 action->percpu_dev_id = dev_id;
2548
2549 retval = irq_chip_pm_get(&desc->irq_data);
2550 if (retval < 0)
2551 goto err_out;
2552
2553 retval = __setup_irq(irq, desc, action);
2554 if (retval)
2555 goto err_irq_setup;
2556
2557 raw_spin_lock_irqsave(&desc->lock, flags);
2558 desc->istate |= IRQS_NMI;
2559 raw_spin_unlock_irqrestore(&desc->lock, flags);
2560
2561 return 0;
2562
2563err_irq_setup:
2564 irq_chip_pm_put(&desc->irq_data);
2565err_out:
2566 kfree(action);
2567
2568 return retval;
2569}

/**
 *	prepare_percpu_nmi - performs CPU local setup for NMI delivery
 *	@irq: Interrupt line to prepare for NMI delivery
 *
 *	This call prepares an interrupt line to deliver NMI on the current CPU,
 *	before that interrupt line gets enabled with enable_percpu_nmi().
 *
 *	As a CPU local operation, this should be called from non-preemptible
 *	context.
 *
 *	If the interrupt line cannot be used to deliver NMIs, function
 *	will fail returning a negative value.
 */
2584int prepare_percpu_nmi(unsigned int irq)
2585{
2586 unsigned long flags;
2587 struct irq_desc *desc;
2588 int ret = 0;
2589
2590 WARN_ON(preemptible());
2591
2592 desc = irq_get_desc_lock(irq, &flags,
2593 IRQ_GET_DESC_CHECK_PERCPU);
2594 if (!desc)
2595 return -EINVAL;
2596
2597 if (WARN(!(desc->istate & IRQS_NMI),
2598 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
2599 irq)) {
2600 ret = -EINVAL;
2601 goto out;
2602 }
2603
2604 ret = irq_nmi_setup(desc);
2605 if (ret) {
2606 pr_err("Failed to setup NMI delivery: irq %u\n", irq);
2607 goto out;
2608 }
2609
2610out:
2611 irq_put_desc_unlock(desc, flags);
2612 return ret;
2613}

/**
 *	teardown_percpu_nmi - undoes NMI setup of IRQ line
 *	@irq: Interrupt line from which CPU local NMI configuration should be
 *	      removed
 *
 *	This call undoes the setup done by prepare_percpu_nmi().
 *
 *	IRQ line should not be enabled for the current CPU.
 *
 *	As a CPU local operation, this should be called from non-preemptible
 *	context.
 */
2627void teardown_percpu_nmi(unsigned int irq)
2628{
2629 unsigned long flags;
2630 struct irq_desc *desc;
2631
2632 WARN_ON(preemptible());
2633
2634 desc = irq_get_desc_lock(irq, &flags,
2635 IRQ_GET_DESC_CHECK_PERCPU);
2636 if (!desc)
2637 return;
2638
2639 if (WARN_ON(!(desc->istate & IRQS_NMI)))
2640 goto out;
2641
2642 irq_nmi_teardown(desc);
2643out:
2644 irq_put_desc_unlock(desc, flags);
2645}
2646
2647int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
2648 bool *state)
2649{
2650 struct irq_chip *chip;
2651 int err = -EINVAL;
2652
2653 do {
2654 chip = irq_data_get_irq_chip(data);
2655 if (WARN_ON_ONCE(!chip))
2656 return -ENODEV;
2657 if (chip->irq_get_irqchip_state)
2658 break;
2659#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2660 data = data->parent_data;
2661#else
2662 data = NULL;
2663#endif
2664 } while (data);
2665
2666 if (data)
2667 err = chip->irq_get_irqchip_state(data, which, state);
2668 return err;
2669}

/**
 *	irq_get_irqchip_state - returns the irqchip state of a interrupt.
 *	@irq: Interrupt line that is forwarded to a VM
 *	@which: One of IRQCHIP_STATE_* the caller wants to know about
 *	@state: a pointer to a boolean where the state is to be stored
 *
 *	This call snapshots the internal irqchip state of an
 *	interrupt, returning into @state the bit corresponding to
 *	stage @which
 *
 *	This function should be called with preemption disabled if the
 *	interrupt controller has per-cpu registers.
 */
2684int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2685 bool *state)
2686{
2687 struct irq_desc *desc;
2688 struct irq_data *data;
2689 unsigned long flags;
2690 int err = -EINVAL;
2691
2692 desc = irq_get_desc_buslock(irq, &flags, 0);
2693 if (!desc)
2694 return err;
2695
2696 data = irq_desc_get_irq_data(desc);
2697
2698 err = __irq_get_irqchip_state(data, which, state);
2699
2700 irq_put_desc_busunlock(desc, flags);
2701 return err;
2702}
2703EXPORT_SYMBOL_GPL(irq_get_irqchip_state);

/**
 *	irq_set_irqchip_state - set the state of a forwarded interrupt.
 *	@irq: Interrupt line that is forwarded to a VM
 *	@which: State to be restored (one of IRQCHIP_STATE_*)
 *	@val: Value corresponding to @which
 *
 *	This call sets the internal irqchip state of an interrupt,
 *	depending on the value of @which.
 *
 *	This function should be called with preemption disabled if the
 *	interrupt controller has per-cpu registers.
 */
2717int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2718 bool val)
2719{
2720 struct irq_desc *desc;
2721 struct irq_data *data;
2722 struct irq_chip *chip;
2723 unsigned long flags;
2724 int err = -EINVAL;
2725
2726 desc = irq_get_desc_buslock(irq, &flags, 0);
2727 if (!desc)
2728 return err;
2729
2730 data = irq_desc_get_irq_data(desc);
2731
2732 do {
2733 chip = irq_data_get_irq_chip(data);
2734 if (WARN_ON_ONCE(!chip))
2735 return -ENODEV;
2736 if (chip->irq_set_irqchip_state)
2737 break;
2738#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2739 data = data->parent_data;
2740#else
2741 data = NULL;
2742#endif
2743 } while (data);
2744
2745 if (data)
2746 err = chip->irq_set_irqchip_state(data, which, val);
2747
2748 irq_put_desc_busunlock(desc, flags);
2749 return err;
2750}
2751EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
2752