1
2
3
4
5
6
7
8
9#define pr_fmt(fmt) "genirq: " fmt
10
11#include <linux/irq.h>
12#include <linux/kthread.h>
13#include <linux/module.h>
14#include <linux/random.h>
15#include <linux/interrupt.h>
16#include <linux/irqdomain.h>
17#include <linux/slab.h>
18#include <linux/sched.h>
19#include <linux/sched/rt.h>
20#include <linux/sched/task.h>
21#include <linux/sched/isolation.h>
22#include <uapi/linux/sched/types.h>
23#include <linux/task_work.h>
24
25#include "internals.h"
26
27#if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
28DEFINE_STATIC_KEY_FALSE(force_irqthreads_key);
29
/*
 * "threadirqs" boot parameter: force interrupt handlers to run in
 * threads by enabling the force_irqthreads static key.
 */
static int __init setup_forced_irqthreads(char *arg)
{
	static_branch_enable(&force_irqthreads_key);
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
36#endif
37
/*
 * Spin until the hard interrupt part of @desc is no longer in progress.
 * If @sync_chip is true, additionally poll the irqchip's ACTIVE state
 * via __irq_get_irqchip_state() so a still-active interrupt at the chip
 * level is waited out as well (best effort: if the chip does not support
 * the query, "inprogress" is left false and the loop terminates).
 */
static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
{
	struct irq_data *irqd = irq_desc_get_irq_data(desc);
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Cheap unlocked poll first; only take the descriptor
		 * lock once the in-progress bit appears clear.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Re-check the state under the lock to close the race. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);

		/*
		 * Only consult the chip when the descriptor says idle;
		 * otherwise another spin round is needed anyway.
		 */
		if (!inprogress && sync_chip) {
			/*
			 * May fail silently on chips without state
			 * retrieval support; inprogress stays false then.
			 */
			__irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
						&inprogress);
		}
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Loop until both views agree the interrupt is idle. */
	} while (inprogress);
}
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98bool synchronize_hardirq(unsigned int irq)
99{
100 struct irq_desc *desc = irq_to_desc(irq);
101
102 if (desc) {
103 __synchronize_hardirq(desc, false);
104 return !atomic_read(&desc->threads_active);
105 }
106
107 return true;
108}
109EXPORT_SYMBOL(synchronize_hardirq);
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126void synchronize_irq(unsigned int irq)
127{
128 struct irq_desc *desc = irq_to_desc(irq);
129
130 if (desc) {
131 __synchronize_hardirq(desc, true);
132
133
134
135
136
137 wait_event(desc->wait_for_threads,
138 !atomic_read(&desc->threads_active));
139 }
140}
141EXPORT_SYMBOL(synchronize_irq);
142
143#ifdef CONFIG_SMP
144cpumask_var_t irq_default_affinity;
145
146static bool __irq_can_set_affinity(struct irq_desc *desc)
147{
148 if (!desc || !irqd_can_balance(&desc->irq_data) ||
149 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
150 return false;
151 return true;
152}
153
154
155
156
157
158
/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq:	Interrupt to check
 */
int irq_can_set_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __irq_can_set_affinity(desc);
}
163
164
165
166
167
168
169
170
171bool irq_can_set_affinity_usr(unsigned int irq)
172{
173 struct irq_desc *desc = irq_to_desc(irq);
174
175 return __irq_can_set_affinity(desc) &&
176 !irqd_affinity_is_managed(&desc->irq_data);
177}
178
179
180
181
182
183
184
185
186
187
/*
 * Flag every threaded handler of @desc to re-check its CPU affinity.
 * Only sets IRQTF_AFFINITY; the handler thread itself picks up the new
 * mask the next time it runs (irq_thread_check_affinity() clears the bit).
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action;

	for_each_action_of_desc(desc, action)
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
}
196
197#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
198static void irq_validate_effective_affinity(struct irq_data *data)
199{
200 const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
201 struct irq_chip *chip = irq_data_get_irq_chip(data);
202
203 if (!cpumask_empty(m))
204 return;
205 pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
206 chip->name, data->irq);
207}
208
/* Seed the effective affinity mask from @mask (effective-affinity build). */
static inline void irq_init_effective_affinity(struct irq_data *data,
					       const struct cpumask *mask)
{
	cpumask_copy(irq_data_get_effective_affinity_mask(data), mask);
}
214#else
/* No effective-affinity mask support: both helpers are no-ops. */
static inline void irq_validate_effective_affinity(struct irq_data *data) { }
static inline void irq_init_effective_affinity(struct irq_data *data,
					       const struct cpumask *mask) { }
218#endif
219
/*
 * Program @mask into the irqchip and, on success, propagate the new
 * affinity to the descriptor and to the threaded handlers.
 * Returns 0 on success or a negative error / the chip's error code.
 */
int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	/*
	 * With managed-irq isolation enabled, keep managed interrupts on
	 * housekeeping CPUs: program the intersection of @mask with the
	 * housekeeping mask, unless that intersection contains no online
	 * CPU, in which case fall back to the unrestricted @mask.
	 */
	if (irqd_affinity_is_managed(data) &&
	    housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) {
		const struct cpumask *hk_mask, *prog_mask;

		/* Static scratch cpumask, serialized by tmp_mask_lock. */
		static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
		static struct cpumask tmp_mask;

		hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);

		raw_spin_lock(&tmp_mask_lock);
		cpumask_and(&tmp_mask, mask, hk_mask);
		if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
			prog_mask = mask;
		else
			prog_mask = &tmp_mask;
		ret = chip->irq_set_affinity(data, prog_mask, force);
		raw_spin_unlock(&tmp_mask_lock);
	} else {
		ret = chip->irq_set_affinity(data, mask, force);
	}
	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		/* Chip accepted the mask: record it in the descriptor. */
		cpumask_copy(desc->irq_common_data.affinity, mask);
		fallthrough;
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_validate_effective_affinity(data);
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}
282
283#ifdef CONFIG_GENERIC_PENDING_IRQ
/*
 * Record @dest as a pending affinity change; the actual move is
 * performed later from a safe context (GENERIC_PENDING_IRQ build).
 */
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	irqd_set_move_pending(data);
	irq_copy_pending(desc, dest);
	return 0;
}
293#else
/* No deferred-move support: report the interrupt as busy. */
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	return -EBUSY;
}
299#endif
300
/*
 * Try to change the affinity immediately; if the chip reports -EBUSY and
 * the caller did not force the change, fall back to queueing it as a
 * pending move.
 */
static int irq_try_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	int ret = irq_do_set_affinity(data, dest, force);

	/*
	 * In case that the underlying vector management is busy and the
	 * change is not forced, defer the update instead of failing.
	 */
	if (ret == -EBUSY && !force)
		ret = irq_set_affinity_pending(data, dest);
	return ret;
}
315
/*
 * Handle the "interrupt not yet activated" shortcut: if the irq supports
 * setting the affinity on activation and is currently deactivated, just
 * record the mask in the descriptor; it will be programmed into the
 * hardware on activation.  Returns true when the shortcut was taken.
 */
static bool irq_set_affinity_deactivated(struct irq_data *data,
					 const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	/*
	 * Only applies to hierarchical irqdomains, and only when the irq
	 * is deactivated and opted in via IRQD_AFFINITY_ON_ACTIVATE.
	 */
	if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
	    irqd_is_activated(data) || !irqd_affinity_on_activate(data))
		return false;

	cpumask_copy(desc->irq_common_data.affinity, mask);
	irq_init_effective_affinity(data, mask);
	irqd_set(data, IRQD_AFFINITY_SET);
	return true;
}
339
/*
 * Set the affinity of @data to @mask with desc->lock held by the caller.
 * Either applies the change directly, or queues it as a pending move when
 * it cannot be done from this context; notifies any affinity notifier.
 */
int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	/* Deactivated irqs just record the mask for later activation. */
	if (irq_set_affinity_deactivated(data, mask, force))
		return 0;

	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
		ret = irq_try_set_affinity(data, mask, force);
	} else {
		/* Cannot move in this context: defer to a safe one. */
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		if (!schedule_work(&desc->affinity_notify->work)) {
			/* Work was already scheduled; drop our extra ref. */
			kref_put(&desc->affinity_notify->kref,
				 desc->affinity_notify->release);
		}
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
/*
 * Update the affinity (and managed state) of a not-yet-started interrupt
 * from an irq_affinity_desc.  Fails with -EBUSY once the interrupt has
 * been started or is already managed, and with -EOPNOTSUPP when
 * reservation mode makes deactivate/activate cycling unsafe.
 */
int irq_update_affinity_desc(unsigned int irq,
			     struct irq_affinity_desc *affinity)
{
	struct irq_desc *desc;
	unsigned long flags;
	bool activated;
	int ret = 0;

	/*
	 * Reservation-mode chips re-reserve vectors on deactivate; the
	 * deactivate/activate dance below would be unsafe there.
	 */
	if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
		return -EOPNOTSUPP;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return -EINVAL;

	/* Requires the interrupt to not have been started yet. */
	if (irqd_is_started(&desc->irq_data)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/* Already-managed interrupts cannot be re-managed. */
	if (irqd_affinity_is_managed(&desc->irq_data)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	/*
	 * Deactivate the interrupt (if activated) so the new affinity
	 * takes effect on re-activation.
	 */
	activated = irqd_is_activated(&desc->irq_data);
	if (activated)
		irq_domain_deactivate_irq(&desc->irq_data);

	if (affinity->is_managed) {
		irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
		irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN);
	}

	cpumask_copy(desc->irq_common_data.affinity, &affinity->mask);

	/* Restore the activation state with the new affinity in place. */
	if (activated)
		irq_domain_activate_irq(&desc->irq_data, false);

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
442
443static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask,
444 bool force)
445{
446 struct irq_desc *desc = irq_to_desc(irq);
447 unsigned long flags;
448 int ret;
449
450 if (!desc)
451 return -EINVAL;
452
453 raw_spin_lock_irqsave(&desc->lock, flags);
454 ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
455 raw_spin_unlock_irqrestore(&desc->lock, flags);
456 return ret;
457}
458
459
460
461
462
463
464
465
/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Fails if cpumask does not contain an online CPU
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, false);
}
EXPORT_SYMBOL_GPL(irq_set_affinity);
471
472
473
474
475
476
477
478
479
480
481
482
/**
 * irq_force_affinity - Force the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Same as irq_set_affinity, but the change is forced: it bypasses the
 * -EBUSY deferral done by irq_try_set_affinity().
 */
int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, true);
}
EXPORT_SYMBOL_GPL(irq_force_affinity);
488
/*
 * Store an affinity hint for userspace (exposed via /proc) and, if a
 * mask is given, also apply it as the actual affinity.  The hint pointer
 * is stored under desc->lock; the affinity change happens after unlock.
 */
int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);

	/* NULL clears the hint without touching the affinity. */
	if (m)
		__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
504
505static void irq_affinity_notify(struct work_struct *work)
506{
507 struct irq_affinity_notify *notify =
508 container_of(work, struct irq_affinity_notify, work);
509 struct irq_desc *desc = irq_to_desc(notify->irq);
510 cpumask_var_t cpumask;
511 unsigned long flags;
512
513 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
514 goto out;
515
516 raw_spin_lock_irqsave(&desc->lock, flags);
517 if (irq_move_pending(&desc->irq_data))
518 irq_get_pending(cpumask, desc);
519 else
520 cpumask_copy(cpumask, desc->irq_common_data.affinity);
521 raw_spin_unlock_irqrestore(&desc->lock, flags);
522
523 notify->notify(notify, cpumask);
524
525 free_cpumask_var(cpumask);
526out:
527 kref_put(¬ify->kref, notify->release);
528}
529
530
531
532
533
534
535
536
537
538
539
540
541int
542irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
543{
544 struct irq_desc *desc = irq_to_desc(irq);
545 struct irq_affinity_notify *old_notify;
546 unsigned long flags;
547
548
549 might_sleep();
550
551 if (!desc || desc->istate & IRQS_NMI)
552 return -EINVAL;
553
554
555 if (notify) {
556 notify->irq = irq;
557 kref_init(¬ify->kref);
558 INIT_WORK(¬ify->work, irq_affinity_notify);
559 }
560
561 raw_spin_lock_irqsave(&desc->lock, flags);
562 old_notify = desc->affinity_notify;
563 desc->affinity_notify = notify;
564 raw_spin_unlock_irqrestore(&desc->lock, flags);
565
566 if (old_notify) {
567 if (cancel_work_sync(&old_notify->work)) {
568
569 kref_put(&old_notify->kref, old_notify->release);
570 }
571 kref_put(&old_notify->kref, old_notify->release);
572 }
573
574 return 0;
575}
576EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
577
578#ifndef CONFIG_AUTO_IRQ_AFFINITY
579
580
581
/*
 * Pick and program an initial affinity for @desc: start from the default
 * affinity (or a previously set/managed mask that still intersects the
 * online CPUs), restrict to online CPUs, and prefer the irq's home NUMA
 * node when possible.
 */
int irq_setup_affinity(struct irq_desc *desc)
{
	struct cpumask *set = irq_default_affinity;
	int ret, node = irq_desc_get_node(desc);
	/* Static scratch mask, serialized by mask_lock. */
	static DEFINE_RAW_SPINLOCK(mask_lock);
	static struct cpumask mask;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!__irq_can_set_affinity(desc))
		return 0;

	raw_spin_lock(&mask_lock);
	/*
	 * Preserve the managed affinity setting and a userspace affinity
	 * setup, but make sure that one of the targets is online.
	 */
	if (irqd_affinity_is_managed(&desc->irq_data) ||
	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_common_data.affinity,
				       cpu_online_mask))
			set = desc->irq_common_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(&mask, cpu_online_mask, set);
	if (cpumask_empty(&mask))
		cpumask_copy(&mask, cpu_online_mask);

	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* Narrow to the node's CPUs unless that empties the mask. */
		if (cpumask_intersects(&mask, nodemask))
			cpumask_and(&mask, &mask, nodemask);
	}
	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
	raw_spin_unlock(&mask_lock);
	return ret;
}
622#else
623
/* AUTO_IRQ_AFFINITY: delegate to the architecture's affinity selector. */
int irq_setup_affinity(struct irq_desc *desc)
{
	return irq_select_affinity(irq_desc_get_irq(desc));
}
628#endif
629#endif
630
631
632
633
634
635
636
637
638
639
640
641
642
/**
 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 * @irq:	interrupt number to set affinity
 * @vcpu_info:	vCPU specific data (opaque, passed through to the chip)
 *
 * Walks the irq_data hierarchy (when CONFIG_IRQ_DOMAIN_HIERARCHY is
 * set) to find the first chip implementing irq_set_vcpu_affinity and
 * invokes it.  Returns -ENOSYS when no chip in the chain supports it.
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	do {
		chip = irq_data_get_irq_chip(data);
		if (chip && chip->irq_set_vcpu_affinity)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
673
674void __disable_irq(struct irq_desc *desc)
675{
676 if (!desc->depth++)
677 irq_disable(desc);
678}
679
680static int __disable_irq_nosync(unsigned int irq)
681{
682 unsigned long flags;
683 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
684
685 if (!desc)
686 return -EINVAL;
687 __disable_irq(desc);
688 irq_put_desc_busunlock(desc, flags);
689 return 0;
690}
691
692
693
694
695
696
697
698
699
700
701
702
/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line.  Disables and Enables are
 * nested.  Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);
708
709
710
711
712
713
714
715
716
717
718
719
720
/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disables the interrupt line (nested with enable_irq()) and then waits
 * for running handlers — including threaded ones — to complete.  May
 * sleep; do not call from interrupt context.
 */
void disable_irq(unsigned int irq)
{
	if (__disable_irq_nosync(irq))
		return;

	synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745bool disable_hardirq(unsigned int irq)
746{
747 if (!__disable_irq_nosync(irq))
748 return synchronize_hardirq(irq);
749
750 return false;
751}
752EXPORT_SYMBOL_GPL(disable_hardirq);
753
754
755
756
757
758
759
760
761
762
763
/*
 * Disable an NMI without waiting.  NMI handlers cannot be waited for,
 * so this is just the nosync disable path.
 */
void disable_nmi_nosync(unsigned int irq)
{
	disable_irq_nosync(irq);
}
768
/*
 * Nested enable counterpart of __disable_irq().  depth == 0 means an
 * unbalanced enable; depth == 1 actually (re)starts the interrupt;
 * deeper nesting just decrements the count.
 */
void __enable_irq(struct irq_desc *desc)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
		     irq_desc_get_irq(desc));
		break;
	case 1: {
		/* A suspended irq must not be re-enabled from here. */
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		/*
		 * Force-start the interrupt and resend anything that
		 * arrived while it was disabled.
		 */
		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
		break;
	}
	default:
		desc->depth--;
	}
}
796
797
798
799
800
801
802
803
804
805
806
807
/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq().  Warns and bails out
 * when called before a chip has been set up for the irq.
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);
824
825
826
827
828
829
830
831
832
833
/* Re-enable an NMI line; same nesting semantics as enable_irq(). */
void enable_nmi(unsigned int irq)
{
	enable_irq(irq);
}
838
839static int set_irq_wake_real(unsigned int irq, unsigned int on)
840{
841 struct irq_desc *desc = irq_to_desc(irq);
842 int ret = -ENXIO;
843
844 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
845 return 0;
846
847 if (desc->irq_data.chip->irq_set_wake)
848 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
849
850 return ret;
851}
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq:	interrupt to control
 * @on:	enable/disable power management wakeup
 *
 * Wake-enable and wake-disable calls are reference counted via
 * desc->wake_depth; only the first enable and the last disable reach
 * the chip.  NMIs cannot be wakeup sources.
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* Don't use NMIs as wake up interrupts please */
	if (desc->istate & IRQS_NMI) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				/* Chip refused: undo the count. */
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				/* Chip refused: restore the count. */
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
915
916
917
918
919
920
/*
 * Report whether @irq could currently be requested with @irqflags:
 * requestable, and either free or shared with compatible IRQF_SHARED
 * flags.  Informational only — no reservation is made.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}
938
/*
 * Set the trigger type (edge/level sense) of @desc via the chip's
 * irq_set_type callback, honoring IRQCHIP_SET_TYPE_MASKED by masking
 * around the call.  On success the trigger and level state in both
 * irq_data and the irq settings are updated.
 */
int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n",
			 irq_desc_get_irq(desc),
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		/* Only unmask afterwards if the irq is not disabled. */
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* Mask all flags except trigger mode */
	flags &= IRQ_TYPE_SENSE_MASK;
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		fallthrough;

	case IRQ_SET_MASK_OK_NOCOPY:
		/* The chip may have adjusted the type: re-read it. */
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}
993
994#ifdef CONFIG_HARDIRQS_SW_RESEND
995int irq_set_parent(int irq, int parent_irq)
996{
997 unsigned long flags;
998 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
999
1000 if (!desc)
1001 return -EINVAL;
1002
1003 desc->parent_irq = parent_irq;
1004
1005 irq_put_desc_unlock(desc, flags);
1006 return 0;
1007}
1008EXPORT_SYMBOL_GPL(irq_set_parent);
1009#endif
1010
1011
1012
1013
1014
1015
/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}
1020
1021
1022
1023
1024
/*
 * Primary handler for nested threaded interrupts. Should never be
 * called — nested irqs are handled in the parent's thread context.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}
1030
/*
 * Placeholder handler for the secondary action installed by forced
 * threading; should never run as a primary handler.
 */
static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
{
	WARN(1, "Secondary action handler called for irq %d\n", irq);
	return IRQ_NONE;
}
1036
/*
 * Sleep until IRQTF_RUNTHREAD is set or the thread is asked to stop.
 * Returns 0 when there is work to do and -1 when the thread should exit.
 * A RUNTHREAD bit set concurrently with kthread_should_stop() still
 * yields one final 0 so the last wakeup is not lost.
 */
static int irq_wait_for_interrupt(struct irqaction *action)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			/* May need to run one last time. */
			if (test_and_clear_bit(IRQTF_RUNTHREAD,
					       &action->thread_flags)) {
				__set_current_state(TASK_RUNNING);
				return 0;
			}
			__set_current_state(TASK_RUNNING);
			return -1;
		}

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
	}
}
1061
1062
1063
1064
1065
1066
/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler finished. unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT) ||
	    action->handler == irq_forced_secondary_handler)
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be we need to protect us against
	 * the following scenario:
	 *
	 * The thread is faster done than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and masks the line, leaves due
	 * to IRQS_INPROGRESS and the irq line is masked forever.
	 *
	 * So we wait (by dropping the locks and retrying) until the
	 * hard irq part is no longer in progress before touching the
	 * mask.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * If RUNTHREAD got set again between dropping the thread mask
	 * and here, a new interrupt has been queued for this thread:
	 * leave the line masked and let the next run finalize it.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	/* Unmask only when all oneshot threads of this irq are done. */
	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}
1116
1117#ifdef CONFIG_SMP
1118
1119
1120
/*
 * Check whether we need to change the affinity of the interrupt thread.
 * Triggered by IRQTF_AFFINITY (set by irq_set_thread_affinity()); the
 * thread applies the irq's effective affinity mask to itself.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time.
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (cpumask_available(desc->irq_common_data.affinity)) {
		const struct cpumask *m;

		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
		cpumask_copy(mask, m);
	} else {
		valid = false;
	}
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
1158#else
/* CONFIG_SMP=n: no thread affinity to track. */
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
1161#endif
1162
1163
1164
1165
1166
1167
1168
/*
 * Invoke a force-threaded handler.  Runs with bottom halves disabled
 * and, on non-PREEMPT_RT, hard interrupts disabled as well, to emulate
 * the environment the handler was written for as a primary handler.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	/* Unmask the (possibly oneshot-masked) line before re-enabling. */
	irq_finalize_oneshot(desc, action);
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_enable();
	local_bh_enable();
	return ret;
}
1187
1188
1189
1190
1191
1192
/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	return ret;
}
1205
/*
 * Drop one reference on threads_active and wake synchronize_irq()
 * waiters when the last threaded handler finished.
 */
static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}
1211
/*
 * task_work callback run when an irq thread is forced to exit while it
 * still owes work: balance threads_active and release a possibly held
 * oneshot mask so the irq line is not masked forever.
 */
static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}
1238
/*
 * Wake the secondary action's thread (forced-threading split) after the
 * primary threaded handler returned IRQ_WAKE_THREAD.
 */
static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
{
	struct irqaction *secondary = action->secondary;

	if (WARN_ON_ONCE(!secondary))
		return;

	raw_spin_lock_irq(&desc->lock);
	__irq_wake_thread(desc, secondary);
	raw_spin_unlock_irq(&desc->lock);
}
1250
1251
1252
1253
/*
 * Interrupt handler thread: waits for IRQTF_RUNTHREAD, runs the
 * appropriate handler function (forced or plain threaded), wakes a
 * secondary thread if requested, and balances threads_active.  A
 * task_work dtor is installed so an involuntary exit cleans up.
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
				  struct irqaction *action);

	/* Forced-threaded handlers need the irq-off/bh-off wrapper. */
	if (force_irqthreads() && test_bit(IRQTF_FORCED_THREAD,
					   &action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, TWA_NONE);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		/* Re-apply affinity if it changed while we slept. */
		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_WAKE_THREAD)
			irq_wake_secondary(desc, action);

		wake_threads_waitq(desc);
	}

	/*
	 * Clean exit (kthread_stop): remove the exit-time dtor; the
	 * oneshot/threads_active bookkeeping was already balanced in
	 * the loop above.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}
1294
1295
1296
1297
1298
1299
1300
/**
 * irq_wake_thread - wake the irq thread for the action identified by dev_id
 * @irq:	Interrupt line
 * @dev_id:	Device identity for which the thread should be woken
 */
void irq_wake_thread(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if (action->dev_id == dev_id) {
			if (action->thread)
				__irq_wake_thread(desc, action);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);
1321
/*
 * With force_irqthreads enabled, convert a primary-handler request into
 * a threaded one: the primary handler becomes the thread function and a
 * wake-thread stub takes its place.  If both a primary handler and a
 * thread function were supplied, a secondary irqaction is allocated for
 * the original thread function.
 */
static int irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads())
		return 0;
	/* These flavors are incompatible with or exempt from forcing. */
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return 0;

	/*
	 * No further action required for interrupts which are requested as
	 * threaded interrupts already
	 */
	if (new->handler == irq_default_primary_handler)
		return 0;

	new->flags |= IRQF_ONESHOT;

	/*
	 * Handle the case where we have a real primary handler and a
	 * thread handler. We force thread them as well by creating a
	 * secondary action.
	 */
	if (new->handler && new->thread_fn) {
		/* Allocate the secondary action */
		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
		if (!new->secondary)
			return -ENOMEM;
		new->secondary->handler = irq_forced_secondary_handler;
		new->secondary->thread_fn = new->thread_fn;
		new->secondary->dev_id = new->dev_id;
		new->secondary->irq = new->irq;
		new->secondary->name = new->name;
	}
	/* Deal with the primary handler */
	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
	new->thread_fn = new->handler;
	new->handler = irq_default_primary_handler;
	return 0;
}
1360
1361static int irq_request_resources(struct irq_desc *desc)
1362{
1363 struct irq_data *d = &desc->irq_data;
1364 struct irq_chip *c = d->chip;
1365
1366 return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1367}
1368
/* Release chip resources claimed by irq_request_resources() (optional). */
static void irq_release_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	if (c->irq_release_resources)
		c->irq_release_resources(d);
}
1377
/*
 * NMI delivery is only supported on single-level irqdomains with a chip
 * that neither uses bus locking nor lacks IRQCHIP_SUPPORTS_NMI.
 */
static bool irq_supports_nmi(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	/* Only irqs directly managed by the root irqchip can be set as NMI */
	if (d->parent_data)
		return false;
#endif
	/* Don't support NMIs for chips behind a slow bus */
	if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
		return false;

	return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
}
1393
1394static int irq_nmi_setup(struct irq_desc *desc)
1395{
1396 struct irq_data *d = irq_desc_get_irq_data(desc);
1397 struct irq_chip *c = d->chip;
1398
1399 return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
1400}
1401
/* Undo irq_nmi_setup() via the chip's teardown callback (optional). */
static void irq_nmi_teardown(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *c = d->chip;

	if (c->irq_nmi_teardown)
		c->irq_nmi_teardown(d);
}
1410
/*
 * Create the handler kthread for @new ("irq/<nr>-<name>", or with an
 * "-s-" infix for the secondary action), give it SCHED_FIFO priority
 * and arrange for its initial affinity to be applied on first run.
 */
static int
setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
{
	struct task_struct *t;

	if (!secondary) {
		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
	} else {
		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
				   new->name);
	}

	if (IS_ERR(t))
		return PTR_ERR(t);

	sched_set_fifo(t);

	/*
	 * We keep the reference to the task struct even if
	 * the thread dies to avoid that the interrupt code
	 * references an already freed task_struct.
	 */
	new->thread = get_task_struct(t);
	/*
	 * Tell the thread to set its affinity. This is
	 * important for shared interrupt handlers as we do
	 * not invoke setup_affinity() for the secondary
	 * handlers as everything is already set up. Even for
	 * interrupts marked with IRQF_NO_BALANCE this is
	 * correct as we want the thread to move to the cpu(s)
	 * on which the requesting code placed the interrupt.
	 */
	set_bit(IRQTF_AFFINITY, &new->thread_flags);
	return 0;
}
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462static int
1463__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1464{
1465 struct irqaction *old, **old_ptr;
1466 unsigned long flags, thread_mask = 0;
1467 int ret, nested, shared = 0;
1468
1469 if (!desc)
1470 return -EINVAL;
1471
1472 if (desc->irq_data.chip == &no_irq_chip)
1473 return -ENOSYS;
1474 if (!try_module_get(desc->owner))
1475 return -ENODEV;
1476
1477 new->irq = irq;
1478
1479
1480
1481
1482
1483 if (!(new->flags & IRQF_TRIGGER_MASK))
1484 new->flags |= irqd_get_trigger_type(&desc->irq_data);
1485
1486
1487
1488
1489
1490 nested = irq_settings_is_nested_thread(desc);
1491 if (nested) {
1492 if (!new->thread_fn) {
1493 ret = -EINVAL;
1494 goto out_mput;
1495 }
1496
1497
1498
1499
1500
1501 new->handler = irq_nested_primary_handler;
1502 } else {
1503 if (irq_settings_can_thread(desc)) {
1504 ret = irq_setup_forced_threading(new);
1505 if (ret)
1506 goto out_mput;
1507 }
1508 }
1509
1510
1511
1512
1513
1514
1515 if (new->thread_fn && !nested) {
1516 ret = setup_irq_thread(new, irq, false);
1517 if (ret)
1518 goto out_mput;
1519 if (new->secondary) {
1520 ret = setup_irq_thread(new->secondary, irq, true);
1521 if (ret)
1522 goto out_thread;
1523 }
1524 }
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1536 new->flags &= ~IRQF_ONESHOT;
1537
1538
1539
1540
1541
1542
1543
1544
1545 mutex_lock(&desc->request_mutex);
1546
1547
1548
1549
1550
1551
1552 chip_bus_lock(desc);
1553
1554
1555 if (!desc->action) {
1556 ret = irq_request_resources(desc);
1557 if (ret) {
1558 pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1559 new->name, irq, desc->irq_data.chip->name);
1560 goto out_bus_unlock;
1561 }
1562 }
1563
1564
1565
1566
1567
1568
1569
1570 raw_spin_lock_irqsave(&desc->lock, flags);
1571 old_ptr = &desc->action;
1572 old = *old_ptr;
1573 if (old) {
1574
1575
1576
1577
1578
1579
1580
1581
1582 unsigned int oldtype;
1583
1584 if (desc->istate & IRQS_NMI) {
1585 pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
1586 new->name, irq, desc->irq_data.chip->name);
1587 ret = -EINVAL;
1588 goto out_unlock;
1589 }
1590
1591
1592
1593
1594
1595 if (irqd_trigger_type_was_set(&desc->irq_data)) {
1596 oldtype = irqd_get_trigger_type(&desc->irq_data);
1597 } else {
1598 oldtype = new->flags & IRQF_TRIGGER_MASK;
1599 irqd_set_trigger_type(&desc->irq_data, oldtype);
1600 }
1601
1602 if (!((old->flags & new->flags) & IRQF_SHARED) ||
1603 (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
1604 ((old->flags ^ new->flags) & IRQF_ONESHOT))
1605 goto mismatch;
1606
1607
1608 if ((old->flags & IRQF_PERCPU) !=
1609 (new->flags & IRQF_PERCPU))
1610 goto mismatch;
1611
1612
1613 do {
1614
1615
1616
1617
1618
1619 thread_mask |= old->thread_mask;
1620 old_ptr = &old->next;
1621 old = *old_ptr;
1622 } while (old);
1623 shared = 1;
1624 }
1625
1626
1627
1628
1629
1630
1631 if (new->flags & IRQF_ONESHOT) {
1632
1633
1634
1635
1636 if (thread_mask == ~0UL) {
1637 ret = -EBUSY;
1638 goto out_unlock;
1639 }
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660 new->thread_mask = 1UL << ffz(thread_mask);
1661
1662 } else if (new->handler == irq_default_primary_handler &&
1663 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
1680 new->name, irq);
1681 ret = -EINVAL;
1682 goto out_unlock;
1683 }
1684
1685 if (!shared) {
1686 init_waitqueue_head(&desc->wait_for_threads);
1687
1688
1689 if (new->flags & IRQF_TRIGGER_MASK) {
1690 ret = __irq_set_trigger(desc,
1691 new->flags & IRQF_TRIGGER_MASK);
1692
1693 if (ret)
1694 goto out_unlock;
1695 }
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708 ret = irq_activate(desc);
1709 if (ret)
1710 goto out_unlock;
1711
1712 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1713 IRQS_ONESHOT | IRQS_WAITING);
1714 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1715
1716 if (new->flags & IRQF_PERCPU) {
1717 irqd_set(&desc->irq_data, IRQD_PER_CPU);
1718 irq_settings_set_per_cpu(desc);
1719 if (new->flags & IRQF_NO_DEBUG)
1720 irq_settings_set_no_debug(desc);
1721 }
1722
1723 if (noirqdebug)
1724 irq_settings_set_no_debug(desc);
1725
1726 if (new->flags & IRQF_ONESHOT)
1727 desc->istate |= IRQS_ONESHOT;
1728
1729
1730 if (new->flags & IRQF_NOBALANCING) {
1731 irq_settings_set_no_balancing(desc);
1732 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1733 }
1734
1735 if (!(new->flags & IRQF_NO_AUTOEN) &&
1736 irq_settings_can_autoenable(desc)) {
1737 irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
1738 } else {
1739
1740
1741
1742
1743
1744
1745 WARN_ON_ONCE(new->flags & IRQF_SHARED);
1746
1747 desc->depth = 1;
1748 }
1749
1750 } else if (new->flags & IRQF_TRIGGER_MASK) {
1751 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1752 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1753
1754 if (nmsk != omsk)
1755
1756 pr_warn("irq %d uses trigger mode %u; requested %u\n",
1757 irq, omsk, nmsk);
1758 }
1759
1760 *old_ptr = new;
1761
1762 irq_pm_install_action(desc, new);
1763
1764
1765 desc->irq_count = 0;
1766 desc->irqs_unhandled = 0;
1767
1768
1769
1770
1771
1772 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1773 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1774 __enable_irq(desc);
1775 }
1776
1777 raw_spin_unlock_irqrestore(&desc->lock, flags);
1778 chip_bus_sync_unlock(desc);
1779 mutex_unlock(&desc->request_mutex);
1780
1781 irq_setup_timings(desc, new);
1782
1783
1784
1785
1786
1787 if (new->thread)
1788 wake_up_process(new->thread);
1789 if (new->secondary)
1790 wake_up_process(new->secondary->thread);
1791
1792 register_irq_proc(irq, desc);
1793 new->dir = NULL;
1794 register_handler_proc(irq, new);
1795 return 0;
1796
1797mismatch:
1798 if (!(new->flags & IRQF_PROBE_SHARED)) {
1799 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1800 irq, new->flags, new->name, old->flags, old->name);
1801#ifdef CONFIG_DEBUG_SHIRQ
1802 dump_stack();
1803#endif
1804 }
1805 ret = -EBUSY;
1806
1807out_unlock:
1808 raw_spin_unlock_irqrestore(&desc->lock, flags);
1809
1810 if (!desc->action)
1811 irq_release_resources(desc);
1812out_bus_unlock:
1813 chip_bus_sync_unlock(desc);
1814 mutex_unlock(&desc->request_mutex);
1815
1816out_thread:
1817 if (new->thread) {
1818 struct task_struct *t = new->thread;
1819
1820 new->thread = NULL;
1821 kthread_stop(t);
1822 put_task_struct(t);
1823 }
1824 if (new->secondary && new->secondary->thread) {
1825 struct task_struct *t = new->secondary->thread;
1826
1827 new->secondary->thread = NULL;
1828 kthread_stop(t);
1829 put_task_struct(t);
1830 }
1831out_mput:
1832 module_put(desc->owner);
1833 return ret;
1834}
1835
1836
1837
1838
1839
/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 *
 * Returns the removed action (caller owns/frees it) or NULL if no
 * action matching @dev_id was installed. Must not be called from
 * interrupt context (kthread_stop() and mutex_lock() below sleep).
 */
static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
{
	unsigned irq = desc->irq_data.irq;
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	mutex_lock(&desc->request_mutex);
	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);
			chip_bus_sync_unlock(desc);
			mutex_unlock(&desc->request_mutex);
			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	irq_pm_remove_action(desc, action);

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		irq_settings_clr_disable_unlazy(desc);
		/* Only shutdown here. Deactivation happens further down
		 * after __synchronize_hardirq() has run. */
		irq_shutdown(desc);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	/*
	 * Drop the bus lock here so changes done in the chip callbacks
	 * above reach a chip behind a slow bus before waiting below.
	 * Also avoids a deadlock: the threaded handler can take the bus
	 * lock itself, and kthread_stop() below would then wait forever
	 * for a thread blocked on the bus lock. request_mutex is still
	 * held, so no new action can be installed concurrently.
	 */
	chip_bus_sync_unlock(desc);

	unregister_handler_proc(irq, action);

	/*
	 * Make sure it's not being used on another CPU and if the chip
	 * supports it also make sure that there is no (yet to be
	 * serviced) interrupt in flight at the hardware level.
	 */
	__synchronize_hardirq(desc, true);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an
	 * IRQ event to happen even now it's being freed, so let's make
	 * sure that's the case by firing the handler one extra time with
	 * interrupts disabled.
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	/*
	 * Stop the handler thread(s) only after the action has been
	 * removed and the hard irq is quiesced; request_mutex is still
	 * held so the thread_mask bit cannot be handed out again while
	 * the thread winds down.
	 */
	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
		if (action->secondary && action->secondary->thread) {
			kthread_stop(action->secondary->thread);
			put_task_struct(action->secondary->thread);
		}
	}

	/* Last action releases resources */
	if (!desc->action) {
		/*
		 * Reacquire the bus lock: deactivation and resource
		 * release may need to talk to the chip over a slow bus.
		 */
		chip_bus_lock(desc);
		/*
		 * No interrupt is in flight anymore; deactivate the
		 * domain mapping under desc->lock.
		 */
		raw_spin_lock_irqsave(&desc->lock, flags);
		irq_domain_deactivate_irq(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		irq_release_resources(desc);
		chip_bus_sync_unlock(desc);
		irq_remove_timings(desc);
	}

	mutex_unlock(&desc->request_mutex);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	kfree(action->secondary);
	return action;
}
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992const void *free_irq(unsigned int irq, void *dev_id)
1993{
1994 struct irq_desc *desc = irq_to_desc(irq);
1995 struct irqaction *action;
1996 const char *devname;
1997
1998 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1999 return NULL;
2000
2001#ifdef CONFIG_SMP
2002 if (WARN_ON(desc->affinity_notify))
2003 desc->affinity_notify = NULL;
2004#endif
2005
2006 action = __free_irq(desc, dev_id);
2007
2008 if (!action)
2009 return NULL;
2010
2011 devname = action->name;
2012 kfree(action);
2013 return devname;
2014}
2015EXPORT_SYMBOL(free_irq);
2016
2017
/*
 * Tear down the single action of an NMI line, shut the line down and
 * drop the PM/module references taken at request time. Called with
 * desc->lock held (see free_nmi() and the request_nmi() error path).
 * Returns the name of the freed action, i.e. the devname cookie that
 * free_nmi() hands back to its caller.
 */
static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
{
	const char *devname = NULL;

	desc->istate &= ~IRQS_NMI;

	/* NMI lines are never shared, so there is at most one action */
	if (!WARN_ON(desc->action == NULL)) {
		irq_pm_remove_action(desc, desc->action);
		devname = desc->action->name;
		unregister_handler_proc(irq, desc->action);

		kfree(desc->action);
		desc->action = NULL;
	}

	irq_settings_clr_disable_unlazy(desc);
	irq_shutdown_and_deactivate(desc);

	irq_release_resources(desc);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);

	return devname;
}
2043
/**
 * free_nmi - free an NMI requested with request_nmi()
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free (currently unused here; NMI lines
 *	    carry a single action)
 *
 * Returns the devname cookie passed at request time, or NULL when the
 * line is not an NMI or is per-cpu-devid (both cases warn).
 */
const void *free_nmi(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	const void *devname;

	if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
		return NULL;

	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return NULL;

	/* NMI still enabled - disable it before tearing down */
	if (WARN_ON(desc->depth == 0))
		disable_nmi_nosync(irq);

	raw_spin_lock_irqsave(&desc->lock, flags);

	irq_nmi_teardown(desc);
	devname = __cleanup_nmi(irq, desc);

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return devname;
}
2069
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Primary handler for threaded interrupts.
 *	     If NULL, the default primary handler is installed.
 * @thread_fn: Function called from the irq handler thread.
 *	       If NULL, no irq thread is created.
 * @irqflags: Interrupt type flags (IRQF_*)
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function; identifies
 *	    the action on free_irq() and must be non-NULL for shared
 *	    interrupts
 *
 * Returns 0 on success or a negative errno.
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	/*
	 * Sanity-check the flag combinations:
	 *
	 * Shared interrupts must pass a real dev_id, otherwise the
	 * freeing logic cannot tell the actions apart.
	 *
	 * IRQF_SHARED together with IRQF_NO_AUTOEN is rejected: a
	 * sharing request could arrive while the line is still
	 * disabled and then wait for interrupts forever.
	 *
	 * IRQF_COND_SUSPEND only makes sense for shared interrupts,
	 * and it cannot be combined with IRQF_NO_SUSPEND.
	 */
	if (((irqflags & IRQF_SHARED) && !dev_id) ||
	    ((irqflags & IRQF_SHARED) && (irqflags & IRQF_NO_AUTOEN)) ||
	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	/*
	 * A pure thread_fn request gets the default primary handler
	 * installed; handler and thread_fn both NULL is invalid.
	 */
	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0) {
		kfree(action);
		return retval;
	}

	retval = __setup_irq(irq, desc, action);

	if (retval) {
		irq_chip_pm_put(&desc->irq_data);
		kfree(action->secondary);
		kfree(action);
	}

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared
		 * for it to happen immediately, so fire the handler once
		 * right away. Disable the irq so a 'real' IRQ doesn't
		 * run in parallel with our fake one.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
2201EXPORT_SYMBOL(request_threaded_irq);
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220int request_any_context_irq(unsigned int irq, irq_handler_t handler,
2221 unsigned long flags, const char *name, void *dev_id)
2222{
2223 struct irq_desc *desc;
2224 int ret;
2225
2226 if (irq == IRQ_NOTCONNECTED)
2227 return -ENOTCONN;
2228
2229 desc = irq_to_desc(irq);
2230 if (!desc)
2231 return -EINVAL;
2232
2233 if (irq_settings_is_nested_thread(desc)) {
2234 ret = request_threaded_irq(irq, NULL, handler,
2235 flags, name, dev_id);
2236 return !ret ? IRQC_IS_NESTED : ret;
2237 }
2238
2239 ret = request_irq(irq, handler, flags, name, dev_id);
2240 return !ret ? IRQC_IS_HARDIRQ : ret;
2241}
2242EXPORT_SYMBOL_GPL(request_any_context_irq);
2243
2244
2245
2246
2247
2248
2249
2250
2251
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269
/**
 * request_nmi - allocate an interrupt line for NMI delivery
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs
 * @irqflags: Interrupt type flags; must contain IRQF_PERCPU and must
 *	      not contain IRQF_SHARED, IRQF_COND_SUSPEND or IRQF_IRQPOLL
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * The line must support NMI delivery, must not be auto-enabled (unless
 * IRQF_NO_AUTOEN is passed), and cannot be shared or threaded;
 * IRQF_NO_THREAD and IRQF_NOBALANCING are forced on below.
 *
 * Returns 0 on success or a negative errno.
 */
int request_nmi(unsigned int irq, irq_handler_t handler,
		unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	unsigned long flags;
	int retval;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	/* NMI cannot be shared, used for polling or conditionally suspended */
	if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
		return -EINVAL;

	if (!(irqflags & IRQF_PERCPU))
		return -EINVAL;

	if (!handler)
		return -EINVAL;

	desc = irq_to_desc(irq);

	/* Reject lines that would auto-enable, are not requestable,
	 * are per-cpu-devid, or do not support NMI mode at all. */
	if (!desc || (irq_settings_can_autoenable(desc) &&
	    !(irqflags & IRQF_NO_AUTOEN)) ||
	    !irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
	    !irq_supports_nmi(desc))
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
	action->name = name;
	action->dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		goto err_out;

	retval = __setup_irq(irq, desc, action);
	if (retval)
		goto err_irq_setup;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/* Setup NMI state */
	desc->istate |= IRQS_NMI;
	retval = irq_nmi_setup(desc);
	if (retval) {
		/* Chip refused NMI mode: undo the whole setup under the lock */
		__cleanup_nmi(irq, desc);
		raw_spin_unlock_irqrestore(&desc->lock, flags);
		return -EINVAL;
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return 0;

err_irq_setup:
	irq_chip_pm_put(&desc->irq_data);
err_out:
	kfree(action);

	return retval;
}
2339
/**
 * enable_percpu_irq - Enable a per-CPU interrupt on the current CPU
 * @irq: Linux irq number of the per-cpu interrupt
 * @type: IRQ_TYPE_* trigger type to (re)configure, or IRQ_TYPE_NONE to
 *	  keep the currently configured trigger
 */
void enable_percpu_irq(unsigned int irq, unsigned int type)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	/*
	 * If the trigger type is not specified by the caller, then
	 * use the default for this interrupt.
	 */
	type &= IRQ_TYPE_SENSE_MASK;
	if (type == IRQ_TYPE_NONE)
		type = irqd_get_trigger_type(&desc->irq_data);

	if (type != IRQ_TYPE_NONE) {
		int ret;

		ret = __irq_set_trigger(desc, type);

		if (ret) {
			WARN(1, "failed to set type for IRQ%d\n", irq);
			goto out;
		}
	}

	irq_percpu_enable(desc, cpu);
out:
	irq_put_desc_unlock(desc, flags);
}
2372EXPORT_SYMBOL_GPL(enable_percpu_irq);
2373
/*
 * enable_percpu_nmi - Enable a per-CPU NMI on the current CPU.
 * Thin wrapper: NMI enable uses the same plumbing as enable_percpu_irq().
 */
void enable_percpu_nmi(unsigned int irq, unsigned int type)
{
	enable_percpu_irq(irq, type);
}
2378
2379
2380
2381
2382
2383
2384
2385
2386bool irq_percpu_is_enabled(unsigned int irq)
2387{
2388 unsigned int cpu = smp_processor_id();
2389 struct irq_desc *desc;
2390 unsigned long flags;
2391 bool is_enabled;
2392
2393 desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2394 if (!desc)
2395 return false;
2396
2397 is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
2398 irq_put_desc_unlock(desc, flags);
2399
2400 return is_enabled;
2401}
2402EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
2403
/**
 * disable_percpu_irq - Disable a per-CPU interrupt on the current CPU
 * @irq: Linux irq number of the per-cpu interrupt
 */
void disable_percpu_irq(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	irq_percpu_disable(desc, cpu);
	irq_put_desc_unlock(desc, flags);
}
2416EXPORT_SYMBOL_GPL(disable_percpu_irq);
2417
/*
 * disable_percpu_nmi - Disable a per-CPU NMI on the current CPU.
 * Thin wrapper: NMI disable uses the same plumbing as disable_percpu_irq().
 */
void disable_percpu_nmi(unsigned int irq)
{
	disable_percpu_irq(irq);
}
2422
2423
2424
2425
/*
 * Internal function to unregister a percpu irqaction.
 *
 * Returns the removed action (caller frees it) or NULL on any failure
 * (unknown irq, wrong dev_id, or the interrupt still enabled on some
 * CPU - all warned about).
 */
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/* Per-cpu-devid lines carry a single action; match it by dev_id */
	action = desc->action;
	if (!action || action->percpu_dev_id != dev_id) {
		WARN(1, "Trying to free already-free IRQ %d\n", irq);
		goto bad;
	}

	/* Refuse to free while the interrupt is still enabled anywhere */
	if (!cpumask_empty(desc->percpu_enabled)) {
		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
		     irq, cpumask_first(desc->percpu_enabled));
		goto bad;
	}

	/* Found it - now remove it from the list of entries: */
	desc->action = NULL;

	desc->istate &= ~IRQS_NMI;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	return action;

bad:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return NULL;
}
2468
2469
2470
2471
2472
2473
2474
2475
2476void remove_percpu_irq(unsigned int irq, struct irqaction *act)
2477{
2478 struct irq_desc *desc = irq_to_desc(irq);
2479
2480 if (desc && irq_settings_is_per_cpu_devid(desc))
2481 __free_percpu_irq(irq, act->percpu_dev_id);
2482}
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
/**
 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * The interrupt must have been disabled on every CPU beforehand;
 * __free_percpu_irq() refuses (with a warning) to remove an action
 * that is still enabled somewhere.
 */
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	chip_bus_lock(desc);
	kfree(__free_percpu_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
2507EXPORT_SYMBOL_GPL(free_percpu_irq);
2508
/*
 * free_percpu_nmi - free a per-cpu NMI allocated with request_percpu_nmi().
 * Warns and bails out if the line is not actually in NMI mode. Unlike
 * free_percpu_irq(), no bus lock is taken around the removal.
 */
void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	if (WARN_ON(!(desc->istate & IRQS_NMI)))
		return;

	kfree(__free_percpu_irq(irq, dev_id));
}
2521
2522
2523
2524
2525
2526
2527
2528
2529int setup_percpu_irq(unsigned int irq, struct irqaction *act)
2530{
2531 struct irq_desc *desc = irq_to_desc(irq);
2532 int retval;
2533
2534 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2535 return -EINVAL;
2536
2537 retval = irq_chip_pm_get(&desc->irq_data);
2538 if (retval < 0)
2539 return retval;
2540
2541 retval = __setup_irq(irq, desc, act);
2542
2543 if (retval)
2544 irq_chip_pm_put(&desc->irq_data);
2545
2546 return retval;
2547}
2548
2549
2550
2551
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565
/**
 * __request_percpu_irq - allocate a percpu interrupt line
 * @irq: Interrupt line to allocate
 * @flags: Interrupt type flags (only 0 or IRQF_TIMER is accepted)
 * @handler: Function to be called when the IRQ occurs
 * @devname: An ascii name for the claiming device
 * @dev_id: A percpu cookie passed back to the handler function; must be
 *	    non-NULL, it identifies the action on free
 *
 * IRQF_PERCPU and IRQF_NO_SUSPEND are forced on. The descriptor must
 * be requestable and marked per-cpu-devid.
 *
 * Returns 0 on success or a negative errno.
 */
int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
			 unsigned long flags, const char *devname,
			 void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (!dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	/* Only the timer flag may be passed in for percpu interrupts */
	if (flags && flags != IRQF_TIMER)
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
	action->name = devname;
	action->percpu_dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0) {
		kfree(action);
		return retval;
	}

	retval = __setup_irq(irq, desc, action);

	if (retval) {
		irq_chip_pm_put(&desc->irq_data);
		kfree(action);
	}

	return retval;
}
2609EXPORT_SYMBOL_GPL(__request_percpu_irq);
2610
2611
2612
2613
2614
2615
2616
2617
2618
2619
2620
2621
2622
2623
2624
2625
2626
2627
2628
2629
2630
2631
/**
 * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs
 * @name: An ascii name for the claiming device
 * @dev_id: A percpu cookie passed back to the handler function
 *
 * The line must be per-cpu-devid, requestable, must not auto-enable and
 * must support NMI delivery. IRQF_PERCPU, IRQF_NO_SUSPEND,
 * IRQF_NO_THREAD and IRQF_NOBALANCING are forced on the action.
 *
 * Returns 0 on success or a negative errno.
 */
int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
		       const char *name, void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	unsigned long flags;
	int retval;

	if (!handler)
		return -EINVAL;

	desc = irq_to_desc(irq);

	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc) ||
	    irq_settings_can_autoenable(desc) ||
	    !irq_supports_nmi(desc))
		return -EINVAL;

	/* The line cannot already be NMI */
	if (desc->istate & IRQS_NMI)
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
		| IRQF_NOBALANCING;
	action->name = name;
	action->percpu_dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		goto err_out;

	retval = __setup_irq(irq, desc, action);
	if (retval)
		goto err_irq_setup;

	/* Mark the line as NMI; per-CPU chip setup happens later via
	 * prepare_percpu_nmi() on each CPU. */
	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->istate |= IRQS_NMI;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return 0;

err_irq_setup:
	irq_chip_pm_put(&desc->irq_data);
err_out:
	kfree(action);

	return retval;
}
2686
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697
2698
2699
/**
 * prepare_percpu_nmi - performs CPU local setup for NMI delivery
 * @irq: Interrupt line to prepare for NMI delivery
 *
 * This call prepares an interrupt line to deliver NMI on the current
 * CPU. As a CPU local operation, it must be called from
 * non-preemptible context (warned about otherwise).
 *
 * Returns 0 on success, a negative errno on failure.
 */
int prepare_percpu_nmi(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc;
	int ret = 0;

	WARN_ON(preemptible());

	desc = irq_get_desc_lock(irq, &flags,
				 IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return -EINVAL;

	if (WARN(!(desc->istate & IRQS_NMI),
		 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
		 irq)) {
		ret = -EINVAL;
		goto out;
	}

	ret = irq_nmi_setup(desc);
	if (ret) {
		pr_err("Failed to setup NMI delivery: irq %u\n", irq);
		goto out;
	}

out:
	irq_put_desc_unlock(desc, flags);
	return ret;
}
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
/**
 * teardown_percpu_nmi - undoes NMI setup of IRQ line
 * @irq: Interrupt line from which CPU local NMI configuration should
 *	 be removed
 *
 * This call undoes the setup done by prepare_percpu_nmi(). As a CPU
 * local operation, it must be called from non-preemptible context
 * (warned about otherwise). Warns and skips teardown if the line is
 * not in NMI mode.
 */
void teardown_percpu_nmi(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc;

	WARN_ON(preemptible());

	desc = irq_get_desc_lock(irq, &flags,
				 IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return;

	if (WARN_ON(!(desc->istate & IRQS_NMI)))
		goto out;

	irq_nmi_teardown(desc);
out:
	irq_put_desc_unlock(desc, flags);
}
2762
/*
 * Walk the irq_data hierarchy (only when CONFIG_IRQ_DOMAIN_HIERARCHY
 * is enabled - otherwise only the top level is inspected) to find the
 * first chip that implements irq_get_irqchip_state() and query it.
 *
 * Returns 0 on success, -ENODEV if some level has no chip (warned
 * once), or -EINVAL if no chip in the hierarchy implements the
 * callback.
 */
int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
			    bool *state)
{
	struct irq_chip *chip;
	int err = -EINVAL;

	do {
		chip = irq_data_get_irq_chip(data);
		if (WARN_ON_ONCE(!chip))
			return -ENODEV;
		if (chip->irq_get_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	/* data is non-NULL only if a chip with the callback was found */
	if (data)
		err = chip->irq_get_irqchip_state(data, which, state);
	return err;
}
2786
2787
2788
2789
2790
2791
2792
2793
2794
2795
2796
2797
2798
2799
2800int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2801 bool *state)
2802{
2803 struct irq_desc *desc;
2804 struct irq_data *data;
2805 unsigned long flags;
2806 int err = -EINVAL;
2807
2808 desc = irq_get_desc_buslock(irq, &flags, 0);
2809 if (!desc)
2810 return err;
2811
2812 data = irq_desc_get_irq_data(desc);
2813
2814 err = __irq_get_irqchip_state(data, which, state);
2815
2816 irq_put_desc_busunlock(desc, flags);
2817 return err;
2818}
2819EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
2820
2821
2822
2823
2824
2825
2826
2827
2828
2829
2830
2831
2832
/**
 * irq_set_irqchip_state - set the state of a forwarded interrupt.
 * @irq: Interrupt line that is forwarded to a VM
 * @which: State to be restored (one of IRQCHIP_STATE_*)
 * @val: Value corresponding to @which
 *
 * Walks the irq_data hierarchy (when CONFIG_IRQ_DOMAIN_HIERARCHY is
 * enabled) to the first chip implementing irq_set_irqchip_state() and
 * programs the requested state.
 *
 * Returns 0 on success, -ENODEV if some level has no chip (warned
 * once), or -EINVAL for an unknown irq or when no chip implements the
 * callback.
 */
int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool val)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	do {
		chip = irq_data_get_irq_chip(data);
		if (WARN_ON_ONCE(!chip)) {
			err = -ENODEV;
			goto out_unlock;
		}
		if (chip->irq_set_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	/* data is non-NULL only if a chip with the callback was found */
	if (data)
		err = chip->irq_set_irqchip_state(data, which, val);

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return err;
}
2870EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
2871
2872
2873
2874
2875
2876
2877
2878bool irq_has_action(unsigned int irq)
2879{
2880 bool res;
2881
2882 rcu_read_lock();
2883 res = irq_desc_has_action(irq_to_desc(irq));
2884 rcu_read_unlock();
2885 return res;
2886}
2887EXPORT_SYMBOL_GPL(irq_has_action);
2888
2889
2890
2891
2892
2893
2894
2895
2896bool irq_check_status_bit(unsigned int irq, unsigned int bitmask)
2897{
2898 struct irq_desc *desc;
2899 bool res = false;
2900
2901 rcu_read_lock();
2902 desc = irq_to_desc(irq);
2903 if (desc)
2904 res = !!(desc->status_use_accessors & bitmask);
2905 rcu_read_unlock();
2906 return res;
2907}
2908EXPORT_SYMBOL_GPL(irq_check_status_bit);
2909