// SPDX-License-Identifier: GPL-2.0
/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/sched/isolation.h>
#include <uapi/linux/sched/types.h>
#include <linux/task_work.h>

#include "internals.h"

#if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
DEFINE_STATIC_KEY_FALSE(force_irqthreads_key);

static int __init setup_forced_irqthreads(char *arg)
{
	static_branch_enable(&force_irqthreads_key);
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif
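
/*
 * Note: booting with "threadirqs" on the kernel command line enables the
 * static key above and thereby forces most interrupt handlers into kthreads.
 * The force_irqthreads() helper consulted below is expected to live in
 * <linux/interrupt.h> and to be built on this key roughly like the sketch
 * here (an assumption, not a verbatim copy of that header):
 *
 *	DECLARE_STATIC_KEY_FALSE(force_irqthreads_key);
 *	#define force_irqthreads()	(static_branch_unlikely(&force_irqthreads_key))
 */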
37
static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
{
	struct irq_data *irqd = irq_desc_get_irq_data(desc);
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section.  This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);

		/*
		 * If requested and supported, check at the chip whether it
		 * is in flight at the hardware level, i.e. already pending
		 * in a CPU and waiting for service and acknowledge.
		 */
		if (!inprogress && sync_chip) {
			/*
			 * Ignore the return code. inprogress is only updated
			 * when the chip supports it.
			 */
			__irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
						&inprogress);
		}
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}
75
/**
 *	synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending hard IRQ handlers for this
 *	interrupt to complete before returning. If you use this
 *	function while holding a resource the IRQ handler may need you
 *	will deadlock. It does not take associated threaded handlers
 *	into account.
 *
 *	Do not use this for shutdown scenarios where you must be sure
 *	that all parts (hardirq and threaded handler) have completed.
 *
 *	Returns: false if a threaded handler is active.
 *
 *	This function may be called - with care - from IRQ context.
 *
 *	It does not check whether there is an interrupt in flight at the
 *	hardware level, but not serviced yet, as this might deadlock when
 *	called with interrupts disabled and the target CPU of the
 *	interrupt is the current CPU.
 */
bool synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc, false);
		return !atomic_read(&desc->threads_active);
	}

	return true;
}
EXPORT_SYMBOL(synchronize_hardirq);
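
/*
 * Usage sketch (illustrative; the mydev_* helpers and fields are made-up
 * names): a driver that only needs the hard IRQ handler quiesced, e.g.
 * before poking state shared with it, can do:
 *
 *	disable_irq_nosync(dev->irq);
 *	if (!synchronize_hardirq(dev->irq))
 *		pr_debug("threaded handler still running\n");
 *	mydev_update_shared_state(dev);
 *	enable_irq(dev->irq);
 *
 * For a full shutdown where the threaded handler must be finished as well,
 * use synchronize_irq() or free_irq() instead, as the comment above says.
 */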
110
/**
 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	Can only be called from preemptible code as it might sleep when
 *	an interrupt thread is associated to @irq.
 *
 *	It optionally makes sure (when the irq chip supports that method)
 *	that the interrupt is not pending in any CPU and waiting for
 *	service.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc, true);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);
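
/*
 * Usage sketch (illustrative; the mydev fields and helpers are hypothetical):
 * synchronize_irq() is the right barrier when tearing down data that the
 * handler - hard or threaded - might still reference:
 *
 *	mydev_mask_interrupts(dev);		// stop new interrupts at the device
 *	synchronize_irq(dev->irq);		// wait for handlers in flight
 *	dma_free_coherent(dev->dma_dev, dev->ring_size, dev->ring, dev->ring_dma);
 *
 * It must be called from preemptible context, since it can sleep in
 * wait_event() when a threaded handler is attached to the line.
 */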
142
#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

static bool __irq_can_set_affinity(struct irq_desc *desc)
{
	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return false;
	return true;
}

/**
 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 *	@irq:		Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	return __irq_can_set_affinity(irq_to_desc(irq));
}

/**
 * irq_can_set_affinity_usr - Check if affinity of a irq can be set from user space
 * @irq:	Interrupt to check
 *
 * Like irq_can_set_affinity() above, but additionally checks for the
 * AFFINITY_MANAGED flag.
 */
bool irq_can_set_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __irq_can_set_affinity(desc) &&
		!irqd_affinity_is_managed(&desc->irq_data);
}

/**
 *	irq_set_thread_affinity - Notify irq threads to adjust affinity
 *	@desc:		irq descriptor which has affinity changed
 *
 *	We just set IRQTF_AFFINITY and delegate the affinity setting
 *	to the interrupt thread itself. We can not call
 *	set_cpus_allowed_ptr() here as we hold desc->lock and this
 *	code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action;

	for_each_action_of_desc(desc, action)
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
}
196
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static void irq_validate_effective_affinity(struct irq_data *data)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

	if (!cpumask_empty(m))
		return;
	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
		     chip->name, data->irq);
}

static inline void irq_init_effective_affinity(struct irq_data *data,
					       const struct cpumask *mask)
{
	cpumask_copy(irq_data_get_effective_affinity_mask(data), mask);
}
#else
static inline void irq_validate_effective_affinity(struct irq_data *data) { }
static inline void irq_init_effective_affinity(struct irq_data *data,
					       const struct cpumask *mask) { }
#endif
219
int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	/*
	 * If this is a managed interrupt and housekeeping is enabled on
	 * it, check whether the requested affinity mask intersects with
	 * a housekeeping CPU. If so, drop the isolated CPUs from the
	 * mask and route the interrupt to the housekeeping CPU(s) only.
	 * If the masks do not intersect, or if none of the housekeeping
	 * CPUs in the mask is online, keep the requested mask; the CPU
	 * hotplug code will migrate the interrupt once a housekeeping
	 * CPU from the mask comes online.
	 */
	if (irqd_affinity_is_managed(data) &&
	    housekeeping_enabled(HK_TYPE_MANAGED_IRQ)) {
		const struct cpumask *hk_mask, *prog_mask;

		static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
		static struct cpumask tmp_mask;

		hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);

		raw_spin_lock(&tmp_mask_lock);
		cpumask_and(&tmp_mask, mask, hk_mask);
		if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
			prog_mask = mask;
		else
			prog_mask = &tmp_mask;
		ret = chip->irq_set_affinity(data, prog_mask, force);
		raw_spin_unlock(&tmp_mask_lock);
	} else {
		ret = chip->irq_set_affinity(data, mask, force);
	}
	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(desc->irq_common_data.affinity, mask);
		fallthrough;
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_validate_effective_affinity(data);
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}
282
283#ifdef CONFIG_GENERIC_PENDING_IRQ
284static inline int irq_set_affinity_pending(struct irq_data *data,
285 const struct cpumask *dest)
286{
287 struct irq_desc *desc = irq_data_to_desc(data);
288
289 irqd_set_move_pending(data);
290 irq_copy_pending(desc, dest);
291 return 0;
292}
293#else
294static inline int irq_set_affinity_pending(struct irq_data *data,
295 const struct cpumask *dest)
296{
297 return -EBUSY;
298}
299#endif
300
301static int irq_try_set_affinity(struct irq_data *data,
302 const struct cpumask *dest, bool force)
303{
304 int ret = irq_do_set_affinity(data, dest, force);
305
306
307
308
309
310
311 if (ret == -EBUSY && !force)
312 ret = irq_set_affinity_pending(data, dest);
313 return ret;
314}
315
316static bool irq_set_affinity_deactivated(struct irq_data *data,
317 const struct cpumask *mask, bool force)
318{
319 struct irq_desc *desc = irq_data_to_desc(data);
320
321
322
323
324
325
326
327
328
329
330 if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
331 irqd_is_activated(data) || !irqd_affinity_on_activate(data))
332 return false;
333
334 cpumask_copy(desc->irq_common_data.affinity, mask);
335 irq_init_effective_affinity(data, mask);
336 irqd_set(data, IRQD_AFFINITY_SET);
337 return true;
338}
339
340int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
341 bool force)
342{
343 struct irq_chip *chip = irq_data_get_irq_chip(data);
344 struct irq_desc *desc = irq_data_to_desc(data);
345 int ret = 0;
346
347 if (!chip || !chip->irq_set_affinity)
348 return -EINVAL;
349
350 if (irq_set_affinity_deactivated(data, mask, force))
351 return 0;
352
353 if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
354 ret = irq_try_set_affinity(data, mask, force);
355 } else {
356 irqd_set_move_pending(data);
357 irq_copy_pending(desc, mask);
358 }
359
360 if (desc->affinity_notify) {
361 kref_get(&desc->affinity_notify->kref);
362 if (!schedule_work(&desc->affinity_notify->work)) {
363
364 kref_put(&desc->affinity_notify->kref,
365 desc->affinity_notify->release);
366 }
367 }
368 irqd_set(data, IRQD_AFFINITY_SET);
369
370 return ret;
371}
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388int irq_update_affinity_desc(unsigned int irq,
389 struct irq_affinity_desc *affinity)
390{
391 struct irq_desc *desc;
392 unsigned long flags;
393 bool activated;
394 int ret = 0;
395
396
397
398
399
400 if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
401 return -EOPNOTSUPP;
402
403 desc = irq_get_desc_buslock(irq, &flags, 0);
404 if (!desc)
405 return -EINVAL;
406
407
408 if (irqd_is_started(&desc->irq_data)) {
409 ret = -EBUSY;
410 goto out_unlock;
411 }
412
413
414 if (irqd_affinity_is_managed(&desc->irq_data)) {
415 ret = -EBUSY;
416 goto out_unlock;
417 }
418
419
420
421
422
423 activated = irqd_is_activated(&desc->irq_data);
424 if (activated)
425 irq_domain_deactivate_irq(&desc->irq_data);
426
427 if (affinity->is_managed) {
428 irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
429 irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN);
430 }
431
432 cpumask_copy(desc->irq_common_data.affinity, &affinity->mask);
433
434
435 if (activated)
436 irq_domain_activate_irq(&desc->irq_data, false);
437
438out_unlock:
439 irq_put_desc_busunlock(desc, flags);
440 return ret;
441}
442
443static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask,
444 bool force)
445{
446 struct irq_desc *desc = irq_to_desc(irq);
447 unsigned long flags;
448 int ret;
449
450 if (!desc)
451 return -EINVAL;
452
453 raw_spin_lock_irqsave(&desc->lock, flags);
454 ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
455 raw_spin_unlock_irqrestore(&desc->lock, flags);
456 return ret;
457}
458
/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Fails if cpumask does not contain an online CPU
 */
466int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
467{
468 return __irq_set_affinity(irq, cpumask, false);
469}
470EXPORT_SYMBOL_GPL(irq_set_affinity);
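
/*
 * Usage sketch (illustrative): spreading the queue interrupts of a
 * multi-queue device across CPUs from its probe path; the queue/irq
 * layout (dev->nr_queues, dev->queue_irq[]) is made up:
 *
 *	for (i = 0; i < dev->nr_queues; i++) {
 *		int cpu = cpumask_local_spread(i, dev_to_node(&pdev->dev));
 *
 *		ret = irq_set_affinity(dev->queue_irq[i], cpumask_of(cpu));
 *		if (ret)
 *			dev_warn(&pdev->dev, "queue %d affinity failed: %d\n", i, ret);
 *	}
 *
 * The call fails if the mask contains no online CPU or the interrupt
 * cannot have its affinity changed at all (see __irq_can_set_affinity()).
 */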
471
472
473
474
475
476
477
478
479
480
481
482
483int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
484{
485 return __irq_set_affinity(irq, cpumask, true);
486}
487EXPORT_SYMBOL_GPL(irq_force_affinity);
488
489int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
490 bool setaffinity)
491{
492 unsigned long flags;
493 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
494
495 if (!desc)
496 return -EINVAL;
497 desc->affinity_hint = m;
498 irq_put_desc_unlock(desc, flags);
499 if (m && setaffinity)
500 __irq_set_affinity(irq, m, false);
501 return 0;
502}
503EXPORT_SYMBOL_GPL(__irq_apply_affinity_hint);
504
505static void irq_affinity_notify(struct work_struct *work)
506{
507 struct irq_affinity_notify *notify =
508 container_of(work, struct irq_affinity_notify, work);
509 struct irq_desc *desc = irq_to_desc(notify->irq);
510 cpumask_var_t cpumask;
511 unsigned long flags;
512
513 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
514 goto out;
515
516 raw_spin_lock_irqsave(&desc->lock, flags);
517 if (irq_move_pending(&desc->irq_data))
518 irq_get_pending(cpumask, desc);
519 else
520 cpumask_copy(cpumask, desc->irq_common_data.affinity);
521 raw_spin_unlock_irqrestore(&desc->lock, flags);
522
523 notify->notify(notify, cpumask);
524
525 free_cpumask_var(cpumask);
526out:
	kref_put(&notify->kref, notify->release);
528}
529
/**
 *	irq_set_affinity_notifier - control notification of IRQ affinity changes
 *	@irq:		Interrupt for which to enable/disable notification
 *	@notify:	Context for notification, or %NULL to disable
 *			notification.  Function pointers must be initialised;
 *			the other fields will be initialised by this function.
 *
 *	Must be called in process context.  Notification may only be enabled
 *	after the IRQ is allocated and must be disabled before the IRQ is
 *	freed using free_irq().
 */
541int
542irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
543{
544 struct irq_desc *desc = irq_to_desc(irq);
545 struct irq_affinity_notify *old_notify;
546 unsigned long flags;
547
548
549 might_sleep();
550
551 if (!desc || desc->istate & IRQS_NMI)
552 return -EINVAL;
553
554
555 if (notify) {
556 notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
559 }
560
561 raw_spin_lock_irqsave(&desc->lock, flags);
562 old_notify = desc->affinity_notify;
563 desc->affinity_notify = notify;
564 raw_spin_unlock_irqrestore(&desc->lock, flags);
565
566 if (old_notify) {
567 if (cancel_work_sync(&old_notify->work)) {
568
569 kref_put(&old_notify->kref, old_notify->release);
570 }
571 kref_put(&old_notify->kref, old_notify->release);
572 }
573
574 return 0;
575}
576EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
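
/*
 * Usage sketch (illustrative; the mydev naming and field layout are
 * hypothetical). A driver that keeps per-IRQ state keyed by CPU can track
 * affinity changes like this:
 *
 *	static void mydev_affinity_changed(struct irq_affinity_notify *notify,
 *					   const cpumask_t *mask)
 *	{
 *		struct mydev *dev = container_of(notify, struct mydev,
 *						 affinity_notify);
 *
 *		cpumask_copy(&dev->irq_cpus, mask);
 *	}
 *
 *	static void mydev_affinity_release(struct kref *ref) { }
 *
 *	dev->affinity_notify.notify = mydev_affinity_changed;
 *	dev->affinity_notify.release = mydev_affinity_release;
 *	irq_set_affinity_notifier(dev->irq, &dev->affinity_notify);
 *
 * Pass a NULL notifier to unregister before calling free_irq(), as the
 * comment above requires.
 */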
577
578#ifndef CONFIG_AUTO_IRQ_AFFINITY
579
580
581
582int irq_setup_affinity(struct irq_desc *desc)
583{
584 struct cpumask *set = irq_default_affinity;
585 int ret, node = irq_desc_get_node(desc);
586 static DEFINE_RAW_SPINLOCK(mask_lock);
587 static struct cpumask mask;
588
589
590 if (!__irq_can_set_affinity(desc))
591 return 0;
592
593 raw_spin_lock(&mask_lock);
594
595
596
597
598 if (irqd_affinity_is_managed(&desc->irq_data) ||
599 irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
600 if (cpumask_intersects(desc->irq_common_data.affinity,
601 cpu_online_mask))
602 set = desc->irq_common_data.affinity;
603 else
604 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
605 }
606
607 cpumask_and(&mask, cpu_online_mask, set);
608 if (cpumask_empty(&mask))
609 cpumask_copy(&mask, cpu_online_mask);
610
611 if (node != NUMA_NO_NODE) {
612 const struct cpumask *nodemask = cpumask_of_node(node);
613
614
615 if (cpumask_intersects(&mask, nodemask))
616 cpumask_and(&mask, &mask, nodemask);
617 }
618 ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
619 raw_spin_unlock(&mask_lock);
620 return ret;
621}
622#else
623
624int irq_setup_affinity(struct irq_desc *desc)
625{
626 return irq_select_affinity(irq_desc_get_irq(desc));
627}
628#endif
629#endif
630
631
632
633
634
635
636
637
638
639
640
641
642
643int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
644{
645 unsigned long flags;
646 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
647 struct irq_data *data;
648 struct irq_chip *chip;
649 int ret = -ENOSYS;
650
651 if (!desc)
652 return -EINVAL;
653
654 data = irq_desc_get_irq_data(desc);
655 do {
656 chip = irq_data_get_irq_chip(data);
657 if (chip && chip->irq_set_vcpu_affinity)
658 break;
659#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
660 data = data->parent_data;
661#else
662 data = NULL;
663#endif
664 } while (data);
665
666 if (data)
667 ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
668 irq_put_desc_unlock(desc, flags);
669
670 return ret;
671}
672EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
673
674void __disable_irq(struct irq_desc *desc)
675{
676 if (!desc->depth++)
677 irq_disable(desc);
678}
679
680static int __disable_irq_nosync(unsigned int irq)
681{
682 unsigned long flags;
683 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
684
685 if (!desc)
686 return -EINVAL;
687 __disable_irq(desc);
688 irq_put_desc_busunlock(desc, flags);
689 return 0;
690}
691
/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and Enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
703void disable_irq_nosync(unsigned int irq)
704{
705 __disable_irq_nosync(irq);
706}
707EXPORT_SYMBOL(disable_irq_nosync);
708
/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	Can only be called from preemptible code as it might sleep when
 *	an interrupt thread is associated to @irq.
 */
721void disable_irq(unsigned int irq)
722{
723 if (!__disable_irq_nosync(irq))
724 synchronize_irq(irq);
725}
726EXPORT_SYMBOL(disable_irq);
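
/*
 * Usage sketch (illustrative; mydev_rewrite_ring_registers() is a made-up
 * helper): the classic pattern for reconfiguring hardware that the handler
 * also touches is to bracket the update with disable_irq()/enable_irq(),
 * which nest properly:
 *
 *	disable_irq(dev->irq);
 *	mydev_rewrite_ring_registers(dev);	// handler cannot run here
 *	enable_irq(dev->irq);
 *
 * Because disable_irq() waits for running handlers (including the irq
 * thread), it must not be called while holding a lock the handler takes,
 * and not from atomic context; use disable_irq_nosync() there instead.
 */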
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745bool disable_hardirq(unsigned int irq)
746{
747 if (!__disable_irq_nosync(irq))
748 return synchronize_hardirq(irq);
749
750 return false;
751}
752EXPORT_SYMBOL_GPL(disable_hardirq);
753
754
755
756
757
758
759
760
761
762
763
764void disable_nmi_nosync(unsigned int irq)
765{
766 disable_irq_nosync(irq);
767}
768
769void __enable_irq(struct irq_desc *desc)
770{
771 switch (desc->depth) {
772 case 0:
773 err_out:
774 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
775 irq_desc_get_irq(desc));
776 break;
777 case 1: {
778 if (desc->istate & IRQS_SUSPENDED)
779 goto err_out;
780
781 irq_settings_set_noprobe(desc);
782
783
784
785
786
787
788
789 irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
790 break;
791 }
792 default:
793 desc->depth--;
794 }
795}
796
/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq().  If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context only when
 *	desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
808void enable_irq(unsigned int irq)
809{
810 unsigned long flags;
811 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
812
813 if (!desc)
814 return;
815 if (WARN(!desc->irq_data.chip,
816 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
817 goto out;
818
819 __enable_irq(desc);
820out:
821 irq_put_desc_busunlock(desc, flags);
822}
823EXPORT_SYMBOL(enable_irq);
824
825
826
827
828
829
830
831
832
833
834void enable_nmi(unsigned int irq)
835{
836 enable_irq(irq);
837}
838
839static int set_irq_wake_real(unsigned int irq, unsigned int on)
840{
841 struct irq_desc *desc = irq_to_desc(irq);
842 int ret = -ENXIO;
843
844 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
845 return 0;
846
847 if (desc->irq_data.chip->irq_set_wake)
848 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
849
850 return ret;
851}
852
/**
 *	irq_set_irq_wake - control irq power management wakeup
 *	@irq:	interrupt to control
 *	@on:	enable/disable power management wakeup
 *
 *	Enable/disable power management wakeup mode, which is
 *	disabled by default.  Enables and disables must match,
 *	just as they match for non-wakeup mode support.
 *
 *	Wakeup mode lets this IRQ wake the system from sleep
 *	states like "suspend to RAM".
 *
 *	Note: irq enable/disable state is completely orthogonal
 *	to the enable/disable state of irq wake. An irq can be
 *	disabled with disable_irq() and still wake the system as
 *	long as the irq has wake enabled. If this does not hold,
 *	then the underlying irq chip and the related driver need
 *	to be investigated.
 */
872int irq_set_irq_wake(unsigned int irq, unsigned int on)
873{
874 unsigned long flags;
875 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
876 int ret = 0;
877
878 if (!desc)
879 return -EINVAL;
880
881
882 if (desc->istate & IRQS_NMI) {
883 ret = -EINVAL;
884 goto out_unlock;
885 }
886
887
888
889
890 if (on) {
891 if (desc->wake_depth++ == 0) {
892 ret = set_irq_wake_real(irq, on);
893 if (ret)
894 desc->wake_depth = 0;
895 else
896 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
897 }
898 } else {
899 if (desc->wake_depth == 0) {
900 WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
901 } else if (--desc->wake_depth == 0) {
902 ret = set_irq_wake_real(irq, on);
903 if (ret)
904 desc->wake_depth = 1;
905 else
906 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
907 }
908 }
909
910out_unlock:
911 irq_put_desc_busunlock(desc, flags);
912 return ret;
913}
914EXPORT_SYMBOL(irq_set_irq_wake);
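
/*
 * Usage sketch (illustrative): a driver whose interrupt should wake the
 * system typically toggles wake mode from its PM callbacks. The mydev
 * structure is hypothetical; device_may_wakeup() and dev_get_drvdata()
 * are the usual driver-core helpers:
 *
 *	static int mydev_suspend(struct device *d)
 *	{
 *		struct mydev *dev = dev_get_drvdata(d);
 *
 *		if (device_may_wakeup(d))
 *			irq_set_irq_wake(dev->irq, 1);
 *		return 0;
 *	}
 *
 *	static int mydev_resume(struct device *d)
 *	{
 *		struct mydev *dev = dev_get_drvdata(d);
 *
 *		if (device_may_wakeup(d))
 *			irq_set_irq_wake(dev->irq, 0);
 *		return 0;
 *	}
 *
 * Enable and disable calls must balance, exactly like irq disable depth.
 */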
915
916
917
918
919
920
921int can_request_irq(unsigned int irq, unsigned long irqflags)
922{
923 unsigned long flags;
924 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
925 int canrequest = 0;
926
927 if (!desc)
928 return 0;
929
930 if (irq_settings_can_request(desc)) {
931 if (!desc->action ||
932 irqflags & desc->action->flags & IRQF_SHARED)
933 canrequest = 1;
934 }
935 irq_put_desc_unlock(desc, flags);
936 return canrequest;
937}
938
939int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
940{
941 struct irq_chip *chip = desc->irq_data.chip;
942 int ret, unmask = 0;
943
944 if (!chip || !chip->irq_set_type) {
945
946
947
948
949 pr_debug("No set_type function for IRQ %d (%s)\n",
950 irq_desc_get_irq(desc),
951 chip ? (chip->name ? : "unknown") : "unknown");
952 return 0;
953 }
954
955 if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
956 if (!irqd_irq_masked(&desc->irq_data))
957 mask_irq(desc);
958 if (!irqd_irq_disabled(&desc->irq_data))
959 unmask = 1;
960 }
961
962
963 flags &= IRQ_TYPE_SENSE_MASK;
964 ret = chip->irq_set_type(&desc->irq_data, flags);
965
966 switch (ret) {
967 case IRQ_SET_MASK_OK:
968 case IRQ_SET_MASK_OK_DONE:
969 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
970 irqd_set(&desc->irq_data, flags);
971 fallthrough;
972
973 case IRQ_SET_MASK_OK_NOCOPY:
974 flags = irqd_get_trigger_type(&desc->irq_data);
975 irq_settings_set_trigger_mask(desc, flags);
976 irqd_clear(&desc->irq_data, IRQD_LEVEL);
977 irq_settings_clr_level(desc);
978 if (flags & IRQ_TYPE_LEVEL_MASK) {
979 irq_settings_set_level(desc);
980 irqd_set(&desc->irq_data, IRQD_LEVEL);
981 }
982
983 ret = 0;
984 break;
985 default:
986 pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
987 flags, irq_desc_get_irq(desc), chip->irq_set_type);
988 }
989 if (unmask)
990 unmask_irq(desc);
991 return ret;
992}
993
994#ifdef CONFIG_HARDIRQS_SW_RESEND
995int irq_set_parent(int irq, int parent_irq)
996{
997 unsigned long flags;
998 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
999
1000 if (!desc)
1001 return -EINVAL;
1002
1003 desc->parent_irq = parent_irq;
1004
1005 irq_put_desc_unlock(desc, flags);
1006 return 0;
1007}
1008EXPORT_SYMBOL_GPL(irq_set_parent);
1009#endif
1010
1011
1012
1013
1014
1015
1016static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
1017{
1018 return IRQ_WAKE_THREAD;
1019}
1020
1021
1022
1023
1024
1025static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
1026{
1027 WARN(1, "Primary handler called for nested irq %d\n", irq);
1028 return IRQ_NONE;
1029}
1030
1031static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
1032{
1033 WARN(1, "Secondary action handler called for irq %d\n", irq);
1034 return IRQ_NONE;
1035}
1036
static int irq_wait_for_interrupt(struct irqaction *action)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			/* may need to run one last time */
			if (test_and_clear_bit(IRQTF_RUNTHREAD,
					       &action->thread_flags)) {
				__set_current_state(TASK_RUNNING);
				return 0;
			}
			__set_current_state(TASK_RUNNING);
			return -1;
		}

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
	}
}
1061
/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler finished. unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT) ||
	    action->handler == irq_forced_secondary_handler)
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be we need to protect us against
	 * the following scenario:
	 *
	 * The thread is faster done than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and masks the line, leaves due
	 * to IRQS_INPROGRESS and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}
1116
1117#ifdef CONFIG_SMP
1118
1119
1120
1121static void
1122irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
1123{
1124 cpumask_var_t mask;
1125 bool valid = true;
1126
1127 if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
1128 return;
1129
1130
1131
1132
1133
1134 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
1135 set_bit(IRQTF_AFFINITY, &action->thread_flags);
1136 return;
1137 }
1138
1139 raw_spin_lock_irq(&desc->lock);
1140
1141
1142
1143
1144 if (cpumask_available(desc->irq_common_data.affinity)) {
1145 const struct cpumask *m;
1146
1147 m = irq_data_get_effective_affinity_mask(&desc->irq_data);
1148 cpumask_copy(mask, m);
1149 } else {
1150 valid = false;
1151 }
1152 raw_spin_unlock_irq(&desc->lock);
1153
1154 if (valid)
1155 set_cpus_allowed_ptr(current, mask);
1156 free_cpumask_var(mask);
1157}
1158#else
1159static inline void
1160irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
1161#endif
1162
/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_enable();
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	return ret;
}
1205
1206static void wake_threads_waitq(struct irq_desc *desc)
1207{
1208 if (atomic_dec_and_test(&desc->threads_active))
1209 wake_up(&desc->wait_for_threads);
1210}
1211
1212static void irq_thread_dtor(struct callback_head *unused)
1213{
1214 struct task_struct *tsk = current;
1215 struct irq_desc *desc;
1216 struct irqaction *action;
1217
1218 if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
1219 return;
1220
1221 action = kthread_data(tsk);
1222
1223 pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
1224 tsk->comm, tsk->pid, action->irq);
1225
1226
1227 desc = irq_to_desc(action->irq);
1228
1229
1230
1231
1232 if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
1233 wake_threads_waitq(desc);
1234
1235
1236 irq_finalize_oneshot(desc, action);
1237}
1238
1239static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
1240{
1241 struct irqaction *secondary = action->secondary;
1242
1243 if (WARN_ON_ONCE(!secondary))
1244 return;
1245
1246 raw_spin_lock_irq(&desc->lock);
1247 __irq_wake_thread(desc, secondary);
1248 raw_spin_unlock_irq(&desc->lock);
1249}
1250
1251
1252
1253
1254static void irq_thread_set_ready(struct irq_desc *desc,
1255 struct irqaction *action)
1256{
1257 set_bit(IRQTF_READY, &action->thread_flags);
1258 wake_up(&desc->wait_for_threads);
1259}
1260
1261
1262
1263
1264
1265static void wake_up_and_wait_for_irq_thread_ready(struct irq_desc *desc,
1266 struct irqaction *action)
1267{
1268 if (!action || !action->thread)
1269 return;
1270
1271 wake_up_process(action->thread);
1272 wait_event(desc->wait_for_threads,
1273 test_bit(IRQTF_READY, &action->thread_flags));
1274}
1275
/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
			struct irqaction *action);

	irq_thread_set_ready(desc, action);

	sched_set_fifo(current);

	if (force_irqthreads() && test_bit(IRQTF_FORCED_THREAD,
					   &action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, TWA_NONE);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_WAKE_THREAD)
			irq_wake_secondary(desc, action);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}
1323
1324
1325
1326
1327
1328
1329
1330void irq_wake_thread(unsigned int irq, void *dev_id)
1331{
1332 struct irq_desc *desc = irq_to_desc(irq);
1333 struct irqaction *action;
1334 unsigned long flags;
1335
1336 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1337 return;
1338
1339 raw_spin_lock_irqsave(&desc->lock, flags);
1340 for_each_action_of_desc(desc, action) {
1341 if (action->dev_id == dev_id) {
1342 if (action->thread)
1343 __irq_wake_thread(desc, action);
1344 break;
1345 }
1346 }
1347 raw_spin_unlock_irqrestore(&desc->lock, flags);
1348}
1349EXPORT_SYMBOL_GPL(irq_wake_thread);
1350
static int irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads())
		return 0;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return 0;

	/*
	 * No further action required for interrupts which are requested as
	 * threaded interrupts already
	 */
	if (new->handler == irq_default_primary_handler)
		return 0;

	new->flags |= IRQF_ONESHOT;

	/*
	 * Handle the case where we have a real primary handler and a
	 * thread handler. We force thread them as well by creating a
	 * secondary action.
	 */
	if (new->handler && new->thread_fn) {
		/* Allocate the secondary action */
		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
		if (!new->secondary)
			return -ENOMEM;
		new->secondary->handler = irq_forced_secondary_handler;
		new->secondary->thread_fn = new->thread_fn;
		new->secondary->dev_id = new->dev_id;
		new->secondary->irq = new->irq;
		new->secondary->name = new->name;
	}
	/* Deal with the primary handler */
	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
	new->thread_fn = new->handler;
	new->handler = irq_default_primary_handler;
	return 0;
}
1389
1390static int irq_request_resources(struct irq_desc *desc)
1391{
1392 struct irq_data *d = &desc->irq_data;
1393 struct irq_chip *c = d->chip;
1394
1395 return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1396}
1397
1398static void irq_release_resources(struct irq_desc *desc)
1399{
1400 struct irq_data *d = &desc->irq_data;
1401 struct irq_chip *c = d->chip;
1402
1403 if (c->irq_release_resources)
1404 c->irq_release_resources(d);
1405}
1406
1407static bool irq_supports_nmi(struct irq_desc *desc)
1408{
1409 struct irq_data *d = irq_desc_get_irq_data(desc);
1410
1411#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1412
1413 if (d->parent_data)
1414 return false;
1415#endif
1416
1417 if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
1418 return false;
1419
1420 return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
1421}
1422
1423static int irq_nmi_setup(struct irq_desc *desc)
1424{
1425 struct irq_data *d = irq_desc_get_irq_data(desc);
1426 struct irq_chip *c = d->chip;
1427
1428 return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
1429}
1430
1431static void irq_nmi_teardown(struct irq_desc *desc)
1432{
1433 struct irq_data *d = irq_desc_get_irq_data(desc);
1434 struct irq_chip *c = d->chip;
1435
1436 if (c->irq_nmi_teardown)
1437 c->irq_nmi_teardown(d);
1438}
1439
1440static int
1441setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1442{
1443 struct task_struct *t;
1444
1445 if (!secondary) {
1446 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1447 new->name);
1448 } else {
1449 t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1450 new->name);
1451 }
1452
1453 if (IS_ERR(t))
1454 return PTR_ERR(t);
1455
1456
1457
1458
1459
1460
1461 new->thread = get_task_struct(t);
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471 set_bit(IRQTF_AFFINITY, &new->thread_flags);
1472 return 0;
1473}
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489static int
1490__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1491{
1492 struct irqaction *old, **old_ptr;
1493 unsigned long flags, thread_mask = 0;
1494 int ret, nested, shared = 0;
1495
1496 if (!desc)
1497 return -EINVAL;
1498
1499 if (desc->irq_data.chip == &no_irq_chip)
1500 return -ENOSYS;
1501 if (!try_module_get(desc->owner))
1502 return -ENODEV;
1503
1504 new->irq = irq;
1505
1506
1507
1508
1509
1510 if (!(new->flags & IRQF_TRIGGER_MASK))
1511 new->flags |= irqd_get_trigger_type(&desc->irq_data);
1512
1513
1514
1515
1516
1517 nested = irq_settings_is_nested_thread(desc);
1518 if (nested) {
1519 if (!new->thread_fn) {
1520 ret = -EINVAL;
1521 goto out_mput;
1522 }
1523
1524
1525
1526
1527
1528 new->handler = irq_nested_primary_handler;
1529 } else {
1530 if (irq_settings_can_thread(desc)) {
1531 ret = irq_setup_forced_threading(new);
1532 if (ret)
1533 goto out_mput;
1534 }
1535 }
1536
1537
1538
1539
1540
1541
1542 if (new->thread_fn && !nested) {
1543 ret = setup_irq_thread(new, irq, false);
1544 if (ret)
1545 goto out_mput;
1546 if (new->secondary) {
1547 ret = setup_irq_thread(new->secondary, irq, true);
1548 if (ret)
1549 goto out_thread;
1550 }
1551 }
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1563 new->flags &= ~IRQF_ONESHOT;
1564
1565
1566
1567
1568
1569
1570
1571
1572 mutex_lock(&desc->request_mutex);
1573
1574
1575
1576
1577
1578
1579 chip_bus_lock(desc);
1580
1581
1582 if (!desc->action) {
1583 ret = irq_request_resources(desc);
1584 if (ret) {
1585 pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1586 new->name, irq, desc->irq_data.chip->name);
1587 goto out_bus_unlock;
1588 }
1589 }
1590
1591
1592
1593
1594
1595
1596
1597 raw_spin_lock_irqsave(&desc->lock, flags);
1598 old_ptr = &desc->action;
1599 old = *old_ptr;
1600 if (old) {
1601
1602
1603
1604
1605
1606
1607
1608
1609 unsigned int oldtype;
1610
1611 if (desc->istate & IRQS_NMI) {
1612 pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
1613 new->name, irq, desc->irq_data.chip->name);
1614 ret = -EINVAL;
1615 goto out_unlock;
1616 }
1617
1618
1619
1620
1621
1622 if (irqd_trigger_type_was_set(&desc->irq_data)) {
1623 oldtype = irqd_get_trigger_type(&desc->irq_data);
1624 } else {
1625 oldtype = new->flags & IRQF_TRIGGER_MASK;
1626 irqd_set_trigger_type(&desc->irq_data, oldtype);
1627 }
1628
1629 if (!((old->flags & new->flags) & IRQF_SHARED) ||
1630 (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
1631 ((old->flags ^ new->flags) & IRQF_ONESHOT))
1632 goto mismatch;
1633
1634
1635 if ((old->flags & IRQF_PERCPU) !=
1636 (new->flags & IRQF_PERCPU))
1637 goto mismatch;
1638
1639
1640 do {
1641
1642
1643
1644
1645
1646 thread_mask |= old->thread_mask;
1647 old_ptr = &old->next;
1648 old = *old_ptr;
1649 } while (old);
1650 shared = 1;
1651 }
1652
1653
1654
1655
1656
1657
1658 if (new->flags & IRQF_ONESHOT) {
1659
1660
1661
1662
1663 if (thread_mask == ~0UL) {
1664 ret = -EBUSY;
1665 goto out_unlock;
1666 }
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687 new->thread_mask = 1UL << ffz(thread_mask);
1688
1689 } else if (new->handler == irq_default_primary_handler &&
1690 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
1707 new->name, irq);
1708 ret = -EINVAL;
1709 goto out_unlock;
1710 }
1711
1712 if (!shared) {
1713
1714 if (new->flags & IRQF_TRIGGER_MASK) {
1715 ret = __irq_set_trigger(desc,
1716 new->flags & IRQF_TRIGGER_MASK);
1717
1718 if (ret)
1719 goto out_unlock;
1720 }
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733 ret = irq_activate(desc);
1734 if (ret)
1735 goto out_unlock;
1736
1737 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1738 IRQS_ONESHOT | IRQS_WAITING);
1739 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1740
1741 if (new->flags & IRQF_PERCPU) {
1742 irqd_set(&desc->irq_data, IRQD_PER_CPU);
1743 irq_settings_set_per_cpu(desc);
1744 if (new->flags & IRQF_NO_DEBUG)
1745 irq_settings_set_no_debug(desc);
1746 }
1747
1748 if (noirqdebug)
1749 irq_settings_set_no_debug(desc);
1750
1751 if (new->flags & IRQF_ONESHOT)
1752 desc->istate |= IRQS_ONESHOT;
1753
1754
1755 if (new->flags & IRQF_NOBALANCING) {
1756 irq_settings_set_no_balancing(desc);
1757 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1758 }
1759
1760 if (!(new->flags & IRQF_NO_AUTOEN) &&
1761 irq_settings_can_autoenable(desc)) {
1762 irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
1763 } else {
1764
1765
1766
1767
1768
1769
1770 WARN_ON_ONCE(new->flags & IRQF_SHARED);
1771
1772 desc->depth = 1;
1773 }
1774
1775 } else if (new->flags & IRQF_TRIGGER_MASK) {
1776 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1777 unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1778
1779 if (nmsk != omsk)
1780
1781 pr_warn("irq %d uses trigger mode %u; requested %u\n",
1782 irq, omsk, nmsk);
1783 }
1784
1785 *old_ptr = new;
1786
1787 irq_pm_install_action(desc, new);
1788
1789
1790 desc->irq_count = 0;
1791 desc->irqs_unhandled = 0;
1792
1793
1794
1795
1796
1797 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1798 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1799 __enable_irq(desc);
1800 }
1801
1802 raw_spin_unlock_irqrestore(&desc->lock, flags);
1803 chip_bus_sync_unlock(desc);
1804 mutex_unlock(&desc->request_mutex);
1805
1806 irq_setup_timings(desc, new);
1807
1808 wake_up_and_wait_for_irq_thread_ready(desc, new);
1809 wake_up_and_wait_for_irq_thread_ready(desc, new->secondary);
1810
1811 register_irq_proc(irq, desc);
1812 new->dir = NULL;
1813 register_handler_proc(irq, new);
1814 return 0;
1815
1816mismatch:
1817 if (!(new->flags & IRQF_PROBE_SHARED)) {
1818 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1819 irq, new->flags, new->name, old->flags, old->name);
1820#ifdef CONFIG_DEBUG_SHIRQ
1821 dump_stack();
1822#endif
1823 }
1824 ret = -EBUSY;
1825
1826out_unlock:
1827 raw_spin_unlock_irqrestore(&desc->lock, flags);
1828
1829 if (!desc->action)
1830 irq_release_resources(desc);
1831out_bus_unlock:
1832 chip_bus_sync_unlock(desc);
1833 mutex_unlock(&desc->request_mutex);
1834
1835out_thread:
1836 if (new->thread) {
1837 struct task_struct *t = new->thread;
1838
1839 new->thread = NULL;
1840 kthread_stop(t);
1841 put_task_struct(t);
1842 }
1843 if (new->secondary && new->secondary->thread) {
1844 struct task_struct *t = new->secondary->thread;
1845
1846 new->secondary->thread = NULL;
1847 kthread_stop(t);
1848 put_task_struct(t);
1849 }
1850out_mput:
1851 module_put(desc->owner);
1852 return ret;
1853}
1854
1855
1856
1857
1858
1859static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
1860{
1861 unsigned irq = desc->irq_data.irq;
1862 struct irqaction *action, **action_ptr;
1863 unsigned long flags;
1864
1865 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1866
1867 mutex_lock(&desc->request_mutex);
1868 chip_bus_lock(desc);
1869 raw_spin_lock_irqsave(&desc->lock, flags);
1870
1871
1872
1873
1874
1875 action_ptr = &desc->action;
1876 for (;;) {
1877 action = *action_ptr;
1878
1879 if (!action) {
1880 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1881 raw_spin_unlock_irqrestore(&desc->lock, flags);
1882 chip_bus_sync_unlock(desc);
1883 mutex_unlock(&desc->request_mutex);
1884 return NULL;
1885 }
1886
1887 if (action->dev_id == dev_id)
1888 break;
1889 action_ptr = &action->next;
1890 }
1891
1892
1893 *action_ptr = action->next;
1894
1895 irq_pm_remove_action(desc, action);
1896
1897
1898 if (!desc->action) {
1899 irq_settings_clr_disable_unlazy(desc);
1900
1901 irq_shutdown(desc);
1902 }
1903
1904#ifdef CONFIG_SMP
1905
1906 if (WARN_ON_ONCE(desc->affinity_hint))
1907 desc->affinity_hint = NULL;
1908#endif
1909
1910 raw_spin_unlock_irqrestore(&desc->lock, flags);
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925 chip_bus_sync_unlock(desc);
1926
1927 unregister_handler_proc(irq, action);
1928
1929
1930
1931
1932
1933
1934 __synchronize_hardirq(desc, true);
1935
1936#ifdef CONFIG_DEBUG_SHIRQ
1937
1938
1939
1940
1941
1942
1943
1944
1945 if (action->flags & IRQF_SHARED) {
1946 local_irq_save(flags);
1947 action->handler(irq, dev_id);
1948 local_irq_restore(flags);
1949 }
1950#endif
1951
1952
1953
1954
1955
1956
1957
1958 if (action->thread) {
1959 kthread_stop(action->thread);
1960 put_task_struct(action->thread);
1961 if (action->secondary && action->secondary->thread) {
1962 kthread_stop(action->secondary->thread);
1963 put_task_struct(action->secondary->thread);
1964 }
1965 }
1966
1967
1968 if (!desc->action) {
1969
1970
1971
1972
1973 chip_bus_lock(desc);
1974
1975
1976
1977
1978 raw_spin_lock_irqsave(&desc->lock, flags);
1979 irq_domain_deactivate_irq(&desc->irq_data);
1980 raw_spin_unlock_irqrestore(&desc->lock, flags);
1981
1982 irq_release_resources(desc);
1983 chip_bus_sync_unlock(desc);
1984 irq_remove_timings(desc);
1985 }
1986
1987 mutex_unlock(&desc->request_mutex);
1988
1989 irq_chip_pm_put(&desc->irq_data);
1990 module_put(desc->owner);
1991 kfree(action->secondary);
1992 return action;
1993}
1994
/**
 *	free_irq - free an interrupt allocated with request_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 *
 *	Returns the devname argument passed to request_irq.
 */
2011const void *free_irq(unsigned int irq, void *dev_id)
2012{
2013 struct irq_desc *desc = irq_to_desc(irq);
2014 struct irqaction *action;
2015 const char *devname;
2016
2017 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2018 return NULL;
2019
2020#ifdef CONFIG_SMP
2021 if (WARN_ON(desc->affinity_notify))
2022 desc->affinity_notify = NULL;
2023#endif
2024
2025 action = __free_irq(desc, dev_id);
2026
2027 if (!action)
2028 return NULL;
2029
2030 devname = action->name;
2031 kfree(action);
2032 return devname;
2033}
2034EXPORT_SYMBOL(free_irq);
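
/*
 * Usage sketch (illustrative; mydev_isr and the dev structure are made-up):
 * free_irq() pairs with request_irq()/request_threaded_irq() and must be
 * passed the same dev_id cookie that was used for the request:
 *
 *	ret = request_irq(dev->irq, mydev_isr, IRQF_SHARED, "mydev", dev);
 *	...
 *	free_irq(dev->irq, dev);	// waits for handlers, stops the irq thread
 *
 * The returned devname is mainly useful for callers that passed a
 * dynamically allocated name and want to free it afterwards.
 */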
2035
2036
2037static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
2038{
2039 const char *devname = NULL;
2040
2041 desc->istate &= ~IRQS_NMI;
2042
2043 if (!WARN_ON(desc->action == NULL)) {
2044 irq_pm_remove_action(desc, desc->action);
2045 devname = desc->action->name;
2046 unregister_handler_proc(irq, desc->action);
2047
2048 kfree(desc->action);
2049 desc->action = NULL;
2050 }
2051
2052 irq_settings_clr_disable_unlazy(desc);
2053 irq_shutdown_and_deactivate(desc);
2054
2055 irq_release_resources(desc);
2056
2057 irq_chip_pm_put(&desc->irq_data);
2058 module_put(desc->owner);
2059
2060 return devname;
2061}
2062
2063const void *free_nmi(unsigned int irq, void *dev_id)
2064{
2065 struct irq_desc *desc = irq_to_desc(irq);
2066 unsigned long flags;
2067 const void *devname;
2068
2069 if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
2070 return NULL;
2071
2072 if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2073 return NULL;
2074
2075
2076 if (WARN_ON(desc->depth == 0))
2077 disable_nmi_nosync(irq);
2078
2079 raw_spin_lock_irqsave(&desc->lock, flags);
2080
2081 irq_nmi_teardown(desc);
2082 devname = __cleanup_nmi(irq, desc);
2083
2084 raw_spin_unlock_irqrestore(&desc->lock, flags);
2085
2086 return devname;
2087}
2088
/**
 *	request_threaded_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Primary handler for threaded interrupts.
 *		  If handler is NULL and thread_fn != NULL
 *		  the default primary handler is installed.
 *	@thread_fn: Function called from the irq handler thread.
 *		    If NULL, no irq thread is created.
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	If you want to set up a threaded irq handler for your device
 *	then you need to supply @handler and @thread_fn. @handler is
 *	still called in hard interrupt context and has to check
 *	whether the interrupt originates from the device. If yes it
 *	needs to disable the interrupt on the device and return
 *	IRQ_WAKE_THREAD which will wake up the handler thread and run
 *	@thread_fn. This split handler design is necessary to support
 *	shared interrupts.
 *
 *	@dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *	IRQF_ONESHOT		Run thread_fn with interrupt line masked
 */
2131int request_threaded_irq(unsigned int irq, irq_handler_t handler,
2132 irq_handler_t thread_fn, unsigned long irqflags,
2133 const char *devname, void *dev_id)
2134{
2135 struct irqaction *action;
2136 struct irq_desc *desc;
2137 int retval;
2138
2139 if (irq == IRQ_NOTCONNECTED)
2140 return -ENOTCONN;
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155 if (((irqflags & IRQF_SHARED) && !dev_id) ||
2156 ((irqflags & IRQF_SHARED) && (irqflags & IRQF_NO_AUTOEN)) ||
2157 (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
2158 ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
2159 return -EINVAL;
2160
2161 desc = irq_to_desc(irq);
2162 if (!desc)
2163 return -EINVAL;
2164
2165 if (!irq_settings_can_request(desc) ||
2166 WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2167 return -EINVAL;
2168
2169 if (!handler) {
2170 if (!thread_fn)
2171 return -EINVAL;
2172 handler = irq_default_primary_handler;
2173 }
2174
2175 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2176 if (!action)
2177 return -ENOMEM;
2178
2179 action->handler = handler;
2180 action->thread_fn = thread_fn;
2181 action->flags = irqflags;
2182 action->name = devname;
2183 action->dev_id = dev_id;
2184
2185 retval = irq_chip_pm_get(&desc->irq_data);
2186 if (retval < 0) {
2187 kfree(action);
2188 return retval;
2189 }
2190
2191 retval = __setup_irq(irq, desc, action);
2192
2193 if (retval) {
2194 irq_chip_pm_put(&desc->irq_data);
2195 kfree(action->secondary);
2196 kfree(action);
2197 }
2198
2199#ifdef CONFIG_DEBUG_SHIRQ_FIXME
2200 if (!retval && (irqflags & IRQF_SHARED)) {
2201
2202
2203
2204
2205
2206
2207 unsigned long flags;
2208
2209 disable_irq(irq);
2210 local_irq_save(flags);
2211
2212 handler(irq, dev_id);
2213
2214 local_irq_restore(flags);
2215 enable_irq(irq);
2216 }
2217#endif
2218 return retval;
2219}
2220EXPORT_SYMBOL(request_threaded_irq);
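
/*
 * Usage sketch (illustrative; the mydev_* helpers are hypothetical): a
 * typical threaded handler pair. The primary handler runs in hard irq
 * context, checks and silences the device, and defers the slow work:
 *
 *	static irqreturn_t mydev_hardirq(int irq, void *dev_id)
 *	{
 *		struct mydev *dev = dev_id;
 *
 *		if (!mydev_irq_pending(dev))
 *			return IRQ_NONE;		// not ours (shared line)
 *		mydev_mask_device_irq(dev);
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t mydev_thread(int irq, void *dev_id)
 *	{
 *		struct mydev *dev = dev_id;
 *
 *		mydev_process_events(dev);		// may sleep
 *		mydev_unmask_device_irq(dev);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(dev->irq, mydev_hardirq, mydev_thread,
 *				   IRQF_SHARED, "mydev", dev);
 *
 * Passing handler == NULL installs irq_default_primary_handler() and then
 * requires IRQF_ONESHOT, as enforced in __setup_irq().
 */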
2221
2222
2223
2224
2225
2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239int request_any_context_irq(unsigned int irq, irq_handler_t handler,
2240 unsigned long flags, const char *name, void *dev_id)
2241{
2242 struct irq_desc *desc;
2243 int ret;
2244
2245 if (irq == IRQ_NOTCONNECTED)
2246 return -ENOTCONN;
2247
2248 desc = irq_to_desc(irq);
2249 if (!desc)
2250 return -EINVAL;
2251
2252 if (irq_settings_is_nested_thread(desc)) {
2253 ret = request_threaded_irq(irq, NULL, handler,
2254 flags, name, dev_id);
2255 return !ret ? IRQC_IS_NESTED : ret;
2256 }
2257
2258 ret = request_irq(irq, handler, flags, name, dev_id);
2259 return !ret ? IRQC_IS_HARDIRQ : ret;
2260}
2261EXPORT_SYMBOL_GPL(request_any_context_irq);
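
/*
 * Usage sketch (illustrative; mydev_isr and dev are made-up names): drivers
 * for IP blocks that may sit behind a slow-bus GPIO expander cannot know
 * up front whether their irq is nested/threaded, so they check the return
 * value instead of assuming hardirq context:
 *
 *	ret = request_any_context_irq(client->irq, mydev_isr,
 *				      IRQF_TRIGGER_FALLING, "mydev", dev);
 *	if (ret < 0)
 *		return ret;
 *	dev->irq_is_nested = (ret == IRQC_IS_NESTED);
 *
 * mydev_isr() must then be written so it can run in either context.
 */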
2262
2263
2264
2265
2266
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289int request_nmi(unsigned int irq, irq_handler_t handler,
2290 unsigned long irqflags, const char *name, void *dev_id)
2291{
2292 struct irqaction *action;
2293 struct irq_desc *desc;
2294 unsigned long flags;
2295 int retval;
2296
2297 if (irq == IRQ_NOTCONNECTED)
2298 return -ENOTCONN;
2299
2300
2301 if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
2302 return -EINVAL;
2303
2304 if (!(irqflags & IRQF_PERCPU))
2305 return -EINVAL;
2306
2307 if (!handler)
2308 return -EINVAL;
2309
2310 desc = irq_to_desc(irq);
2311
2312 if (!desc || (irq_settings_can_autoenable(desc) &&
2313 !(irqflags & IRQF_NO_AUTOEN)) ||
2314 !irq_settings_can_request(desc) ||
2315 WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
2316 !irq_supports_nmi(desc))
2317 return -EINVAL;
2318
2319 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2320 if (!action)
2321 return -ENOMEM;
2322
2323 action->handler = handler;
2324 action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
2325 action->name = name;
2326 action->dev_id = dev_id;
2327
2328 retval = irq_chip_pm_get(&desc->irq_data);
2329 if (retval < 0)
2330 goto err_out;
2331
2332 retval = __setup_irq(irq, desc, action);
2333 if (retval)
2334 goto err_irq_setup;
2335
2336 raw_spin_lock_irqsave(&desc->lock, flags);
2337
2338
2339 desc->istate |= IRQS_NMI;
2340 retval = irq_nmi_setup(desc);
2341 if (retval) {
2342 __cleanup_nmi(irq, desc);
2343 raw_spin_unlock_irqrestore(&desc->lock, flags);
2344 return -EINVAL;
2345 }
2346
2347 raw_spin_unlock_irqrestore(&desc->lock, flags);
2348
2349 return 0;
2350
2351err_irq_setup:
2352 irq_chip_pm_put(&desc->irq_data);
2353err_out:
2354 kfree(action);
2355
2356 return retval;
2357}
2358
2359void enable_percpu_irq(unsigned int irq, unsigned int type)
2360{
2361 unsigned int cpu = smp_processor_id();
2362 unsigned long flags;
2363 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2364
2365 if (!desc)
2366 return;
2367
2368
2369
2370
2371
2372 type &= IRQ_TYPE_SENSE_MASK;
2373 if (type == IRQ_TYPE_NONE)
2374 type = irqd_get_trigger_type(&desc->irq_data);
2375
2376 if (type != IRQ_TYPE_NONE) {
2377 int ret;
2378
2379 ret = __irq_set_trigger(desc, type);
2380
2381 if (ret) {
2382 WARN(1, "failed to set type for IRQ%d\n", irq);
2383 goto out;
2384 }
2385 }
2386
2387 irq_percpu_enable(desc, cpu);
2388out:
2389 irq_put_desc_unlock(desc, flags);
2390}
2391EXPORT_SYMBOL_GPL(enable_percpu_irq);
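
/*
 * Usage sketch (illustrative; mydev_irq is a made-up global): per-CPU
 * interrupts (timers, PMUs, IPIs) are enabled on each CPU separately,
 * usually from a CPU hotplug startup callback running on that CPU:
 *
 *	static int mydev_starting_cpu(unsigned int cpu)
 *	{
 *		enable_percpu_irq(mydev_irq, IRQ_TYPE_NONE);	// keep configured trigger
 *		return 0;
 *	}
 *
 *	static int mydev_dying_cpu(unsigned int cpu)
 *	{
 *		disable_percpu_irq(mydev_irq);
 *		return 0;
 *	}
 *
 * The cpuhp_setup_state() registration of these callbacks is omitted here.
 */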
2392
2393void enable_percpu_nmi(unsigned int irq, unsigned int type)
2394{
2395 enable_percpu_irq(irq, type);
2396}
2397
2398
2399
2400
2401
2402
2403
2404
2405bool irq_percpu_is_enabled(unsigned int irq)
2406{
2407 unsigned int cpu = smp_processor_id();
2408 struct irq_desc *desc;
2409 unsigned long flags;
2410 bool is_enabled;
2411
2412 desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2413 if (!desc)
2414 return false;
2415
2416 is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
2417 irq_put_desc_unlock(desc, flags);
2418
2419 return is_enabled;
2420}
2421EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
2422
2423void disable_percpu_irq(unsigned int irq)
2424{
2425 unsigned int cpu = smp_processor_id();
2426 unsigned long flags;
2427 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2428
2429 if (!desc)
2430 return;
2431
2432 irq_percpu_disable(desc, cpu);
2433 irq_put_desc_unlock(desc, flags);
2434}
2435EXPORT_SYMBOL_GPL(disable_percpu_irq);
2436
2437void disable_percpu_nmi(unsigned int irq)
2438{
2439 disable_percpu_irq(irq);
2440}
2441
2442
2443
2444
2445static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2446{
2447 struct irq_desc *desc = irq_to_desc(irq);
2448 struct irqaction *action;
2449 unsigned long flags;
2450
2451 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
2452
2453 if (!desc)
2454 return NULL;
2455
2456 raw_spin_lock_irqsave(&desc->lock, flags);
2457
2458 action = desc->action;
2459 if (!action || action->percpu_dev_id != dev_id) {
2460 WARN(1, "Trying to free already-free IRQ %d\n", irq);
2461 goto bad;
2462 }
2463
2464 if (!cpumask_empty(desc->percpu_enabled)) {
2465 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
2466 irq, cpumask_first(desc->percpu_enabled));
2467 goto bad;
2468 }
2469
2470
2471 desc->action = NULL;
2472
2473 desc->istate &= ~IRQS_NMI;
2474
2475 raw_spin_unlock_irqrestore(&desc->lock, flags);
2476
2477 unregister_handler_proc(irq, action);
2478
2479 irq_chip_pm_put(&desc->irq_data);
2480 module_put(desc->owner);
2481 return action;
2482
2483bad:
2484 raw_spin_unlock_irqrestore(&desc->lock, flags);
2485 return NULL;
2486}
2487
2488
2489
2490
2491
2492
2493
2494
2495void remove_percpu_irq(unsigned int irq, struct irqaction *act)
2496{
2497 struct irq_desc *desc = irq_to_desc(irq);
2498
2499 if (desc && irq_settings_is_per_cpu_devid(desc))
2500 __free_percpu_irq(irq, act->percpu_dev_id);
2501}
2502
2503
2504
2505
2506
2507
2508
2509
2510
2511
2512
2513
2514
2515void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2516{
2517 struct irq_desc *desc = irq_to_desc(irq);
2518
2519 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2520 return;
2521
2522 chip_bus_lock(desc);
2523 kfree(__free_percpu_irq(irq, dev_id));
2524 chip_bus_sync_unlock(desc);
2525}
2526EXPORT_SYMBOL_GPL(free_percpu_irq);
2527
2528void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
2529{
2530 struct irq_desc *desc = irq_to_desc(irq);
2531
2532 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2533 return;
2534
2535 if (WARN_ON(!(desc->istate & IRQS_NMI)))
2536 return;
2537
2538 kfree(__free_percpu_irq(irq, dev_id));
2539}
2540
2541
2542
2543
2544
2545
2546
2547
2548int setup_percpu_irq(unsigned int irq, struct irqaction *act)
2549{
2550 struct irq_desc *desc = irq_to_desc(irq);
2551 int retval;
2552
2553 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2554 return -EINVAL;
2555
2556 retval = irq_chip_pm_get(&desc->irq_data);
2557 if (retval < 0)
2558 return retval;
2559
2560 retval = __setup_irq(irq, desc, act);
2561
2562 if (retval)
2563 irq_chip_pm_put(&desc->irq_data);
2564
2565 return retval;
2566}
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577
2578
2579
2580
2581
2582
2583
2584
2585int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2586 unsigned long flags, const char *devname,
2587 void __percpu *dev_id)
2588{
2589 struct irqaction *action;
2590 struct irq_desc *desc;
2591 int retval;
2592
2593 if (!dev_id)
2594 return -EINVAL;
2595
2596 desc = irq_to_desc(irq);
2597 if (!desc || !irq_settings_can_request(desc) ||
2598 !irq_settings_is_per_cpu_devid(desc))
2599 return -EINVAL;
2600
2601 if (flags && flags != IRQF_TIMER)
2602 return -EINVAL;
2603
2604 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2605 if (!action)
2606 return -ENOMEM;
2607
2608 action->handler = handler;
2609 action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
2610 action->name = devname;
2611 action->percpu_dev_id = dev_id;
2612
2613 retval = irq_chip_pm_get(&desc->irq_data);
2614 if (retval < 0) {
2615 kfree(action);
2616 return retval;
2617 }
2618
2619 retval = __setup_irq(irq, desc, action);
2620
2621 if (retval) {
2622 irq_chip_pm_put(&desc->irq_data);
2623 kfree(action);
2624 }
2625
2626 return retval;
2627}
2628EXPORT_SYMBOL_GPL(__request_percpu_irq);
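
/*
 * Usage sketch (illustrative; the mydev_* names are made up): the dev_id
 * for a per-CPU interrupt must be a __percpu pointer, and each CPU's
 * handler invocation receives its own slot. request_percpu_irq() is
 * assumed to be the usual <linux/interrupt.h> wrapper around
 * __request_percpu_irq() with flags == 0:
 *
 *	static DEFINE_PER_CPU(struct mydev_cpu_ctx, mydev_ctx);
 *
 *	static irqreturn_t mydev_percpu_isr(int irq, void *dev_id)
 *	{
 *		struct mydev_cpu_ctx *ctx = dev_id;	// this CPU's slot
 *
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_percpu_irq(irq, mydev_percpu_isr, "mydev", &mydev_ctx);
 */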
2629
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642
2643
2644
2645
2646
2647
2648
2649
2650
2651int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
2652 const char *name, void __percpu *dev_id)
2653{
2654 struct irqaction *action;
2655 struct irq_desc *desc;
2656 unsigned long flags;
2657 int retval;
2658
2659 if (!handler)
2660 return -EINVAL;
2661
2662 desc = irq_to_desc(irq);
2663
2664 if (!desc || !irq_settings_can_request(desc) ||
2665 !irq_settings_is_per_cpu_devid(desc) ||
2666 irq_settings_can_autoenable(desc) ||
2667 !irq_supports_nmi(desc))
2668 return -EINVAL;
2669
2670
2671 if (desc->istate & IRQS_NMI)
2672 return -EINVAL;
2673
2674 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2675 if (!action)
2676 return -ENOMEM;
2677
2678 action->handler = handler;
2679 action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
2680 | IRQF_NOBALANCING;
2681 action->name = name;
2682 action->percpu_dev_id = dev_id;
2683
2684 retval = irq_chip_pm_get(&desc->irq_data);
2685 if (retval < 0)
2686 goto err_out;
2687
2688 retval = __setup_irq(irq, desc, action);
2689 if (retval)
2690 goto err_irq_setup;
2691
2692 raw_spin_lock_irqsave(&desc->lock, flags);
2693 desc->istate |= IRQS_NMI;
2694 raw_spin_unlock_irqrestore(&desc->lock, flags);
2695
2696 return 0;
2697
2698err_irq_setup:
2699 irq_chip_pm_put(&desc->irq_data);
2700err_out:
2701 kfree(action);
2702
2703 return retval;
2704}
2705
2706
2707
2708
2709
2710
2711
2712
2713
2714
2715
2716
2717
2718
2719int prepare_percpu_nmi(unsigned int irq)
2720{
2721 unsigned long flags;
2722 struct irq_desc *desc;
2723 int ret = 0;
2724
2725 WARN_ON(preemptible());
2726
2727 desc = irq_get_desc_lock(irq, &flags,
2728 IRQ_GET_DESC_CHECK_PERCPU);
2729 if (!desc)
2730 return -EINVAL;
2731
2732 if (WARN(!(desc->istate & IRQS_NMI),
2733 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
2734 irq)) {
2735 ret = -EINVAL;
2736 goto out;
2737 }
2738
2739 ret = irq_nmi_setup(desc);
2740 if (ret) {
2741 pr_err("Failed to setup NMI delivery: irq %u\n", irq);
2742 goto out;
2743 }
2744
2745out:
2746 irq_put_desc_unlock(desc, flags);
2747 return ret;
2748}
2749
2750
2751
2752
2753
2754
2755
2756
2757
2758
2759
2760
2761
2762void teardown_percpu_nmi(unsigned int irq)
2763{
2764 unsigned long flags;
2765 struct irq_desc *desc;
2766
2767 WARN_ON(preemptible());
2768
2769 desc = irq_get_desc_lock(irq, &flags,
2770 IRQ_GET_DESC_CHECK_PERCPU);
2771 if (!desc)
2772 return;
2773
2774 if (WARN_ON(!(desc->istate & IRQS_NMI)))
2775 goto out;
2776
2777 irq_nmi_teardown(desc);
2778out:
2779 irq_put_desc_unlock(desc, flags);
2780}
2781
2782int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
2783 bool *state)
2784{
2785 struct irq_chip *chip;
2786 int err = -EINVAL;
2787
2788 do {
2789 chip = irq_data_get_irq_chip(data);
2790 if (WARN_ON_ONCE(!chip))
2791 return -ENODEV;
2792 if (chip->irq_get_irqchip_state)
2793 break;
2794#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2795 data = data->parent_data;
2796#else
2797 data = NULL;
2798#endif
2799 } while (data);
2800
2801 if (data)
2802 err = chip->irq_get_irqchip_state(data, which, state);
2803 return err;
2804}
2805
2806
2807
2808
2809
2810
2811
2812
2813
2814
2815
2816
2817
2818
2819int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2820 bool *state)
2821{
2822 struct irq_desc *desc;
2823 struct irq_data *data;
2824 unsigned long flags;
2825 int err = -EINVAL;
2826
2827 desc = irq_get_desc_buslock(irq, &flags, 0);
2828 if (!desc)
2829 return err;
2830
2831 data = irq_desc_get_irq_data(desc);
2832
2833 err = __irq_get_irqchip_state(data, which, state);
2834
2835 irq_put_desc_busunlock(desc, flags);
2836 return err;
2837}
2838EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
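
/*
 * Usage sketch (illustrative): poll whether an interrupt is pending at the
 * irqchip level, e.g. while quiescing a device before handing it over:
 *
 *	bool pending;
 *	int err = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
 *
 *	if (!err && pending)
 *		irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);
 *
 * Both calls only work when the (possibly parent) irqchip implements the
 * corresponding callbacks; otherwise they return -EINVAL.
 */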
2839
2840
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850
2851
2852int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2853 bool val)
2854{
2855 struct irq_desc *desc;
2856 struct irq_data *data;
2857 struct irq_chip *chip;
2858 unsigned long flags;
2859 int err = -EINVAL;
2860
2861 desc = irq_get_desc_buslock(irq, &flags, 0);
2862 if (!desc)
2863 return err;
2864
2865 data = irq_desc_get_irq_data(desc);
2866
2867 do {
2868 chip = irq_data_get_irq_chip(data);
2869 if (WARN_ON_ONCE(!chip)) {
2870 err = -ENODEV;
2871 goto out_unlock;
2872 }
2873 if (chip->irq_set_irqchip_state)
2874 break;
2875#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2876 data = data->parent_data;
2877#else
2878 data = NULL;
2879#endif
2880 } while (data);
2881
2882 if (data)
2883 err = chip->irq_set_irqchip_state(data, which, val);
2884
2885out_unlock:
2886 irq_put_desc_busunlock(desc, flags);
2887 return err;
2888}
2889EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
2890
2891
2892
2893
2894
2895
2896
2897bool irq_has_action(unsigned int irq)
2898{
2899 bool res;
2900
2901 rcu_read_lock();
2902 res = irq_desc_has_action(irq_to_desc(irq));
2903 rcu_read_unlock();
2904 return res;
2905}
2906EXPORT_SYMBOL_GPL(irq_has_action);
2907
2908
2909
2910
2911
2912
2913
2914
2915bool irq_check_status_bit(unsigned int irq, unsigned int bitmask)
2916{
2917 struct irq_desc *desc;
2918 bool res = false;
2919
2920 rcu_read_lock();
2921 desc = irq_to_desc(irq);
2922 if (desc)
2923 res = !!(desc->status_use_accessors & bitmask);
2924 rcu_read_unlock();
2925 return res;
2926}
2927EXPORT_SYMBOL_GPL(irq_check_status_bit);
2928