// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <uapi/linux/sched/types.h>
#include <linux/task_work.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;
EXPORT_SYMBOL_GPL(force_irqthreads);

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

static void __synchronize_hardirq(struct irq_desc *desc)
{
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section.  This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 *	synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending hard IRQ handlers for this
 *	interrupt to complete before returning. If you use this
 *	function while holding a resource the IRQ handler may need you
 *	will deadlock. It does not take associated threaded handlers
 *	into account.
 *
 *	Do not use this for shutdown scenarios where you must be sure
 *	that all parts (hardirq and threaded handler) have completed.
 *
 *	Returns: false if a threaded handler is active.
 *
 *	This function may be called - with care - from IRQ context.
 */
bool synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		return !atomic_read(&desc->threads_active);
	}

	return true;
}
EXPORT_SYMBOL(synchronize_hardirq);

/**
 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);
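
/*
 * Illustrative sketch (not part of this file): a driver's teardown path
 * typically quiesces its interrupt before releasing the data the handler
 * touches. The foo_* names and fields below are hypothetical.
 *
 *	static void foo_shutdown(struct foo_device *foo)
 *	{
 *		disable_irq(foo->irq);		// masks and waits for handlers
 *		kfree(foo->dma_buf);		// now safe: no handler can run
 *	}
 *
 * disable_irq() calls synchronize_irq() internally; an explicit
 * synchronize_irq() is only needed when the line must stay enabled
 * while waiting for in-flight handlers.
 */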

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

static bool __irq_can_set_affinity(struct irq_desc *desc)
{
	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return false;
	return true;
}

/**
 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 *	@irq:		Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	return __irq_can_set_affinity(irq_to_desc(irq));
}

/**
 * irq_can_set_affinity_usr - Check if affinity of a irq can be set from user space
 * @irq:	Interrupt to check
 *
 * Like irq_can_set_affinity() above, but additionally checks for the
 * AFFINITY_MANAGED flag.
 */
bool irq_can_set_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __irq_can_set_affinity(desc) &&
		!irqd_affinity_is_managed(&desc->irq_data);
}

/**
 *	irq_set_thread_affinity - Notify irq threads to adjust affinity
 *	@desc:		irq descriptor which has affinity changed
 *
 *	We just set IRQTF_AFFINITY and delegate the affinity setting
 *	to the interrupt thread itself. We can not call
 *	set_cpus_allowed_ptr() here as we hold desc->lock and this
 *	code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action;

	for_each_action_of_desc(desc, action)
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
}

static void irq_validate_effective_affinity(struct irq_data *data)
{
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

	if (!cpumask_empty(m))
		return;
	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
		     chip->name, data->irq);
#endif
}

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	ret = chip->irq_set_affinity(data, mask, force);
	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(desc->irq_common_data.affinity, mask);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_validate_effective_affinity(data);
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	irqd_set_move_pending(data);
	irq_copy_pending(desc, dest);
	return 0;
}
#else
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	return -EBUSY;
}
#endif

static int irq_try_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	int ret = irq_do_set_affinity(data, dest, force);

	/*
	 * In case that the underlying vector management is busy and the
	 * architecture supports the generic pending mechanism then utilize
	 * this to avoid returning an error to user space.
	 */
	if (ret == -EBUSY && !force)
		ret = irq_set_affinity_pending(data, dest);
	return ret;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
		ret = irq_try_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	/* set the initial affinity to prevent every interrupt being on CPU0 */
	if (m)
		__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
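
/*
 * Illustrative sketch (not part of this file): spreading the vectors of a
 * hypothetical multi-queue device across CPUs via the affinity hint. The
 * vecs[] array and nvec count are assumptions for the example.
 *
 *	for (i = 0; i < nvec; i++)
 *		irq_set_affinity_hint(vecs[i],
 *				      cpumask_of(i % num_online_cpus()));
 *
 * The hint is exported via /proc/irq/<nr>/affinity_hint so userspace
 * balancers (e.g. irqbalance) can honour it. Pass a NULL mask to clear it,
 * which a driver must do before freeing the interrupt (see the warning in
 * __free_irq() below).
 */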

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_common_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 *	irq_set_affinity_notifier - control notification of IRQ affinity changes
 *	@irq:		Interrupt for which to enable/disable notification
 *	@notify:	Context for notification, or %NULL to disable
 *			notification.  Function pointers must be initialised;
 *			the other fields will be initialised by this function.
 *
 *	Must be called in process context.  Notification may only be enabled
 *	after the IRQ is allocated and must be disabled before the IRQ is
 *	freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc || desc->istate & IRQS_NMI)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
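
/*
 * Illustrative sketch (not part of this file): a hypothetical consumer that
 * wants to follow an interrupt around embeds struct irq_affinity_notify and
 * supplies the two callbacks. All foo_* names are assumptions.
 *
 *	static void foo_notify(struct irq_affinity_notify *notify,
 *			       const cpumask_t *mask)
 *	{
 *		// runs from a workqueue, so sleeping is allowed here
 *	}
 *
 *	static void foo_release(struct kref *ref)
 *	{
 *		struct irq_affinity_notify *notify =
 *			container_of(ref, struct irq_affinity_notify, kref);
 *		// last reference dropped; free the enclosing object
 *	}
 *
 *	foo->notify.notify = foo_notify;
 *	foo->notify.release = foo_release;
 *	irq_set_affinity_notifier(foo->irq, &foo->notify);
 *
 * Pass NULL before free_irq() to disable notification again.
 */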

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
int irq_setup_affinity(struct irq_desc *desc)
{
	struct cpumask *set = irq_default_affinity;
	int ret, node = irq_desc_get_node(desc);
	static DEFINE_RAW_SPINLOCK(mask_lock);
	static struct cpumask mask;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!__irq_can_set_affinity(desc))
		return 0;

	raw_spin_lock(&mask_lock);
	/*
	 * Preserve the managed affinity setting and a userspace affinity
	 * setup, but make sure that one of the targets is online.
	 */
	if (irqd_affinity_is_managed(&desc->irq_data) ||
	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_common_data.affinity,
				       cpu_online_mask))
			set = desc->irq_common_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(&mask, cpu_online_mask, set);
	if (cpumask_empty(&mask))
		cpumask_copy(&mask, cpu_online_mask);

	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(&mask, nodemask))
			cpumask_and(&mask, &mask, nodemask);
	}
	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
	raw_spin_unlock(&mask_lock);
	return ret;
}
#else
/* Wrapper for ALPHA specific affinity selector magic */
int irq_setup_affinity(struct irq_desc *desc)
{
	return irq_select_affinity(irq_desc_get_irq(desc));
}
#endif

/*
 * Called when a bogus affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_setup_affinity(desc);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
#endif

/**
 *	irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 *	@irq: interrupt number to set affinity
 *	@vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
 *	            specific data for percpu_devid interrupts
 *
 *	This function uses the vCPU specific data to set the vCPU
 *	affinity for an irq. The vCPU specific data is passed from
 *	outside, such as KVM. One example code path is as below:
 *	KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	do {
		chip = irq_data_get_irq_chip(data);
		if (chip && chip->irq_set_vcpu_affinity)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);

void __disable_irq(struct irq_desc *desc)
{
	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and Enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
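
/*
 * Illustrative sketch (not part of this file): disables nest, so every
 * disable_irq() needs a matching enable_irq() before the line fires again.
 * The foo device and its irq field are hypothetical.
 *
 *	disable_irq(foo->irq);		// depth 0 -> 1, waits for handlers
 *	disable_irq(foo->irq);		// depth 1 -> 2, nested
 *	enable_irq(foo->irq);		// depth 2 -> 1, line still masked
 *	enable_irq(foo->irq);		// depth 1 -> 0, line unmasked again
 *
 * An extra enable_irq() at depth 0 triggers the "Unbalanced enable"
 * warning in __enable_irq() below.
 */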

/**
 *	disable_hardirq - disables an irq and waits for hardirq completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending hard IRQ handlers for this
 *	interrupt to complete before returning. If you use this function while
 *	holding a resource the hard IRQ handler may need you will deadlock.
 *
 *	When used to optimistically disable an interrupt from atomic context
 *	the return value must be checked.
 *
 *	Returns: false if a threaded handler is active.
 *
 *	This function may be called - with care - from IRQ context.
 */
bool disable_hardirq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		return synchronize_hardirq(irq);

	return false;
}
EXPORT_SYMBOL_GPL(disable_hardirq);

/**
 *	disable_nmi_nosync - disable an nmi without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line. Disables and enables are
 *	nested.
 *	The interrupt to disable must have been requested through request_nmi.
 *	Unlike disable_nmi(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 */
void disable_nmi_nosync(unsigned int irq)
{
	disable_irq_nosync(irq);
}

void __enable_irq(struct irq_desc *desc)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
		     irq_desc_get_irq(desc));
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		/*
		 * Call irq_startup() not irq_enable() here because the
		 * interrupt might be marked NOAUTOEN. So irq_startup()
		 * needs to be invoked when it gets enabled the first
		 * time. If it was already started up, then irq_startup()
		 * will invoke irq_enable() under the hood.
		 */
		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
		break;
	}
	default:
		desc->depth--;
	}
}

/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq().  If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context only when
 *	desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

/**
 *	enable_nmi - enable handling of an nmi
 *	@irq: Interrupt to enable
 *
 *	The interrupt to enable must have been requested through request_nmi.
 *	Undoes the effect of one call to disable_nmi(). If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 */
void enable_nmi(unsigned int irq)
{
	enable_irq(irq);
}

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 *	irq_set_irq_wake - control irq power management wakeup
 *	@irq:	interrupt to control
 *	@on:	enable/disable power management wakeup
 *
 *	Enable/disable power management wakeup mode, which is
 *	disabled by default.  Enables and disables must match,
 *	just as they match for non-wakeup mode support.
 *
 *	Wakeup mode lets this IRQ wake the system from sleep
 *	states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* Don't use NMIs as wake up interrupts please */
	if (desc->istate & IRQS_NMI) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
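
/*
 * Illustrative sketch (not part of this file): a hypothetical driver arming
 * its interrupt as a wakeup source around suspend, via the enable_irq_wake()
 * and disable_irq_wake() wrappers from <linux/interrupt.h>.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(foo->irq);	// wake_depth 0 -> 1
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(foo->irq);	// wake_depth 1 -> 0
 *		return 0;
 *	}
 */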

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n",
			 irq_desc_get_irq(desc),
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* Mask all flags except trigger mode */
	flags &= IRQ_TYPE_SENSE_MASK;
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		/* fall through */

	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_parent);
#endif

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
{
	WARN(1, "Secondary action handler called for irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			/* may need to run one last time */
			if (test_and_clear_bit(IRQTF_RUNTHREAD,
					       &action->thread_flags)) {
				__set_current_state(TASK_RUNNING);
				return 0;
			}
			__set_current_state(TASK_RUNNING);
			return -1;
		}

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
	}
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler finished. unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT) ||
	    action->handler == irq_forced_secondary_handler)
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be we need to protect us against
	 * the following scenario:
	 *
	 * The thread is faster done than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and masks the line, leaves due
	 * to IRQS_INPROGRESS and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (cpumask_available(desc->irq_common_data.affinity)) {
		const struct cpumask *m;

		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
		cpumask_copy(mask, m);
	} else {
		valid = false;
	}
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects in the irq thread check_irq_resend() path.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
		struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}

static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
{
	struct irqaction *secondary = action->secondary;

	if (WARN_ON_ONCE(!secondary))
		return;

	raw_spin_lock_irq(&desc->lock);
	__irq_wake_thread(desc, secondary);
	raw_spin_unlock_irq(&desc->lock);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
			struct irqaction *action);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					&action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, false);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_WAKE_THREAD)
			irq_wake_secondary(desc, action);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}

/**
 *	irq_wake_thread - wake the irq thread for the action identified by dev_id
 *	@irq:		Interrupt line
 *	@dev_id:	Device identity for which the thread should be woken
 *
 */
void irq_wake_thread(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if (action->dev_id == dev_id) {
			if (action->thread)
				__irq_wake_thread(desc, action);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);
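
/*
 * Illustrative sketch (not part of this file): a driver whose hard handler
 * normally returns IRQ_WAKE_THREAD can also kick its thread from another
 * path, e.g. a watchdog timer, using the same dev_id it passed at request
 * time. All foo_* names are assumptions.
 *
 *	static void foo_watchdog(struct timer_list *t)
 *	{
 *		struct foo_device *foo = from_timer(foo, t, watchdog);
 *
 *		// re-run the threaded handler as if the irq had fired
 *		irq_wake_thread(foo->irq, foo);
 *	}
 */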

static int irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return 0;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return 0;

	/*
	 * No further action required for interrupts which are requested as
	 * threaded interrupts already
	 */
	if (new->handler == irq_default_primary_handler)
		return 0;

	new->flags |= IRQF_ONESHOT;

	/*
	 * Handle the case where we have a real primary handler and a
	 * thread handler. We force thread them as well by creating a
	 * secondary action.
	 */
	if (new->handler && new->thread_fn) {
		/* Allocate the secondary action */
		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
		if (!new->secondary)
			return -ENOMEM;
		new->secondary->handler = irq_forced_secondary_handler;
		new->secondary->thread_fn = new->thread_fn;
		new->secondary->dev_id = new->dev_id;
		new->secondary->irq = new->irq;
		new->secondary->name = new->name;
	}
	/* Deal with the primary handler */
	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
	new->thread_fn = new->handler;
	new->handler = irq_default_primary_handler;
	return 0;
}

static int irq_request_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
}

static void irq_release_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	if (c->irq_release_resources)
		c->irq_release_resources(d);
}

static bool irq_supports_nmi(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	/* Only IRQs directly managed by the root irqchip can be set as NMI */
	if (d->parent_data)
		return false;
#endif
	/* Don't support NMIs for chips behind a slow bus */
	if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
		return false;

	return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
}

static int irq_nmi_setup(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *c = d->chip;

	return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
}

static void irq_nmi_teardown(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *c = d->chip;

	if (c->irq_nmi_teardown)
		c->irq_nmi_teardown(d);
}

static int
setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
{
	struct task_struct *t;
	struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};

	if (!secondary) {
		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
	} else {
		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
				   new->name);
		param.sched_priority -= 1;
	}

	if (IS_ERR(t))
		return PTR_ERR(t);

	sched_setscheduler_nocheck(t, SCHED_FIFO, &param);

	/*
	 * We keep the reference to the task struct even if
	 * the thread dies to avoid that the interrupt code
	 * references an already freed task_struct.
	 */
	get_task_struct(t);
	new->thread = t;
	/*
	 * Tell the thread to set its affinity. This is
	 * important for shared interrupt handlers as we do
	 * not invoke setup_affinity() for the secondary
	 * handlers as everything is already set up. Even for
	 * interrupts marked with IRQF_NO_BALANCE this is
	 * correct as we want the thread to move to the cpu(s)
	 * on which the requesting code placed the interrupt.
	 */
	set_bit(IRQTF_AFFINITY, &new->thread_flags);
	return 0;
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 *
 * Locking rules:
 *
 * desc->request_mutex	Provides serialization against a concurrent free_irq()
 *   chip_bus_lock	Provides serialization for slow bus operations
 *     desc->lock	Provides serialization against hard interrupts
 *
 * chip_bus_lock and desc->lock are sufficient for all other management and
 * interrupt related functions. desc->request_mutex solely serializes
 * request/free_irq().
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;

	new->irq = irq;

	/*
	 * If the trigger type is not specified by the caller,
	 * then use the default for this interrupt.
	 */
	if (!(new->flags & IRQF_TRIGGER_MASK))
		new->flags |= irqd_get_trigger_type(&desc->irq_data);

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc)) {
			ret = irq_setup_forced_threading(new);
			if (ret)
				goto out_mput;
		}
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		ret = setup_irq_thread(new, irq, false);
		if (ret)
			goto out_mput;
		if (new->secondary) {
			ret = setup_irq_thread(new->secondary, irq, true);
			if (ret)
				goto out_thread;
		}
	}

	/*
	 * Drivers are often written to work w/o knowledge about the
	 * underlying irq chip implementation, so a request for a
	 * threaded irq without a primary hard irq context handler
	 * requires the ONESHOT flag to be set. Some irq chips like
	 * MSI based interrupts are per se one shot safe. Check the
	 * chip flags, so we can avoid the unmask dance at the end of
	 * the threaded handler for those.
	 */
	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
		new->flags &= ~IRQF_ONESHOT;

	/*
	 * Protects against a concurrent __free_irq() call which might wait
	 * for synchronize_hardirq() to complete without holding the optional
	 * chip bus lock and desc->lock.
	 */
	mutex_lock(&desc->request_mutex);

	/*
	 * Acquire bus lock as the irq_request_resources() callback below
	 * might rely on the serialization or the magic power management
	 * functions which are abusing the irq_bus_lock() callback,
	 */
	chip_bus_lock(desc);

	/* First installed action requests resources. */
	if (!desc->action) {
		ret = irq_request_resources(desc);
		if (ret) {
			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
			       new->name, irq, desc->irq_data.chip->name);
			goto out_bus_unlock;
		}
	}

	/*
	 * The following block of code has to be executed atomically
	 * protected against a concurrent interrupt and any of the other
	 * management calls which are not serialized via
	 * desc->request_mutex or the optional bus lock.
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 */
		unsigned int oldtype;

		if (desc->istate & IRQS_NMI) {
			pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
				new->name, irq, desc->irq_data.chip->name);
			ret = -EINVAL;
			goto out_unlock;
		}

		/*
		 * If nobody did set the configuration before, inherit
		 * the one provided by the requester.
		 */
		if (irqd_trigger_type_was_set(&desc->irq_data)) {
			oldtype = irqd_get_trigger_type(&desc->irq_data);
		} else {
			oldtype = new->flags & IRQF_TRIGGER_MASK;
			irqd_set_trigger_type(&desc->irq_data, oldtype);
		}

		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
			goto mismatch;

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 resp 64 irqs sharing one line,
		 * but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_unlock;
		}
		/*
		 * The thread_mask for the action is or'ed to
		 * desc->threads_oneshot to indicate that the IRQF_ONESHOT
		 * thread handler has been woken, but not yet finished. The
		 * bit is dropped when the thread completes. When all bits of
		 * a shared line are clear again the line is unmasked. See
		 * irq_wake_thread() for further information.
		 */
		new->thread_mask = 1UL << ffz(thread_mask);

	} else if (new->handler == irq_default_primary_handler &&
		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is reenabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
		       irq);
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!shared) {
		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc,
						new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_unlock;
		}

		/*
		 * Activate the interrupt. That activation must happen
		 * independently of IRQ_NOAUTOEN. request_irq() can fail
		 * and the callers are supposed to handle
		 * that. enable_irq() of an interrupt requested with
		 * IRQ_NOAUTOEN is not supposed to fail. The activation
		 * keeps it in shutdown mode, it merely associates
		 * resources if necessary and if that's not possible it
		 * fails. Interrupts which are in managed shutdown mode
		 * will simply ignore that activation request.
		 */
		ret = irq_activate(desc);
		if (ret)
			goto out_unlock;

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		if (irq_settings_can_autoenable(desc)) {
			irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		} else {
			/*
			 * Shared interrupts do not go well with disabling
			 * auto enable. The sharing interrupt might request
			 * it while it's still disabled and then wait for
			 * interrupts forever.
			 */
			WARN_ON_ONCE(new->flags & IRQF_SHARED);
			/* Undo nested disables: */
			desc->depth = 1;
		}

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warn("irq %d uses trigger mode %u; requested %u\n",
				irq, omsk, nmsk);
	}

	*old_ptr = new;

	irq_pm_install_action(desc, new);

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
	mutex_unlock(&desc->request_mutex);

	irq_setup_timings(desc, new);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);
	if (new->secondary)
		wake_up_process(new->secondary->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	return 0;

mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
		       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
	}
	ret = -EBUSY;

out_unlock:
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (!desc->action)
		irq_release_resources(desc);
out_bus_unlock:
	chip_bus_sync_unlock(desc);
	mutex_unlock(&desc->request_mutex);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
	if (new->secondary && new->secondary->thread) {
		struct task_struct *t = new->secondary->thread;

		new->secondary->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}

/**
 *	setup_irq - setup an interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		return retval;

	retval = __setup_irq(irq, desc, act);

	if (retval)
		irq_chip_pm_put(&desc->irq_data);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
{
	unsigned irq = desc->irq_data.irq;
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	mutex_lock(&desc->request_mutex);
	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);
			chip_bus_sync_unlock(desc);
			mutex_unlock(&desc->request_mutex);
			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	irq_pm_remove_action(desc, action);

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		irq_settings_clr_disable_unlazy(desc);
		irq_shutdown(desc);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	/*
	 * Drop the bus lock here so the changes which were done in the chip
	 * callbacks above are synced out to the irq chips which hang
	 * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
	 *
	 * Aside of that the bus lock can also be taken from the threaded
	 * handler in irq_finalize_oneshot() which results in a deadlock
	 * because kthread_stop() would wait forever for the thread to
	 * complete, which is blocked on the bus lock.
	 *
	 * The still held desc->request_mutex protects against a
	 * concurrent request_irq() of this irq so the release of resources
	 * and timing data is properly serialized.
	 */
	chip_bus_sync_unlock(desc);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_hardirq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	/*
	 * The action has already been removed above, but the thread writes
	 * its oneshot mask bit when it completes. Though request_mutex is
	 * held across this which prevents __setup_irq() from handing out
	 * the same bit to a newly requested action.
	 */
	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
		if (action->secondary && action->secondary->thread) {
			kthread_stop(action->secondary->thread);
			put_task_struct(action->secondary->thread);
		}
	}

	/* Last action releases resources */
	if (!desc->action) {
		/*
		 * Reacquire bus lock as irq_release_resources() might
		 * require it to deallocate resources over the slow bus.
		 */
		chip_bus_lock(desc);
		irq_release_resources(desc);
		chip_bus_sync_unlock(desc);
		irq_remove_timings(desc);
	}

	mutex_unlock(&desc->request_mutex);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	kfree(action->secondary);
	return action;
}

/**
 *	remove_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		__free_irq(desc, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 *	free_irq - free an interrupt allocated with request_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 *
 *	Returns the devname argument passed to request_irq.
 */
const void *free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	const char *devname;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return NULL;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	action = __free_irq(desc, dev_id);

	if (!action)
		return NULL;

	devname = action->name;
	kfree(action);
	return devname;
}
EXPORT_SYMBOL(free_irq);

/* This function must be called with desc->lock held */
static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
{
	const char *devname = NULL;

	desc->istate &= ~IRQS_NMI;

	if (!WARN_ON(desc->action == NULL)) {
		irq_pm_remove_action(desc, desc->action);
		devname = desc->action->name;
		unregister_handler_proc(irq, desc->action);

		kfree(desc->action);
		desc->action = NULL;
	}

	irq_settings_clr_disable_unlazy(desc);
	irq_shutdown(desc);

	irq_release_resources(desc);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);

	return devname;
}

const void *free_nmi(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	const void *devname;

	if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
		return NULL;

	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return NULL;

	/* NMI still enabled */
	if (WARN_ON(desc->depth == 0))
		disable_nmi_nosync(irq);

	raw_spin_lock_irqsave(&desc->lock, flags);

	irq_nmi_teardown(desc);
	devname = __cleanup_nmi(irq, desc);

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return devname;
}

/**
 *	request_threaded_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Primary handler for threaded interrupts.
 *		  If NULL and thread_fn != NULL the default
 *		  primary handler is installed.
 *	@thread_fn: Function called from the irq handler thread
 *		    If NULL, no irq thread is created
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	If you want to set up a threaded irq handler for your device
 *	then you need to supply @handler and @thread_fn. @handler is
 *	still called in hard interrupt context and has to check
 *	whether the interrupt originates from the device. If yes it
 *	needs to disable the interrupt on the device and return
 *	IRQ_WAKE_THREAD which will wake up the handler thread and run
 *	@thread_fn. This split handler design is necessary to support
 *	shared interrupts.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 *
	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
	 * it cannot be set along with IRQF_NO_SUSPEND.
	 */
	if (((irqflags & IRQF_SHARED) && !dev_id) ||
	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0) {
		kfree(action);
		return retval;
	}

	retval = __setup_irq(irq, desc, action);

	if (retval) {
		irq_chip_pm_put(&desc->irq_data);
		kfree(action->secondary);
		kfree(action);
	}

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
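
/*
 * Illustrative sketch (not part of this file): the canonical split-handler
 * pattern. A hypothetical "foo" driver checks and quiesces its device in
 * hard irq context, then does the slow work in the thread. All foo_*
 * names are assumptions for the example.
 *
 *	static irqreturn_t foo_hardirq(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		if (!foo_irq_is_mine(foo))	// shared line: check origin
 *			return IRQ_NONE;
 *		foo_mask_device_irq(foo);	// quiesce the device
 *		return IRQ_WAKE_THREAD;		// run foo_thread_fn below
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		foo_process_events(foo);	// may sleep here
 *		foo_unmask_device_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(foo->irq, foo_hardirq, foo_thread_fn,
 *				   IRQF_SHARED, "foo", foo);
 *
 * Passing handler == NULL installs irq_default_primary_handler() instead,
 * which requires IRQF_ONESHOT unless the irqchip is IRQCHIP_ONESHOT_SAFE
 * (see the check in __setup_irq() above).
 */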

/**
 *	request_any_context_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Threaded handler for threaded interrupts.
 *	@flags: Interrupt type flags
 *	@name: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. It selects either a
 *	hardirq or threaded handling method depending on the
 *	context.
 *
 *	On failure, it returns a negative value. On success,
 *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc;
	int ret;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
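
/*
 * Illustrative sketch (not part of this file): callers of
 * request_any_context_irq() must treat any non-negative value as success,
 * since it returns IRQC_IS_HARDIRQ or IRQC_IS_NESTED rather than 0. The
 * foo_handler name is an assumption.
 *
 *	ret = request_any_context_irq(client->irq, foo_handler,
 *				      IRQF_TRIGGER_FALLING, "foo", foo);
 *	if (ret < 0)
 *		return ret;	// only negative values are errors
 */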

/**
 *	request_nmi - allocate an interrupt line for NMI delivery
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs
 *	@irqflags: Interrupt type flags
 *	@name: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. It sets up the IRQ line
 *	to be handled as an NMI.
 *
 *	An interrupt line delivering NMIs cannot be shared and IRQ handling
 *	cannot be threaded.
 *
 *	Interrupt lines requested for NMI delivering must produce per cpu
 *	interrupts and have auto enabling setting disabled.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If the interrupt line cannot be used to deliver NMIs, function
 *	will fail and return a negative value.
 */
int request_nmi(unsigned int irq, irq_handler_t handler,
		unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	unsigned long flags;
	int retval;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	/* NMI cannot be shared, used for Polling */
	if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
		return -EINVAL;

	if (!(irqflags & IRQF_PERCPU))
		return -EINVAL;

	if (!handler)
		return -EINVAL;

	desc = irq_to_desc(irq);

	if (!desc || irq_settings_can_autoenable(desc) ||
	    !irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
	    !irq_supports_nmi(desc))
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
	action->name = name;
	action->dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		goto err_out;

	retval = __setup_irq(irq, desc, action);
	if (retval)
		goto err_irq_setup;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/* Setup NMI state */
	desc->istate |= IRQS_NMI;
	retval = irq_nmi_setup(desc);
	if (retval) {
		__cleanup_nmi(irq, desc);
		raw_spin_unlock_irqrestore(&desc->lock, flags);
		return -EINVAL;
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return 0;

err_irq_setup:
	irq_chip_pm_put(&desc->irq_data);
err_out:
	kfree(action);

	return retval;
}
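
/*
 * Illustrative sketch (not part of this file): an NMI handler must be
 * self-contained, since it can interrupt code holding any lock. The
 * irqchip must advertise IRQCHIP_SUPPORTS_NMI and the line must not be
 * auto-enabled for this to succeed; all foo_* names are assumptions.
 *
 *	static irqreturn_t foo_nmi_handler(int irq, void *dev_id)
 *	{
 *		// NMI context: no locks, no sleeping
 *		foo_record_sample(dev_id);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_nmi(irq, foo_nmi_handler, IRQF_PERCPU, "foo-nmi", foo);
 *	if (!ret)
 *		enable_nmi(irq);	// line starts out disabled
 */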

void enable_percpu_irq(unsigned int irq, unsigned int type)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	/*
	 * If the trigger type is not specified by the caller, then
	 * use the default for this interrupt.
	 */
	type &= IRQ_TYPE_SENSE_MASK;
	if (type == IRQ_TYPE_NONE)
		type = irqd_get_trigger_type(&desc->irq_data);

	if (type != IRQ_TYPE_NONE) {
		int ret;

		ret = __irq_set_trigger(desc, type);

		if (ret) {
			WARN(1, "failed to set type for IRQ%d\n", irq);
			goto out;
		}
	}

	irq_percpu_enable(desc, cpu);
out:
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(enable_percpu_irq);

void enable_percpu_nmi(unsigned int irq, unsigned int type)
{
	enable_percpu_irq(irq, type);
}

/**
 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
 * @irq:	Linux irq number to check for
 *
 * Must be called from a non migratable context. Returns the enable
 * state of a per cpu interrupt on the current cpu.
 */
bool irq_percpu_is_enabled(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	struct irq_desc *desc;
	unsigned long flags;
	bool is_enabled;

	desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return false;

	is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
	irq_put_desc_unlock(desc, flags);

	return is_enabled;
}
EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);

void disable_percpu_irq(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	irq_percpu_disable(desc, cpu);
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(disable_percpu_irq);

void disable_percpu_nmi(unsigned int irq)
{
	disable_percpu_irq(irq);
}

/*
 * Internal function to unregister a percpu irqaction.
 */
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	action = desc->action;
	if (!action || action->percpu_dev_id != dev_id) {
		WARN(1, "Trying to free already-free IRQ %d\n", irq);
		goto bad;
	}

	if (!cpumask_empty(desc->percpu_enabled)) {
		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
		     irq, cpumask_first(desc->percpu_enabled));
		goto bad;
	}

	/* Found it - now remove it from the list of entries: */
	desc->action = NULL;

	desc->istate &= ~IRQS_NMI;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	return action;

bad:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return NULL;
}

/**
 *	remove_percpu_irq - free a per-cpu interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && irq_settings_is_per_cpu_devid(desc))
		__free_percpu_irq(irq, act->percpu_dev_id);
}

/**
 *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove a percpu interrupt handler. The handler is removed, but
 *	the interrupt line is not disabled. This must be done on each
 *	CPU before calling this function. The function does not return
 *	until any executing interrupts for this IRQ have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	chip_bus_lock(desc);
	kfree(__free_percpu_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL_GPL(free_percpu_irq);

void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	if (WARN_ON(!(desc->istate & IRQS_NMI)))
		return;

	kfree(__free_percpu_irq(irq, dev_id));
}

/**
 *	setup_percpu_irq - setup a per-cpu interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 * Used to statically setup per-cpu interrupts in the early boot process.
 */
int setup_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int retval;

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		return retval;

	retval = __setup_irq(irq, desc, act);

	if (retval)
		irq_chip_pm_put(&desc->irq_data);

	return retval;
}

/**
 *	__request_percpu_irq - allocate a percpu interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *	@flags: Interrupt type flags (IRQF_TIMER only)
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A percpu cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt on the local CPU. If the interrupt is supposed to be
 *	enabled on other CPUs, it has to be done on each CPU using
 *	enable_percpu_irq().
 *
 *	Dev_id must be globally unique. It is a per-cpu variable, and
 *	the handler gets called with the interrupted CPU's instance of
 *	that variable.
 */
int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
			 unsigned long flags, const char *devname,
			 void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (!dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	if (flags && flags != IRQF_TIMER)
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
	action->name = devname;
	action->percpu_dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0) {
		kfree(action);
		return retval;
	}

	retval = __setup_irq(irq, desc, action);

	if (retval) {
		irq_chip_pm_put(&desc->irq_data);
		kfree(action);
	}

	return retval;
}
EXPORT_SYMBOL_GPL(__request_percpu_irq);
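
/*
 * Illustrative sketch (not part of this file): percpu interrupts take a
 * __percpu cookie and must be enabled on every CPU individually, usually
 * from a CPU hotplug callback. All foo_* names are assumptions.
 *
 *	foo->stats = alloc_percpu(struct foo_stats);
 *	ret = request_percpu_irq(irq, foo_percpu_handler, "foo", foo->stats);
 *
 *	static int foo_starting_cpu(unsigned int cpu)
 *	{
 *		enable_percpu_irq(irq, IRQ_TYPE_NONE);	// this CPU only
 *		return 0;
 *	}
 *
 * The handler then receives the interrupted CPU's instance of the cookie
 * as its dev_id argument.
 */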

/**
 *	request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *	@name: An ascii name for the claiming device
 *	@dev_id: A percpu cookie passed back to the handler function
 *
 *	This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs
 *	have to be setup on each CPU by calling prepare_percpu_nmi() before
 *	being enabled on the same CPU by using enable_percpu_nmi().
 *
 *	Dev_id must be globally unique. It is a per-cpu variable, and
 *	the handler gets called with the interrupted CPU's instance of
 *	that variable.
 *
 *	Interrupt lines requested for NMI delivering should have auto enabling
 *	setting disabled.
 *
 *	If the interrupt line cannot be used to deliver NMIs, function
 *	will fail returning a negative value.
 */
int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
		       const char *name, void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	unsigned long flags;
	int retval;

	if (!handler)
		return -EINVAL;

	desc = irq_to_desc(irq);

	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc) ||
	    irq_settings_can_autoenable(desc) ||
	    !irq_supports_nmi(desc))
		return -EINVAL;

	/* The line cannot already be an NMI */
	if (desc->istate & IRQS_NMI)
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
		| IRQF_NOBALANCING;
	action->name = name;
	action->percpu_dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		goto err_out;

	retval = __setup_irq(irq, desc, action);
	if (retval)
		goto err_irq_setup;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->istate |= IRQS_NMI;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return 0;

err_irq_setup:
	irq_chip_pm_put(&desc->irq_data);
err_out:
	kfree(action);

	return retval;
}

/**
 *	prepare_percpu_nmi - performs CPU local setup for NMI delivery
 *	@irq: Interrupt line to prepare for NMI delivery
 *
 *	This call prepares an interrupt line to deliver NMI on the current CPU,
 *	before that interrupt line gets enabled with enable_percpu_nmi().
 *
 *	As a CPU local operation, this should be called from non-preemptible
 *	context.
 *
 *	If the interrupt line cannot be used to deliver NMIs, function
 *	will fail returning a negative value.
 */
int prepare_percpu_nmi(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc;
	int ret = 0;

	WARN_ON(preemptible());

	desc = irq_get_desc_lock(irq, &flags,
				 IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return -EINVAL;

	if (WARN(!(desc->istate & IRQS_NMI),
		 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
		 irq)) {
		ret = -EINVAL;
		goto out;
	}

	ret = irq_nmi_setup(desc);
	if (ret) {
		pr_err("Failed to setup NMI delivery: irq %u\n", irq);
		goto out;
	}

out:
	irq_put_desc_unlock(desc, flags);
	return ret;
}

/**
 *	teardown_percpu_nmi - undoes NMI setup of IRQ line
 *	@irq: Interrupt line from which CPU local NMI configuration should be
 *	      removed
 *
 *	This call undoes the setup done by prepare_percpu_nmi().
 *
 *	IRQ line should not be enabled for the current CPU.
 *
 *	As a CPU local operation, this should be called from non-preemptible
 *	context.
 */
void teardown_percpu_nmi(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc;

	WARN_ON(preemptible());

	desc = irq_get_desc_lock(irq, &flags,
				 IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return;

	if (WARN_ON(!(desc->istate & IRQS_NMI)))
		goto out;

	irq_nmi_teardown(desc);
out:
	irq_put_desc_unlock(desc, flags);
}

/**
 *	irq_get_irqchip_state - returns the irqchip state of a interrupt.
 *	@irq: Interrupt line that is forwarded to a VM
 *	@which: One of IRQCHIP_STATE_* the caller wants to know about
 *	@state: a pointer to a boolean where the state is to be stored
 *
 *	This call snapshots the internal irqchip state of an
 *	interrupt, returning into @state the bit corresponding to
 *	stage @which
 *
 *	This function should be called with preemption disabled if the
 *	interrupt controller has per-cpu registers.
 */
int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool *state)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	do {
		chip = irq_data_get_irq_chip(data);
		if (chip->irq_get_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_get_irqchip_state(data, which, state);

	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
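
/*
 * Illustrative sketch (not part of this file): checking whether a line is
 * pending at the irqchip level and clearing it, e.g. before forwarding the
 * interrupt to a guest. The irq variable is hypothetical.
 *
 *	bool pending;
 *
 *	if (!irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending) &&
 *	    pending)
 *		irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);
 *
 * Both calls return -EINVAL unless some chip in the domain hierarchy
 * implements the corresponding irq_get/set_irqchip_state() callback.
 */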

/**
 *	irq_set_irqchip_state - set the state of a forwarded interrupt.
 *	@irq: Interrupt line that is forwarded to a VM
 *	@which: State to be restored (one of IRQCHIP_STATE_*)
 *	@val: Value corresponding to @which
 *
 *	This call sets the internal irqchip state of an interrupt,
 *	depending on the value of @which.
 *
 *	This function should be called with preemption disabled if the
 *	interrupt controller has per-cpu registers.
 */
int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool val)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	do {
		chip = irq_data_get_irq_chip(data);
		if (chip->irq_set_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_set_irqchip_state(data, which, val);

	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_set_irqchip_state);