1
2
3
4
5
6
7
8
9
10#define pr_fmt(fmt) "genirq: " fmt
11
12#include <linux/irq.h>
13#include <linux/kthread.h>
14#include <linux/module.h>
15#include <linux/random.h>
16#include <linux/interrupt.h>
17#include <linux/slab.h>
18#include <linux/sched.h>
19#include <linux/sched/rt.h>
20#include <linux/task_work.h>
21
22#include "internals.h"
23
24#ifdef CONFIG_IRQ_FORCED_THREADING
25__read_mostly bool force_irqthreads;
26
27static int __init setup_forced_irqthreads(char *arg)
28{
29 force_irqthreads = true;
30 return 0;
31}
32early_param("threadirqs", setup_forced_irqthreads);
33#endif
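/*
 * Note: passing "threadirqs" on the kernel command line sets
 * force_irqthreads above, which makes irq_setup_forced_threading()
 * below move every hardirq handler that permits it into a thread.
 */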
34
35static void __synchronize_hardirq(struct irq_desc *desc)
36{
37 bool inprogress;
38
39 do {
40 unsigned long flags;
41
		/*
		 * Wait until we're out of the critical section.  This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
46 while (irqd_irq_inprogress(&desc->irq_data))
47 cpu_relax();
48
		/* Ok, that indicated we're done: double-check carefully. */
50 raw_spin_lock_irqsave(&desc->lock, flags);
51 inprogress = irqd_irq_inprogress(&desc->irq_data);
52 raw_spin_unlock_irqrestore(&desc->lock, flags);
53
		/* Oops, that failed? */
55 } while (inprogress);
56}
57
/**
 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this
 * function while holding a resource the IRQ handler may need you
 * will deadlock. It does not take associated threaded handlers
 * into account.
 *
 * Do not use this for shutdown scenarios where you must be sure
 * that all parts (hardirq and threaded handler) have completed.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 */
75bool synchronize_hardirq(unsigned int irq)
76{
77 struct irq_desc *desc = irq_to_desc(irq);
78
79 if (desc) {
80 __synchronize_hardirq(desc);
81 return !atomic_read(&desc->threads_active);
82 }
83
84 return true;
85}
86EXPORT_SYMBOL(synchronize_hardirq);
87
/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
98void synchronize_irq(unsigned int irq)
99{
100 struct irq_desc *desc = irq_to_desc(irq);
101
102 if (desc) {
103 __synchronize_hardirq(desc);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
109 wait_event(desc->wait_for_threads,
110 !atomic_read(&desc->threads_active));
111 }
112}
113EXPORT_SYMBOL(synchronize_irq);
114
115#ifdef CONFIG_SMP
116cpumask_var_t irq_default_affinity;
117
/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq:	Interrupt to check
 *
 */
123int irq_can_set_affinity(unsigned int irq)
124{
125 struct irq_desc *desc = irq_to_desc(irq);
126
127 if (!desc || !irqd_can_balance(&desc->irq_data) ||
128 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
129 return 0;
130
131 return 1;
132}
133
/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc:	irq descriptor which has affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
143void irq_set_thread_affinity(struct irq_desc *desc)
144{
145 struct irqaction *action = desc->action;
146
147 while (action) {
148 if (action->thread)
149 set_bit(IRQTF_AFFINITY, &action->thread_flags);
150 action = action->next;
151 }
152}
153
154#ifdef CONFIG_GENERIC_PENDING_IRQ
155static inline bool irq_can_move_pcntxt(struct irq_data *data)
156{
157 return irqd_can_move_in_process_context(data);
158}
159static inline bool irq_move_pending(struct irq_data *data)
160{
161 return irqd_is_setaffinity_pending(data);
162}
163static inline void
164irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
165{
166 cpumask_copy(desc->pending_mask, mask);
167}
168static inline void
169irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
170{
171 cpumask_copy(mask, desc->pending_mask);
172}
173#else
174static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
175static inline bool irq_move_pending(struct irq_data *data) { return false; }
176static inline void
177irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
178static inline void
179irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
180#endif
181
182int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
183 bool force)
184{
185 struct irq_desc *desc = irq_data_to_desc(data);
186 struct irq_chip *chip = irq_data_get_irq_chip(data);
187 int ret;
188
189 ret = chip->irq_set_affinity(data, mask, force);
190 switch (ret) {
191 case IRQ_SET_MASK_OK:
192 case IRQ_SET_MASK_OK_DONE:
193 cpumask_copy(data->affinity, mask);
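		/* fall through to update the thread affinity as well */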
194 case IRQ_SET_MASK_OK_NOCOPY:
195 irq_set_thread_affinity(desc);
196 ret = 0;
197 }
198
199 return ret;
200}
201
202int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
203 bool force)
204{
205 struct irq_chip *chip = irq_data_get_irq_chip(data);
206 struct irq_desc *desc = irq_data_to_desc(data);
207 int ret = 0;
208
209 if (!chip || !chip->irq_set_affinity)
210 return -EINVAL;
211
212 if (irq_can_move_pcntxt(data)) {
213 ret = irq_do_set_affinity(data, mask, force);
214 } else {
215 irqd_set_move_pending(data);
216 irq_copy_pending(desc, mask);
217 }
218
219 if (desc->affinity_notify) {
220 kref_get(&desc->affinity_notify->kref);
221 schedule_work(&desc->affinity_notify->work);
222 }
223 irqd_set(data, IRQD_AFFINITY_SET);
224
225 return ret;
226}
227
228int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
229{
230 struct irq_desc *desc = irq_to_desc(irq);
231 unsigned long flags;
232 int ret;
233
234 if (!desc)
235 return -EINVAL;
236
237 raw_spin_lock_irqsave(&desc->lock, flags);
238 ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
239 raw_spin_unlock_irqrestore(&desc->lock, flags);
240 return ret;
241}
242
243int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
244{
245 unsigned long flags;
246 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
247
248 if (!desc)
249 return -EINVAL;
250 desc->affinity_hint = m;
251 irq_put_desc_unlock(desc, flags);
252
253 if (m)
254 __irq_set_affinity(irq, m, false);
255 return 0;
256}
257EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
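
/*
 * Typical driver usage (hypothetical names, for illustration only):
 * spread per-queue interrupts by hinting one CPU per queue,
 *
 *	irq_set_affinity_hint(queue_irq, cpumask_of(cpu));
 *
 * and clear the hint with a NULL mask on teardown. The hint is
 * exported to userspace (e.g. for irqbalance) via
 * /proc/irq/<irq>/affinity_hint.
 */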
258
/**
 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 * @irq: interrupt number to set affinity
 * @vcpu_info: vCPU specific data
 *
 * This function uses the vCPU specific data to set the vCPU
 * affinity for an irq. The vCPU specific data is passed from
 * outside, such as KVM. One example code path is as below:
 * KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
269int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
270{
271 unsigned long flags;
272 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
273 struct irq_data *data;
274 struct irq_chip *chip;
275 int ret = -ENOSYS;
276
277 if (!desc)
278 return -EINVAL;
279
280 data = irq_desc_get_irq_data(desc);
281 chip = irq_data_get_irq_chip(data);
282 if (chip && chip->irq_set_vcpu_affinity)
283 ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
284 irq_put_desc_unlock(desc, flags);
285
286 return ret;
287}
288EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
289
290static void irq_affinity_notify(struct work_struct *work)
291{
292 struct irq_affinity_notify *notify =
293 container_of(work, struct irq_affinity_notify, work);
294 struct irq_desc *desc = irq_to_desc(notify->irq);
295 cpumask_var_t cpumask;
296 unsigned long flags;
297
298 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
299 goto out;
300
301 raw_spin_lock_irqsave(&desc->lock, flags);
302 if (irq_move_pending(&desc->irq_data))
303 irq_get_pending(cpumask, desc);
304 else
305 cpumask_copy(cpumask, desc->irq_data.affinity);
306 raw_spin_unlock_irqrestore(&desc->lock, flags);
307
308 notify->notify(notify, cpumask);
309
310 free_cpumask_var(cpumask);
311out:
	kref_put(&notify->kref, notify->release);
313}
314
/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq:	Interrupt for which to enable/disable notification
 * @notify:	Context for notification, or %NULL to disable
 *		notification.  Function pointers must be initialised;
 *		the other fields will be initialised by this function.
 *
 * Must be called in process context.  Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
326int
327irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
328{
329 struct irq_desc *desc = irq_to_desc(irq);
330 struct irq_affinity_notify *old_notify;
331 unsigned long flags;
332
	/* The release function is promised process context */
334 might_sleep();
335
336 if (!desc)
337 return -EINVAL;
338
	/* Complete initialisation of *notify before testing desc->affinity_notify */
340 if (notify) {
341 notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
344 }
345
346 raw_spin_lock_irqsave(&desc->lock, flags);
347 old_notify = desc->affinity_notify;
348 desc->affinity_notify = notify;
349 raw_spin_unlock_irqrestore(&desc->lock, flags);
350
351 if (old_notify)
352 kref_put(&old_notify->kref, old_notify->release);
353
354 return 0;
355}
356EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
357
358#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
362static int
363setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
364{
365 struct cpumask *set = irq_default_affinity;
366 int node = irq_desc_get_node(desc);
367
	/* Excludes PER_CPU and NO_BALANCE interrupts */
369 if (!irq_can_set_affinity(irq))
370 return 0;
371
	/*
	 * Preserve the affinity set from userspace, but make sure
	 * that one of the targets is online.
	 */
376 if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
377 if (cpumask_intersects(desc->irq_data.affinity,
378 cpu_online_mask))
379 set = desc->irq_data.affinity;
380 else
381 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
382 }
383
384 cpumask_and(mask, cpu_online_mask, set);
385 if (node != NUMA_NO_NODE) {
386 const struct cpumask *nodemask = cpumask_of_node(node);
387
		/* make sure at least one of the cpus in nodemask is online */
389 if (cpumask_intersects(mask, nodemask))
390 cpumask_and(mask, mask, nodemask);
391 }
392 irq_do_set_affinity(&desc->irq_data, mask, false);
393 return 0;
394}
395#else
396static inline int
397setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
398{
399 return irq_select_affinity(irq);
400}
401#endif
402
/*
 * Called from /proc/irq/ code to (re)run the affinity autoselector
 * for an interrupt on behalf of userspace.
 */
406int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
407{
408 struct irq_desc *desc = irq_to_desc(irq);
409 unsigned long flags;
410 int ret;
411
412 raw_spin_lock_irqsave(&desc->lock, flags);
413 ret = setup_affinity(irq, desc, mask);
414 raw_spin_unlock_irqrestore(&desc->lock, flags);
415 return ret;
416}
417
418#else
419static inline int
420setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
421{
422 return 0;
423}
424#endif
425
426void __disable_irq(struct irq_desc *desc, unsigned int irq)
427{
428 if (!desc->depth++)
429 irq_disable(desc);
430}
431
432static int __disable_irq_nosync(unsigned int irq)
433{
434 unsigned long flags;
435 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
436
437 if (!desc)
438 return -EINVAL;
439 __disable_irq(desc, irq);
440 irq_put_desc_busunlock(desc, flags);
441 return 0;
442}
443
/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line.  Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
455void disable_irq_nosync(unsigned int irq)
456{
457 __disable_irq_nosync(irq);
458}
459EXPORT_SYMBOL(disable_irq_nosync);
460
/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line.  Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
473void disable_irq(unsigned int irq)
474{
475 if (!__disable_irq_nosync(irq))
476 synchronize_irq(irq);
477}
478EXPORT_SYMBOL(disable_irq);
479
/**
 * disable_hardirq - disables an irq and waits for hardirq completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line.  Enables and Disables are
 * nested.
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this function while
 * holding a resource the hard IRQ handler may need you will deadlock.
 *
 * When used to optimistically disable an interrupt from atomic context
 * the return value must be checked.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 */
497bool disable_hardirq(unsigned int irq)
498{
499 if (!__disable_irq_nosync(irq))
500 return synchronize_hardirq(irq);
501
502 return false;
503}
504EXPORT_SYMBOL_GPL(disable_hardirq);
505
506void __enable_irq(struct irq_desc *desc, unsigned int irq)
507{
508 switch (desc->depth) {
509 case 0:
510 err_out:
511 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
512 break;
513 case 1: {
514 if (desc->istate & IRQS_SUSPENDED)
515 goto err_out;
516
517 irq_settings_set_noprobe(desc);
518 irq_enable(desc);
519 check_irq_resend(desc, irq);
520
521 }
522 default:
523 desc->depth--;
524 }
525}
526
/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq().  If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and bus_sync_unlock are NULL !
 */
538void enable_irq(unsigned int irq)
539{
540 unsigned long flags;
541 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
542
543 if (!desc)
544 return;
545 if (WARN(!desc->irq_data.chip,
546 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
547 goto out;
548
549 __enable_irq(desc, irq);
550out:
551 irq_put_desc_busunlock(desc, flags);
552}
553EXPORT_SYMBOL(enable_irq);
554
555static int set_irq_wake_real(unsigned int irq, unsigned int on)
556{
557 struct irq_desc *desc = irq_to_desc(irq);
558 int ret = -ENXIO;
559
560 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
561 return 0;
562
563 if (desc->irq_data.chip->irq_set_wake)
564 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
565
566 return ret;
567}
568
/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq:	interrupt to control
 * @on:		enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default.  Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
581int irq_set_irq_wake(unsigned int irq, unsigned int on)
582{
583 unsigned long flags;
584 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
585 int ret = 0;
586
587 if (!desc)
588 return -EINVAL;
589
	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
593 if (on) {
594 if (desc->wake_depth++ == 0) {
595 ret = set_irq_wake_real(irq, on);
596 if (ret)
597 desc->wake_depth = 0;
598 else
599 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
600 }
601 } else {
602 if (desc->wake_depth == 0) {
603 WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
604 } else if (--desc->wake_depth == 0) {
605 ret = set_irq_wake_real(irq, on);
606 if (ret)
607 desc->wake_depth = 1;
608 else
609 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
610 }
611 }
612 irq_put_desc_busunlock(desc, flags);
613 return ret;
614}
615EXPORT_SYMBOL(irq_set_irq_wake);
616
/**
 * can_request_irq - check if a given irq can be requested
 * @irq: irq number to check
 * @irqflags: flags requested
 */
622int can_request_irq(unsigned int irq, unsigned long irqflags)
623{
624 unsigned long flags;
625 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
626 int canrequest = 0;
627
628 if (!desc)
629 return 0;
630
631 if (irq_settings_can_request(desc)) {
632 if (!desc->action ||
633 irqflags & desc->action->flags & IRQF_SHARED)
634 canrequest = 1;
635 }
636 irq_put_desc_unlock(desc, flags);
637 return canrequest;
638}
639
640int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
641 unsigned long flags)
642{
643 struct irq_chip *chip = desc->irq_data.chip;
644 int ret, unmask = 0;
645
646 if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
651 pr_debug("No set_type function for IRQ %d (%s)\n", irq,
652 chip ? (chip->name ? : "unknown") : "unknown");
653 return 0;
654 }
655
656 flags &= IRQ_TYPE_SENSE_MASK;
657
658 if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
659 if (!irqd_irq_masked(&desc->irq_data))
660 mask_irq(desc);
661 if (!irqd_irq_disabled(&desc->irq_data))
662 unmask = 1;
663 }
664
	/* caller masked out all except trigger mode flags */
666 ret = chip->irq_set_type(&desc->irq_data, flags);
667
668 switch (ret) {
669 case IRQ_SET_MASK_OK:
670 case IRQ_SET_MASK_OK_DONE:
671 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
672 irqd_set(&desc->irq_data, flags);
673
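		/* fall through */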
674 case IRQ_SET_MASK_OK_NOCOPY:
675 flags = irqd_get_trigger_type(&desc->irq_data);
676 irq_settings_set_trigger_mask(desc, flags);
677 irqd_clear(&desc->irq_data, IRQD_LEVEL);
678 irq_settings_clr_level(desc);
679 if (flags & IRQ_TYPE_LEVEL_MASK) {
680 irq_settings_set_level(desc);
681 irqd_set(&desc->irq_data, IRQD_LEVEL);
682 }
683
684 ret = 0;
685 break;
686 default:
687 pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
688 flags, irq, chip->irq_set_type);
689 }
690 if (unmask)
691 unmask_irq(desc);
692 return ret;
693}
694
695#ifdef CONFIG_HARDIRQS_SW_RESEND
696int irq_set_parent(int irq, int parent_irq)
697{
698 unsigned long flags;
699 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
700
701 if (!desc)
702 return -EINVAL;
703
704 desc->parent_irq = parent_irq;
705
706 irq_put_desc_unlock(desc, flags);
707 return 0;
708}
709#endif
710
/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
716static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
717{
718 return IRQ_WAKE_THREAD;
719}
720
/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
725static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
726{
727 WARN(1, "Primary handler called for nested irq %d\n", irq);
728 return IRQ_NONE;
729}
730
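/*
 * Park the interrupt thread until either IRQTF_RUNTHREAD is set by
 * __irq_wake_thread() (return 0) or the thread is asked to stop
 * (return -1).
 */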
731static int irq_wait_for_interrupt(struct irqaction *action)
732{
733 set_current_state(TASK_INTERRUPTIBLE);
734
735 while (!kthread_should_stop()) {
736
737 if (test_and_clear_bit(IRQTF_RUNTHREAD,
738 &action->thread_flags)) {
739 __set_current_state(TASK_RUNNING);
740 return 0;
741 }
742 schedule();
743 set_current_state(TASK_INTERRUPTIBLE);
744 }
745 __set_current_state(TASK_RUNNING);
746 return -1;
747}
748
/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler finished. unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */
754static void irq_finalize_oneshot(struct irq_desc *desc,
755 struct irqaction *action)
756{
757 if (!(desc->istate & IRQS_ONESHOT))
758 return;
759again:
760 chip_bus_lock(desc);
761 raw_spin_lock_irq(&desc->lock);
762
	/*
	 * Implausible though it may be we need to protect us against
	 * the following scenario:
	 *
	 * The thread is faster done than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and masks the line, leaves due
	 * to IRQS_INPROGRESS and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
777 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
778 raw_spin_unlock_irq(&desc->lock);
779 chip_bus_sync_unlock(desc);
780 cpu_relax();
781 goto again;
782 }
783
	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
789 if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
790 goto out_unlock;
791
792 desc->threads_oneshot &= ~action->thread_mask;
793
794 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
795 irqd_irq_masked(&desc->irq_data))
796 unmask_threaded_irq(desc);
797
798out_unlock:
799 raw_spin_unlock_irq(&desc->lock);
800 chip_bus_sync_unlock(desc);
801}
802
803#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
807static void
808irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
809{
810 cpumask_var_t mask;
811 bool valid = true;
812
813 if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
814 return;
815
	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
820 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
821 set_bit(IRQTF_AFFINITY, &action->thread_flags);
822 return;
823 }
824
825 raw_spin_lock_irq(&desc->lock);
826
827
828
829
830 if (desc->irq_data.affinity)
831 cpumask_copy(mask, desc->irq_data.affinity);
832 else
833 valid = false;
834 raw_spin_unlock_irq(&desc->lock);
835
836 if (valid)
837 set_cpus_allowed_ptr(current, mask);
838 free_cpumask_var(mask);
839}
840#else
841static inline void
842irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
843#endif
844
/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
851static irqreturn_t
852irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
853{
854 irqreturn_t ret;
855
856 local_bh_disable();
857 ret = action->thread_fn(action->irq, action->dev_id);
858 irq_finalize_oneshot(desc, action);
859 local_bh_enable();
860 return ret;
861}
862
/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */
868static irqreturn_t irq_thread_fn(struct irq_desc *desc,
869 struct irqaction *action)
870{
871 irqreturn_t ret;
872
873 ret = action->thread_fn(action->irq, action->dev_id);
874 irq_finalize_oneshot(desc, action);
875 return ret;
876}
877
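/*
 * Last step of a threaded handler run: drop threads_active and, when
 * this was the last active thread, wake up waiters in
 * synchronize_irq().
 */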
878static void wake_threads_waitq(struct irq_desc *desc)
879{
880 if (atomic_dec_and_test(&desc->threads_active))
881 wake_up(&desc->wait_for_threads);
882}
883
884static void irq_thread_dtor(struct callback_head *unused)
885{
886 struct task_struct *tsk = current;
887 struct irq_desc *desc;
888 struct irqaction *action;
889
890 if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
891 return;
892
893 action = kthread_data(tsk);
894
895 pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
896 tsk->comm, tsk->pid, action->irq);
897
898
899 desc = irq_to_desc(action->irq);
900
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
904 if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
905 wake_threads_waitq(desc);
906
	/* Prevent a stale desc->threads_oneshot */
908 irq_finalize_oneshot(desc, action);
909}
910
/*
 * Interrupt handler thread
 */
914static int irq_thread(void *data)
915{
916 struct callback_head on_exit_work;
917 struct irqaction *action = data;
918 struct irq_desc *desc = irq_to_desc(action->irq);
919 irqreturn_t (*handler_fn)(struct irq_desc *desc,
920 struct irqaction *action);
921
922 if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
923 &action->thread_flags))
924 handler_fn = irq_forced_thread_fn;
925 else
926 handler_fn = irq_thread_fn;
927
928 init_task_work(&on_exit_work, irq_thread_dtor);
929 task_work_add(current, &on_exit_work, false);
930
931 irq_thread_check_affinity(desc, action);
932
933 while (!irq_wait_for_interrupt(action)) {
934 irqreturn_t action_ret;
935
936 irq_thread_check_affinity(desc, action);
937
938 action_ret = handler_fn(desc, action);
939 if (action_ret == IRQ_HANDLED)
940 atomic_inc(&desc->threads_handled);
941
942 wake_threads_waitq(desc);
943 }
944
	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set. We cannot verify that as we
	 * cannot touch the oneshot mask at this point anymore as
	 * __setup_irq() might have given out this thread_mask
	 * again.
	 */
954 task_work_cancel(current, irq_thread_dtor);
955 return 0;
956}
957
/**
 * irq_wake_thread - wake the irq thread for the action identified by dev_id
 * @irq:	Interrupt line
 * @dev_id:	Device identity for which the thread should be woken
 *
 */
964void irq_wake_thread(unsigned int irq, void *dev_id)
965{
966 struct irq_desc *desc = irq_to_desc(irq);
967 struct irqaction *action;
968 unsigned long flags;
969
970 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
971 return;
972
973 raw_spin_lock_irqsave(&desc->lock, flags);
974 for (action = desc->action; action; action = action->next) {
975 if (action->dev_id == dev_id) {
976 if (action->thread)
977 __irq_wake_thread(desc, action);
978 break;
979 }
980 }
981 raw_spin_unlock_irqrestore(&desc->lock, flags);
982}
983EXPORT_SYMBOL_GPL(irq_wake_thread);
984
985static void irq_setup_forced_threading(struct irqaction *new)
986{
987 if (!force_irqthreads)
988 return;
989 if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
990 return;
991
992 new->flags |= IRQF_ONESHOT;
993
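	/*
	 * Promote the driver's primary handler to the thread function
	 * and install the default primary handler, which only wakes
	 * the thread, in its place.
	 */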
994 if (!new->thread_fn) {
995 set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
996 new->thread_fn = new->handler;
997 new->handler = irq_default_primary_handler;
998 }
999}
1000
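/*
 * Give the irq chip a chance to claim (and later release) chip
 * specific resources before an interrupt gets started up. Both
 * callbacks are optional.
 */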
1001static int irq_request_resources(struct irq_desc *desc)
1002{
1003 struct irq_data *d = &desc->irq_data;
1004 struct irq_chip *c = d->chip;
1005
1006 return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1007}
1008
1009static void irq_release_resources(struct irq_desc *desc)
1010{
1011 struct irq_data *d = &desc->irq_data;
1012 struct irq_chip *c = d->chip;
1013
1014 if (c->irq_release_resources)
1015 c->irq_release_resources(d);
1016}
1017
/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
1022static int
1023__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1024{
1025 struct irqaction *old, **old_ptr;
1026 unsigned long flags, thread_mask = 0;
1027 int ret, nested, shared = 0;
1028 cpumask_var_t mask;
1029
1030 if (!desc)
1031 return -EINVAL;
1032
1033 if (desc->irq_data.chip == &no_irq_chip)
1034 return -ENOSYS;
1035 if (!try_module_get(desc->owner))
1036 return -ENODEV;
1037
	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
1042 nested = irq_settings_is_nested_thread(desc);
1043 if (nested) {
1044 if (!new->thread_fn) {
1045 ret = -EINVAL;
1046 goto out_mput;
1047 }
1048
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
1053 new->handler = irq_nested_primary_handler;
1054 } else {
1055 if (irq_settings_can_thread(desc))
1056 irq_setup_forced_threading(new);
1057 }
1058
	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
1064 if (new->thread_fn && !nested) {
1065 struct task_struct *t;
1066 static const struct sched_param param = {
1067 .sched_priority = MAX_USER_RT_PRIO/2,
1068 };
1069
1070 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1071 new->name);
1072 if (IS_ERR(t)) {
1073 ret = PTR_ERR(t);
1074 goto out_mput;
1075 }
1076
		sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
1078
		/*
		 * We keep the reference to the task struct even if
		 * the thread dies to avoid that the interrupt code
		 * references an already freed task_struct.
		 */
1084 get_task_struct(t);
1085 new->thread = t;
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095 set_bit(IRQTF_AFFINITY, &new->thread_flags);
1096 }
1097
1098 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
1099 ret = -ENOMEM;
1100 goto out_thread;
1101 }
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1113 new->flags &= ~IRQF_ONESHOT;
1114
	/*
	 * The following block of code has to be executed atomically
	 */
1118 raw_spin_lock_irqsave(&desc->lock, flags);
1119 old_ptr = &desc->action;
1120 old = *old_ptr;
1121 if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 */
1129 if (!((old->flags & new->flags) & IRQF_SHARED) ||
1130 ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
1131 ((old->flags ^ new->flags) & IRQF_ONESHOT))
1132 goto mismatch;
1133
		/* All handlers must agree on per-cpuness */
1135 if ((old->flags & IRQF_PERCPU) !=
1136 (new->flags & IRQF_PERCPU))
1137 goto mismatch;
1138
		/* add new interrupt at end of irq queue */
1140 do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
1146 thread_mask |= old->thread_mask;
1147 old_ptr = &old->next;
1148 old = *old_ptr;
1149 } while (old);
1150 shared = 1;
1151 }
1152
	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
1158 if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 resp 64 irqs sharing one line,
		 * but who knows.
		 */
1163 if (thread_mask == ~0UL) {
1164 ret = -EBUSY;
1165 goto out_mask;
1166 }
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187 new->thread_mask = 1 << ffz(thread_mask);
1188
1189 } else if (new->handler == irq_default_primary_handler &&
1190 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is reenabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
1206 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
1207 irq);
1208 ret = -EINVAL;
1209 goto out_mask;
1210 }
1211
1212 if (!shared) {
1213 ret = irq_request_resources(desc);
1214 if (ret) {
1215 pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1216 new->name, irq, desc->irq_data.chip->name);
1217 goto out_mask;
1218 }
1219
1220 init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
1223 if (new->flags & IRQF_TRIGGER_MASK) {
1224 ret = __irq_set_trigger(desc, irq,
1225 new->flags & IRQF_TRIGGER_MASK);
1226
1227 if (ret)
1228 goto out_mask;
1229 }
1230
1231 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1232 IRQS_ONESHOT | IRQS_WAITING);
1233 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1234
1235 if (new->flags & IRQF_PERCPU) {
1236 irqd_set(&desc->irq_data, IRQD_PER_CPU);
1237 irq_settings_set_per_cpu(desc);
1238 }
1239
1240 if (new->flags & IRQF_ONESHOT)
1241 desc->istate |= IRQS_ONESHOT;
1242
1243 if (irq_settings_can_autoenable(desc))
1244 irq_startup(desc, true);
1245 else
			/* Undo nested disables: */
1247 desc->depth = 1;
1248
		/* Exclude IRQ from balancing if requested */
1250 if (new->flags & IRQF_NOBALANCING) {
1251 irq_settings_set_no_balancing(desc);
1252 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1253 }
1254
		/* Set default affinity mask once everything is setup */
1256 setup_affinity(irq, desc, mask);
1257
1258 } else if (new->flags & IRQF_TRIGGER_MASK) {
1259 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1260 unsigned int omsk = irq_settings_get_trigger_mask(desc);
1261
1262 if (nmsk != omsk)
1263
1264 pr_warning("irq %d uses trigger mode %u; requested %u\n",
1265 irq, nmsk, omsk);
1266 }
1267
1268 new->irq = irq;
1269 *old_ptr = new;
1270
1271 irq_pm_install_action(desc, new);
1272
	/* Reset broken irq detection when installing new handler */
1274 desc->irq_count = 0;
1275 desc->irqs_unhandled = 0;
1276
	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
1281 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1282 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1283 __enable_irq(desc, irq);
1284 }
1285
1286 raw_spin_unlock_irqrestore(&desc->lock, flags);
1287
	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
1292 if (new->thread)
1293 wake_up_process(new->thread);
1294
1295 register_irq_proc(irq, desc);
1296 new->dir = NULL;
1297 register_handler_proc(irq, new);
1298 free_cpumask_var(mask);
1299
1300 return 0;
1301
1302mismatch:
1303 if (!(new->flags & IRQF_PROBE_SHARED)) {
1304 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1305 irq, new->flags, new->name, old->flags, old->name);
1306#ifdef CONFIG_DEBUG_SHIRQ
1307 dump_stack();
1308#endif
1309 }
1310 ret = -EBUSY;
1311
1312out_mask:
1313 raw_spin_unlock_irqrestore(&desc->lock, flags);
1314 free_cpumask_var(mask);
1315
1316out_thread:
1317 if (new->thread) {
1318 struct task_struct *t = new->thread;
1319
1320 new->thread = NULL;
1321 kthread_stop(t);
1322 put_task_struct(t);
1323 }
1324out_mput:
1325 module_put(desc->owner);
1326 return ret;
1327}
1328
/**
 * setup_irq - setup an interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
1336int setup_irq(unsigned int irq, struct irqaction *act)
1337{
1338 int retval;
1339 struct irq_desc *desc = irq_to_desc(irq);
1340
1341 if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1342 return -EINVAL;
1343 chip_bus_lock(desc);
1344 retval = __setup_irq(irq, desc, act);
1345 chip_bus_sync_unlock(desc);
1346
1347 return retval;
1348}
1349EXPORT_SYMBOL_GPL(setup_irq);
1350
/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
1355static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1356{
1357 struct irq_desc *desc = irq_to_desc(irq);
1358 struct irqaction *action, **action_ptr;
1359 unsigned long flags;
1360
1361 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1362
1363 if (!desc)
1364 return NULL;
1365
1366 raw_spin_lock_irqsave(&desc->lock, flags);
1367
	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
1372 action_ptr = &desc->action;
1373 for (;;) {
1374 action = *action_ptr;
1375
1376 if (!action) {
1377 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1378 raw_spin_unlock_irqrestore(&desc->lock, flags);
1379
1380 return NULL;
1381 }
1382
1383 if (action->dev_id == dev_id)
1384 break;
1385 action_ptr = &action->next;
1386 }
1387
	/* Found it - now remove it from the list of entries: */
1389 *action_ptr = action->next;
1390
1391 irq_pm_remove_action(desc, action);
1392
	/* If this was the last handler, shut down the IRQ line: */
1394 if (!desc->action) {
1395 irq_shutdown(desc);
1396 irq_release_resources(desc);
1397 }
1398
1399#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
1401 if (WARN_ON_ONCE(desc->affinity_hint))
1402 desc->affinity_hint = NULL;
1403#endif
1404
1405 raw_spin_unlock_irqrestore(&desc->lock, flags);
1406
1407 unregister_handler_proc(irq, action);
1408
	/* Make sure it's not being used on another CPU: */
1410 synchronize_irq(irq);
1411
1412#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an
	 * IRQ event to happen even now it's being freed, so let's make
	 * sure it is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure
	 *   that a 'real' IRQ doesn't run in parallel with our fake. )
	 */
1421 if (action->flags & IRQF_SHARED) {
1422 local_irq_save(flags);
1423 action->handler(irq, dev_id);
1424 local_irq_restore(flags);
1425 }
1426#endif
1427
1428 if (action->thread) {
1429 kthread_stop(action->thread);
1430 put_task_struct(action->thread);
1431 }
1432
1433 module_put(desc->owner);
1434 return action;
1435}
1436
/**
 * remove_irq - free an interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
1444void remove_irq(unsigned int irq, struct irqaction *act)
1445{
1446 struct irq_desc *desc = irq_to_desc(irq);
1447
1448 if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1449 __free_irq(irq, act->dev_id);
1450}
1451EXPORT_SYMBOL_GPL(remove_irq);
1452
/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
1467void free_irq(unsigned int irq, void *dev_id)
1468{
1469 struct irq_desc *desc = irq_to_desc(irq);
1470
1471 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1472 return;
1473
1474#ifdef CONFIG_SMP
1475 if (WARN_ON(desc->affinity_notify))
1476 desc->affinity_notify = NULL;
1477#endif
1478
1479 chip_bus_lock(desc);
1480 kfree(__free_irq(irq, dev_id));
1481 chip_bus_sync_unlock(desc);
1482}
1483EXPORT_SYMBOL(free_irq);
1484
/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Primary handler for threaded interrupts.
 *	     If NULL and thread_fn != NULL the default
 *	     primary handler is installed.
 * @thread_fn: Function called from the irq handler thread.
 *	       If NULL, no irq thread is created.
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * @dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 */
1527int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1528 irq_handler_t thread_fn, unsigned long irqflags,
1529 const char *devname, void *dev_id)
1530{
1531 struct irqaction *action;
1532 struct irq_desc *desc;
1533 int retval;
1534
	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 *
	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
	 * it cannot be set along with IRQF_NO_SUSPEND.
	 */
1544 if (((irqflags & IRQF_SHARED) && !dev_id) ||
1545 (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
1546 ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
1547 return -EINVAL;
1548
1549 desc = irq_to_desc(irq);
1550 if (!desc)
1551 return -EINVAL;
1552
1553 if (!irq_settings_can_request(desc) ||
1554 WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1555 return -EINVAL;
1556
1557 if (!handler) {
1558 if (!thread_fn)
1559 return -EINVAL;
1560 handler = irq_default_primary_handler;
1561 }
1562
1563 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1564 if (!action)
1565 return -ENOMEM;
1566
1567 action->handler = handler;
1568 action->thread_fn = thread_fn;
1569 action->flags = irqflags;
1570 action->name = devname;
1571 action->dev_id = dev_id;
1572
1573 chip_bus_lock(desc);
1574 retval = __setup_irq(irq, desc, action);
1575 chip_bus_sync_unlock(desc);
1576
1577 if (retval)
1578 kfree(action);
1579
1580#ifdef CONFIG_DEBUG_SHIRQ_FIXME
1581 if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
1588 unsigned long flags;
1589
1590 disable_irq(irq);
1591 local_irq_save(flags);
1592
1593 handler(irq, dev_id);
1594
1595 local_irq_restore(flags);
1596 enable_irq(irq);
1597 }
1598#endif
1599 return retval;
1600}
1601EXPORT_SYMBOL(request_threaded_irq);
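
/*
 * Hypothetical usage sketch (names are illustrative, not part of this
 * file): a device whose hard interrupt handler only checks and quiesces
 * the chip and defers the real work to a thread,
 *
 *	ret = request_threaded_irq(irq, foo_quick_check, foo_thread_fn,
 *				   0, "foo", foo_dev);
 *
 * foo_quick_check() runs in hard interrupt context and returns
 * IRQ_WAKE_THREAD to run foo_thread_fn() in process context. Passing
 * handler == NULL instead requires IRQF_ONESHOT, as enforced in
 * __setup_irq() above.
 */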
1602
/**
 * request_any_context_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Threaded handler for threaded interrupts.
 * @flags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It selects either a
 * hardirq or threaded handling method depending on the
 * context.
 *
 * On failure, it returns a negative value. On success,
 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
1620int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1621 unsigned long flags, const char *name, void *dev_id)
1622{
1623 struct irq_desc *desc = irq_to_desc(irq);
1624 int ret;
1625
1626 if (!desc)
1627 return -EINVAL;
1628
1629 if (irq_settings_is_nested_thread(desc)) {
1630 ret = request_threaded_irq(irq, NULL, handler,
1631 flags, name, dev_id);
1632 return !ret ? IRQC_IS_NESTED : ret;
1633 }
1634
1635 ret = request_irq(irq, handler, flags, name, dev_id);
1636 return !ret ? IRQC_IS_HARDIRQ : ret;
1637}
1638EXPORT_SYMBOL_GPL(request_any_context_irq);
1639
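/*
 * enable_percpu_irq - enable a per cpu interrupt on the calling CPU,
 * optionally (re)configuring its trigger type first. The caller must
 * prevent migration to another CPU, as smp_processor_id() is used.
 */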
1640void enable_percpu_irq(unsigned int irq, unsigned int type)
1641{
1642 unsigned int cpu = smp_processor_id();
1643 unsigned long flags;
1644 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1645
1646 if (!desc)
1647 return;
1648
1649 type &= IRQ_TYPE_SENSE_MASK;
1650 if (type != IRQ_TYPE_NONE) {
1651 int ret;
1652
1653 ret = __irq_set_trigger(desc, irq, type);
1654
1655 if (ret) {
1656 WARN(1, "failed to set type for IRQ%d\n", irq);
1657 goto out;
1658 }
1659 }
1660
1661 irq_percpu_enable(desc, cpu);
1662out:
1663 irq_put_desc_unlock(desc, flags);
1664}
1665EXPORT_SYMBOL_GPL(enable_percpu_irq);
1666
1667void disable_percpu_irq(unsigned int irq)
1668{
1669 unsigned int cpu = smp_processor_id();
1670 unsigned long flags;
1671 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1672
1673 if (!desc)
1674 return;
1675
1676 irq_percpu_disable(desc, cpu);
1677 irq_put_desc_unlock(desc, flags);
1678}
1679EXPORT_SYMBOL_GPL(disable_percpu_irq);
1680
/*
 * Internal function to unregister a percpu irqaction.
 */
1684static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1685{
1686 struct irq_desc *desc = irq_to_desc(irq);
1687 struct irqaction *action;
1688 unsigned long flags;
1689
1690 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1691
1692 if (!desc)
1693 return NULL;
1694
1695 raw_spin_lock_irqsave(&desc->lock, flags);
1696
1697 action = desc->action;
1698 if (!action || action->percpu_dev_id != dev_id) {
1699 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1700 goto bad;
1701 }
1702
1703 if (!cpumask_empty(desc->percpu_enabled)) {
1704 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
1705 irq, cpumask_first(desc->percpu_enabled));
1706 goto bad;
1707 }
1708
1709
1710 desc->action = NULL;
1711
1712 raw_spin_unlock_irqrestore(&desc->lock, flags);
1713
1714 unregister_handler_proc(irq, action);
1715
1716 module_put(desc->owner);
1717 return action;
1718
1719bad:
1720 raw_spin_unlock_irqrestore(&desc->lock, flags);
1721 return NULL;
1722}
1723
/**
 * remove_percpu_irq - free a per-cpu interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
1731void remove_percpu_irq(unsigned int irq, struct irqaction *act)
1732{
1733 struct irq_desc *desc = irq_to_desc(irq);
1734
1735 if (desc && irq_settings_is_per_cpu_devid(desc))
1736 __free_percpu_irq(irq, act->percpu_dev_id);
1737}
1738
/**
 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove a percpu interrupt handler. The handler is removed, but
 * the interrupt line is not disabled. This must be done on each
 * CPU before calling this function. The function does not return
 * until any executing interrupts for this IRQ have completed.
 *
 * This function must not be called from interrupt context.
 */
1751void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1752{
1753 struct irq_desc *desc = irq_to_desc(irq);
1754
1755 if (!desc || !irq_settings_is_per_cpu_devid(desc))
1756 return;
1757
1758 chip_bus_lock(desc);
1759 kfree(__free_percpu_irq(irq, dev_id));
1760 chip_bus_sync_unlock(desc);
1761}
1762
/**
 * setup_percpu_irq - setup a per-cpu interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup per-cpu interrupts in the early boot process.
 */
1770int setup_percpu_irq(unsigned int irq, struct irqaction *act)
1771{
1772 struct irq_desc *desc = irq_to_desc(irq);
1773 int retval;
1774
1775 if (!desc || !irq_settings_is_per_cpu_devid(desc))
1776 return -EINVAL;
1777 chip_bus_lock(desc);
1778 retval = __setup_irq(irq, desc, act);
1779 chip_bus_sync_unlock(desc);
1780
1781 return retval;
1782}
1783
/**
 * request_percpu_irq - allocate a percpu interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 * @devname: An ascii name for the claiming device
 * @dev_id: A percpu cookie passed back to the handler function
 *
 * This call allocates interrupt resources, but doesn't
 * automatically enable the interrupt. It has to be done on each
 * CPU using enable_percpu_irq().
 *
 * @dev_id must be globally unique. It is a per-cpu variable, and
 * the handler gets called with the interrupted CPU's instance of
 * that variable.
 */
1799int request_percpu_irq(unsigned int irq, irq_handler_t handler,
1800 const char *devname, void __percpu *dev_id)
1801{
1802 struct irqaction *action;
1803 struct irq_desc *desc;
1804 int retval;
1805
1806 if (!dev_id)
1807 return -EINVAL;
1808
1809 desc = irq_to_desc(irq);
1810 if (!desc || !irq_settings_can_request(desc) ||
1811 !irq_settings_is_per_cpu_devid(desc))
1812 return -EINVAL;
1813
1814 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1815 if (!action)
1816 return -ENOMEM;
1817
1818 action->handler = handler;
1819 action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
1820 action->name = devname;
1821 action->percpu_dev_id = dev_id;
1822
1823 chip_bus_lock(desc);
1824 retval = __setup_irq(irq, desc, action);
1825 chip_bus_sync_unlock(desc);
1826
1827 if (retval)
1828 kfree(action);
1829
1830 return retval;
1831}
1832
/**
 * irq_get_irqchip_state - returns the irqchip state of an interrupt.
 * @irq: Interrupt line that is forwarded to a VM
 * @which: One of IRQCHIP_STATE_* the caller wants to know about
 * @state: a pointer to a boolean where the state is to be stored
 *
 * This call snapshots the internal irqchip state of an
 * interrupt, returning into @state the bit corresponding to
 * state @which
 *
 * This function should be called with preemption disabled if the
 * interrupt controller has per-cpu registers.
 */
1846int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
1847 bool *state)
1848{
1849 struct irq_desc *desc;
1850 struct irq_data *data;
1851 struct irq_chip *chip;
1852 unsigned long flags;
1853 int err = -EINVAL;
1854
1855 desc = irq_get_desc_buslock(irq, &flags, 0);
1856 if (!desc)
1857 return err;
1858
1859 data = irq_desc_get_irq_data(desc);
1860
1861 do {
1862 chip = irq_data_get_irq_chip(data);
1863 if (chip->irq_get_irqchip_state)
1864 break;
1865#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1866 data = data->parent_data;
1867#else
1868 data = NULL;
1869#endif
1870 } while (data);
1871
1872 if (data)
1873 err = chip->irq_get_irqchip_state(data, which, state);
1874
1875 irq_put_desc_busunlock(desc, flags);
1876 return err;
1877}
1878
/**
 * irq_set_irqchip_state - set the state of a forwarded interrupt.
 * @irq: Interrupt line that is forwarded to a VM
 * @which: State to be restored (one of IRQCHIP_STATE_*)
 * @val: Value corresponding to @which
 *
 * This call sets the internal irqchip state of an interrupt,
 * depending on the value of @which.
 *
 * This function should be called with preemption disabled if the
 * interrupt controller has per-cpu registers.
 */
1891int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
1892 bool val)
1893{
1894 struct irq_desc *desc;
1895 struct irq_data *data;
1896 struct irq_chip *chip;
1897 unsigned long flags;
1898 int err = -EINVAL;
1899
1900 desc = irq_get_desc_buslock(irq, &flags, 0);
1901 if (!desc)
1902 return err;
1903
1904 data = irq_desc_get_irq_data(desc);
1905
1906 do {
1907 chip = irq_data_get_irq_chip(data);
1908 if (chip->irq_set_irqchip_state)
1909 break;
1910#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1911 data = data->parent_data;
1912#else
1913 data = NULL;
1914#endif
1915 } while (data);
1916
1917 if (data)
1918 err = chip->irq_set_irqchip_state(data, which, val);
1919
1920 irq_put_desc_busunlock(desc, flags);
1921 return err;
1922}
1923