/*
 * linux/kernel/irq/manage.c
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/task_work.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

static void __synchronize_hardirq(struct irq_desc *desc)
{
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section.  This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}
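
/**
 *	synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending hard IRQ handlers for this
 *	interrupt to complete before returning. If you use this
 *	function while holding a resource the IRQ handler may need you
 *	will deadlock. It does not take associated threaded handlers
 *	into account.
 *
 *	Do not use this for shutdown scenarios where you must be sure
 *	that all parts (hardirq and threaded handler) have completed.
 *
 *	This function may be called - with care - from IRQ context.
 */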
void synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc)
		__synchronize_hardirq(desc);
}
EXPORT_SYMBOL(synchronize_hardirq);
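
/**
 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *	It waits for both the hard IRQ handler and any threaded handlers.
 *
 *	This function may be called - with care - from IRQ context.
 */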
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		/*
		 * We made sure that no hardirq handler is running.
		 * Now verify that no threaded handlers are active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;
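
/**
 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 *	@irq:		Interrupt to check
 */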
int irq_can_set_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return 0;

	return 1;
}
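
/**
 *	irq_set_thread_affinity - Notify irq threads to adjust affinity
 *	@desc:		irq descriptor which has affinity changed
 *
 *	We just set IRQTF_AFFINITY and delegate the affinity setting
 *	to the interrupt thread itself. We can not call
 *	set_cpus_allowed_ptr() here as we hold desc->lock and this
 *	code can be called from hard interrupt context.
 */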
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action = desc->action;

	while (action) {
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
		action = action->next;
	}
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return irqd_can_move_in_process_context(data);
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return irqd_is_setaffinity_pending(data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
#else
static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
static inline bool irq_move_pending(struct irq_data *data) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	ret = chip->irq_set_affinity(data, mask, force);
	switch (ret) {
	case IRQ_SET_MASK_OK:
		cpumask_copy(data->affinity, mask);
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data)) {
		ret = irq_do_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}
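
/**
 *	irq_set_affinity_notifier - control notification of IRQ affinity changes
 *	@irq:		Interrupt for which to enable/disable notification
 *	@notify:	Context for notification, or %NULL to disable
 *			notification.  Function pointers must be initialised;
 *			the other fields will be initialised by this function.
 *
 *	Must be called in process context.  Notification may only be enabled
 *	after the IRQ is allocated and must be disabled before the IRQ is
 *	freed using free_irq().
 */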
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	struct cpumask *set = irq_default_affinity;
	int node = desc->irq_data.node;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve an userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_data.affinity,
				       cpu_online_mask))
			set = desc->irq_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(mask, nodemask))
			cpumask_and(mask, mask, nodemask);
	}
	irq_do_set_affinity(&desc->irq_data, mask, false);
	return 0;
}
#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
{
	return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq.
 */
int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(irq, desc, mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	return 0;
}
#endif

void __disable_irq(struct irq_desc *desc, unsigned int irq)
{
	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc, irq);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}
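
/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and Enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */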
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);
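
/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */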
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

void __enable_irq(struct irq_desc *desc, unsigned int irq)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		irq_enable(desc);
		check_irq_resend(desc, irq);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}
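
/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq().  If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context only when
 *	desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */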
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc, irq);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}
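
/**
 *	irq_set_irq_wake - control irq power management wakeup
 *	@irq:	interrupt to control
 *	@on:	enable/disable power management wakeup
 *
 *	Enable/disable power management wakeup mode, which is
 *	disabled by default.  Enables and disables must match,
 *	just as they match for non-wakeup mode support.
 *
 *	Wakeup mode lets this IRQ wake the system from sleep
 *	states like "suspend to RAM".
 */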
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
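
/*
 * Internal function that tells whether a particular irq can be
 * requested: the line must not be flagged NOREQUEST, and it must
 * either have no handler installed or allow sharing compatible
 * with the passed irqflags.
 */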
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
		      unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	flags &= IRQ_TYPE_SENSE_MASK;

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* caller masked out all except trigger mode flags */
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);

	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq, chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
#endif
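
/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */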
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return -1;
}
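
/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler finished. unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */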
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT))
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be we need to protect us against
	 * the following scenario:
	 *
	 * The thread is faster done than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and masks the line, leaves due
	 * to IRQS_INPROGRESS and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread().
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);

	/*
	 * Copy the affinity mask under desc->lock; if no affinity is
	 * set, leave the thread's CPU mask alone.
	 */
	if (desc->irq_data.affinity)
		cpumask_copy(mask, desc->irq_data.affinity);
	else
		valid = false;
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and
 * useless interrupt storms.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
		struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);


	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}
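
/*
 * Interrupt handler thread
 */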
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
			struct irqaction *action);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					 &action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, false);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_HANDLED)
			atomic_inc(&desc->threads_handled);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling synchronize_irq(),
	 * so neither IRQTF_RUNTHREAD nor the oneshot mask bit can be
	 * set. We cannot verify that here as we cannot touch the
	 * oneshot mask at this point anymore, since __setup_irq()
	 * might have handed out the current thread_mask again.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}
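
/**
 *	irq_wake_thread - wake the irq thread for the action identified by dev_id
 *	@irq:		Interrupt line
 *	@dev_id:	Device identity for which the thread should be woken
 */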
void irq_wake_thread(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for (action = desc->action; action; action = action->next) {
		if (action->dev_id == dev_id) {
			if (action->thread)
				__irq_wake_thread(desc, action);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);

static void irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return;

	new->flags |= IRQF_ONESHOT;

	if (!new->thread_fn) {
		set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
		new->thread_fn = new->handler;
		new->handler = irq_default_primary_handler;
	}
}

static int irq_request_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
}

static void irq_release_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	if (c->irq_release_resources)
		c->irq_release_resources(d);
}
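
/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */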
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;
	cpumask_var_t mask;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc))
			irq_setup_forced_threading(new);
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		struct task_struct *t;
		static const struct sched_param param = {
			.sched_priority = MAX_USER_RT_PRIO/2,
		};

		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
		if (IS_ERR(t)) {
			ret = PTR_ERR(t);
			goto out_mput;
		}

		sched_setscheduler_nocheck(t, SCHED_FIFO, &param);

		/*
		 * We keep the reference to the task struct even if
		 * the thread dies to avoid that the interrupt code
		 * references an already freed task_struct.
		 */
		get_task_struct(t);
		new->thread = t;
		/*
		 * Tell the thread to set its affinity. This is
		 * important for shared interrupt handlers as we do
		 * not invoke setup_affinity() for the secondary
		 * handlers as everything is already set up. Even for
		 * interrupts marked with IRQF_NO_BALANCING this is
		 * correct as we want the thread to move to the cpu(s)
		 * on which the requesting code placed the interrupt.
		 */
		set_bit(IRQTF_AFFINITY, &new->thread_flags);
	}

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto out_thread;
	}

	/*
	 * Drivers are often written to work w/o knowledge about the
	 * underlying irq chip implementation, so a request for a
	 * threaded irq without a primary hard irq context handler
	 * requires the ONESHOT flag to be set. Some irq chips like
	 * MSI based interrupts are per se one shot safe. Check the
	 * chip flags, so we can avoid the unmask in this case.
	 */
	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
		new->flags &= ~IRQF_ONESHOT;

	/*
	 * The following block of code has to be executed atomically.
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * are set in both flags fields must match. Also all
		 * must agree on ONESHOT.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
			goto mismatch;

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conflict for this.
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 resp 64 irqs sharing one line,
		 * but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_mask;
		}
		/*
		 * The thread_mask for the action is or'ed to
		 * desc->threads_oneshot to indicate that the
		 * IRQF_ONESHOT thread handler has been woken, but not
		 * yet finished. The bit is cleared when a thread
		 * completes. When all threads of a shared interrupt
		 * line have completed, desc->threads_oneshot becomes
		 * zero and the interrupt line is unmasked.
		 *
		 * If no thread is woken by primary (hard irq context)
		 * interrupt handlers, then the thread mask is 0 and
		 * we can avoid a conflict and the unmask of the
		 * interrupt line, because the device cannot trigger
		 * interrupts anymore.
		 */
		new->thread_mask = 1 << ffz(thread_mask);

	} else if (new->handler == irq_default_primary_handler &&
		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is reenabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
		       irq);
		ret = -EINVAL;
		goto out_mask;
	}

	if (!shared) {
		ret = irq_request_resources(desc);
		if (ret) {
			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
			       new->name, irq, desc->irq_data.chip->name);
			goto out_mask;
		}

		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc, irq,
					new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_mask;
		}

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		if (irq_settings_can_autoenable(desc))
			irq_startup(desc, true);
		else
			/* Undo nested disables: */
			desc->depth = 1;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		/* Set default affinity mask once everything is setup */
		setup_affinity(irq, desc, mask);

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irq_settings_get_trigger_mask(desc);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warning("irq %d uses trigger mode %u; requested %u\n",
				   irq, nmsk, omsk);
	}

	new->irq = irq;
	*old_ptr = new;

	irq_pm_install_action(desc, new);

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc, irq);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	free_cpumask_var(mask);

	return 0;

mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
		       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
	}
	ret = -EBUSY;

out_mask:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	free_cpumask_var(mask);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}
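
/**
 *	setup_irq - setup an interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */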
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;
	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);

			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	irq_pm_remove_action(desc, action);

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		irq_shutdown(desc);
		irq_release_resources(desc);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
	}

	module_put(desc->owner);
	return action;
}

/**
 *	remove_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		__free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);
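
/**
 *	free_irq - free an interrupt allocated with request_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 */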
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	chip_bus_lock(desc);
	kfree(__free_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(free_irq);
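
/**
 *	request_threaded_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Primary handler for threaded interrupts.
 *		  If NULL and thread_fn != NULL the default
 *		  primary handler is installed.
 *	@thread_fn: Function called from the irq handler thread
 *		    If NULL, no irq thread is created
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	If you want to set up a threaded irq handler for your device
 *	then you need to supply @handler and @thread_fn. @handler is
 *	still called in hard interrupt context and has to check
 *	whether the interrupt originates from the device. If yes it
 *	needs to disable the interrupt on the device and return
 *	IRQ_WAKE_THREAD which will wake up the handler thread and run
 *	@thread_fn. This split handler design is necessary to support
 *	shared interrupts.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 */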
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 */
	if ((irqflags & IRQF_SHARED) && !dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
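
/**
 *	request_any_context_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Threaded handler for threaded interrupts.
 *	@flags: Interrupt type flags
 *	@name: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. It selects either a
 *	hardirq or threaded handling method depending on the
 *	context.
 *
 *	On failure, it returns a negative value. On success,
 *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */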
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret;

	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);

void enable_percpu_irq(unsigned int irq, unsigned int type)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	type &= IRQ_TYPE_SENSE_MASK;
	if (type != IRQ_TYPE_NONE) {
		int ret;

		ret = __irq_set_trigger(desc, irq, type);

		if (ret) {
			WARN(1, "failed to set type for IRQ%d\n", irq);
			goto out;
		}
	}

	irq_percpu_enable(desc, cpu);
out:
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(enable_percpu_irq);

void disable_percpu_irq(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	irq_percpu_disable(desc, cpu);
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(disable_percpu_irq);

/*
 * Internal function to unregister a percpu irqaction.
 */
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	action = desc->action;
	if (!action || action->percpu_dev_id != dev_id) {
		WARN(1, "Trying to free already-free IRQ %d\n", irq);
		goto bad;
	}

	if (!cpumask_empty(desc->percpu_enabled)) {
		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
		     irq, cpumask_first(desc->percpu_enabled));
		goto bad;
	}

	/* Found it - now remove it from the list of entries: */
	desc->action = NULL;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	module_put(desc->owner);
	return action;

bad:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return NULL;
}

/**
 *	remove_percpu_irq - free a per-cpu interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 *	Used to remove interrupts statically setup by the early boot process.
 */
void remove_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && irq_settings_is_per_cpu_devid(desc))
		__free_percpu_irq(irq, act->percpu_dev_id);
}

/**
 *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove a percpu interrupt handler. The handler is removed, but
 *	the interrupt line is not disabled. This must be done on each
 *	CPU before calling this function.
 *
 *	This function must not be called from interrupt context.
 */
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	chip_bus_lock(desc);
	kfree(__free_percpu_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}

/**
 *	setup_percpu_irq - setup a per-cpu interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 * Used to statically setup per-cpu interrupts in the early boot process.
 */
int setup_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int retval;

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;
	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}
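
/**
 *	request_percpu_irq - allocate a percpu interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A percpu cookie passed back to the handler function
 *
 *	This call allocates interrupt resources, but doesn't
 *	automatically enable the interrupt. It has to be done on each
 *	CPU using enable_percpu_irq().
 *
 *	Dev_id must be globally unique. It is a per-cpu variable, and
 *	the handler gets called with the interrupted CPU's instance of
 *	that variable.
 */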
int request_percpu_irq(unsigned int irq, irq_handler_t handler,
		       const char *devname, void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (!dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
	action->name = devname;
	action->percpu_dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

	return retval;
}