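/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */
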
#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/task_work.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

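/**
 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */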
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	bool inprogress;

	if (!desc)
		return;

	do {
		unsigned long flags;

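		/*
		 * Wait until we're out of the critical section.  This might
		 * give the wrong answer due to the lack of memory barriers.
		 */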
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

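		/* Ok, that indicated we're done: double-check carefully. */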
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

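		/* Oops, that failed? */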
	} while (inprogress);

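	/*
	 * We made sure that no hardirq handler is running. Now verify
	 * that no threaded handlers are active.
	 */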
	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
EXPORT_SYMBOL(synchronize_irq);

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

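/**
 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 *	@irq:		Interrupt to check
 *
 */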
int irq_can_set_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return 0;

	return 1;
}

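/**
 *	irq_set_thread_affinity - Notify irq threads to adjust affinity
 *	@desc:		irq descriptor which has affinity changed
 *
 *	We just set IRQTF_AFFINITY and delegate the affinity setting
 *	to the interrupt thread itself. We can not call
 *	set_cpus_allowed_ptr() here as we hold desc->lock and this
 *	code can be called from hard interrupt context.
 */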
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action = desc->action;

	while (action) {
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
		action = action->next;
	}
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return irqd_can_move_in_process_context(data);
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return irqd_is_setaffinity_pending(data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
#else
static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
static inline bool irq_move_pending(struct irq_data *data) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	ret = chip->irq_set_affinity(data, mask, false);
	switch (ret) {
	case IRQ_SET_MASK_OK:
		cpumask_copy(data->affinity, mask);
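		/* fall through */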
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data)) {
		ret = irq_do_set_affinity(data, mask, false);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

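/**
 *	irq_set_affinity - Set the irq affinity of a given irq
 *	@irq:		Interrupt to set affinity
 *	@mask:		cpumask
 *
 */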
int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

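/**
 *	irq_set_affinity_notifier - control notification of IRQ affinity changes
 *	@irq:		Interrupt for which to enable/disable notification
 *	@notify:	Context for notification, or %NULL to disable
 *			notification.  Function pointers must be initialised;
 *			the other fields will be initialised by this function.
 *
 *	Must be called in process context.  Notification may only be enabled
 *	after the IRQ is allocated and must be disabled before the IRQ is
 *	freed using free_irq().
 */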
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

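	/* The release function is promised process context */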
	might_sleep();

	if (!desc)
		return -EINVAL;

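	/* Complete initialisation of *notify */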
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);

#ifndef CONFIG_AUTO_IRQ_AFFINITY
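/*
 * Generic version of the affinity autoselector.
 */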
static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	struct cpumask *set = irq_default_affinity;
	int node = desc->irq_data.node;

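	/* Excludes PER_CPU and NO_BALANCE interrupts */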
	if (!irq_can_set_affinity(irq))
		return 0;

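	/*
	 * Preserve a userspace-configured affinity if at least one
	 * of its target CPUs is still online.
	 */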
	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_data.affinity,
				       cpu_online_mask))
			set = desc->irq_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

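		/* make sure at least one of the cpus in nodemask is online */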
		if (cpumask_intersects(mask, nodemask))
			cpumask_and(mask, mask, nodemask);
	}
	irq_do_set_affinity(&desc->irq_data, mask, false);
	return 0;
}
#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
{
	return irq_select_affinity(irq);
}
#endif

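/*
 * Called when affinity is set via /proc/irq
 */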
int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(irq, desc, mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	return 0;
}
#endif

void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
{
	if (suspend) {
		if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
			return;
		desc->istate |= IRQS_SUSPENDED;
	}

	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc, irq, false);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

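/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and Enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */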
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

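/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */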
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
	if (resume) {
		if (!(desc->istate & IRQS_SUSPENDED)) {
			if (!desc->action)
				return;
			if (!(desc->action->flags & IRQF_FORCE_RESUME))
				return;
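			/* Pretend that it got disabled ! */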
			desc->depth++;
		}
		desc->istate &= ~IRQS_SUSPENDED;
	}

	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
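		/* Prevent probing on this irq: */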
		irq_settings_set_noprobe(desc);
		irq_enable(desc);
		check_irq_resend(desc, irq);
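		/* fall-through */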
	}
	default:
		desc->depth--;
	}
}

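/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq().  If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context only when
 *	desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */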
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc, irq, false);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

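/**
 *	irq_set_irq_wake - control irq power management wakeup
 *	@irq:	interrupt to control
 *	@on:	enable/disable power management wakeup
 *
 *	Enable/disable power management wakeup mode, which is
 *	disabled by default.  Enables and disables must match,
 *	just as they match for non-wakeup mode support.
 *
 *	Wakeup mode lets this IRQ wake the system from sleep
 *	states like "suspend to RAM".
 */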
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

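	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */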
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);

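/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */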
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (desc->action)
			if (irqflags & desc->action->flags & IRQF_SHARED)
				canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
		      unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
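		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */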
		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	flags &= IRQ_TYPE_SENSE_MASK;

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

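	/* caller masked out all except trigger mode flags */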
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);

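		/* fall through */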
	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq, chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
#endif

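/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */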
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

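/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */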
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return -1;
}

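/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler finished. unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */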
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT))
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

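	/*
	 * Implausible though it may be we need to protect us against
	 * the following scenario:
	 *
	 * The thread is faster done than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and masks the line, leaves due
	 * to IRQS_INPROGRESS and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */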
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

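	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */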
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
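/*
 * Check whether we need to change the affinity of the interrupt thread.
 */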
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

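	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */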
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
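	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */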
	if (desc->irq_data.affinity)
		cpumask_copy(mask, desc->irq_data.affinity);
	else
		valid = false;
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

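/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */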
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
}

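/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */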
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active) &&
	    waitqueue_active(&desc->wait_for_threads))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);
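	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */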
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

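	/* Prevent a stale desc->threads_oneshot */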
	irq_finalize_oneshot(desc, action);
}

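/*
 * Interrupt handler thread
 */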
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	static const struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
			struct irqaction *action);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					&action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	sched_setscheduler(current, SCHED_FIFO, &param);

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, false);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (!noirqdebug)
			note_interrupt(action->irq, desc, action_ret);

		wake_threads_waitq(desc);
	}

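	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set. We cannot verify that as we
	 * cannot touch the oneshot mask at this point anymore as
	 * __setup_irq() might have given out currents thread_mask
	 * again.
	 */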
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}

static void irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return;

	new->flags |= IRQF_ONESHOT;

	if (!new->thread_fn) {
		set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
		new->thread_fn = new->handler;
		new->handler = irq_default_primary_handler;
	}
}

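/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */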
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;
	cpumask_var_t mask;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;

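	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */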
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
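		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */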
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc))
			irq_setup_forced_threading(new);
	}

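	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */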
	if (new->thread_fn && !nested) {
		struct task_struct *t;

		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
		if (IS_ERR(t)) {
			ret = PTR_ERR(t);
			goto out_mput;
		}

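		/*
		 * We keep the reference to the task struct even if
		 * the thread dies to avoid that the interrupt code
		 * references an already freed task_struct.
		 */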
		get_task_struct(t);
		new->thread = t;

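		/*
		 * Tell the thread to set its affinity. This is
		 * important for shared interrupt handlers as we do
		 * not invoke setup_affinity() for the secondary
		 * handlers as everything might be affinity controlled
		 * by the interrupt core itself.
		 */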
		set_bit(IRQTF_AFFINITY, &new->thread_flags);
	}

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto out_thread;
	}

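	/*
	 * Drivers are often written to work w/o knowledge about the
	 * underlying irq chip implementation, so a request for a
	 * threaded irq without a primary hard irq context handler
	 * requires the ONESHOT flag to be set. Some irq chips like
	 * MSI based interrupts are per se one shot safe. Check the
	 * chip flags, so we can avoid the unmask dance at the end of
	 * the threaded handler for those.
	 */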
	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
		new->flags &= ~IRQF_ONESHOT;

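	/*
	 * The following block of code has to be executed atomically.
	 */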
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
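		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 */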
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
			goto mismatch;

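		/* All handlers must agree on per-cpuness */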
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

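		/* add new interrupt at end of irq queue */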
		do {
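			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */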
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

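	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */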
	if (new->flags & IRQF_ONESHOT) {
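		/*
		 * Unlikely to have 32 resp 64 irqs sharing one line,
		 * but who knows.
		 */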
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_mask;
		}
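		/*
		 * desc->threads_oneshot is a per-line bitfield with
		 * one bit per threaded handler. A bit is set in
		 * irq_wake_thread() when the handler thread is woken
		 * and cleared in irq_finalize_oneshot() when it
		 * finishes; the line is only unmasked once all bits
		 * are clear. ffz() picks the first zero bit, i.e. a
		 * free mask bit for this new action.
		 */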
		new->thread_mask = 1 << ffz(thread_mask);

	} else if (new->handler == irq_default_primary_handler &&
		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
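		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is reenabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */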
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
		       irq);
		ret = -EINVAL;
		goto out_mask;
	}

	if (!shared) {
		init_waitqueue_head(&desc->wait_for_threads);

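		/* Setup the type (level, edge polarity) if configured: */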
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc, irq,
					new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_mask;
		}

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		if (irq_settings_can_autoenable(desc))
			irq_startup(desc, true);
		else
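			/* Undo nested disables: */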
			desc->depth = 1;

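		/* Exclude IRQ from balancing if requested */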
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

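		/* Set default affinity mask once everything is setup */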
		setup_affinity(irq, desc, mask);

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irq_settings_get_trigger_mask(desc);

		if (nmsk != omsk)
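			/* hope the handler works with current trigger mode */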
			pr_warning("irq %d uses trigger mode %u; requested %u\n",
				   irq, omsk, nmsk);
	}

	new->irq = irq;
	*old_ptr = new;

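	/* Reset broken irq detection when installing new handler */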
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

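	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */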
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc, irq, false);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

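	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */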
	if (new->thread)
		wake_up_process(new->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	free_cpumask_var(mask);

	return 0;

mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
		       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
	}
	ret = -EBUSY;

out_mask:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	free_cpumask_var(mask);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}

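/**
 *	setup_irq - setup an interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */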
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;
	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);

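/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */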
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

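	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */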
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);

			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

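	/* Found it - now remove it from the list of entries: */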
	*action_ptr = action->next;

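	/* If this was the last handler, shutdown the IRQ line: */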
	if (!desc->action)
		irq_shutdown(desc);

#ifdef CONFIG_SMP
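	/* make sure affinity_hint is cleaned up */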
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

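	/* Make sure it's not being used on another CPU: */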
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
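	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */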
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
	}

	module_put(desc->owner);
	return action;
}

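/**
 *	remove_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */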
void remove_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		__free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

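/**
 *	free_irq - free an interrupt allocated with request_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 */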
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	chip_bus_lock(desc);
	kfree(__free_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(free_irq);

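/**
 *	request_threaded_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Primary handler for threaded interrupts.
 *		  If NULL and thread_fn != NULL the default
 *		  primary handler is installed.
 *	@thread_fn: Function called from the irq handler thread.
 *		    If NULL, no irq thread is created.
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	If you want to set up a threaded irq handler for your device
 *	then you need to supply @handler and @thread_fn. @handler is
 *	still called in hard interrupt context and has to check
 *	whether the interrupt originates from the device. If yes it
 *	needs to disable the interrupt on the device and return
 *	IRQ_WAKE_THREAD which will wake up the handler thread and run
 *	@thread_fn. This split handler design is necessary to support
 *	shared interrupts.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 */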
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

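	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 */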
	if ((irqflags & IRQF_SHARED) && !dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
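		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */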
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);

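/**
 *	request_any_context_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Threaded handler for threaded interrupts.
 *	@flags: Interrupt type flags
 *	@name: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. It selects either a
 *	hardirq or threaded handling method depending on the
 *	context.
 *
 *	On failure, it returns a negative value. On success,
 *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */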
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret;

	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);

void enable_percpu_irq(unsigned int irq, unsigned int type)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	type &= IRQ_TYPE_SENSE_MASK;
	if (type != IRQ_TYPE_NONE) {
		int ret;

		ret = __irq_set_trigger(desc, irq, type);

		if (ret) {
			WARN(1, "failed to set type for IRQ%d\n", irq);
			goto out;
		}
	}

	irq_percpu_enable(desc, cpu);
out:
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(enable_percpu_irq);

void disable_percpu_irq(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	irq_percpu_disable(desc, cpu);
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(disable_percpu_irq);

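/*
 * Internal function to unregister a percpu irqaction.
 */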
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	action = desc->action;
	if (!action || action->percpu_dev_id != dev_id) {
		WARN(1, "Trying to free already-free IRQ %d\n", irq);
		goto bad;
	}

	if (!cpumask_empty(desc->percpu_enabled)) {
		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
		     irq, cpumask_first(desc->percpu_enabled));
		goto bad;
	}

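	/* Found it - now remove it from the list of entries: */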
	desc->action = NULL;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	module_put(desc->owner);
	return action;

bad:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return NULL;
}

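/**
 *	remove_percpu_irq - free a per-cpu interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */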
void remove_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && irq_settings_is_per_cpu_devid(desc))
		__free_percpu_irq(irq, act->percpu_dev_id);
}

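/**
 *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove a percpu interrupt handler. The handler is removed, but
 *	the interrupt line is not disabled. This must be done on each
 *	CPU before calling this function. The function does not return
 *	until any executing interrupts for this IRQ have completed.
 *
 *	This function must not be called from interrupt context.
 */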
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	chip_bus_lock(desc);
	kfree(__free_percpu_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}

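/**
 *	setup_percpu_irq - setup a per-cpu interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 * Used to statically setup per-cpu interrupts in the early boot process.
 */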
int setup_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int retval;

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;
	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}

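/**
 *	request_percpu_irq - allocate a percpu interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A percpu cookie passed back to the handler function
 *
 *	This call allocates interrupt resources, but doesn't
 *	automatically enable the interrupt. It has to be done on each
 *	CPU using enable_percpu_irq().
 *
 *	Dev_id must be globally unique. It is a per-cpu variable, and
 *	the handler gets called with the interrupted CPU's instance of
 *	that variable.
 */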
int request_percpu_irq(unsigned int irq, irq_handler_t handler,
		       const char *devname, void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (!dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
	action->name = devname;
	action->percpu_dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

	return retval;
}