#define pr_fmt(fmt) "genirq: " fmt
11
12#include <linux/irq.h>
13#include <linux/kthread.h>
14#include <linux/module.h>
15#include <linux/random.h>
16#include <linux/interrupt.h>
17#include <linux/slab.h>
18#include <linux/sched.h>
19#include <linux/sched/rt.h>
20#include <linux/task_work.h>
21
22#include "internals.h"
23
24#ifdef CONFIG_IRQ_FORCED_THREADING
25__read_mostly bool force_irqthreads;
26
27static int __init setup_forced_irqthreads(char *arg)
28{
29 force_irqthreads = true;
30 return 0;
31}
32early_param("threadirqs", setup_forced_irqthreads);
33#endif
44
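/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * Waits until all hard IRQ handlers and threaded handlers running for
 * this interrupt line have completed.
 */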
45void synchronize_irq(unsigned int irq)
46{
47 struct irq_desc *desc = irq_to_desc(irq);
48 bool inprogress;
49
50 if (!desc)
51 return;
52
53 do {
54 unsigned long flags;
55
56
57
58
59
60 while (irqd_irq_inprogress(&desc->irq_data))
61 cpu_relax();
62
63
64 raw_spin_lock_irqsave(&desc->lock, flags);
65 inprogress = irqd_irq_inprogress(&desc->irq_data);
66 raw_spin_unlock_irqrestore(&desc->lock, flags);
67
68
69 } while (inprogress);
70
71
72
73
74
75 wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
76}
77EXPORT_SYMBOL(synchronize_irq);
78
79#ifdef CONFIG_SMP
80cpumask_var_t irq_default_affinity;
86
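/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq: Interrupt to check
 */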
87int irq_can_set_affinity(unsigned int irq)
88{
89 struct irq_desc *desc = irq_to_desc(irq);
90
91 if (!desc || !irqd_can_balance(&desc->irq_data) ||
92 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
93 return 0;
94
95 return 1;
96}
106
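/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc: irq descriptor which has affinity changed
 *
 * Just set IRQTF_AFFINITY and delegate the actual affinity change to
 * the interrupt thread itself: it cannot be done here, as this can be
 * called from hard interrupt context with desc->lock held.
 */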
107void irq_set_thread_affinity(struct irq_desc *desc)
108{
109 struct irqaction *action = desc->action;
110
111 while (action) {
112 if (action->thread)
113 set_bit(IRQTF_AFFINITY, &action->thread_flags);
114 action = action->next;
115 }
116}
117
118#ifdef CONFIG_GENERIC_PENDING_IRQ
119static inline bool irq_can_move_pcntxt(struct irq_data *data)
120{
121 return irqd_can_move_in_process_context(data);
122}
123static inline bool irq_move_pending(struct irq_data *data)
124{
125 return irqd_is_setaffinity_pending(data);
126}
127static inline void
128irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
129{
130 cpumask_copy(desc->pending_mask, mask);
131}
132static inline void
133irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
134{
135 cpumask_copy(mask, desc->pending_mask);
136}
137#else
138static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
139static inline bool irq_move_pending(struct irq_data *data) { return false; }
140static inline void
141irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
142static inline void
143irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
144#endif
145
146int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
147 bool force)
148{
149 struct irq_desc *desc = irq_data_to_desc(data);
150 struct irq_chip *chip = irq_data_get_irq_chip(data);
151 int ret;
152
153 ret = chip->irq_set_affinity(data, mask, false);
154 switch (ret) {
155 case IRQ_SET_MASK_OK:
156 cpumask_copy(data->affinity, mask);
157 case IRQ_SET_MASK_OK_NOCOPY:
158 irq_set_thread_affinity(desc);
159 ret = 0;
160 }
161
162 return ret;
163}
164
165int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
166{
167 struct irq_chip *chip = irq_data_get_irq_chip(data);
168 struct irq_desc *desc = irq_data_to_desc(data);
169 int ret = 0;
170
171 if (!chip || !chip->irq_set_affinity)
172 return -EINVAL;
173
174 if (irq_can_move_pcntxt(data)) {
175 ret = irq_do_set_affinity(data, mask, false);
176 } else {
177 irqd_set_move_pending(data);
178 irq_copy_pending(desc, mask);
179 }
180
181 if (desc->affinity_notify) {
182 kref_get(&desc->affinity_notify->kref);
183 schedule_work(&desc->affinity_notify->work);
184 }
185 irqd_set(data, IRQD_AFFINITY_SET);
186
187 return ret;
188}
195
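/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq: Interrupt to set affinity
 * @mask: cpumask
 */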
196int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
197{
198 struct irq_desc *desc = irq_to_desc(irq);
199 unsigned long flags;
200 int ret;
201
202 if (!desc)
203 return -EINVAL;
204
205 raw_spin_lock_irqsave(&desc->lock, flags);
206 ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
207 raw_spin_unlock_irqrestore(&desc->lock, flags);
208 return ret;
209}
210
211int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
212{
213 unsigned long flags;
214 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
215
216 if (!desc)
217 return -EINVAL;
218 desc->affinity_hint = m;
219 irq_put_desc_unlock(desc, flags);
220 return 0;
221}
222EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
223
224static void irq_affinity_notify(struct work_struct *work)
225{
226 struct irq_affinity_notify *notify =
227 container_of(work, struct irq_affinity_notify, work);
228 struct irq_desc *desc = irq_to_desc(notify->irq);
229 cpumask_var_t cpumask;
230 unsigned long flags;
231
232 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
233 goto out;
234
235 raw_spin_lock_irqsave(&desc->lock, flags);
236 if (irq_move_pending(&desc->irq_data))
237 irq_get_pending(cpumask, desc);
238 else
239 cpumask_copy(cpumask, desc->irq_data.affinity);
240 raw_spin_unlock_irqrestore(&desc->lock, flags);
241
242 notify->notify(notify, cpumask);
243
244 free_cpumask_var(cpumask);
245out:
	kref_put(&notify->kref, notify->release);
247}
259
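/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq: Interrupt for which to enable/disable notification
 * @notify: Context for notification, or %NULL to disable
 *          notification.  Function pointers must be initialised;
 *          the other fields will be initialised by this function.
 *
 * Must be called in process context.  Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */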
260int
261irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
262{
263 struct irq_desc *desc = irq_to_desc(irq);
264 struct irq_affinity_notify *old_notify;
265 unsigned long flags;
266
267
268 might_sleep();
269
270 if (!desc)
271 return -EINVAL;
272
273
274 if (notify) {
275 notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
278 }
279
280 raw_spin_lock_irqsave(&desc->lock, flags);
281 old_notify = desc->affinity_notify;
282 desc->affinity_notify = notify;
283 raw_spin_unlock_irqrestore(&desc->lock, flags);
284
285 if (old_notify)
286 kref_put(&old_notify->kref, old_notify->release);
287
288 return 0;
289}
290EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
291
292#ifndef CONFIG_AUTO_IRQ_AFFINITY
295
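/*
 * Generic version of the irq affinity autoselector.
 */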
296static int
297setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
298{
299 struct cpumask *set = irq_default_affinity;
300 int node = desc->irq_data.node;
301
302
303 if (!irq_can_set_affinity(irq))
304 return 0;
305
306
307
308
309
310 if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
311 if (cpumask_intersects(desc->irq_data.affinity,
312 cpu_online_mask))
313 set = desc->irq_data.affinity;
314 else
315 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
316 }
317
318 cpumask_and(mask, cpu_online_mask, set);
319 if (node != NUMA_NO_NODE) {
320 const struct cpumask *nodemask = cpumask_of_node(node);
321
322
323 if (cpumask_intersects(mask, nodemask))
324 cpumask_and(mask, mask, nodemask);
325 }
326 irq_do_set_affinity(&desc->irq_data, mask, false);
327 return 0;
328}
329#else
330static inline int
331setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
332{
333 return irq_select_affinity(irq);
334}
335#endif
336
337
338
339
340int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
341{
342 struct irq_desc *desc = irq_to_desc(irq);
343 unsigned long flags;
344 int ret;
345
346 raw_spin_lock_irqsave(&desc->lock, flags);
347 ret = setup_affinity(irq, desc, mask);
348 raw_spin_unlock_irqrestore(&desc->lock, flags);
349 return ret;
350}
351
352#else
353static inline int
354setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
355{
356 return 0;
357}
358#endif
359
360void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
361{
362 if (suspend) {
363 if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
364 return;
365 desc->istate |= IRQS_SUSPENDED;
366 }
367
368 if (!desc->depth++)
369 irq_disable(desc);
370}
371
372static int __disable_irq_nosync(unsigned int irq)
373{
374 unsigned long flags;
375 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
376
377 if (!desc)
378 return -EINVAL;
379 __disable_irq(desc, irq, false);
380 irq_put_desc_busunlock(desc, flags);
381 return 0;
382}
394
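/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line.  Disables and enables are
 * nested.  Unlike disable_irq(), this function does not ensure that
 * existing instances of the IRQ handler have completed before
 * returning.
 *
 * This function may be called from IRQ context.
 */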
395void disable_irq_nosync(unsigned int irq)
396{
397 __disable_irq_nosync(irq);
398}
399EXPORT_SYMBOL(disable_irq_nosync);
412
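/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line.  Enables and disables are
 * nested.  This function waits for any pending IRQ handlers for this
 * interrupt to complete before returning, so calling it while holding
 * a resource those handlers may need can deadlock.
 */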
413void disable_irq(unsigned int irq)
414{
415 if (!__disable_irq_nosync(irq))
416 synchronize_irq(irq);
417}
418EXPORT_SYMBOL(disable_irq);
419
420void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
421{
422 if (resume) {
423 if (!(desc->istate & IRQS_SUSPENDED)) {
424 if (!desc->action)
425 return;
426 if (!(desc->action->flags & IRQF_FORCE_RESUME))
427 return;
428
429 desc->depth++;
430 }
431 desc->istate &= ~IRQS_SUSPENDED;
432 }
433
434 switch (desc->depth) {
435 case 0:
436 err_out:
437 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
438 break;
439 case 1: {
440 if (desc->istate & IRQS_SUSPENDED)
441 goto err_out;
442
443 irq_settings_set_noprobe(desc);
444 irq_enable(desc);
445 check_irq_resend(desc, irq);
446
447 }
448 default:
449 desc->depth--;
450 }
451}
463
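/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq().  If this matches the
 * last disable, processing of interrupts on this IRQ line is
 * re-enabled.
 */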
464void enable_irq(unsigned int irq)
465{
466 unsigned long flags;
467 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
468
469 if (!desc)
470 return;
471 if (WARN(!desc->irq_data.chip,
472 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
473 goto out;
474
475 __enable_irq(desc, irq, false);
476out:
477 irq_put_desc_busunlock(desc, flags);
478}
479EXPORT_SYMBOL(enable_irq);
480
481static int set_irq_wake_real(unsigned int irq, unsigned int on)
482{
483 struct irq_desc *desc = irq_to_desc(irq);
484 int ret = -ENXIO;
485
486 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
487 return 0;
488
489 if (desc->irq_data.chip->irq_set_wake)
490 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
491
492 return ret;
493}
506
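/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is disabled by
 * default.  Enables and disables must match, just as they match for
 * non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep states like
 * "suspend to RAM".
 */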
507int irq_set_irq_wake(unsigned int irq, unsigned int on)
508{
509 unsigned long flags;
510 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
511 int ret = 0;
512
513 if (!desc)
514 return -EINVAL;
515
516
517
518
519 if (on) {
520 if (desc->wake_depth++ == 0) {
521 ret = set_irq_wake_real(irq, on);
522 if (ret)
523 desc->wake_depth = 0;
524 else
525 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
526 }
527 } else {
528 if (desc->wake_depth == 0) {
529 WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
530 } else if (--desc->wake_depth == 0) {
531 ret = set_irq_wake_real(irq, on);
532 if (ret)
533 desc->wake_depth = 1;
534 else
535 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
536 }
537 }
538 irq_put_desc_busunlock(desc, flags);
539 return ret;
540}
541EXPORT_SYMBOL(irq_set_irq_wake);
547
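/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */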
548int can_request_irq(unsigned int irq, unsigned long irqflags)
549{
550 unsigned long flags;
551 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
552 int canrequest = 0;
553
554 if (!desc)
555 return 0;
556
557 if (irq_settings_can_request(desc)) {
558 if (!desc->action ||
559 irqflags & desc->action->flags & IRQF_SHARED)
560 canrequest = 1;
561 }
562 irq_put_desc_unlock(desc, flags);
563 return canrequest;
564}
565
566int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
567 unsigned long flags)
568{
569 struct irq_chip *chip = desc->irq_data.chip;
570 int ret, unmask = 0;
571
572 if (!chip || !chip->irq_set_type) {
573
574
575
576
577 pr_debug("No set_type function for IRQ %d (%s)\n", irq,
578 chip ? (chip->name ? : "unknown") : "unknown");
579 return 0;
580 }
581
582 flags &= IRQ_TYPE_SENSE_MASK;
583
584 if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
585 if (!irqd_irq_masked(&desc->irq_data))
586 mask_irq(desc);
587 if (!irqd_irq_disabled(&desc->irq_data))
588 unmask = 1;
589 }
590
591
592 ret = chip->irq_set_type(&desc->irq_data, flags);
593
594 switch (ret) {
595 case IRQ_SET_MASK_OK:
596 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
597 irqd_set(&desc->irq_data, flags);
598
599 case IRQ_SET_MASK_OK_NOCOPY:
600 flags = irqd_get_trigger_type(&desc->irq_data);
601 irq_settings_set_trigger_mask(desc, flags);
602 irqd_clear(&desc->irq_data, IRQD_LEVEL);
603 irq_settings_clr_level(desc);
604 if (flags & IRQ_TYPE_LEVEL_MASK) {
605 irq_settings_set_level(desc);
606 irqd_set(&desc->irq_data, IRQD_LEVEL);
607 }
608
609 ret = 0;
610 break;
611 default:
612 pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
613 flags, irq, chip->irq_set_type);
614 }
615 if (unmask)
616 unmask_irq(desc);
617 return ret;
618}
619
620#ifdef CONFIG_HARDIRQS_SW_RESEND
621int irq_set_parent(int irq, int parent_irq)
622{
623 unsigned long flags;
624 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
625
626 if (!desc)
627 return -EINVAL;
628
629 desc->parent_irq = parent_irq;
630
631 irq_put_desc_unlock(desc, flags);
632 return 0;
633}
634#endif
640
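/*
 * Default primary interrupt handler for threaded interrupts.  Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL.  Useful for oneshot interrupts.
 */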
641static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
642{
643 return IRQ_WAKE_THREAD;
644}
649
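/*
 * Primary handler for nested threaded interrupts.  Should never be
 * called.
 */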
650static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
651{
652 WARN(1, "Primary handler called for nested irq %d\n", irq);
653 return IRQ_NONE;
654}
655
656static int irq_wait_for_interrupt(struct irqaction *action)
657{
658 set_current_state(TASK_INTERRUPTIBLE);
659
660 while (!kthread_should_stop()) {
661
662 if (test_and_clear_bit(IRQTF_RUNTHREAD,
663 &action->thread_flags)) {
664 __set_current_state(TASK_RUNNING);
665 return 0;
666 }
667 schedule();
668 set_current_state(TASK_INTERRUPTIBLE);
669 }
670 __set_current_state(TASK_RUNNING);
671 return -1;
672}
678
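/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished.  Unmask the line again here, unless the
 * interrupt was disabled in the meantime.
 */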
679static void irq_finalize_oneshot(struct irq_desc *desc,
680 struct irqaction *action)
681{
682 if (!(desc->istate & IRQS_ONESHOT))
683 return;
684again:
685 chip_bus_lock(desc);
686 raw_spin_lock_irq(&desc->lock);
701
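	/*
	 * The hard interrupt handler might still be running on another
	 * CPU.  Unmasking the line while that is in progress could
	 * leave the interrupt wrongly masked or unmasked, so drop the
	 * locks, wait and retry until IRQD_IRQ_INPROGRESS is clear.
	 */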
702 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
703 raw_spin_unlock_irq(&desc->lock);
704 chip_bus_sync_unlock(desc);
705 cpu_relax();
706 goto again;
707 }
708
709
710
711
712
713
714 if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
715 goto out_unlock;
716
717 desc->threads_oneshot &= ~action->thread_mask;
718
719 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
720 irqd_irq_masked(&desc->irq_data))
721 unmask_irq(desc);
722
723out_unlock:
724 raw_spin_unlock_irq(&desc->lock);
725 chip_bus_sync_unlock(desc);
726}
727
728#ifdef CONFIG_SMP
731
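/*
 * Check whether we need to change the affinity of the interrupt thread.
 */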
732static void
733irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
734{
735 cpumask_var_t mask;
736 bool valid = true;
737
738 if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
739 return;
740
741
742
743
744
745 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
746 set_bit(IRQTF_AFFINITY, &action->thread_flags);
747 return;
748 }
749
750 raw_spin_lock_irq(&desc->lock);
751
752
753
754
755 if (desc->irq_data.affinity)
756 cpumask_copy(mask, desc->irq_data.affinity);
757 else
758 valid = false;
759 raw_spin_unlock_irq(&desc->lock);
760
761 if (valid)
762 set_cpus_allowed_ptr(current, mask);
763 free_cpumask_var(mask);
764}
765#else
766static inline void
767irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
768#endif
775
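/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context, so disable bh here to avoid deadlocks and other side
 * effects.
 */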
776static irqreturn_t
777irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
778{
779 irqreturn_t ret;
780
781 local_bh_disable();
782 ret = action->thread_fn(action->irq, action->dev_id);
783 irq_finalize_oneshot(desc, action);
784 local_bh_enable();
785 return ret;
786}
792
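/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses
 * to complete.
 */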
793static irqreturn_t irq_thread_fn(struct irq_desc *desc,
794 struct irqaction *action)
795{
796 irqreturn_t ret;
797
798 ret = action->thread_fn(action->irq, action->dev_id);
799 irq_finalize_oneshot(desc, action);
800 return ret;
801}
802
803static void wake_threads_waitq(struct irq_desc *desc)
804{
805 if (atomic_dec_and_test(&desc->threads_active) &&
806 waitqueue_active(&desc->wait_for_threads))
807 wake_up(&desc->wait_for_threads);
808}
809
810static void irq_thread_dtor(struct callback_head *unused)
811{
812 struct task_struct *tsk = current;
813 struct irq_desc *desc;
814 struct irqaction *action;
815
816 if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
817 return;
818
819 action = kthread_data(tsk);
820
821 pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
822 tsk->comm, tsk->pid, action->irq);
823
824
825 desc = irq_to_desc(action->irq);
826
827
828
829
830 if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
831 wake_threads_waitq(desc);
832
833
834 irq_finalize_oneshot(desc, action);
835}
839
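/*
 * Interrupt handler thread
 */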
840static int irq_thread(void *data)
841{
842 struct callback_head on_exit_work;
843 struct irqaction *action = data;
844 struct irq_desc *desc = irq_to_desc(action->irq);
845 irqreturn_t (*handler_fn)(struct irq_desc *desc,
846 struct irqaction *action);
847
848 if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
849 &action->thread_flags))
850 handler_fn = irq_forced_thread_fn;
851 else
852 handler_fn = irq_thread_fn;
853
854 init_task_work(&on_exit_work, irq_thread_dtor);
855 task_work_add(current, &on_exit_work, false);
856
857 irq_thread_check_affinity(desc, action);
858
859 while (!irq_wait_for_interrupt(action)) {
860 irqreturn_t action_ret;
861
862 irq_thread_check_affinity(desc, action);
863
864 action_ret = handler_fn(desc, action);
865 if (!noirqdebug)
866 note_interrupt(action->irq, desc, action_ret);
867
868 wake_threads_waitq(desc);
869 }
879
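	/*
	 * This is the regular exit path: __free_irq() stops the thread
	 * via kthread_stop() after calling synchronize_irq(), so
	 * neither IRQTF_RUNTHREAD nor the oneshot mask bit can still
	 * be set at this point.
	 */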
880 task_work_cancel(current, irq_thread_dtor);
881 return 0;
882}
883
884static void irq_setup_forced_threading(struct irqaction *new)
885{
886 if (!force_irqthreads)
887 return;
888 if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
889 return;
890
891 new->flags |= IRQF_ONESHOT;
892
893 if (!new->thread_fn) {
894 set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
895 new->thread_fn = new->handler;
896 new->handler = irq_default_primary_handler;
897 }
898}
903
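/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */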
904static int
905__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
906{
907 struct irqaction *old, **old_ptr;
908 unsigned long flags, thread_mask = 0;
909 int ret, nested, shared = 0;
910 cpumask_var_t mask;
911
912 if (!desc)
913 return -EINVAL;
914
915 if (desc->irq_data.chip == &no_irq_chip)
916 return -ENOSYS;
917 if (!try_module_get(desc->owner))
918 return -ENODEV;
919
920
921
922
923
924 nested = irq_settings_is_nested_thread(desc);
925 if (nested) {
926 if (!new->thread_fn) {
927 ret = -EINVAL;
928 goto out_mput;
929 }
930
931
932
933
934
935 new->handler = irq_nested_primary_handler;
936 } else {
937 if (irq_settings_can_thread(desc))
938 irq_setup_forced_threading(new);
939 }
940
941
942
943
944
945
946 if (new->thread_fn && !nested) {
947 struct task_struct *t;
948 static const struct sched_param param = {
949 .sched_priority = MAX_USER_RT_PRIO/2,
950 };
951
952 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
953 new->name);
954 if (IS_ERR(t)) {
955 ret = PTR_ERR(t);
956 goto out_mput;
957 }
958
		sched_setscheduler(t, SCHED_FIFO, &param);
960
961
962
963
964
965
966 get_task_struct(t);
967 new->thread = t;
968
969
970
971
972
973
974
975
976
977 set_bit(IRQTF_AFFINITY, &new->thread_flags);
978 }
979
980 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
981 ret = -ENOMEM;
982 goto out_thread;
983 }
984
985
986
987
988
989
990
991
992
993
994 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
995 new->flags &= ~IRQF_ONESHOT;
996
997
998
999
1000 raw_spin_lock_irqsave(&desc->lock, flags);
1001 old_ptr = &desc->action;
1002 old = *old_ptr;
1003 if (old) {
1004
1005
1006
1007
1008
1009
1010
1011 if (!((old->flags & new->flags) & IRQF_SHARED) ||
1012 ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
1013 ((old->flags ^ new->flags) & IRQF_ONESHOT))
1014 goto mismatch;
1015
1016
1017 if ((old->flags & IRQF_PERCPU) !=
1018 (new->flags & IRQF_PERCPU))
1019 goto mismatch;
1020
1021
1022 do {
1023
1024
1025
1026
1027
1028 thread_mask |= old->thread_mask;
1029 old_ptr = &old->next;
1030 old = *old_ptr;
1031 } while (old);
1032 shared = 1;
1033 }
1034
1035
1036
1037
1038
1039
1040 if (new->flags & IRQF_ONESHOT) {
1041
1042
1043
1044
1045 if (thread_mask == ~0UL) {
1046 ret = -EBUSY;
1047 goto out_mask;
1048 }
1068
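		/*
		 * desc->threads_oneshot is a bitfield; each threaded
		 * action on this line owns one bit.  The bit is set
		 * when the thread is woken and cleared when it
		 * finishes, and the line is only unmasked once all
		 * bits are clear again.  Hand the first free bit to
		 * this action.
		 */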
1069 new->thread_mask = 1 << ffz(thread_mask);
1070
1071 } else if (new->handler == irq_default_primary_handler &&
1072 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1087
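		/*
		 * The default primary handler only wakes the thread
		 * and returns IRQ_HANDLED.  For a level triggered
		 * interrupt that is still asserted this would fire the
		 * hard interrupt again immediately, so refuse the
		 * request unless IRQF_ONESHOT keeps the line masked
		 * while the thread runs (or the chip is oneshot safe).
		 */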
1088 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
1089 irq);
1090 ret = -EINVAL;
1091 goto out_mask;
1092 }
1093
1094 if (!shared) {
1095 init_waitqueue_head(&desc->wait_for_threads);
1096
1097
1098 if (new->flags & IRQF_TRIGGER_MASK) {
1099 ret = __irq_set_trigger(desc, irq,
1100 new->flags & IRQF_TRIGGER_MASK);
1101
1102 if (ret)
1103 goto out_mask;
1104 }
1105
1106 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1107 IRQS_ONESHOT | IRQS_WAITING);
1108 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1109
1110 if (new->flags & IRQF_PERCPU) {
1111 irqd_set(&desc->irq_data, IRQD_PER_CPU);
1112 irq_settings_set_per_cpu(desc);
1113 }
1114
1115 if (new->flags & IRQF_ONESHOT)
1116 desc->istate |= IRQS_ONESHOT;
1117
1118 if (irq_settings_can_autoenable(desc))
1119 irq_startup(desc, true);
1120 else
1121
1122 desc->depth = 1;
1123
1124
1125 if (new->flags & IRQF_NOBALANCING) {
1126 irq_settings_set_no_balancing(desc);
1127 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1128 }
1129
1130
1131 setup_affinity(irq, desc, mask);
1132
1133 } else if (new->flags & IRQF_TRIGGER_MASK) {
1134 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1135 unsigned int omsk = irq_settings_get_trigger_mask(desc);
1136
1137 if (nmsk != omsk)
1138
1139 pr_warning("irq %d uses trigger mode %u; requested %u\n",
1140 irq, nmsk, omsk);
1141 }
1142
1143 new->irq = irq;
1144 *old_ptr = new;
1145
1146
1147 desc->irq_count = 0;
1148 desc->irqs_unhandled = 0;
1149
1150
1151
1152
1153
1154 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1155 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1156 __enable_irq(desc, irq, false);
1157 }
1158
1159 raw_spin_unlock_irqrestore(&desc->lock, flags);
1160
1161
1162
1163
1164
1165 if (new->thread)
1166 wake_up_process(new->thread);
1167
1168 register_irq_proc(irq, desc);
1169 new->dir = NULL;
1170 register_handler_proc(irq, new);
1171 free_cpumask_var(mask);
1172
1173 return 0;
1174
1175mismatch:
1176 if (!(new->flags & IRQF_PROBE_SHARED)) {
1177 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1178 irq, new->flags, new->name, old->flags, old->name);
1179#ifdef CONFIG_DEBUG_SHIRQ
1180 dump_stack();
1181#endif
1182 }
1183 ret = -EBUSY;
1184
1185out_mask:
1186 raw_spin_unlock_irqrestore(&desc->lock, flags);
1187 free_cpumask_var(mask);
1188
1189out_thread:
1190 if (new->thread) {
1191 struct task_struct *t = new->thread;
1192
1193 new->thread = NULL;
1194 kthread_stop(t);
1195 put_task_struct(t);
1196 }
1197out_mput:
1198 module_put(desc->owner);
1199 return ret;
1200}
1208
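/**
 * setup_irq - setup an interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */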
1209int setup_irq(unsigned int irq, struct irqaction *act)
1210{
1211 int retval;
1212 struct irq_desc *desc = irq_to_desc(irq);
1213
1214 if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1215 return -EINVAL;
1216 chip_bus_lock(desc);
1217 retval = __setup_irq(irq, desc, act);
1218 chip_bus_sync_unlock(desc);
1219
1220 return retval;
1221}
1222EXPORT_SYMBOL_GPL(setup_irq);
1227
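/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */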
1228static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1229{
1230 struct irq_desc *desc = irq_to_desc(irq);
1231 struct irqaction *action, **action_ptr;
1232 unsigned long flags;
1233
1234 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1235
1236 if (!desc)
1237 return NULL;
1238
1239 raw_spin_lock_irqsave(&desc->lock, flags);
1240
1241
1242
1243
1244
1245 action_ptr = &desc->action;
1246 for (;;) {
1247 action = *action_ptr;
1248
1249 if (!action) {
1250 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1251 raw_spin_unlock_irqrestore(&desc->lock, flags);
1252
1253 return NULL;
1254 }
1255
1256 if (action->dev_id == dev_id)
1257 break;
1258 action_ptr = &action->next;
1259 }
1260
1261
1262 *action_ptr = action->next;
1263
1264
1265 if (!desc->action)
1266 irq_shutdown(desc);
1267
1268#ifdef CONFIG_SMP
1269
1270 if (WARN_ON_ONCE(desc->affinity_hint))
1271 desc->affinity_hint = NULL;
1272#endif
1273
1274 raw_spin_unlock_irqrestore(&desc->lock, flags);
1275
1276 unregister_handler_proc(irq, action);
1277
1278
1279 synchronize_irq(irq);
1280
1281#ifdef CONFIG_DEBUG_SHIRQ
1282
1283
1284
1285
1286
1287
1288
1289
1290 if (action->flags & IRQF_SHARED) {
1291 local_irq_save(flags);
1292 action->handler(irq, dev_id);
1293 local_irq_restore(flags);
1294 }
1295#endif
1296
1297 if (action->thread) {
1298 kthread_stop(action->thread);
1299 put_task_struct(action->thread);
1300 }
1301
1302 module_put(desc->owner);
1303 return action;
1304}
1312
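/**
 * remove_irq - free an interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */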
1313void remove_irq(unsigned int irq, struct irqaction *act)
1314{
1315 struct irq_desc *desc = irq_to_desc(irq);
1316
1317 if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1318 __free_irq(irq, act->dev_id);
1319}
1320EXPORT_SYMBOL_GPL(remove_irq);
1335
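/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler.  The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function.  The function
 * does not return until any executing interrupts for this IRQ have
 * completed.
 *
 * This function must not be called from interrupt context.
 */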
1336void free_irq(unsigned int irq, void *dev_id)
1337{
1338 struct irq_desc *desc = irq_to_desc(irq);
1339
1340 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1341 return;
1342
1343#ifdef CONFIG_SMP
1344 if (WARN_ON(desc->affinity_notify))
1345 desc->affinity_notify = NULL;
1346#endif
1347
1348 chip_bus_lock(desc);
1349 kfree(__free_irq(irq, dev_id));
1350 chip_bus_sync_unlock(desc);
1351}
1352EXPORT_SYMBOL(free_irq);
1395
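/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *           Primary handler for threaded interrupts.
 *           If NULL and @thread_fn != NULL the default primary
 *           handler is installed.
 * @thread_fn: Function called from the irq handler thread.
 *             If NULL, no irq thread is created.
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the interrupt
 * line and IRQ handling.  From this point on your handler function
 * may be invoked, so make sure the hardware is initialised first.
 *
 * For a threaded handler supply both @handler and @thread_fn:
 * @handler is still called in hard interrupt context and should check
 * whether the interrupt originates from the device; if so it disables
 * the interrupt on the device and returns IRQ_WAKE_THREAD, which
 * wakes the handler thread and runs @thread_fn.
 *
 * @dev_id must be globally unique; normally the address of the device
 * data structure is used as the cookie.  For shared interrupts a
 * non-NULL @dev_id is required, as it is needed when freeing the
 * interrupt.
 */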
1396int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1397 irq_handler_t thread_fn, unsigned long irqflags,
1398 const char *devname, void *dev_id)
1399{
1400 struct irqaction *action;
1401 struct irq_desc *desc;
1402 int retval;
1403
1404
1405
1406
1407
1408
1409
1410 if ((irqflags & IRQF_SHARED) && !dev_id)
1411 return -EINVAL;
1412
1413 desc = irq_to_desc(irq);
1414 if (!desc)
1415 return -EINVAL;
1416
1417 if (!irq_settings_can_request(desc) ||
1418 WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1419 return -EINVAL;
1420
1421 if (!handler) {
1422 if (!thread_fn)
1423 return -EINVAL;
1424 handler = irq_default_primary_handler;
1425 }
1426
1427 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1428 if (!action)
1429 return -ENOMEM;
1430
1431 action->handler = handler;
1432 action->thread_fn = thread_fn;
1433 action->flags = irqflags;
1434 action->name = devname;
1435 action->dev_id = dev_id;
1436
1437 chip_bus_lock(desc);
1438 retval = __setup_irq(irq, desc, action);
1439 chip_bus_sync_unlock(desc);
1440
1441 if (retval)
1442 kfree(action);
1443
1444#ifdef CONFIG_DEBUG_SHIRQ_FIXME
1445 if (!retval && (irqflags & IRQF_SHARED)) {
1446
1447
1448
1449
1450
1451
1452 unsigned long flags;
1453
1454 disable_irq(irq);
1455 local_irq_save(flags);
1456
1457 handler(irq, dev_id);
1458
1459 local_irq_restore(flags);
1460 enable_irq(irq);
1461 }
1462#endif
1463 return retval;
1464}
1465EXPORT_SYMBOL(request_threaded_irq);
1483
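/**
 * request_any_context_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *           Threaded handler for threaded interrupts.
 * @flags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the interrupt
 * line and IRQ handling.  It selects either a hardirq or threaded
 * handling method depending on the context.
 *
 * On failure it returns a negative value.  On success it returns
 * either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */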
1484int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1485 unsigned long flags, const char *name, void *dev_id)
1486{
1487 struct irq_desc *desc = irq_to_desc(irq);
1488 int ret;
1489
1490 if (!desc)
1491 return -EINVAL;
1492
1493 if (irq_settings_is_nested_thread(desc)) {
1494 ret = request_threaded_irq(irq, NULL, handler,
1495 flags, name, dev_id);
1496 return !ret ? IRQC_IS_NESTED : ret;
1497 }
1498
1499 ret = request_irq(irq, handler, flags, name, dev_id);
1500 return !ret ? IRQC_IS_HARDIRQ : ret;
1501}
1502EXPORT_SYMBOL_GPL(request_any_context_irq);
1503
1504void enable_percpu_irq(unsigned int irq, unsigned int type)
1505{
1506 unsigned int cpu = smp_processor_id();
1507 unsigned long flags;
1508 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1509
1510 if (!desc)
1511 return;
1512
1513 type &= IRQ_TYPE_SENSE_MASK;
1514 if (type != IRQ_TYPE_NONE) {
1515 int ret;
1516
1517 ret = __irq_set_trigger(desc, irq, type);
1518
1519 if (ret) {
1520 WARN(1, "failed to set type for IRQ%d\n", irq);
1521 goto out;
1522 }
1523 }
1524
1525 irq_percpu_enable(desc, cpu);
1526out:
1527 irq_put_desc_unlock(desc, flags);
1528}
1529EXPORT_SYMBOL_GPL(enable_percpu_irq);
1530
1531void disable_percpu_irq(unsigned int irq)
1532{
1533 unsigned int cpu = smp_processor_id();
1534 unsigned long flags;
1535 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1536
1537 if (!desc)
1538 return;
1539
1540 irq_percpu_disable(desc, cpu);
1541 irq_put_desc_unlock(desc, flags);
1542}
1543EXPORT_SYMBOL_GPL(disable_percpu_irq);
1547
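/*
 * Internal function to unregister a percpu irqaction.
 */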
1548static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1549{
1550 struct irq_desc *desc = irq_to_desc(irq);
1551 struct irqaction *action;
1552 unsigned long flags;
1553
1554 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1555
1556 if (!desc)
1557 return NULL;
1558
1559 raw_spin_lock_irqsave(&desc->lock, flags);
1560
1561 action = desc->action;
1562 if (!action || action->percpu_dev_id != dev_id) {
1563 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1564 goto bad;
1565 }
1566
1567 if (!cpumask_empty(desc->percpu_enabled)) {
1568 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
1569 irq, cpumask_first(desc->percpu_enabled));
1570 goto bad;
1571 }
1572
1573
1574 desc->action = NULL;
1575
1576 raw_spin_unlock_irqrestore(&desc->lock, flags);
1577
1578 unregister_handler_proc(irq, action);
1579
1580 module_put(desc->owner);
1581 return action;
1582
1583bad:
1584 raw_spin_unlock_irqrestore(&desc->lock, flags);
1585 return NULL;
1586}
1594
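/**
 * remove_percpu_irq - free a per-cpu interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */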
1595void remove_percpu_irq(unsigned int irq, struct irqaction *act)
1596{
1597 struct irq_desc *desc = irq_to_desc(irq);
1598
1599 if (desc && irq_settings_is_per_cpu_devid(desc))
1600 __free_percpu_irq(irq, act->percpu_dev_id);
1601}
1614
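/**
 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove a percpu interrupt handler.  The handler is removed, but the
 * interrupt line is not disabled; that must be done on each CPU before
 * calling this function.  The function does not return until any
 * executing interrupts for this IRQ have completed.
 *
 * This function must not be called from interrupt context.
 */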
1615void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1616{
1617 struct irq_desc *desc = irq_to_desc(irq);
1618
1619 if (!desc || !irq_settings_is_per_cpu_devid(desc))
1620 return;
1621
1622 chip_bus_lock(desc);
1623 kfree(__free_percpu_irq(irq, dev_id));
1624 chip_bus_sync_unlock(desc);
1625}
1633
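/**
 * setup_percpu_irq - setup a per-cpu interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup per-cpu interrupts in the early boot process.
 */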
1634int setup_percpu_irq(unsigned int irq, struct irqaction *act)
1635{
1636 struct irq_desc *desc = irq_to_desc(irq);
1637 int retval;
1638
1639 if (!desc || !irq_settings_is_per_cpu_devid(desc))
1640 return -EINVAL;
1641 chip_bus_lock(desc);
1642 retval = __setup_irq(irq, desc, act);
1643 chip_bus_sync_unlock(desc);
1644
1645 return retval;
1646}
1662
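/**
 * request_percpu_irq - allocate a percpu interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 * @devname: An ascii name for the claiming device
 * @dev_id: A percpu cookie passed back to the handler function
 *
 * This call allocates interrupt resources, but doesn't automatically
 * enable the interrupt.  That has to be done on each CPU using
 * enable_percpu_irq().
 *
 * @dev_id must be globally unique.  It is a per-cpu variable, and the
 * handler gets called with the interrupted CPU's instance of that
 * variable.
 */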
1663int request_percpu_irq(unsigned int irq, irq_handler_t handler,
1664 const char *devname, void __percpu *dev_id)
1665{
1666 struct irqaction *action;
1667 struct irq_desc *desc;
1668 int retval;
1669
1670 if (!dev_id)
1671 return -EINVAL;
1672
1673 desc = irq_to_desc(irq);
1674 if (!desc || !irq_settings_can_request(desc) ||
1675 !irq_settings_is_per_cpu_devid(desc))
1676 return -EINVAL;
1677
1678 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1679 if (!action)
1680 return -ENOMEM;
1681
1682 action->handler = handler;
1683 action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
1684 action->name = devname;
1685 action->percpu_dev_id = dev_id;
1686
1687 chip_bus_lock(desc);
1688 retval = __setup_irq(irq, desc, action);
1689 chip_bus_sync_unlock(desc);
1690
1691 if (retval)
1692 kfree(action);
1693
1694 return retval;
1695}
1696