/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */
#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/task_work.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

/**
 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	bool inprogress;

	if (!desc)
		return;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);

	/*
	 * We made sure that no hardirq handler is running. Now verify
	 * that no threaded handlers are active.
	 */
	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
EXPORT_SYMBOL(synchronize_irq);

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

/**
 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 *	@irq:		Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return 0;

	return 1;
}

/**
 *	irq_set_thread_affinity - Notify irq threads to adjust affinity
 *	@desc:		irq descriptor which has affinity changed
 *
 *	We just set IRQTF_AFFINITY and delegate the affinity setting
 *	to the interrupt thread itself. We can not call
 *	set_cpus_allowed_ptr() here as we hold desc->lock and this
 *	code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action = desc->action;

	while (action) {
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
		action = action->next;
	}
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return irqd_can_move_in_process_context(data);
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return irqd_is_setaffinity_pending(data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
#else
static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
static inline bool irq_move_pending(struct irq_data *data) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	ret = chip->irq_set_affinity(data, mask, false);
	switch (ret) {
	case IRQ_SET_MASK_OK:
		cpumask_copy(data->affinity, mask);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data)) {
		ret = irq_do_set_affinity(data, mask, false);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

/**
 *	irq_set_affinity - Set the irq affinity of a given irq
 *	@irq:		Interrupt to set affinity
 *	@mask:		cpumask
 *
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 *	irq_set_affinity_notifier - control notification of IRQ affinity changes
 *	@irq:		Interrupt for which to enable/disable notification
 *	@notify:	Context for notification, or %NULL to disable
 *			notification.  Function pointers must be initialised;
 *			the other fields will be initialised by this function.
 *
 *	Must be called in process context.  Notification may only be enabled
 *	after the IRQ is allocated and must be disabled before the IRQ is
 *	freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	struct cpumask *set = irq_default_affinity;
	int node = desc->irq_data.node;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_data.affinity,
				       cpu_online_mask))
			set = desc->irq_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(mask, nodemask))
			cpumask_and(mask, mask, nodemask);
	}
	irq_do_set_affinity(&desc->irq_data, mask, false);
	return 0;
}
#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
{
	return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq.
 */
int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(irq, desc, mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	return 0;
}
#endif

void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
{
	if (suspend) {
		if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
			return;
		desc->istate |= IRQS_SUSPENDED;
	}

	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc, irq, false);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and Enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
	if (resume) {
		if (!(desc->istate & IRQS_SUSPENDED)) {
			if (!desc->action)
				return;
			if (!(desc->action->flags & IRQF_FORCE_RESUME))
				return;
			/* Pretend that it got disabled ! */
			desc->depth++;
		}
		desc->istate &= ~IRQS_SUSPENDED;
	}

	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		irq_enable(desc);
		check_irq_resend(desc, irq);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}

/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq().  If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context only when
 *	desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc, irq, false);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 *	irq_set_irq_wake - control irq power management wakeup
 *	@irq:	interrupt to control
 *	@on:	enable/disable power management wakeup
 *
 *	Enable/disable power management wakeup mode, which is
 *	disabled by default.  Enables and disables must match,
 *	just as they match for non-wakeup mode support.
 *
 *	Wakeup mode lets this IRQ wake the system from sleep
 *	states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/*
	 * wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (desc->action)
			if (irqflags & desc->action->flags & IRQF_SHARED)
				canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
		      unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	flags &= IRQ_TYPE_SENSE_MASK;

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* caller masked out all except trigger mode flags */
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq, chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler finished. unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT))
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be we need to protect us against
	 * the following scenario:
	 *
	 * The thread is faster done than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and masks the line, leaves due
	 * to IRQS_INPROGRESS and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See if the hard irq handler is running
	 * already, we do this check under desc->lock. If yes, drop the
	 * lock and retry in the threaded handler.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	cpumask_copy(mask, desc->irq_data.affinity);
	raw_spin_unlock_irq(&desc->lock);

	set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow buses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active) &&
	    waitqueue_active(&desc->wait_for_threads))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct task_work *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm ? tsk->comm : "", tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct task_work on_exit_work;
	static const struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
				  struct irqaction *action);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					 &action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	sched_setscheduler(current, SCHED_FIFO, &param);

	init_task_work(&on_exit_work, irq_thread_dtor, NULL);
	task_work_add(current, &on_exit_work, false);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (!noirqdebug)
			note_interrupt(action->irq, desc, action_ret);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set. We cannot verify that as we
	 * cannot touch the oneshot mask at this point anymore as
	 * __setup_irq() might have given out the current thread_mask
	 * again.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}

static void irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return;

	new->flags |= IRQF_ONESHOT;

	if (!new->thread_fn) {
		set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
		new->thread_fn = new->handler;
		new->handler = irq_default_primary_handler;
	}
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
881static int
882__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
883{
884 struct irqaction *old, **old_ptr;
885 unsigned long flags, thread_mask = 0;
886 int ret, nested, shared = 0;
887 cpumask_var_t mask;
888
889 if (!desc)
890 return -EINVAL;
891
892 if (desc->irq_data.chip == &no_irq_chip)
893 return -ENOSYS;
894 if (!try_module_get(desc->owner))
895 return -ENODEV;
896
897
898
899
900
901 if (new->flags & IRQF_SAMPLE_RANDOM) {
902
903
904
905
906
907
908
909
910 rand_initialize_irq(irq);
911 }
912
913
914
915
916
917 nested = irq_settings_is_nested_thread(desc);
918 if (nested) {
919 if (!new->thread_fn) {
920 ret = -EINVAL;
921 goto out_mput;
922 }
923
924
925
926
927
928 new->handler = irq_nested_primary_handler;
929 } else {
930 if (irq_settings_can_thread(desc))
931 irq_setup_forced_threading(new);
932 }
933
934
935
936
937
938
939 if (new->thread_fn && !nested) {
940 struct task_struct *t;
941
942 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
943 new->name);
944 if (IS_ERR(t)) {
945 ret = PTR_ERR(t);
946 goto out_mput;
947 }
948
949
950
951
952
953 get_task_struct(t);
954 new->thread = t;
955 }
956
957 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
958 ret = -ENOMEM;
959 goto out_thread;
960 }
961
962
963
964
965 raw_spin_lock_irqsave(&desc->lock, flags);
966 old_ptr = &desc->action;
967 old = *old_ptr;
968 if (old) {
969
970
971
972
973
974
975
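		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 */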
976 if (!((old->flags & new->flags) & IRQF_SHARED) ||
977 ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
978 ((old->flags ^ new->flags) & IRQF_ONESHOT))
979 goto mismatch;
980
981
982 if ((old->flags & IRQF_PERCPU) !=
983 (new->flags & IRQF_PERCPU))
984 goto mismatch;
985
986
987 do {
988
989
990
991
992
993 thread_mask |= old->thread_mask;
994 old_ptr = &old->next;
995 old = *old_ptr;
996 } while (old);
997 shared = 1;
998 }
999
1000
1001
1002
1003
1004
1005 if (new->flags & IRQF_ONESHOT) {
1006
1007
1008
1009
1010 if (thread_mask == ~0UL) {
1011 ret = -EBUSY;
1012 goto out_mask;
1013 }
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
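		/*
		 * Allocate a free bit in thread_mask for this action.
		 * The per-action bit is or'ed into desc->threads_oneshot
		 * by irq_wake_thread() and cleared by
		 * irq_finalize_oneshot(), so a shared oneshot line is
		 * only unmasked once all woken thread handlers finished.
		 */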
1034 new->thread_mask = 1 << ffz(thread_mask);
1035
1036 } else if (new->handler == irq_default_primary_handler) {
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
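		/*
		 * The request used the default primary handler (handler
		 * was NULL) but did not set IRQF_ONESHOT. For a level
		 * triggered interrupt this would wake the thread, then
		 * re-enable the still asserted line and loop forever, so
		 * reject the request unconditionally.
		 */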
1052 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
1053 irq);
1054 ret = -EINVAL;
1055 goto out_mask;
1056 }
1057
1058 if (!shared) {
1059 init_waitqueue_head(&desc->wait_for_threads);
1060
1061
1062 if (new->flags & IRQF_TRIGGER_MASK) {
1063 ret = __irq_set_trigger(desc, irq,
1064 new->flags & IRQF_TRIGGER_MASK);
1065
1066 if (ret)
1067 goto out_mask;
1068 }
1069
1070 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1071 IRQS_ONESHOT | IRQS_WAITING);
1072 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1073
1074 if (new->flags & IRQF_PERCPU) {
1075 irqd_set(&desc->irq_data, IRQD_PER_CPU);
1076 irq_settings_set_per_cpu(desc);
1077 }
1078
1079 if (new->flags & IRQF_ONESHOT)
1080 desc->istate |= IRQS_ONESHOT;
1081
1082 if (irq_settings_can_autoenable(desc))
1083 irq_startup(desc, true);
1084 else
1085
1086 desc->depth = 1;
1087
1088
1089 if (new->flags & IRQF_NOBALANCING) {
1090 irq_settings_set_no_balancing(desc);
1091 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1092 }
1093
1094
1095 setup_affinity(irq, desc, mask);
1096
1097 } else if (new->flags & IRQF_TRIGGER_MASK) {
1098 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1099 unsigned int omsk = irq_settings_get_trigger_mask(desc);
1100
1101 if (nmsk != omsk)
1102
1103 pr_warning("irq %d uses trigger mode %u; requested %u\n",
1104 irq, nmsk, omsk);
1105 }
1106
1107 new->irq = irq;
1108 *old_ptr = new;
1109
1110
1111 desc->irq_count = 0;
1112 desc->irqs_unhandled = 0;
1113
1114
1115
1116
1117
1118 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1119 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1120 __enable_irq(desc, irq, false);
1121 }
1122
1123 raw_spin_unlock_irqrestore(&desc->lock, flags);
1124
1125
1126
1127
1128
1129 if (new->thread)
1130 wake_up_process(new->thread);
1131
1132 register_irq_proc(irq, desc);
1133 new->dir = NULL;
1134 register_handler_proc(irq, new);
1135 free_cpumask_var(mask);
1136
1137 return 0;
1138
1139mismatch:
1140 if (!(new->flags & IRQF_PROBE_SHARED)) {
1141 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1142 irq, new->flags, new->name, old->flags, old->name);
1143#ifdef CONFIG_DEBUG_SHIRQ
1144 dump_stack();
1145#endif
1146 }
1147 ret = -EBUSY;
1148
1149out_mask:
1150 raw_spin_unlock_irqrestore(&desc->lock, flags);
1151 free_cpumask_var(mask);
1152
1153out_thread:
1154 if (new->thread) {
1155 struct task_struct *t = new->thread;
1156
1157 new->thread = NULL;
1158 kthread_stop(t);
1159 put_task_struct(t);
1160 }
1161out_mput:
1162 module_put(desc->owner);
1163 return ret;
1164}
1165
/**
 *	setup_irq - setup an interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
1173int setup_irq(unsigned int irq, struct irqaction *act)
1174{
1175 int retval;
1176 struct irq_desc *desc = irq_to_desc(irq);
1177
1178 if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1179 return -EINVAL;
1180 chip_bus_lock(desc);
1181 retval = __setup_irq(irq, desc, act);
1182 chip_bus_sync_unlock(desc);
1183
1184 return retval;
1185}
1186EXPORT_SYMBOL_GPL(setup_irq);
1187
1188
1189
1190
1191
1192static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1193{
1194 struct irq_desc *desc = irq_to_desc(irq);
1195 struct irqaction *action, **action_ptr;
1196 unsigned long flags;
1197
1198 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1199
1200 if (!desc)
1201 return NULL;
1202
1203 raw_spin_lock_irqsave(&desc->lock, flags);
1204
1205
1206
1207
1208
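	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */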
1209 action_ptr = &desc->action;
1210 for (;;) {
1211 action = *action_ptr;
1212
1213 if (!action) {
1214 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1215 raw_spin_unlock_irqrestore(&desc->lock, flags);
1216
1217 return NULL;
1218 }
1219
1220 if (action->dev_id == dev_id)
1221 break;
1222 action_ptr = &action->next;
1223 }
1224
1225
1226 *action_ptr = action->next;
1227
1228
1229 if (!desc->action)
1230 irq_shutdown(desc);
1231
1232#ifdef CONFIG_SMP
1233
1234 if (WARN_ON_ONCE(desc->affinity_hint))
1235 desc->affinity_hint = NULL;
1236#endif
1237
1238 raw_spin_unlock_irqrestore(&desc->lock, flags);
1239
1240 unregister_handler_proc(irq, action);
1241
1242
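	/* Make sure it's not being used on another CPU: */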
1243 synchronize_irq(irq);
1244
1245#ifdef CONFIG_DEBUG_SHIRQ
1246
1247
1248
1249
1250
1251
1252
1253
1254 if (action->flags & IRQF_SHARED) {
1255 local_irq_save(flags);
1256 action->handler(irq, dev_id);
1257 local_irq_restore(flags);
1258 }
1259#endif
1260
1261 if (action->thread) {
1262 kthread_stop(action->thread);
1263 put_task_struct(action->thread);
1264 }
1265
1266 module_put(desc->owner);
1267 return action;
1268}
1269
/**
 *	remove_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
1277void remove_irq(unsigned int irq, struct irqaction *act)
1278{
1279 struct irq_desc *desc = irq_to_desc(irq);
1280
1281 if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1282 __free_irq(irq, act->dev_id);
1283}
1284EXPORT_SYMBOL_GPL(remove_irq);
1285

/**
 *	free_irq - free an interrupt allocated with request_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 */
1300void free_irq(unsigned int irq, void *dev_id)
1301{
1302 struct irq_desc *desc = irq_to_desc(irq);
1303
1304 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1305 return;
1306
1307#ifdef CONFIG_SMP
1308 if (WARN_ON(desc->affinity_notify))
1309 desc->affinity_notify = NULL;
1310#endif
1311
1312 chip_bus_lock(desc);
1313 kfree(__free_irq(irq, dev_id));
1314 chip_bus_sync_unlock(desc);
1315}
1316EXPORT_SYMBOL(free_irq);
1317
/**
 *	request_threaded_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Primary handler for threaded interrupts.
 *		  If NULL and thread_fn != NULL the default
 *		  primary handler is installed.
 *	@thread_fn: Function called from the irq handler thread.
 *		    If NULL, no irq thread is created.
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	If you want to set up a threaded irq handler for your device
 *	then you need to supply @handler and @thread_fn. @handler is
 *	still called in hard interrupt context and has to check
 *	whether the interrupt originates from the device. If yes it
 *	needs to disable the interrupt on the device and return
 *	IRQ_WAKE_THREAD which will wake up the handler thread and run
 *	@thread_fn. This split handler design is necessary to support
 *	shared interrupts.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 */
1361int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1362 irq_handler_t thread_fn, unsigned long irqflags,
1363 const char *devname, void *dev_id)
1364{
1365 struct irqaction *action;
1366 struct irq_desc *desc;
1367 int retval;
1368
1369
1370
1371
1372
1373
1374
1375 if ((irqflags & IRQF_SHARED) && !dev_id)
1376 return -EINVAL;
1377
1378 desc = irq_to_desc(irq);
1379 if (!desc)
1380 return -EINVAL;
1381
1382 if (!irq_settings_can_request(desc) ||
1383 WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1384 return -EINVAL;
1385
1386 if (!handler) {
1387 if (!thread_fn)
1388 return -EINVAL;
1389 handler = irq_default_primary_handler;
1390 }
1391
1392 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1393 if (!action)
1394 return -ENOMEM;
1395
1396 action->handler = handler;
1397 action->thread_fn = thread_fn;
1398 action->flags = irqflags;
1399 action->name = devname;
1400 action->dev_id = dev_id;
1401
1402 chip_bus_lock(desc);
1403 retval = __setup_irq(irq, desc, action);
1404 chip_bus_sync_unlock(desc);
1405
1406 if (retval)
1407 kfree(action);
1408
1409#ifdef CONFIG_DEBUG_SHIRQ_FIXME
1410 if (!retval && (irqflags & IRQF_SHARED)) {
1411
1412
1413
1414
1415
1416
1417 unsigned long flags;
1418
1419 disable_irq(irq);
1420 local_irq_save(flags);
1421
1422 handler(irq, dev_id);
1423
1424 local_irq_restore(flags);
1425 enable_irq(irq);
1426 }
1427#endif
1428 return retval;
1429}
1430EXPORT_SYMBOL(request_threaded_irq);
1431

/**
 *	request_any_context_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Threaded handler for threaded interrupts.
 *	@flags: Interrupt type flags
 *	@name: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. It selects either a
 *	hardirq or threaded handling method depending on the
 *	context.
 *
 *	On failure, it returns a negative value. On success,
 *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
1449int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1450 unsigned long flags, const char *name, void *dev_id)
1451{
1452 struct irq_desc *desc = irq_to_desc(irq);
1453 int ret;
1454
1455 if (!desc)
1456 return -EINVAL;
1457
1458 if (irq_settings_is_nested_thread(desc)) {
1459 ret = request_threaded_irq(irq, NULL, handler,
1460 flags, name, dev_id);
1461 return !ret ? IRQC_IS_NESTED : ret;
1462 }
1463
1464 ret = request_irq(irq, handler, flags, name, dev_id);
1465 return !ret ? IRQC_IS_HARDIRQ : ret;
1466}
1467EXPORT_SYMBOL_GPL(request_any_context_irq);
1468
1469void enable_percpu_irq(unsigned int irq, unsigned int type)
1470{
1471 unsigned int cpu = smp_processor_id();
1472 unsigned long flags;
1473 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1474
1475 if (!desc)
1476 return;
1477
1478 type &= IRQ_TYPE_SENSE_MASK;
1479 if (type != IRQ_TYPE_NONE) {
1480 int ret;
1481
1482 ret = __irq_set_trigger(desc, irq, type);
1483
1484 if (ret) {
1485 WARN(1, "failed to set type for IRQ%d\n", irq);
1486 goto out;
1487 }
1488 }
1489
1490 irq_percpu_enable(desc, cpu);
1491out:
1492 irq_put_desc_unlock(desc, flags);
1493}
1494
1495void disable_percpu_irq(unsigned int irq)
1496{
1497 unsigned int cpu = smp_processor_id();
1498 unsigned long flags;
1499 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1500
1501 if (!desc)
1502 return;
1503
1504 irq_percpu_disable(desc, cpu);
1505 irq_put_desc_unlock(desc, flags);
1506}
1507
1508
1509
1510
1511static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1512{
1513 struct irq_desc *desc = irq_to_desc(irq);
1514 struct irqaction *action;
1515 unsigned long flags;
1516
1517 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1518
1519 if (!desc)
1520 return NULL;
1521
1522 raw_spin_lock_irqsave(&desc->lock, flags);
1523
1524 action = desc->action;
1525 if (!action || action->percpu_dev_id != dev_id) {
1526 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1527 goto bad;
1528 }
1529
1530 if (!cpumask_empty(desc->percpu_enabled)) {
1531 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
1532 irq, cpumask_first(desc->percpu_enabled));
1533 goto bad;
1534 }
1535
1536
1537 desc->action = NULL;
1538
1539 raw_spin_unlock_irqrestore(&desc->lock, flags);
1540
1541 unregister_handler_proc(irq, action);
1542
1543 module_put(desc->owner);
1544 return action;
1545
1546bad:
1547 raw_spin_unlock_irqrestore(&desc->lock, flags);
1548 return NULL;
1549}
1550
1551
1552
1553
1554
1555
1556
1557
1558void remove_percpu_irq(unsigned int irq, struct irqaction *act)
1559{
1560 struct irq_desc *desc = irq_to_desc(irq);
1561
1562 if (desc && irq_settings_is_per_cpu_devid(desc))
1563 __free_percpu_irq(irq, act->percpu_dev_id);
1564}
1565
/**
 *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove a percpu interrupt handler. The handler is removed, but
 *	the interrupt line is not disabled. This must be done on each
 *	CPU before calling this function. The function does not return
 *	until any executing interrupts for this IRQ have completed.
 *
 *	This function must not be called from interrupt context.
 */
1578void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1579{
1580 struct irq_desc *desc = irq_to_desc(irq);
1581
1582 if (!desc || !irq_settings_is_per_cpu_devid(desc))
1583 return;
1584
1585 chip_bus_lock(desc);
1586 kfree(__free_percpu_irq(irq, dev_id));
1587 chip_bus_sync_unlock(desc);
1588}
1589
1590
1591
1592
1593
1594
1595
1596
1597int setup_percpu_irq(unsigned int irq, struct irqaction *act)
1598{
1599 struct irq_desc *desc = irq_to_desc(irq);
1600 int retval;
1601
1602 if (!desc || !irq_settings_is_per_cpu_devid(desc))
1603 return -EINVAL;
1604 chip_bus_lock(desc);
1605 retval = __setup_irq(irq, desc, act);
1606 chip_bus_sync_unlock(desc);
1607
1608 return retval;
1609}
1610
/**
 *	request_percpu_irq - allocate a percpu interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A percpu cookie passed back to the handler function
 *
 *	This call allocates interrupt resources, but doesn't
 *	automatically enable the interrupt. It has to be done on each
 *	CPU using enable_percpu_irq().
 *
 *	Dev_id must be globally unique. It is a per-cpu variable, and
 *	the handler gets called with the interrupted CPU's instance of
 *	that variable.
 */
1626int request_percpu_irq(unsigned int irq, irq_handler_t handler,
1627 const char *devname, void __percpu *dev_id)
1628{
1629 struct irqaction *action;
1630 struct irq_desc *desc;
1631 int retval;
1632
1633 if (!dev_id)
1634 return -EINVAL;
1635
1636 desc = irq_to_desc(irq);
1637 if (!desc || !irq_settings_can_request(desc) ||
1638 !irq_settings_is_per_cpu_devid(desc))
1639 return -EINVAL;
1640
1641 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1642 if (!action)
1643 return -ENOMEM;
1644
1645 action->handler = handler;
1646 action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
1647 action->name = devname;
1648 action->percpu_dev_id = dev_id;
1649
1650 chip_bus_lock(desc);
1651 retval = __setup_irq(irq, desc, action);
1652 chip_bus_sync_unlock(desc);
1653
1654 if (retval)
1655 kfree(action);
1656
1657 return retval;
1658}
1659