1
2
3
4
5
6
7
8
9
10#define pr_fmt(fmt) "genirq: " fmt
11
12#include <linux/irq.h>
13#include <linux/kthread.h>
14#include <linux/module.h>
15#include <linux/random.h>
16#include <linux/interrupt.h>
17#include <linux/slab.h>
18#include <linux/sched.h>
19#include <linux/sched/rt.h>
20#include <linux/task_work.h>
21
22#include "internals.h"
23
#ifdef CONFIG_IRQ_FORCED_THREADING
/* Global switch: when true, primary handlers are run from kernel threads. */
__read_mostly bool force_irqthreads;

/* Boot-time hook: "threadirqs" on the command line enables forced threading. */
static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif
34
/*
 * Spin until the hard-IRQ handler for @desc is no longer in progress on
 * any CPU.  Does NOT wait for threaded handlers.
 */
static void __synchronize_hardirq(struct irq_desc *desc)
{
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Busy-wait lock-free first: taking desc->lock while the
		 * handler is running on another CPU would just add
		 * contention for no benefit.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Re-check the state under the lock to avoid a race with
		 * a handler that starts right after the lock-free check. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Loop again if an interrupt sneaked in between the checks. */
	} while (inprogress);
}
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75bool synchronize_hardirq(unsigned int irq)
76{
77 struct irq_desc *desc = irq_to_desc(irq);
78
79 if (desc) {
80 __synchronize_hardirq(desc);
81 return !atomic_read(&desc->threads_active);
82 }
83
84 return true;
85}
86EXPORT_SYMBOL(synchronize_hardirq);
87
88
89
90
91
92
93
94
95
96
97
98void synchronize_irq(unsigned int irq)
99{
100 struct irq_desc *desc = irq_to_desc(irq);
101
102 if (desc) {
103 __synchronize_hardirq(desc);
104
105
106
107
108
109 wait_event(desc->wait_for_threads,
110 !atomic_read(&desc->threads_active));
111 }
112}
113EXPORT_SYMBOL(synchronize_irq);
114
115#ifdef CONFIG_SMP
116cpumask_var_t irq_default_affinity;
117
118static int __irq_can_set_affinity(struct irq_desc *desc)
119{
120 if (!desc || !irqd_can_balance(&desc->irq_data) ||
121 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
122 return 0;
123 return 1;
124}
125
126
127
128
129
130
/* Public wrapper: can the affinity of @irq be changed? */
int irq_can_set_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __irq_can_set_affinity(desc);
}
135
136
137
138
139
140
141
142
143
144
145void irq_set_thread_affinity(struct irq_desc *desc)
146{
147 struct irqaction *action = desc->action;
148
149 while (action) {
150 if (action->thread)
151 set_bit(IRQTF_AFFINITY, &action->thread_flags);
152 action = action->next;
153 }
154}
155
#ifdef CONFIG_GENERIC_PENDING_IRQ
/* True if the affinity change may be applied in process context. */
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return irqd_can_move_in_process_context(data);
}
/* True if an affinity change is queued for the next interrupt. */
static inline bool irq_move_pending(struct irq_data *data)
{
	return irqd_is_setaffinity_pending(data);
}
/* Record @mask as the deferred affinity target. */
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}
/* Read back the deferred affinity target into @mask. */
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
#else
/* No deferred-move support: changes always happen in process context. */
static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
static inline bool irq_move_pending(struct irq_data *data) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif
183
/*
 * Apply @mask as the affinity of @data via the irqchip callback and keep
 * the cached affinity and the handler threads in sync.  Caller holds
 * desc->lock.  Returns 0 on success or the chip's error code.
 */
int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	ret = chip->irq_set_affinity(data, mask, force);
	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(desc->irq_common_data.affinity, mask);
		/* fall through - NOCOPY skips only the cpumask update */
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}
203
/*
 * Set the affinity of @data with desc->lock already held.  Either applies
 * the change immediately (process context allowed) or queues it for the
 * next interrupt, then kicks the affinity notifier if one is registered.
 */
int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data)) {
		ret = irq_do_set_affinity(data, mask, force);
	} else {
		/* Defer: the change is applied from the flow handler. */
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		/* Extra ref dropped by irq_affinity_notify() when done. */
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}
229
/* Lock desc and delegate to irq_set_affinity_locked(). */
int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
244
/*
 * Store @m as the affinity hint exposed via /proc and, when non-NULL,
 * also apply it as the actual affinity.  The hint pointer is stored
 * without copying; the caller must keep it valid until cleared.
 */
int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);

	/* Apply outside the lock - __irq_set_affinity takes it itself. */
	if (m)
		__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
260
261
262
263
264
265
266
267
268
269
270
/*
 * Hand @vcpu_info to the irqchip so the interrupt can be delivered
 * directly to a guest vCPU.  Returns -ENOSYS if the chip has no
 * irq_set_vcpu_affinity callback, -EINVAL for a bad irq number.
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	chip = irq_data_get_irq_chip(data);
	if (chip && chip->irq_set_vcpu_affinity)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
290EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
291
292static void irq_affinity_notify(struct work_struct *work)
293{
294 struct irq_affinity_notify *notify =
295 container_of(work, struct irq_affinity_notify, work);
296 struct irq_desc *desc = irq_to_desc(notify->irq);
297 cpumask_var_t cpumask;
298 unsigned long flags;
299
300 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
301 goto out;
302
303 raw_spin_lock_irqsave(&desc->lock, flags);
304 if (irq_move_pending(&desc->irq_data))
305 irq_get_pending(cpumask, desc);
306 else
307 cpumask_copy(cpumask, desc->irq_common_data.affinity);
308 raw_spin_unlock_irqrestore(&desc->lock, flags);
309
310 notify->notify(notify, cpumask);
311
312 free_cpumask_var(cpumask);
313out:
314 kref_put(¬ify->kref, notify->release);
315}
316
317
318
319
320
321
322
323
324
325
326
327
328int
329irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
330{
331 struct irq_desc *desc = irq_to_desc(irq);
332 struct irq_affinity_notify *old_notify;
333 unsigned long flags;
334
335
336 might_sleep();
337
338 if (!desc)
339 return -EINVAL;
340
341
342 if (notify) {
343 notify->irq = irq;
344 kref_init(¬ify->kref);
345 INIT_WORK(¬ify->work, irq_affinity_notify);
346 }
347
348 raw_spin_lock_irqsave(&desc->lock, flags);
349 old_notify = desc->affinity_notify;
350 desc->affinity_notify = notify;
351 raw_spin_unlock_irqrestore(&desc->lock, flags);
352
353 if (old_notify)
354 kref_put(&old_notify->kref, old_notify->release);
355
356 return 0;
357}
358EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
359
360#ifndef CONFIG_AUTO_IRQ_AFFINITY
361
362
363
/*
 * Pick and apply an initial affinity for @desc: honour a user/previous
 * setting if it still intersects the online CPUs, otherwise fall back to
 * irq_default_affinity, preferring CPUs on the IRQ's home NUMA node.
 * @mask is scratch storage supplied by the caller.
 */
static int setup_affinity(struct irq_desc *desc, struct cpumask *mask)
{
	struct cpumask *set = irq_default_affinity;
	int node = irq_desc_get_node(desc);

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!__irq_can_set_affinity(desc))
		return 0;

	/*
	 * A previously set affinity is preserved only while it still
	 * covers at least one online CPU; otherwise the SET flag is
	 * dropped and the default mask is used.
	 */
	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_common_data.affinity,
				       cpu_online_mask))
			set = desc->irq_common_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* Narrow to the home node only if that leaves CPUs. */
		if (cpumask_intersects(mask, nodemask))
			cpumask_and(mask, mask, nodemask);
	}
	irq_do_set_affinity(&desc->irq_data, mask, false);
	return 0;
}
396#else
397
/* CONFIG_AUTO_IRQ_AFFINITY: defer to the arch's affinity selector. */
static inline int setup_affinity(struct irq_desc *d, struct cpumask *mask)
{
	return irq_select_affinity(irq_desc_get_irq(d));
}
402#endif
403
404
405
406
/*
 * Re-run default affinity selection on behalf of a /proc write.
 * NOTE(review): desc is dereferenced without a NULL check - callers
 * appear to guarantee a valid irq; confirm before exposing wider.
 */
int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(desc, mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
418
419#else
/* !CONFIG_SMP: affinity is meaningless on UP - nothing to set up. */
static inline int
setup_affinity(struct irq_desc *desc, struct cpumask *mask)
{
	return 0;
}
425#endif
426
427void __disable_irq(struct irq_desc *desc)
428{
429 if (!desc->depth++)
430 irq_disable(desc);
431}
432
433static int __disable_irq_nosync(unsigned int irq)
434{
435 unsigned long flags;
436 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
437
438 if (!desc)
439 return -EINVAL;
440 __disable_irq(desc);
441 irq_put_desc_busunlock(desc, flags);
442 return 0;
443}
444
445
446
447
448
449
450
451
452
453
454
455
/*
 * Disable @irq without waiting for running handlers to finish.
 * Disables nest: enable_irq() must be called once per disable.
 * Safe from IRQ context (unless the irqchip bus lock sleeps).
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);
460EXPORT_SYMBOL(disable_irq_nosync);
461
462
463
464
465
466
467
468
469
470
471
472
473
/*
 * Disable @irq and wait for all handlers - including threaded ones -
 * to complete.  May sleep; must not be called from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
479EXPORT_SYMBOL(disable_irq);
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498bool disable_hardirq(unsigned int irq)
499{
500 if (!__disable_irq_nosync(irq))
501 return synchronize_hardirq(irq);
502
503 return false;
504}
505EXPORT_SYMBOL_GPL(disable_hardirq);
506
/*
 * Undo one level of __disable_irq().  Only the transition 1 -> 0
 * actually re-enables the line; depth 0 means an unbalanced enable.
 * Caller holds desc->lock.
 */
void __enable_irq(struct irq_desc *desc)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
		     irq_desc_get_irq(desc));
		break;
	case 1: {
		/* A suspended IRQ must not be re-enabled from here. */
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		irq_enable(desc);
		/* Replay an interrupt that fired while disabled. */
		check_irq_resend(desc);
		/* fall through - depth still has to be decremented */
	}
	default:
		desc->depth--;
	}
}
528
529
530
531
532
533
534
535
536
537
538
539
/*
 * Public nested re-enable of @irq.  Warns and bails out if the irq was
 * never set up (no chip installed yet).
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);
555EXPORT_SYMBOL(enable_irq);
556
557static int set_irq_wake_real(unsigned int irq, unsigned int on)
558{
559 struct irq_desc *desc = irq_to_desc(irq);
560 int ret = -ENXIO;
561
562 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
563 return 0;
564
565 if (desc->irq_data.chip->irq_set_wake)
566 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
567
568 return ret;
569}
570
571
572
573
574
575
576
577
578
579
580
581
582
/*
 * Reference-counted control of an IRQ's system-wakeup capability.
 * Enables wake on the 0 -> 1 transition of wake_depth, disables on
 * 1 -> 0; on chip failure the depth is rolled back so state and
 * hardware stay consistent.
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
617EXPORT_SYMBOL(irq_set_irq_wake);
618
619
620
621
622
623
624int can_request_irq(unsigned int irq, unsigned long irqflags)
625{
626 unsigned long flags;
627 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
628 int canrequest = 0;
629
630 if (!desc)
631 return 0;
632
633 if (irq_settings_can_request(desc)) {
634 if (!desc->action ||
635 irqflags & desc->action->flags & IRQF_SHARED)
636 canrequest = 1;
637 }
638 irq_put_desc_unlock(desc, flags);
639 return canrequest;
640}
641
/*
 * Program the trigger mode (edge/level polarity) of @desc via the
 * irqchip, masking around the change when the chip requires it, and
 * mirror the result into the irq_data/settings state.
 */
int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the chip does not support
		 * configuring the trigger: treat as success and rely on
		 * the hardware default.
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n",
			 irq_desc_get_irq(desc),
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	flags &= IRQ_TYPE_SENSE_MASK;

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* caller masked out all except trigger mode flags */
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		/* fall through - state update is common to all OK cases */
	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}
696
697#ifdef CONFIG_HARDIRQS_SW_RESEND
698int irq_set_parent(int irq, int parent_irq)
699{
700 unsigned long flags;
701 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
702
703 if (!desc)
704 return -EINVAL;
705
706 desc->parent_irq = parent_irq;
707
708 irq_put_desc_unlock(desc, flags);
709 return 0;
710}
711#endif
712
713
714
715
716
717
/*
 * Default primary handler for threaded interrupts: do nothing in hard
 * IRQ context, just wake the handler thread.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}
722
723
724
725
726
/*
 * Primary handler for nested threaded interrupts - should never be
 * invoked; the nested thread is woken by the parent's thread handler.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}
732
/*
 * Sleep until either the RUNTHREAD flag is set (return 0: handle the
 * interrupt) or kthread_stop() is pending (return -1: thread exits).
 * The task state is set before the flag test to avoid missing a wakeup.
 */
static int irq_wait_for_interrupt(struct irqaction *action)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return -1;
}
750
751
752
753
754
755
/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler finished.  unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT))
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Race window: the hard-IRQ flow handler on another CPU may have
	 * observed IRQTF_RUNTHREAD (and thus skipped masking) but not
	 * yet set INPROGRESS-cleared state.  Unmasking now would deliver
	 * a spurious interrupt for level-triggered lines, so back off,
	 * drop the locks, and retry until the handler has left the
	 * critical section.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * A new interrupt already re-armed this thread (RUNTHREAD set):
	 * leave the line masked; the next finalize pass will unmask.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	/* Unmask only when the last oneshot thread for this line is done. */
	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}
804
805#ifdef CONFIG_SMP
806
807
808
/*
 * Check whether we need to change the affinity of the interrupt thread:
 * if IRQTF_AFFINITY is set, copy the irq's affinity to this thread's
 * allowed CPUs.  Runs in the thread's own context.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * Allocation failed: re-set the flag so the update is retried on
	 * the next wakeup instead of being lost.
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);

	/*
	 * The affinity mask can be NULL early in the irq lifetime;
	 * skip the update in that case.
	 */
	if (desc->irq_common_data.affinity)
		cpumask_copy(mask, desc->irq_common_data.affinity);
	else
		valid = false;
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
842#else
843static inline void
844irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
845#endif
846
847
848
849
850
851
852
/*
 * Forced-threaded handler invocation: run with bottom halves disabled
 * to emulate hard-IRQ context for handlers that were written assuming
 * softirqs cannot preempt them.  Oneshot finalization happens inside
 * the bh-disabled section on purpose.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
}
864
865
866
867
868
869
870static irqreturn_t irq_thread_fn(struct irq_desc *desc,
871 struct irqaction *action)
872{
873 irqreturn_t ret;
874
875 ret = action->thread_fn(action->irq, action->dev_id);
876 irq_finalize_oneshot(desc, action);
877 return ret;
878}
879
/*
 * Drop one threads_active reference; the last one out wakes anybody
 * sleeping in synchronize_irq().
 */
static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}
885
/*
 * task_work destructor: runs if an irq thread is killed while an
 * interrupt is still pending, to avoid leaving threads_active and the
 * oneshot mask permanently elevated.
 */
static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);


	desc = irq_to_desc(action->irq);

	/*
	 * A pending RUNTHREAD means an interrupt raised threads_active
	 * for us; balance it so synchronize_irq() does not hang.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot from keeping the line masked. */
	irq_finalize_oneshot(desc, action);
}
912
913
914
915
/*
 * Interrupt handler thread main loop: wait for wakeups, run the
 * appropriate handler function, and balance threads_active.
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
				  struct irqaction *action);

	/* Forced-threaded handlers need the bh-disabled wrapper. */
	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					 &action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	/* Arm cleanup in case this thread is killed mid-interrupt. */
	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, false);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_HANDLED)
			atomic_inc(&desc->threads_handled);

		wake_threads_waitq(desc);
	}

	/*
	 * Normal exit via kthread_stop(): free_irq() has already
	 * synchronized, so the dtor is no longer needed - cancel it.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}
959
960
961
962
963
964
965
966void irq_wake_thread(unsigned int irq, void *dev_id)
967{
968 struct irq_desc *desc = irq_to_desc(irq);
969 struct irqaction *action;
970 unsigned long flags;
971
972 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
973 return;
974
975 raw_spin_lock_irqsave(&desc->lock, flags);
976 for (action = desc->action; action; action = action->next) {
977 if (action->dev_id == dev_id) {
978 if (action->thread)
979 __irq_wake_thread(desc, action);
980 break;
981 }
982 }
983 raw_spin_unlock_irqrestore(&desc->lock, flags);
984}
985EXPORT_SYMBOL_GPL(irq_wake_thread);
986
987static void irq_setup_forced_threading(struct irqaction *new)
988{
989 if (!force_irqthreads)
990 return;
991 if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
992 return;
993
994 new->flags |= IRQF_ONESHOT;
995
996 if (!new->thread_fn) {
997 set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
998 new->thread_fn = new->handler;
999 new->handler = irq_default_primary_handler;
1000 }
1001}
1002
1003static int irq_request_resources(struct irq_desc *desc)
1004{
1005 struct irq_data *d = &desc->irq_data;
1006 struct irq_chip *c = d->chip;
1007
1008 return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1009}
1010
1011static void irq_release_resources(struct irq_desc *desc)
1012{
1013 struct irq_data *d = &desc->irq_data;
1014 struct irq_chip *c = d->chip;
1015
1016 if (c->irq_release_resources)
1017 c->irq_release_resources(d);
1018}
1019
1020
1021
1022
1023
1024static int
1025__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1026{
1027 struct irqaction *old, **old_ptr;
1028 unsigned long flags, thread_mask = 0;
1029 int ret, nested, shared = 0;
1030 cpumask_var_t mask;
1031
1032 if (!desc)
1033 return -EINVAL;
1034
1035 if (desc->irq_data.chip == &no_irq_chip)
1036 return -ENOSYS;
1037 if (!try_module_get(desc->owner))
1038 return -ENODEV;
1039
1040
1041
1042
1043
1044 nested = irq_settings_is_nested_thread(desc);
1045 if (nested) {
1046 if (!new->thread_fn) {
1047 ret = -EINVAL;
1048 goto out_mput;
1049 }
1050
1051
1052
1053
1054
1055 new->handler = irq_nested_primary_handler;
1056 } else {
1057 if (irq_settings_can_thread(desc))
1058 irq_setup_forced_threading(new);
1059 }
1060
1061
1062
1063
1064
1065
1066 if (new->thread_fn && !nested) {
1067 struct task_struct *t;
1068 static const struct sched_param param = {
1069 .sched_priority = MAX_USER_RT_PRIO/2,
1070 };
1071
1072 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1073 new->name);
1074 if (IS_ERR(t)) {
1075 ret = PTR_ERR(t);
1076 goto out_mput;
1077 }
1078
1079 sched_setscheduler_nocheck(t, SCHED_FIFO, ¶m);
1080
1081
1082
1083
1084
1085
1086 get_task_struct(t);
1087 new->thread = t;
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097 set_bit(IRQTF_AFFINITY, &new->thread_flags);
1098 }
1099
1100 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
1101 ret = -ENOMEM;
1102 goto out_thread;
1103 }
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1115 new->flags &= ~IRQF_ONESHOT;
1116
1117
1118
1119
1120 raw_spin_lock_irqsave(&desc->lock, flags);
1121 old_ptr = &desc->action;
1122 old = *old_ptr;
1123 if (old) {
1124
1125
1126
1127
1128
1129
1130
1131 if (!((old->flags & new->flags) & IRQF_SHARED) ||
1132 ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
1133 ((old->flags ^ new->flags) & IRQF_ONESHOT))
1134 goto mismatch;
1135
1136
1137 if ((old->flags & IRQF_PERCPU) !=
1138 (new->flags & IRQF_PERCPU))
1139 goto mismatch;
1140
1141
1142 do {
1143
1144
1145
1146
1147
1148 thread_mask |= old->thread_mask;
1149 old_ptr = &old->next;
1150 old = *old_ptr;
1151 } while (old);
1152 shared = 1;
1153 }
1154
1155
1156
1157
1158
1159
1160 if (new->flags & IRQF_ONESHOT) {
1161
1162
1163
1164
1165 if (thread_mask == ~0UL) {
1166 ret = -EBUSY;
1167 goto out_mask;
1168 }
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189 new->thread_mask = 1 << ffz(thread_mask);
1190
1191 } else if (new->handler == irq_default_primary_handler &&
1192 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
1209 irq);
1210 ret = -EINVAL;
1211 goto out_mask;
1212 }
1213
1214 if (!shared) {
1215 ret = irq_request_resources(desc);
1216 if (ret) {
1217 pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1218 new->name, irq, desc->irq_data.chip->name);
1219 goto out_mask;
1220 }
1221
1222 init_waitqueue_head(&desc->wait_for_threads);
1223
1224
1225 if (new->flags & IRQF_TRIGGER_MASK) {
1226 ret = __irq_set_trigger(desc,
1227 new->flags & IRQF_TRIGGER_MASK);
1228
1229 if (ret)
1230 goto out_mask;
1231 }
1232
1233 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1234 IRQS_ONESHOT | IRQS_WAITING);
1235 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1236
1237 if (new->flags & IRQF_PERCPU) {
1238 irqd_set(&desc->irq_data, IRQD_PER_CPU);
1239 irq_settings_set_per_cpu(desc);
1240 }
1241
1242 if (new->flags & IRQF_ONESHOT)
1243 desc->istate |= IRQS_ONESHOT;
1244
1245 if (irq_settings_can_autoenable(desc))
1246 irq_startup(desc, true);
1247 else
1248
1249 desc->depth = 1;
1250
1251
1252 if (new->flags & IRQF_NOBALANCING) {
1253 irq_settings_set_no_balancing(desc);
1254 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1255 }
1256
1257
1258 setup_affinity(desc, mask);
1259
1260 } else if (new->flags & IRQF_TRIGGER_MASK) {
1261 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1262 unsigned int omsk = irq_settings_get_trigger_mask(desc);
1263
1264 if (nmsk != omsk)
1265
1266 pr_warning("irq %d uses trigger mode %u; requested %u\n",
1267 irq, nmsk, omsk);
1268 }
1269
1270 new->irq = irq;
1271 *old_ptr = new;
1272
1273 irq_pm_install_action(desc, new);
1274
1275
1276 desc->irq_count = 0;
1277 desc->irqs_unhandled = 0;
1278
1279
1280
1281
1282
1283 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1284 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1285 __enable_irq(desc);
1286 }
1287
1288 raw_spin_unlock_irqrestore(&desc->lock, flags);
1289
1290
1291
1292
1293
1294 if (new->thread)
1295 wake_up_process(new->thread);
1296
1297 register_irq_proc(irq, desc);
1298 new->dir = NULL;
1299 register_handler_proc(irq, new);
1300 free_cpumask_var(mask);
1301
1302 return 0;
1303
1304mismatch:
1305 if (!(new->flags & IRQF_PROBE_SHARED)) {
1306 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1307 irq, new->flags, new->name, old->flags, old->name);
1308#ifdef CONFIG_DEBUG_SHIRQ
1309 dump_stack();
1310#endif
1311 }
1312 ret = -EBUSY;
1313
1314out_mask:
1315 raw_spin_unlock_irqrestore(&desc->lock, flags);
1316 free_cpumask_var(mask);
1317
1318out_thread:
1319 if (new->thread) {
1320 struct task_struct *t = new->thread;
1321
1322 new->thread = NULL;
1323 kthread_stop(t);
1324 put_task_struct(t);
1325 }
1326out_mput:
1327 module_put(desc->owner);
1328 return ret;
1329}
1330
1331
1332
1333
1334
1335
1336
1337
1338int setup_irq(unsigned int irq, struct irqaction *act)
1339{
1340 int retval;
1341 struct irq_desc *desc = irq_to_desc(irq);
1342
1343 if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1344 return -EINVAL;
1345 chip_bus_lock(desc);
1346 retval = __setup_irq(irq, desc, act);
1347 chip_bus_sync_unlock(desc);
1348
1349 return retval;
1350}
1351EXPORT_SYMBOL_GPL(setup_irq);
1352
1353
1354
1355
1356
/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts.  Returns the removed action (for the
 * caller to kfree) or NULL if no matching dev_id was found.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the one
	 * we are looking for by walking the singly-linked action list.
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);

			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	irq_pm_remove_action(desc, action);

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		irq_shutdown(desc);
		irq_release_resources(desc);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ - the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed; fire the handler
	 * once here to catch drivers that free resources too early.
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
	}

	module_put(desc->owner);
	return action;
}
1438
1439
1440
1441
1442
1443
1444
1445
1446void remove_irq(unsigned int irq, struct irqaction *act)
1447{
1448 struct irq_desc *desc = irq_to_desc(irq);
1449
1450 if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1451 __free_irq(irq, act->dev_id);
1452}
1453EXPORT_SYMBOL_GPL(remove_irq);
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
/*
 * Free the interrupt handler identified by (@irq, @dev_id) and release
 * the irqaction memory.  May sleep (waits for running handlers); must
 * not be called from IRQ context.  The caller must have removed any
 * affinity notifier first - we warn and clear it if not.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	chip_bus_lock(desc);
	kfree(__free_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(free_irq);
1485EXPORT_SYMBOL(free_irq);
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
/**
 *	request_threaded_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Primary handler for threaded interrupts;
 *		  if NULL, the default primary handler is installed.
 *	@thread_fn: Function called from the irq handler thread.
 *		    If NULL, no irq thread is created.
 *	@irqflags: Interrupt type flags (IRQF_*)
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	Returns 0 on success or a negative errno.
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev_id,
	 * otherwise we'll have trouble later trying to figure out which
	 * interrupt is which (messes up the interrupt freeing logic etc).
	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts
	 * and it cannot be combined with IRQF_NO_SUSPEND.
	 */
	if (((irqflags & IRQF_SHARED) && !dev_id) ||
	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared
		 * for it to happen immediately, so fire the handler once
		 * with interrupts disabled to flush out early bugs.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
1603EXPORT_SYMBOL(request_threaded_irq);
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1623 unsigned long flags, const char *name, void *dev_id)
1624{
1625 struct irq_desc *desc = irq_to_desc(irq);
1626 int ret;
1627
1628 if (!desc)
1629 return -EINVAL;
1630
1631 if (irq_settings_is_nested_thread(desc)) {
1632 ret = request_threaded_irq(irq, NULL, handler,
1633 flags, name, dev_id);
1634 return !ret ? IRQC_IS_NESTED : ret;
1635 }
1636
1637 ret = request_irq(irq, handler, flags, name, dev_id);
1638 return !ret ? IRQC_IS_HARDIRQ : ret;
1639}
1640EXPORT_SYMBOL_GPL(request_any_context_irq);
1641
/*
 * Enable the per-CPU interrupt @irq on the calling CPU, optionally
 * programming a trigger @type first.  Must be called on the target CPU.
 */
void enable_percpu_irq(unsigned int irq, unsigned int type)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	type &= IRQ_TYPE_SENSE_MASK;
	if (type != IRQ_TYPE_NONE) {
		int ret;

		ret = __irq_set_trigger(desc, type);

		/* Don't enable with a broken trigger configuration. */
		if (ret) {
			WARN(1, "failed to set type for IRQ%d\n", irq);
			goto out;
		}
	}

	irq_percpu_enable(desc, cpu);
out:
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(enable_percpu_irq);
1667EXPORT_SYMBOL_GPL(enable_percpu_irq);
1668
1669void disable_percpu_irq(unsigned int irq)
1670{
1671 unsigned int cpu = smp_processor_id();
1672 unsigned long flags;
1673 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1674
1675 if (!desc)
1676 return;
1677
1678 irq_percpu_disable(desc, cpu);
1679 irq_put_desc_unlock(desc, flags);
1680}
1681EXPORT_SYMBOL_GPL(disable_percpu_irq);
1682
1683
1684
1685
/*
 * Internal function to unregister a percpu irqaction.
 *
 * Detaches and returns the irqaction whose percpu_dev_id matches
 * @dev_id, or NULL (with a WARN) if the IRQ is already free, the
 * cookie does not match, or the interrupt is still enabled on any CPU.
 * The caller owns the returned action and is responsible for freeing it.
 */
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	/* Freeing may sleep/sync; doing it from IRQ context is a bug. */
	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	action = desc->action;
	if (!action || action->percpu_dev_id != dev_id) {
		WARN(1, "Trying to free already-free IRQ %d\n", irq);
		goto bad;
	}

	/*
	 * Refuse to tear down while any CPU still has the interrupt
	 * enabled; callers must disable_percpu_irq() everywhere first.
	 */
	if (!cpumask_empty(desc->percpu_enabled)) {
		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
		     irq, cpumask_first(desc->percpu_enabled));
		goto bad;
	}

	/* Found it - now remove it from the list of entries: */
	desc->action = NULL;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	/* Proc teardown may sleep, so it must happen after dropping the lock. */
	unregister_handler_proc(irq, action);

	module_put(desc->owner);
	return action;

bad:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return NULL;
}
1725
1726
1727
1728
1729
1730
1731
1732
1733void remove_percpu_irq(unsigned int irq, struct irqaction *act)
1734{
1735 struct irq_desc *desc = irq_to_desc(irq);
1736
1737 if (desc && irq_settings_is_per_cpu_devid(desc))
1738 __free_percpu_irq(irq, act->percpu_dev_id);
1739}
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1754{
1755 struct irq_desc *desc = irq_to_desc(irq);
1756
1757 if (!desc || !irq_settings_is_per_cpu_devid(desc))
1758 return;
1759
1760 chip_bus_lock(desc);
1761 kfree(__free_percpu_irq(irq, dev_id));
1762 chip_bus_sync_unlock(desc);
1763}
1764
1765
1766
1767
1768
1769
1770
1771
1772int setup_percpu_irq(unsigned int irq, struct irqaction *act)
1773{
1774 struct irq_desc *desc = irq_to_desc(irq);
1775 int retval;
1776
1777 if (!desc || !irq_settings_is_per_cpu_devid(desc))
1778 return -EINVAL;
1779 chip_bus_lock(desc);
1780 retval = __setup_irq(irq, desc, act);
1781 chip_bus_sync_unlock(desc);
1782
1783 return retval;
1784}
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801int request_percpu_irq(unsigned int irq, irq_handler_t handler,
1802 const char *devname, void __percpu *dev_id)
1803{
1804 struct irqaction *action;
1805 struct irq_desc *desc;
1806 int retval;
1807
1808 if (!dev_id)
1809 return -EINVAL;
1810
1811 desc = irq_to_desc(irq);
1812 if (!desc || !irq_settings_can_request(desc) ||
1813 !irq_settings_is_per_cpu_devid(desc))
1814 return -EINVAL;
1815
1816 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1817 if (!action)
1818 return -ENOMEM;
1819
1820 action->handler = handler;
1821 action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
1822 action->name = devname;
1823 action->percpu_dev_id = dev_id;
1824
1825 chip_bus_lock(desc);
1826 retval = __setup_irq(irq, desc, action);
1827 chip_bus_sync_unlock(desc);
1828
1829 if (retval)
1830 kfree(action);
1831
1832 return retval;
1833}
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
1849 bool *state)
1850{
1851 struct irq_desc *desc;
1852 struct irq_data *data;
1853 struct irq_chip *chip;
1854 unsigned long flags;
1855 int err = -EINVAL;
1856
1857 desc = irq_get_desc_buslock(irq, &flags, 0);
1858 if (!desc)
1859 return err;
1860
1861 data = irq_desc_get_irq_data(desc);
1862
1863 do {
1864 chip = irq_data_get_irq_chip(data);
1865 if (chip->irq_get_irqchip_state)
1866 break;
1867#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1868 data = data->parent_data;
1869#else
1870 data = NULL;
1871#endif
1872 } while (data);
1873
1874 if (data)
1875 err = chip->irq_get_irqchip_state(data, which, state);
1876
1877 irq_put_desc_busunlock(desc, flags);
1878 return err;
1879}
1880EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
1895 bool val)
1896{
1897 struct irq_desc *desc;
1898 struct irq_data *data;
1899 struct irq_chip *chip;
1900 unsigned long flags;
1901 int err = -EINVAL;
1902
1903 desc = irq_get_desc_buslock(irq, &flags, 0);
1904 if (!desc)
1905 return err;
1906
1907 data = irq_desc_get_irq_data(desc);
1908
1909 do {
1910 chip = irq_data_get_irq_chip(data);
1911 if (chip->irq_set_irqchip_state)
1912 break;
1913#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1914 data = data->parent_data;
1915#else
1916 data = NULL;
1917#endif
1918 } while (data);
1919
1920 if (data)
1921 err = chip->irq_set_irqchip_state(data, which, val);
1922
1923 irq_put_desc_busunlock(desc, flags);
1924 return err;
1925}
1926EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
1927