#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/task_work.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;

static int __init setup_forced_irqthreads(char *arg)
{
        force_irqthreads = true;
        return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif
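/*
 * Note: forced threading is a boot-time switch rather than a per-driver
 * API.  Booting with "threadirqs" on the kernel command line sets
 * force_irqthreads, which makes handlers that are not marked
 * IRQF_NO_THREAD, IRQF_PERCPU or already set up as oneshot run from a
 * kernel thread (see irq_setup_forced_threading() below).
 */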

static void __synchronize_hardirq(struct irq_desc *desc)
{
        bool inprogress;

        do {
                unsigned long flags;

                /*
                 * Wait until we're out of the critical section.  This
                 * might give the wrong answer due to the lack of
                 * memory barriers.
                 */
                while (irqd_irq_inprogress(&desc->irq_data))
                        cpu_relax();

                /* Ok, that indicated we're done: double-check carefully. */
                raw_spin_lock_irqsave(&desc->lock, flags);
                inprogress = irqd_irq_inprogress(&desc->irq_data);
                raw_spin_unlock_irqrestore(&desc->lock, flags);

                /* Oops, that failed? */
        } while (inprogress);
}

/**
 *      synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 *      @irq: interrupt number to wait for
 *
 *      This function waits for any pending hard IRQ handlers for this
 *      interrupt to complete before returning.  It does not take threaded
 *      handlers into account, so it is not sufficient for shutdown
 *      scenarios where both the hardirq and the threaded handler must have
 *      finished.  Do not call it while holding a resource the IRQ handler
 *      may need, or you will deadlock.
 *
 *      This function may be called - with care - from IRQ context.
 */
void synchronize_hardirq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (desc)
                __synchronize_hardirq(desc);
}
EXPORT_SYMBOL(synchronize_hardirq);

/**
 *      synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *      @irq: interrupt number to wait for
 *
 *      This function waits for any pending IRQ handlers for this interrupt
 *      to complete before returning, including any threaded handlers.  If
 *      you use this function while holding a resource the IRQ handler may
 *      need you will deadlock.
 */
void synchronize_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (desc) {
                __synchronize_hardirq(desc);
                /*
                 * We made sure that no hardirq handler is running.
                 * Now verify that no threaded handlers are active.
                 */
                wait_event(desc->wait_for_threads,
                           !atomic_read(&desc->threads_active));
        }
}
EXPORT_SYMBOL(synchronize_irq);
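/*
 * Illustrative teardown sequence (driver names are hypothetical): quiesce
 * the device first, then make sure no handler instance is still running
 * before tearing down state the handler touches:
 *
 *      foo_hw_mask_interrupts(foo);
 *      synchronize_irq(foo->irq);
 *      foo_free_buffers(foo);
 *
 * synchronize_irq() may sleep waiting for threaded handlers, so it must
 * not be called from interrupt context.
 */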

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

/**
 *      irq_can_set_affinity - Check if the affinity of a given irq can be set
 *      @irq:           Interrupt to check
 */
int irq_can_set_affinity(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc || !irqd_can_balance(&desc->irq_data) ||
            !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
                return 0;

        return 1;
}

/**
 *      irq_set_thread_affinity - Notify irq threads to adjust affinity
 *      @desc:          irq descriptor whose affinity changed
 *
 *      We just set IRQTF_AFFINITY and delegate the affinity setting
 *      to the interrupt thread itself.  We can not call
 *      set_cpus_allowed_ptr() here as we hold desc->lock and this
 *      code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
        struct irqaction *action = desc->action;

        while (action) {
                if (action->thread)
                        set_bit(IRQTF_AFFINITY, &action->thread_flags);
                action = action->next;
        }
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
        return irqd_can_move_in_process_context(data);
}
static inline bool irq_move_pending(struct irq_data *data)
{
        return irqd_is_setaffinity_pending(data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
        cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
        cpumask_copy(mask, desc->pending_mask);
}
#else
static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
static inline bool irq_move_pending(struct irq_data *data) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
                        bool force)
{
        struct irq_desc *desc = irq_data_to_desc(data);
        struct irq_chip *chip = irq_data_get_irq_chip(data);
        int ret;

        ret = chip->irq_set_affinity(data, mask, force);
        switch (ret) {
        case IRQ_SET_MASK_OK:
        case IRQ_SET_MASK_OK_DONE:
                cpumask_copy(data->affinity, mask);
                /* fall through */
        case IRQ_SET_MASK_OK_NOCOPY:
                irq_set_thread_affinity(desc);
                ret = 0;
        }

        return ret;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
                            bool force)
{
        struct irq_chip *chip = irq_data_get_irq_chip(data);
        struct irq_desc *desc = irq_data_to_desc(data);
        int ret = 0;

        if (!chip || !chip->irq_set_affinity)
                return -EINVAL;

        if (irq_can_move_pcntxt(data)) {
                ret = irq_do_set_affinity(data, mask, force);
        } else {
                irqd_set_move_pending(data);
                irq_copy_pending(desc, mask);
        }

        if (desc->affinity_notify) {
                kref_get(&desc->affinity_notify->kref);
                schedule_work(&desc->affinity_notify->work);
        }
        irqd_set(data, IRQD_AFFINITY_SET);

        return ret;
}

int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
        int ret;

        if (!desc)
                return -EINVAL;

        raw_spin_lock_irqsave(&desc->lock, flags);
        ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

        if (!desc)
                return -EINVAL;
        desc->affinity_hint = m;
        irq_put_desc_unlock(desc, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
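/*
 * The hint set above is only advisory: it is exported to user space via
 * /proc/irq/<nr>/affinity_hint so that tools such as irqbalance can place
 * the interrupt sensibly; it does not change the affinity itself.  A
 * driver that sets a hint must clear it again with
 * irq_set_affinity_hint(irq, NULL) before calling free_irq(), otherwise
 * __free_irq() below warns about the stale pointer.
 */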

static void irq_affinity_notify(struct work_struct *work)
{
        struct irq_affinity_notify *notify =
                container_of(work, struct irq_affinity_notify, work);
        struct irq_desc *desc = irq_to_desc(notify->irq);
        cpumask_var_t cpumask;
        unsigned long flags;

        if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
                goto out;

        raw_spin_lock_irqsave(&desc->lock, flags);
        if (irq_move_pending(&desc->irq_data))
                irq_get_pending(cpumask, desc);
        else
                cpumask_copy(cpumask, desc->irq_data.affinity);
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        notify->notify(notify, cpumask);

        free_cpumask_var(cpumask);
out:
        kref_put(&notify->kref, notify->release);
}

/**
 *      irq_set_affinity_notifier - control notification of IRQ affinity changes
 *      @irq:           Interrupt for which to enable/disable notification
 *      @notify:        Context for notification, or %NULL to disable
 *                      notification.  Function pointers must be initialised;
 *                      the other fields will be initialised by this function.
 *
 *      Must be called in process context.  Notification may only be enabled
 *      after the IRQ is allocated and must be disabled before the IRQ is
 *      freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irq_affinity_notify *old_notify;
        unsigned long flags;

        /* The release function is promised process context */
        might_sleep();

        if (!desc)
                return -EINVAL;

        /* Complete initialisation of *notify */
        if (notify) {
                notify->irq = irq;
                kref_init(&notify->kref);
                INIT_WORK(&notify->work, irq_affinity_notify);
        }

        raw_spin_lock_irqsave(&desc->lock, flags);
        old_notify = desc->affinity_notify;
        desc->affinity_notify = notify;
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        if (old_notify)
                kref_put(&old_notify->kref, old_notify->release);

        return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
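/*
 * Typical (hypothetical) usage: embed a struct irq_affinity_notify in the
 * driver state, fill in the ->notify() and ->release() callbacks, and
 * register it once the interrupt has been requested:
 *
 *      foo->notify.notify = foo_affinity_changed;
 *      foo->notify.release = foo_notify_release;
 *      irq_set_affinity_notifier(foo->irq, &foo->notify);
 *
 * The callback runs from a workqueue with the new cpumask.  Unregister
 * with irq_set_affinity_notifier(irq, NULL) before free_irq().
 */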

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
        struct cpumask *set = irq_default_affinity;
        int node = desc->irq_data.node;

        /* Excludes PER_CPU and NO_BALANCE interrupts */
        if (!irq_can_set_affinity(irq))
                return 0;

        /*
         * Preserve a userspace affinity setup, but make sure that
         * one of the targets is online.
         */
        if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
                if (cpumask_intersects(desc->irq_data.affinity,
                                       cpu_online_mask))
                        set = desc->irq_data.affinity;
                else
                        irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
        }

        cpumask_and(mask, cpu_online_mask, set);
        if (node != NUMA_NO_NODE) {
                const struct cpumask *nodemask = cpumask_of_node(node);

                /* make sure at least one of the cpus in nodemask is online */
                if (cpumask_intersects(mask, nodemask))
                        cpumask_and(mask, mask, nodemask);
        }
        irq_do_set_affinity(&desc->irq_data, mask, false);
        return 0;
}
#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
{
        return irq_select_affinity(irq);
}
#endif

/*
 * Called from the /proc/irq code when the user-supplied affinity does not
 * contain any online CPU; falls back to the autoselected default.
 */
int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&desc->lock, flags);
        ret = setup_affinity(irq, desc, mask);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
}

#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
        return 0;
}
#endif

void __disable_irq(struct irq_desc *desc, unsigned int irq)
{
        if (!desc->depth++)
                irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

        if (!desc)
                return -EINVAL;
        __disable_irq(desc, irq);
        irq_put_desc_busunlock(desc, flags);
        return 0;
}

/**
 *      disable_irq_nosync - disable an irq without waiting
 *      @irq: Interrupt to disable
 *
 *      Disable the selected interrupt line.  Disables and enables are
 *      nested.
 *      Unlike disable_irq(), this function does not ensure existing
 *      instances of the IRQ handler have completed before returning.
 *
 *      This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
        __disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 *      disable_irq - disable an irq and wait for completion
 *      @irq: Interrupt to disable
 *
 *      Disable the selected interrupt line.  Enables and disables are
 *      nested.
 *      This function waits for any pending IRQ handlers for this interrupt
 *      to complete before returning, including threaded handlers, and may
 *      therefore sleep.  If you use this function while holding a resource
 *      the IRQ handler may need you will deadlock.
 */
void disable_irq(unsigned int irq)
{
        if (!__disable_irq_nosync(irq))
                synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
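/*
 * disable_irq()/enable_irq() calls nest: the line is only re-enabled when
 * the enable count balances the disables again.  A (hypothetical) driver
 * protecting a reconfiguration step would do:
 *
 *      disable_irq(foo->irq);
 *      foo_reprogram_hardware(foo);
 *      enable_irq(foo->irq);
 *
 * Unbalanced enable_irq() calls trigger the "Unbalanced enable" warning
 * in __enable_irq() below.
 */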

void __enable_irq(struct irq_desc *desc, unsigned int irq)
{
        switch (desc->depth) {
        case 0:
 err_out:
                WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
                break;
        case 1: {
                if (desc->istate & IRQS_SUSPENDED)
                        goto err_out;
                /* Prevent probing on this irq: */
                irq_settings_set_noprobe(desc);
                irq_enable(desc);
                check_irq_resend(desc, irq);
                /* fall-through */
        }
        default:
                desc->depth--;
        }
}

/**
 *      enable_irq - enable handling of an irq
 *      @irq: Interrupt to enable
 *
 *      Undoes the effect of one call to disable_irq().  If this
 *      matches the last disable, processing of interrupts on this
 *      IRQ line is re-enabled.
 *
 *      This function may be called from IRQ context only when
 *      desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

        if (!desc)
                return;
        if (WARN(!desc->irq_data.chip,
                 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
                goto out;

        __enable_irq(desc, irq);
out:
        irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int ret = -ENXIO;

        if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
                return 0;

        if (desc->irq_data.chip->irq_set_wake)
                ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

        return ret;
}

/**
 *      irq_set_irq_wake - control irq power management wakeup
 *      @irq: interrupt to control
 *      @on: enable/disable power management wakeup
 *
 *      Enable/disable power management wakeup mode, which is
 *      disabled by default.  Enables and disables must match,
 *      just as they match for non-wakeup mode support.
 *
 *      Wakeup mode lets this IRQ wake the system from sleep
 *      states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
        int ret = 0;

        if (!desc)
                return -EINVAL;

        /*
         * wakeup-capable irqs can be shared between drivers that
         * don't need to have the same sleep mode behaviors.
         */
        if (on) {
                if (desc->wake_depth++ == 0) {
                        ret = set_irq_wake_real(irq, on);
                        if (ret)
                                desc->wake_depth = 0;
                        else
                                irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
                }
        } else {
                if (desc->wake_depth == 0) {
                        WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
                } else if (--desc->wake_depth == 0) {
                        ret = set_irq_wake_real(irq, on);
                        if (ret)
                                desc->wake_depth = 1;
                        else
                                irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
                }
        }
        irq_put_desc_busunlock(desc, flags);
        return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
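/*
 * Drivers normally use the enable_irq_wake()/disable_irq_wake() wrappers
 * from <linux/interrupt.h> rather than calling this directly, typically
 * from their suspend/resume callbacks (driver shown is hypothetical):
 *
 *      static int foo_suspend(struct device *dev)
 *      {
 *              struct foo *foo = dev_get_drvdata(dev);
 *
 *              if (device_may_wakeup(dev))
 *                      enable_irq_wake(foo->irq);
 *              return 0;
 *      }
 *
 * Wake enables nest like the disable/enable counts above, so suspend and
 * resume paths must stay balanced.
 */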

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
        int canrequest = 0;

        if (!desc)
                return 0;

        if (irq_settings_can_request(desc)) {
                if (!desc->action ||
                    irqflags & desc->action->flags & IRQF_SHARED)
                        canrequest = 1;
        }
        irq_put_desc_unlock(desc, flags);
        return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
                      unsigned long flags)
{
        struct irq_chip *chip = desc->irq_data.chip;
        int ret, unmask = 0;

        if (!chip || !chip->irq_set_type) {
                /*
                 * IRQF_TRIGGER_* but the PIC does not support multiple
                 * flow-types?
                 */
                pr_debug("No set_type function for IRQ %d (%s)\n", irq,
                         chip ? (chip->name ? : "unknown") : "unknown");
                return 0;
        }

        flags &= IRQ_TYPE_SENSE_MASK;

        if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
                if (!irqd_irq_masked(&desc->irq_data))
                        mask_irq(desc);
                if (!irqd_irq_disabled(&desc->irq_data))
                        unmask = 1;
        }

        /* caller masked out all except trigger mode flags */
        ret = chip->irq_set_type(&desc->irq_data, flags);

        switch (ret) {
        case IRQ_SET_MASK_OK:
        case IRQ_SET_MASK_OK_DONE:
                irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
                irqd_set(&desc->irq_data, flags);
                /* fall through */
        case IRQ_SET_MASK_OK_NOCOPY:
                flags = irqd_get_trigger_type(&desc->irq_data);
                irq_settings_set_trigger_mask(desc, flags);
                irqd_clear(&desc->irq_data, IRQD_LEVEL);
                irq_settings_clr_level(desc);
                if (flags & IRQ_TYPE_LEVEL_MASK) {
                        irq_settings_set_level(desc);
                        irqd_set(&desc->irq_data, IRQD_LEVEL);
                }

                ret = 0;
                break;
        default:
                pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
                       flags, irq, chip->irq_set_type);
        }
        if (unmask)
                unmask_irq(desc);
        return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

        if (!desc)
                return -EINVAL;

        desc->parent_irq = parent_irq;

        irq_put_desc_unlock(desc, flags);
        return 0;
}
#endif

/*
 * Default primary interrupt handler for threaded interrupts.  It is
 * assigned as primary handler when request_threaded_irq() is called
 * with handler == NULL.  Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
        return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts.  Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
        WARN(1, "Primary handler called for nested irq %d\n", irq);
        return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {

                if (test_and_clear_bit(IRQTF_RUNTHREAD,
                                       &action->thread_flags)) {
                        __set_current_state(TASK_RUNNING);
                        return 0;
                }
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler finished.  unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
                                 struct irqaction *action)
{
        if (!(desc->istate & IRQS_ONESHOT))
                return;
again:
        chip_bus_lock(desc);
        raw_spin_lock_irq(&desc->lock);

        /*
         * Implausible though it may be, we need to protect against
         * the following scenario:
         *
         * The thread is faster done than the hard interrupt handler
         * on the other CPU.  If we unmask the irq line then the
         * interrupt can come in again and masks the line, leaves due
         * to IRQS_INPROGRESS and the irq line is masked forever.
         *
         * This also serializes the state of shared oneshot handlers
         * versus "desc->threads_oneshot |= action->thread_mask;" in
         * irq_wake_thread().  See the comment there which explains the
         * serialization.
         */
        if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
                raw_spin_unlock_irq(&desc->lock);
                chip_bus_sync_unlock(desc);
                cpu_relax();
                goto again;
        }

        /*
         * Now check again, whether the thread should run.  Otherwise
         * we would clear the threads_oneshot bit of this thread which
         * was just set.
         */
        if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
                goto out_unlock;

        desc->threads_oneshot &= ~action->thread_mask;

        if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
            irqd_irq_masked(&desc->irq_data))
                unmask_threaded_irq(desc);

out_unlock:
        raw_spin_unlock_irq(&desc->lock);
        chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
        cpumask_var_t mask;
        bool valid = true;

        if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
                return;

        /*
         * In case we are out of memory we set IRQTF_AFFINITY again and
         * try again next time
         */
        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
                set_bit(IRQTF_AFFINITY, &action->thread_flags);
                return;
        }

        raw_spin_lock_irq(&desc->lock);
        /*
         * This code is triggered unconditionally.  Check the affinity
         * mask pointer.  For CPU_MASK_OFFSTACK=n this is optimized out.
         */
        if (desc->irq_data.affinity)
                cpumask_copy(mask, desc->irq_data.affinity);
        else
                valid = false;
        raw_spin_unlock_irq(&desc->lock);

        if (valid)
                set_cpus_allowed_ptr(current, mask);
        free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context.  So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
        irqreturn_t ret;

        local_bh_disable();
        ret = action->thread_fn(action->irq, action->dev_id);
        irq_finalize_oneshot(desc, action);
        local_bh_enable();
        return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
                                 struct irqaction *action)
{
        irqreturn_t ret;

        ret = action->thread_fn(action->irq, action->dev_id);
        irq_finalize_oneshot(desc, action);
        return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
        if (atomic_dec_and_test(&desc->threads_active))
                wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct callback_head *unused)
{
        struct task_struct *tsk = current;
        struct irq_desc *desc;
        struct irqaction *action;

        if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
                return;

        action = kthread_data(tsk);

        pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
               tsk->comm, tsk->pid, action->irq);

        desc = irq_to_desc(action->irq);
        /*
         * If IRQTF_RUNTHREAD is set, we need to decrement
         * desc->threads_active and wake possible waiters.
         */
        if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
                wake_threads_waitq(desc);

        /* Prevent a stale desc->threads_oneshot */
        irq_finalize_oneshot(desc, action);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
        struct callback_head on_exit_work;
        struct irqaction *action = data;
        struct irq_desc *desc = irq_to_desc(action->irq);
        irqreturn_t (*handler_fn)(struct irq_desc *desc,
                                  struct irqaction *action);

        if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
                                         &action->thread_flags))
                handler_fn = irq_forced_thread_fn;
        else
                handler_fn = irq_thread_fn;

        init_task_work(&on_exit_work, irq_thread_dtor);
        task_work_add(current, &on_exit_work, false);

        irq_thread_check_affinity(desc, action);

        while (!irq_wait_for_interrupt(action)) {
                irqreturn_t action_ret;

                irq_thread_check_affinity(desc, action);

                action_ret = handler_fn(desc, action);
                if (action_ret == IRQ_HANDLED)
                        atomic_inc(&desc->threads_handled);

                wake_threads_waitq(desc);
        }

        /*
         * This is the regular exit path.  __free_irq() is stopping the
         * thread via kthread_stop() after calling synchronize_irq(),
         * so neither IRQTF_RUNTHREAD nor the oneshot mask bit can be
         * set.  We cannot verify that here, as we cannot touch the
         * oneshot mask at this point anymore: __setup_irq() might have
         * given out currently running interrupts.
         */
        task_work_cancel(current, irq_thread_dtor);
        return 0;
}

/**
 *      irq_wake_thread - wake the irq thread for the action identified by dev_id
 *      @irq:           Interrupt line
 *      @dev_id:        Device identity for which the thread should be woken
 */
void irq_wake_thread(unsigned int irq, void *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action;
        unsigned long flags;

        if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
                return;

        raw_spin_lock_irqsave(&desc->lock, flags);
        for (action = desc->action; action; action = action->next) {
                if (action->dev_id == dev_id) {
                        if (action->thread)
                                __irq_wake_thread(desc, action);
                        break;
                }
        }
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);
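/*
 * This lets a driver kick its own threaded handler for @dev_id without a
 * hardware interrupt, e.g. when it learns by other means (a timeout, a
 * message from firmware) that there is work for the thread to do:
 *
 *      irq_wake_thread(foo->irq, foo);         (driver names hypothetical)
 *
 * The action is matched by dev_id exactly as in free_irq().
 */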

static void irq_setup_forced_threading(struct irqaction *new)
{
        if (!force_irqthreads)
                return;
        if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
                return;

        new->flags |= IRQF_ONESHOT;

        if (!new->thread_fn) {
                set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
                new->thread_fn = new->handler;
                new->handler = irq_default_primary_handler;
        }
}

static int irq_request_resources(struct irq_desc *desc)
{
        struct irq_data *d = &desc->irq_data;
        struct irq_chip *c = d->chip;

        return c->irq_request_resources ? c->irq_request_resources(d) : 0;
}

static void irq_release_resources(struct irq_desc *desc)
{
        struct irq_data *d = &desc->irq_data;
        struct irq_chip *c = d->chip;

        if (c->irq_release_resources)
                c->irq_release_resources(d);
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
        struct irqaction *old, **old_ptr;
        unsigned long flags, thread_mask = 0;
        int ret, nested, shared = 0;
        cpumask_var_t mask;

        if (!desc)
                return -EINVAL;

        if (desc->irq_data.chip == &no_irq_chip)
                return -ENOSYS;
        if (!try_module_get(desc->owner))
                return -ENODEV;

        /*
         * Check whether the interrupt nests into another interrupt
         * thread.
         */
        nested = irq_settings_is_nested_thread(desc);
        if (nested) {
                if (!new->thread_fn) {
                        ret = -EINVAL;
                        goto out_mput;
                }
                /*
                 * Replace the primary handler which was provided from
                 * the driver for non nested interrupt handling by the
                 * dummy function which warns when called.
                 */
                new->handler = irq_nested_primary_handler;
        } else {
                if (irq_settings_can_thread(desc))
                        irq_setup_forced_threading(new);
        }

        /*
         * Create a handler thread when a thread function is supplied
         * and the interrupt does not nest into another interrupt
         * thread.
         */
        if (new->thread_fn && !nested) {
                struct task_struct *t;
                static const struct sched_param param = {
                        .sched_priority = MAX_USER_RT_PRIO/2,
                };

                t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
                                   new->name);
                if (IS_ERR(t)) {
                        ret = PTR_ERR(t);
                        goto out_mput;
                }

                sched_setscheduler_nocheck(t, SCHED_FIFO, &param);

                /*
                 * We keep the reference to the task struct even if
                 * the thread dies to avoid that the interrupt code
                 * references an already freed task_struct.
                 */
                get_task_struct(t);
                new->thread = t;
                /*
                 * Tell the thread to set its affinity.  This is
                 * important for shared interrupt handlers as we do
                 * not invoke setup_affinity() for the secondary
                 * handlers as everything is already set up.  Even for
                 * interrupts marked with IRQF_NOBALANCING this is
                 * correct as we want the thread to move to the cpu(s)
                 * on which the requesting code placed the interrupt.
                 */
                set_bit(IRQTF_AFFINITY, &new->thread_flags);
        }

        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto out_thread;
        }

        /*
         * Drivers are often written to work w/o knowledge about the
         * underlying irq chip implementation, so a request for a
         * threaded irq without a primary hard irq context handler
         * requires the ONESHOT flag to be set.  Some irq chips like
         * MSI based interrupts are per se one shot safe.  Check the
         * chip flags, so we can avoid the unmask dance at the end of
         * the threaded handler for those.
         */
        if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
                new->flags &= ~IRQF_ONESHOT;

        /*
         * The following block of code has to be executed atomically
         */
        raw_spin_lock_irqsave(&desc->lock, flags);
        old_ptr = &desc->action;
        old = *old_ptr;
        if (old) {
                /*
                 * Can't share interrupts unless both agree to and are
                 * the same type (level, edge, polarity).  So both flag
                 * fields must have IRQF_SHARED set and the bits which
                 * set the trigger type must match.  Also all must
                 * agree on ONESHOT.
                 */
                if (!((old->flags & new->flags) & IRQF_SHARED) ||
                    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
                    ((old->flags ^ new->flags) & IRQF_ONESHOT))
                        goto mismatch;

                /* All handlers must agree on per-cpuness */
                if ((old->flags & IRQF_PERCPU) !=
                    (new->flags & IRQF_PERCPU))
                        goto mismatch;

                /* add new interrupt at end of irq queue */
                do {
                        /*
                         * Or all existing action->thread_mask bits,
                         * so we can find the next zero bit for this
                         * new action.
                         */
                        thread_mask |= old->thread_mask;
                        old_ptr = &old->next;
                        old = *old_ptr;
                } while (old);
                shared = 1;
        }

        /*
         * Setup the thread mask for this irqaction for ONESHOT.  For
         * !ONESHOT irqs the thread mask is 0 so we can avoid a
         * conditional in irq_wake_thread().
         */
        if (new->flags & IRQF_ONESHOT) {
                /*
                 * Unlikely to have 32 resp 64 irqs sharing one line,
                 * but who knows.
                 */
                if (thread_mask == ~0UL) {
                        ret = -EBUSY;
                        goto out_mask;
                }
                /*
                 * The thread_mask bit of an action is or'ed into
                 * desc->threads_oneshot when its thread is woken and
                 * cleared again when the thread finishes.  Once all
                 * threads of a shared oneshot line have completed,
                 * desc->threads_oneshot becomes zero and the line is
                 * unmasked; see irq_finalize_oneshot() and
                 * irq_wake_thread().  The new action simply gets the
                 * first zero bit of the accumulated thread_mask.
                 */
                new->thread_mask = 1 << ffz(thread_mask);

        } else if (new->handler == irq_default_primary_handler &&
                   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
                /*
                 * The interrupt was requested with handler = NULL, so
                 * we use the default primary handler for it.  But it
                 * does not have the oneshot flag set.  In combination
                 * with level interrupts this is deadly, because the
                 * default primary handler just wakes the thread, then
                 * the irq line is reenabled, but the device still
                 * has the level irq asserted.  Rinse and repeat....
                 *
                 * While this works for edge type interrupts, we play
                 * it safe and reject unconditionally because we can't
                 * say for sure which type this interrupt really
                 * has.  The type flags are unreliable as the
                 * underlying chip implementation can override them.
                 */
                pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
                       irq);
                ret = -EINVAL;
                goto out_mask;
        }

        if (!shared) {
                ret = irq_request_resources(desc);
                if (ret) {
                        pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
                               new->name, irq, desc->irq_data.chip->name);
                        goto out_mask;
                }

                init_waitqueue_head(&desc->wait_for_threads);

                /* Setup the type (level, edge polarity) if configured: */
                if (new->flags & IRQF_TRIGGER_MASK) {
                        ret = __irq_set_trigger(desc, irq,
                                                new->flags & IRQF_TRIGGER_MASK);

                        if (ret)
                                goto out_mask;
                }

                desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
                                  IRQS_ONESHOT | IRQS_WAITING);
                irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

                if (new->flags & IRQF_PERCPU) {
                        irqd_set(&desc->irq_data, IRQD_PER_CPU);
                        irq_settings_set_per_cpu(desc);
                }

                if (new->flags & IRQF_ONESHOT)
                        desc->istate |= IRQS_ONESHOT;

                if (irq_settings_can_autoenable(desc))
                        irq_startup(desc, true);
                else
                        /* Undo nested disables: */
                        desc->depth = 1;

                /* Exclude IRQ from balancing if requested */
                if (new->flags & IRQF_NOBALANCING) {
                        irq_settings_set_no_balancing(desc);
                        irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
                }

                /* Set default affinity mask once everything is setup */
                setup_affinity(irq, desc, mask);

        } else if (new->flags & IRQF_TRIGGER_MASK) {
                unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
                unsigned int omsk = irq_settings_get_trigger_mask(desc);

                if (nmsk != omsk)
                        /* hope the handler works with the current trigger mode */
                        pr_warning("irq %d uses trigger mode %u; requested %u\n",
                                   irq, omsk, nmsk);
        }

        new->irq = irq;
        *old_ptr = new;

        irq_pm_install_action(desc, new);

        /* Reset broken irq detection when installing new handler */
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;

        /*
         * Check whether we disabled the irq via the spurious handler
         * before.  Reenable it and give it another chance.
         */
        if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
                desc->istate &= ~IRQS_SPURIOUS_DISABLED;
                __enable_irq(desc, irq);
        }

        raw_spin_unlock_irqrestore(&desc->lock, flags);

        /*
         * Strictly no need to wake it up, but hung_task complains
         * when no hard interrupt wakes the thread up.
         */
        if (new->thread)
                wake_up_process(new->thread);

        register_irq_proc(irq, desc);
        new->dir = NULL;
        register_handler_proc(irq, new);
        free_cpumask_var(mask);

        return 0;

mismatch:
        if (!(new->flags & IRQF_PROBE_SHARED)) {
                pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
                       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
                dump_stack();
#endif
        }
        ret = -EBUSY;

out_mask:
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        free_cpumask_var(mask);

out_thread:
        if (new->thread) {
                struct task_struct *t = new->thread;

                new->thread = NULL;
                kthread_stop(t);
                put_task_struct(t);
        }
out_mput:
        module_put(desc->owner);
        return ret;
}

/**
 *      setup_irq - setup an interrupt
 *      @irq: Interrupt line to setup
 *      @act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
        int retval;
        struct irq_desc *desc = irq_to_desc(irq);

        if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
                return -EINVAL;
        chip_bus_lock(desc);
        retval = __setup_irq(irq, desc, act);
        chip_bus_sync_unlock(desc);

        return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action, **action_ptr;
        unsigned long flags;

        WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

        if (!desc)
                return NULL;

        raw_spin_lock_irqsave(&desc->lock, flags);

        /*
         * There can be multiple actions per IRQ descriptor, find the right
         * one based on the dev_id:
         */
        action_ptr = &desc->action;
        for (;;) {
                action = *action_ptr;

                if (!action) {
                        WARN(1, "Trying to free already-free IRQ %d\n", irq);
                        raw_spin_unlock_irqrestore(&desc->lock, flags);

                        return NULL;
                }

                if (action->dev_id == dev_id)
                        break;
                action_ptr = &action->next;
        }

        /* Found it - now remove it from the list of entries: */
        *action_ptr = action->next;

        irq_pm_remove_action(desc, action);

        /* If this was the last handler, shut down the IRQ line: */
        if (!desc->action) {
                irq_shutdown(desc);
                irq_release_resources(desc);
        }

#ifdef CONFIG_SMP
        /* make sure affinity_hint is cleaned up */
        if (WARN_ON_ONCE(desc->affinity_hint))
                desc->affinity_hint = NULL;
#endif

        raw_spin_unlock_irqrestore(&desc->lock, flags);

        unregister_handler_proc(irq, action);

        /* Make sure it's not being used on another CPU: */
        synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
        /*
         * It's a shared IRQ -- the driver ought to be prepared for an IRQ
         * event to happen even now it's being freed, so let's make sure that
         * is so by doing an extra call to the handler ....
         *
         * ( We do this after actually deregistering it, to make sure that a
         *   'real' IRQ doesn't run in parallel with our fake. )
         */
        if (action->flags & IRQF_SHARED) {
                local_irq_save(flags);
                action->handler(irq, dev_id);
                local_irq_restore(flags);
        }
#endif

        if (action->thread) {
                kthread_stop(action->thread);
                put_task_struct(action->thread);
        }

        module_put(desc->owner);
        return action;
}

/**
 *      remove_irq - free an interrupt
 *      @irq: Interrupt line to free
 *      @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
                __free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 *      free_irq - free an interrupt allocated with request_irq
 *      @irq: Interrupt line to free
 *      @dev_id: Device identity to free
 *
 *      Remove an interrupt handler.  The handler is removed and if the
 *      interrupt line is no longer in use by any driver it is disabled.
 *      On a shared IRQ the caller must ensure the interrupt is disabled
 *      on the card it drives before calling this function.  The function
 *      does not return until any executing interrupts for this IRQ
 *      have completed.
 *
 *      This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
                return;

#ifdef CONFIG_SMP
        if (WARN_ON(desc->affinity_notify))
                desc->affinity_notify = NULL;
#endif

        chip_bus_lock(desc);
        kfree(__free_irq(irq, dev_id));
        chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(free_irq);

/**
 *      request_threaded_irq - allocate an interrupt line
 *      @irq: Interrupt line to allocate
 *      @handler: Function to be called when the IRQ occurs.
 *                Primary handler for threaded interrupts.
 *                If NULL and thread_fn != NULL the default
 *                primary handler is installed.
 *      @thread_fn: Function called from the irq handler thread.
 *                  If NULL, no irq thread is created.
 *      @irqflags: Interrupt type flags
 *      @devname: An ascii name for the claiming device
 *      @dev_id: A cookie passed back to the handler function
 *
 *      This call allocates interrupt resources and enables the
 *      interrupt line and IRQ handling.  From the point this
 *      call is made your handler function may be invoked.  Since
 *      your handler function must clear any interrupt the board
 *      raises, you must take care both to initialise your hardware
 *      and to set up the interrupt handler in the right order.
 *
 *      If you want to set up a threaded irq handler for your device
 *      then you need to supply @handler and @thread_fn.  @handler is
 *      still called in hard interrupt context and has to check
 *      whether the interrupt originates from the device.  If yes it
 *      needs to disable the interrupt on the device and return
 *      IRQ_WAKE_THREAD which will wake up the handler thread and run
 *      @thread_fn.  This split handler design is necessary to support
 *      shared interrupts.
 *
 *      @dev_id must be globally unique.  Normally the address of the
 *      device data structure is used as the cookie.  Since the handler
 *      receives this value it makes sense to use it.
 *
 *      If your interrupt is shared you must pass a non NULL dev_id
 *      as this is required when freeing the interrupt.
 *
 *      Flags:
 *
 *      IRQF_SHARED             Interrupt is shared
 *      IRQF_TRIGGER_*          Specify active edge(s) or level
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
                         irq_handler_t thread_fn, unsigned long irqflags,
                         const char *devname, void *dev_id)
{
        struct irqaction *action;
        struct irq_desc *desc;
        int retval;

        /*
         * Sanity-check: shared interrupts must pass in a real dev-ID,
         * otherwise we'll have trouble later trying to figure out
         * which interrupt is which (messes up the interrupt freeing
         * logic etc).
         */
        if ((irqflags & IRQF_SHARED) && !dev_id)
                return -EINVAL;

        desc = irq_to_desc(irq);
        if (!desc)
                return -EINVAL;

        if (!irq_settings_can_request(desc) ||
            WARN_ON(irq_settings_is_per_cpu_devid(desc)))
                return -EINVAL;

        if (!handler) {
                if (!thread_fn)
                        return -EINVAL;
                handler = irq_default_primary_handler;
        }

        action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
        if (!action)
                return -ENOMEM;

        action->handler = handler;
        action->thread_fn = thread_fn;
        action->flags = irqflags;
        action->name = devname;
        action->dev_id = dev_id;

        chip_bus_lock(desc);
        retval = __setup_irq(irq, desc, action);
        chip_bus_sync_unlock(desc);

        if (retval)
                kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
        if (!retval && (irqflags & IRQF_SHARED)) {
                /*
                 * It's a shared IRQ -- the driver ought to be prepared for it
                 * to happen immediately, so let's make sure....
                 * We disable the irq to make sure that a 'real' IRQ doesn't
                 * run in parallel with our fake.
                 */
                unsigned long flags;

                disable_irq(irq);
                local_irq_save(flags);

                handler(irq, dev_id);

                local_irq_restore(flags);
                enable_irq(irq);
        }
#endif
        return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
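/*
 * Illustrative request (all driver symbols hypothetical): a primary
 * handler that only checks and acknowledges the device, plus a threaded
 * handler that does the sleeping work:
 *
 *      ret = request_threaded_irq(foo->irq, foo_quick_check_handler,
 *                                 foo_thread_handler, IRQF_SHARED,
 *                                 "foo", foo);
 *
 * Passing handler == NULL installs irq_default_primary_handler(), which
 * requires IRQF_ONESHOT unless the irqchip is IRQCHIP_ONESHOT_SAFE (see
 * the check in __setup_irq()).  request_irq() in <linux/interrupt.h> is a
 * thin wrapper around this function with thread_fn == NULL.
 */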

/**
 *      request_any_context_irq - allocate an interrupt line
 *      @irq: Interrupt line to allocate
 *      @handler: Function to be called when the IRQ occurs.
 *                Threaded handler for threaded interrupts.
 *      @flags: Interrupt type flags
 *      @name: An ascii name for the claiming device
 *      @dev_id: A cookie passed back to the handler function
 *
 *      This call allocates interrupt resources and enables the
 *      interrupt line and IRQ handling.  It selects either a
 *      hardirq or threaded handling method depending on the
 *      context.
 *
 *      On failure, it returns a negative value.  On success,
 *      it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
                            unsigned long flags, const char *name, void *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int ret;

        if (!desc)
                return -EINVAL;

        if (irq_settings_is_nested_thread(desc)) {
                ret = request_threaded_irq(irq, NULL, handler,
                                           flags, name, dev_id);
                return !ret ? IRQC_IS_NESTED : ret;
        }

        ret = request_irq(irq, handler, flags, name, dev_id);
        return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
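/*
 * Because the success value is IRQC_IS_HARDIRQ or IRQC_IS_NESTED rather
 * than 0, callers should only test for errors with a sign check:
 *
 *      ret = request_any_context_irq(irq, foo_handler, 0, "foo", foo);
 *      if (ret < 0)
 *              return ret;
 *
 * (foo_handler/foo are placeholder names.)
 */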

void enable_percpu_irq(unsigned int irq, unsigned int type)
{
        unsigned int cpu = smp_processor_id();
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

        if (!desc)
                return;

        type &= IRQ_TYPE_SENSE_MASK;
        if (type != IRQ_TYPE_NONE) {
                int ret;

                ret = __irq_set_trigger(desc, irq, type);

                if (ret) {
                        WARN(1, "failed to set type for IRQ%d\n", irq);
                        goto out;
                }
        }

        irq_percpu_enable(desc, cpu);
out:
        irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(enable_percpu_irq);

void disable_percpu_irq(unsigned int irq)
{
        unsigned int cpu = smp_processor_id();
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

        if (!desc)
                return;

        irq_percpu_disable(desc, cpu);
        irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(disable_percpu_irq);

/*
 * Internal function to unregister a percpu irqaction.
 */
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action;
        unsigned long flags;

        WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

        if (!desc)
                return NULL;

        raw_spin_lock_irqsave(&desc->lock, flags);

        action = desc->action;
        if (!action || action->percpu_dev_id != dev_id) {
                WARN(1, "Trying to free already-free IRQ %d\n", irq);
                goto bad;
        }

        if (!cpumask_empty(desc->percpu_enabled)) {
                WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
                     irq, cpumask_first(desc->percpu_enabled));
                goto bad;
        }

        /* Found it - now remove it from the list of entries: */
        desc->action = NULL;

        raw_spin_unlock_irqrestore(&desc->lock, flags);

        unregister_handler_proc(irq, action);

        module_put(desc->owner);
        return action;

bad:
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return NULL;
}

/**
 *      remove_percpu_irq - free a per-cpu interrupt
 *      @irq: Interrupt line to free
 *      @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_percpu_irq(unsigned int irq, struct irqaction *act)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (desc && irq_settings_is_per_cpu_devid(desc))
                __free_percpu_irq(irq, act->percpu_dev_id);
}

/**
 *      free_percpu_irq - free an interrupt allocated with request_percpu_irq
 *      @irq: Interrupt line to free
 *      @dev_id: Device identity to free
 *
 *      Remove a percpu interrupt handler.  The handler is removed, but
 *      the interrupt line is not disabled.  This must be done on each
 *      CPU before calling this function.  The function does not return
 *      until any executing interrupts for this IRQ have completed.
 *
 *      This function must not be called from interrupt context.
 */
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc || !irq_settings_is_per_cpu_devid(desc))
                return;

        chip_bus_lock(desc);
        kfree(__free_percpu_irq(irq, dev_id));
        chip_bus_sync_unlock(desc);
}

/**
 *      setup_percpu_irq - setup a per-cpu interrupt
 *      @irq: Interrupt line to setup
 *      @act: irqaction for the interrupt
 *
 * Used to statically setup per-cpu interrupts in the early boot process.
 */
int setup_percpu_irq(unsigned int irq, struct irqaction *act)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int retval;

        if (!desc || !irq_settings_is_per_cpu_devid(desc))
                return -EINVAL;
        chip_bus_lock(desc);
        retval = __setup_irq(irq, desc, act);
        chip_bus_sync_unlock(desc);

        return retval;
}

/**
 *      request_percpu_irq - allocate a percpu interrupt line
 *      @irq: Interrupt line to allocate
 *      @handler: Function to be called when the IRQ occurs.
 *      @devname: An ascii name for the claiming device
 *      @dev_id: A percpu cookie passed back to the handler function
 *
 *      This call allocates interrupt resources, but doesn't
 *      automatically enable the interrupt.  It has to be done on each
 *      CPU using enable_percpu_irq().
 *
 *      @dev_id must be globally unique.  It is a per-cpu variable, and
 *      the handler gets called with the interrupted CPU's instance of
 *      that variable.
 */
int request_percpu_irq(unsigned int irq, irq_handler_t handler,
                       const char *devname, void __percpu *dev_id)
{
        struct irqaction *action;
        struct irq_desc *desc;
        int retval;

        if (!dev_id)
                return -EINVAL;

        desc = irq_to_desc(irq);
        if (!desc || !irq_settings_can_request(desc) ||
            !irq_settings_is_per_cpu_devid(desc))
                return -EINVAL;

        action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
        if (!action)
                return -ENOMEM;

        action->handler = handler;
        action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
        action->name = devname;
        action->percpu_dev_id = dev_id;

        chip_bus_lock(desc);
        retval = __setup_irq(irq, desc, action);
        chip_bus_sync_unlock(desc);

        if (retval)
                kfree(action);

        return retval;
}
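/*
 * Per-cpu interrupts (e.g. local timers) are requested once and then
 * enabled on each CPU individually, typically from a CPU-hotplug or
 * per-CPU setup path (names below are hypothetical):
 *
 *      err = request_percpu_irq(irq, foo_percpu_handler, "foo", &foo_pcpu);
 *      ...
 *      enable_percpu_irq(irq, IRQ_TYPE_NONE);          on each CPU
 *
 * and symmetrically disable_percpu_irq()/free_percpu_irq() on teardown.
 */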