/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;

static int __init setup_forced_irqthreads(char *arg)
{
        force_irqthreads = true;
        return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif
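
/*
 * Usage note (editorial sketch, not from the original file): forced
 * threading is switched on from the kernel command line, e.g.
 *
 *      linux ... threadirqs
 *
 * after which handlers that do not set IRQF_NO_THREAD run from a
 * thread; see irq_setup_forced_threading() below for the mechanics.
 */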

/**
 *      synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *      @irq: interrupt number to wait for
 *
 *      This function waits for any pending IRQ handlers for this
 *      interrupt to complete before returning. If you use this
 *      function while holding a resource the IRQ handler may need you
 *      will deadlock.
 *
 *      This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        bool inprogress;

        if (!desc)
                return;

        do {
                unsigned long flags;

                /*
                 * Wait until we're out of the critical section.  This
                 * might give the wrong answer due to the lack of
                 * memory barriers.
                 */
                while (irqd_irq_inprogress(&desc->irq_data))
                        cpu_relax();

                /* Ok, that indicated we're done: double-check carefully. */
                raw_spin_lock_irqsave(&desc->lock, flags);
                inprogress = irqd_irq_inprogress(&desc->irq_data);
                raw_spin_unlock_irqrestore(&desc->lock, flags);

                /* Oops, that failed? */
        } while (inprogress);

        /*
         * We made sure that no hardirq handler is running. Now verify
         * that no threaded handlers are active.
         */
        wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
EXPORT_SYMBOL(synchronize_irq);
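
/*
 * Usage sketch (hypothetical driver teardown, not part of this file):
 *
 *      disable_irq_nosync(dev->irq);
 *      synchronize_irq(dev->irq);
 *      free_dev_buffers(dev);          <- safe: no handler runs past here
 */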

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

/**
 *      irq_can_set_affinity - Check if the affinity of a given irq can be set
 *      @irq:           Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc || !irqd_can_balance(&desc->irq_data) ||
            !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
                return 0;

        return 1;
}

/**
 *      irq_set_thread_affinity - Notify irq threads to adjust affinity
 *      @desc:          irq descriptor which has affinity changed
 *
 *      We just set IRQTF_AFFINITY and delegate the affinity setting
 *      to the interrupt thread itself. We can not call
 *      set_cpus_allowed_ptr() here as we hold desc->lock and this
 *      code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
        struct irqaction *action = desc->action;

        while (action) {
                if (action->thread)
                        set_bit(IRQTF_AFFINITY, &action->thread_flags);
                action = action->next;
        }
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
        return irqd_can_move_in_process_context(data);
}
static inline bool irq_move_pending(struct irq_data *data)
{
        return irqd_is_setaffinity_pending(data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
        cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
        cpumask_copy(mask, desc->pending_mask);
}
#else
static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
static inline bool irq_move_pending(struct irq_data *data) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif

int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
{
        struct irq_chip *chip = irq_data_get_irq_chip(data);
        struct irq_desc *desc = irq_data_to_desc(data);
        int ret = 0;

        if (!chip || !chip->irq_set_affinity)
                return -EINVAL;

        if (irq_can_move_pcntxt(data)) {
                ret = chip->irq_set_affinity(data, mask, false);
                switch (ret) {
                case IRQ_SET_MASK_OK:
                        cpumask_copy(data->affinity, mask);
                        /* fall through */
                case IRQ_SET_MASK_OK_NOCOPY:
                        irq_set_thread_affinity(desc);
                        ret = 0;
                }
        } else {
                irqd_set_move_pending(data);
                irq_copy_pending(desc, mask);
        }

        if (desc->affinity_notify) {
                kref_get(&desc->affinity_notify->kref);
                schedule_work(&desc->affinity_notify->work);
        }
        irqd_set(data, IRQD_AFFINITY_SET);

        return ret;
}

/**
 *      irq_set_affinity - Set the irq affinity of a given irq
 *      @irq:           Interrupt to set affinity
 *      @mask:          cpumask
 *
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
        int ret;

        if (!desc)
                return -EINVAL;

        raw_spin_lock_irqsave(&desc->lock, flags);
        ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
}
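
/*
 * Usage sketch (hypothetical, not part of this file): pin an
 * interrupt to CPU 0.
 *
 *      irq_set_affinity(dev->irq, cpumask_of(0));
 */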

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags);

        if (!desc)
                return -EINVAL;
        desc->affinity_hint = m;
        irq_put_desc_unlock(desc, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);

static void irq_affinity_notify(struct work_struct *work)
{
        struct irq_affinity_notify *notify =
                container_of(work, struct irq_affinity_notify, work);
        struct irq_desc *desc = irq_to_desc(notify->irq);
        cpumask_var_t cpumask;
        unsigned long flags;

        if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
                goto out;

        raw_spin_lock_irqsave(&desc->lock, flags);
        if (irq_move_pending(&desc->irq_data))
                irq_get_pending(cpumask, desc);
        else
                cpumask_copy(cpumask, desc->irq_data.affinity);
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        notify->notify(notify, cpumask);

        free_cpumask_var(cpumask);
out:
        kref_put(&notify->kref, notify->release);
}

/**
 *      irq_set_affinity_notifier - control notification of IRQ affinity changes
 *      @irq:           Interrupt for which to enable/disable notification
 *      @notify:        Context for notification, or %NULL to disable
 *                      notification. Function pointers must be initialised;
 *                      the other fields will be initialised by this function.
 *
 *      Must be called in process context. Notification may only be enabled
 *      after the IRQ is allocated and must be disabled before the IRQ is
 *      freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irq_affinity_notify *old_notify;
        unsigned long flags;

        /* The release function is promised process context */
        might_sleep();

        if (!desc)
                return -EINVAL;

        /* Complete initialisation of *notify */
        if (notify) {
                notify->irq = irq;
                kref_init(&notify->kref);
                INIT_WORK(&notify->work, irq_affinity_notify);
        }

        raw_spin_lock_irqsave(&desc->lock, flags);
        old_notify = desc->affinity_notify;
        desc->affinity_notify = notify;
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        if (old_notify)
                kref_put(&old_notify->kref, old_notify->release);

        return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
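
/*
 * Usage sketch (hypothetical driver, not part of this file): get a
 * process-context callback whenever the line's affinity changes.
 *
 *      static void foo_notify(struct irq_affinity_notify *notify,
 *                             const cpumask_t *mask)
 *      {
 *              ...rebalance per-cpu queues...
 *      }
 *
 *      static void foo_release(struct kref *ref) { ... }
 *
 *      dev->notify.notify = foo_notify;
 *      dev->notify.release = foo_release;
 *      irq_set_affinity_notifier(dev->irq, &dev->notify);
 *      ...
 *      irq_set_affinity_notifier(dev->irq, NULL);   (before free_irq())
 */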

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct cpumask *set = irq_default_affinity;
        int ret;

        /* Excludes PER_CPU and NO_BALANCE interrupts */
        if (!irq_can_set_affinity(irq))
                return 0;

        /*
         * Preserve a userspace affinity setup, but make sure that at
         * least one of the targets is online.
         */
        if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
                if (cpumask_intersects(desc->irq_data.affinity,
                                       cpu_online_mask))
                        set = desc->irq_data.affinity;
                else
                        irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
        }

        cpumask_and(mask, cpu_online_mask, set);
        ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
        switch (ret) {
        case IRQ_SET_MASK_OK:
                cpumask_copy(desc->irq_data.affinity, mask);
                /* fall through */
        case IRQ_SET_MASK_OK_NOCOPY:
                irq_set_thread_affinity(desc);
        }
        return 0;
}
#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
{
        return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&desc->lock, flags);
        ret = setup_affinity(irq, desc, mask);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
}

#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
        return 0;
}
#endif

void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
{
        if (suspend) {
                if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
                        return;
                desc->istate |= IRQS_SUSPENDED;
        }

        if (!desc->depth++)
                irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);

        if (!desc)
                return -EINVAL;
        __disable_irq(desc, irq, false);
        irq_put_desc_busunlock(desc, flags);
        return 0;
}

/**
 *      disable_irq_nosync - disable an irq without waiting
 *      @irq: Interrupt to disable
 *
 *      Disable the selected interrupt line. Disables and Enables are
 *      nested.
 *      Unlike disable_irq(), this function does not ensure existing
 *      instances of the IRQ handler have completed before returning.
 *
 *      This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
        __disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 *      disable_irq - disable an irq and wait for completion
 *      @irq: Interrupt to disable
 *
 *      Disable the selected interrupt line. Enables and Disables are
 *      nested.
 *      This function waits for any pending IRQ handlers for this
 *      interrupt to complete before returning. If you use this
 *      function while holding a resource the IRQ handler may need you
 *      will deadlock.
 *
 *      This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
        if (!__disable_irq_nosync(irq))
                synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
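
/*
 * Usage sketch (hypothetical, not part of this file): mask the line
 * around a critical reconfiguration.
 *
 *      disable_irq(dev->irq);          (waits for running handlers)
 *      reprogram_device(dev);
 *      enable_irq(dev->irq);
 */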

void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
        if (resume) {
                if (!(desc->istate & IRQS_SUSPENDED)) {
                        if (!desc->action)
                                return;
                        if (!(desc->action->flags & IRQF_FORCE_RESUME))
                                return;
                        /* Pretend that it got disabled ! */
                        desc->depth++;
                }
                desc->istate &= ~IRQS_SUSPENDED;
        }

        switch (desc->depth) {
        case 0:
 err_out:
                WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
                break;
        case 1: {
                if (desc->istate & IRQS_SUSPENDED)
                        goto err_out;
                /* Prevent probing on this irq: */
                irq_settings_set_noprobe(desc);
                irq_enable(desc);
                check_irq_resend(desc, irq);
                /* fall-through */
        }
        default:
                desc->depth--;
        }
}

/**
 *      enable_irq - enable handling of an irq
 *      @irq: Interrupt to enable
 *
 *      Undoes the effect of one call to disable_irq(). If this
 *      matches the last disable, processing of interrupts on this
 *      IRQ line is re-enabled.
 *
 *      This function may be called from IRQ context only when
 *      desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);

        if (!desc)
                return;
        if (WARN(!desc->irq_data.chip,
                 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
                goto out;

        __enable_irq(desc, irq, false);
out:
        irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int ret = -ENXIO;

        if (desc->irq_data.chip->irq_set_wake)
                ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

        return ret;
}

/**
 *      irq_set_irq_wake - control irq power management wakeup
 *      @irq:   interrupt to control
 *      @on:    enable/disable power management wakeup
 *
 *      Enable/disable power management wakeup mode, which is
 *      disabled by default. Enables and disables must match,
 *      just as they match for non-wakeup mode support.
 *
 *      Wakeup mode lets this IRQ wake the system from sleep
 *      states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
        int ret = 0;

        /*
         * wakeup-capable irqs can be shared between drivers that
         * don't need to have the same sleep mode behaviors.
         */
        if (on) {
                if (desc->wake_depth++ == 0) {
                        ret = set_irq_wake_real(irq, on);
                        if (ret)
                                desc->wake_depth = 0;
                        else
                                irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
                }
        } else {
                if (desc->wake_depth == 0) {
                        WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
                } else if (--desc->wake_depth == 0) {
                        ret = set_irq_wake_real(irq, on);
                        if (ret)
                                desc->wake_depth = 1;
                        else
                                irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
                }
        }
        irq_put_desc_busunlock(desc, flags);
        return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
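
/*
 * Usage sketch (hypothetical wakeup-button driver, not part of this
 * file); enables and disables must nest:
 *
 *      irq_set_irq_wake(dev->irq, 1);  (suspend path)
 *      ...
 *      irq_set_irq_wake(dev->irq, 0);  (resume path)
 */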

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
        int canrequest = 0;

        if (!desc)
                return 0;

        if (irq_settings_can_request(desc)) {
                if (desc->action)
                        if (irqflags & desc->action->flags & IRQF_SHARED)
                                canrequest = 1;
        }
        irq_put_desc_unlock(desc, flags);
        return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
                      unsigned long flags)
{
        struct irq_chip *chip = desc->irq_data.chip;
        int ret, unmask = 0;

        if (!chip || !chip->irq_set_type) {
                /*
                 * IRQF_TRIGGER_* but the PIC does not support multiple
                 * flow-types?
                 */
                pr_debug("No set_type function for IRQ %d (%s)\n", irq,
                         chip ? (chip->name ? : "unknown") : "unknown");
                return 0;
        }

        flags &= IRQ_TYPE_SENSE_MASK;

        if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
                if (!irqd_irq_masked(&desc->irq_data))
                        mask_irq(desc);
                if (!irqd_irq_disabled(&desc->irq_data))
                        unmask = 1;
        }

        /* caller masked out all except trigger mode flags */
        ret = chip->irq_set_type(&desc->irq_data, flags);

        switch (ret) {
        case IRQ_SET_MASK_OK:
                irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
                irqd_set(&desc->irq_data, flags);
                /* fall through */
        case IRQ_SET_MASK_OK_NOCOPY:
                flags = irqd_get_trigger_type(&desc->irq_data);
                irq_settings_set_trigger_mask(desc, flags);
                irqd_clear(&desc->irq_data, IRQD_LEVEL);
                irq_settings_clr_level(desc);
                if (flags & IRQ_TYPE_LEVEL_MASK) {
                        irq_settings_set_level(desc);
                        irqd_set(&desc->irq_data, IRQD_LEVEL);
                }

                ret = 0;
                break;
        default:
                pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
                       flags, irq, chip->irq_set_type);
        }
        if (unmask)
                unmask_irq(desc);
        return ret;
}

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
        return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
        WARN(1, "Primary handler called for nested irq %d\n", irq);
        return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);

                if (test_and_clear_bit(IRQTF_RUNTHREAD,
                                       &action->thread_flags)) {
                        __set_current_state(TASK_RUNNING);
                        return 0;
                }
                schedule();
        }
        return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler finished. unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
                                 struct irqaction *action, bool force)
{
        if (!(desc->istate & IRQS_ONESHOT))
                return;
again:
        chip_bus_lock(desc);
        raw_spin_lock_irq(&desc->lock);

        /*
         * Implausible though it may be we need to protect us against
         * the following scenario:
         *
         * The thread is faster done than the hard interrupt handler
         * on the other CPU. If we unmask the irq line then the
         * interrupt can come in again and masks the line, leaves due
         * to IRQS_INPROGRESS and the irq line is masked forever.
         *
         * This also serializes the state of shared oneshot handlers
         * versus "desc->threads_oneshot |= action->thread_mask;" in
         * irq_wake_thread().
         */
        if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
                raw_spin_unlock_irq(&desc->lock);
                chip_bus_sync_unlock(desc);
                cpu_relax();
                goto again;
        }

        /*
         * Now check again, whether the thread should run. Otherwise
         * we would clear the threads_oneshot bit of this thread which
         * was just set.
         */
        if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
                goto out_unlock;

        desc->threads_oneshot &= ~action->thread_mask;

        if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
            irqd_irq_masked(&desc->irq_data))
                unmask_irq(desc);

out_unlock:
        raw_spin_unlock_irq(&desc->lock);
        chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
        cpumask_var_t mask;

        if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
                return;

        /*
         * In case we are out of memory we set IRQTF_AFFINITY again and
         * try again next time
         */
        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
                set_bit(IRQTF_AFFINITY, &action->thread_flags);
                return;
        }

        raw_spin_lock_irq(&desc->lock);
        cpumask_copy(mask, desc->irq_data.affinity);
        raw_spin_unlock_irq(&desc->lock);

        set_cpus_allowed_ptr(current, mask);
        free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static void
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
        local_bh_disable();
        action->thread_fn(action->irq, action->dev_id);
        irq_finalize_oneshot(desc, action, false);
        local_bh_enable();
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses
 * to complete.
 */
static void irq_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
        action->thread_fn(action->irq, action->dev_id);
        irq_finalize_oneshot(desc, action, false);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
        static const struct sched_param param = {
                .sched_priority = MAX_USER_RT_PRIO/2,
        };
        struct irqaction *action = data;
        struct irq_desc *desc = irq_to_desc(action->irq);
        void (*handler_fn)(struct irq_desc *desc, struct irqaction *action);
        int wake;

        if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
                                         &action->thread_flags))
                handler_fn = irq_forced_thread_fn;
        else
                handler_fn = irq_thread_fn;

        sched_setscheduler(current, SCHED_FIFO, &param);
        current->irqaction = action;

        while (!irq_wait_for_interrupt(action)) {

                irq_thread_check_affinity(desc, action);

                atomic_inc(&desc->threads_active);

                raw_spin_lock_irq(&desc->lock);
                if (unlikely(irqd_irq_disabled(&desc->irq_data))) {
                        /*
                         * CHECKME: We might need a dedicated
                         * IRQ_THREAD_PENDING flag here, which
                         * retriggers the thread in check_irq_resend()
                         * but AFAICT IRQS_PENDING should be fine as it
                         * retriggers the interrupt itself --- tglx
                         */
                        desc->istate |= IRQS_PENDING;
                        raw_spin_unlock_irq(&desc->lock);
                } else {
                        raw_spin_unlock_irq(&desc->lock);
                        handler_fn(desc, action);
                }

                wake = atomic_dec_and_test(&desc->threads_active);

                if (wake && waitqueue_active(&desc->wait_for_threads))
                        wake_up(&desc->wait_for_threads);
        }

        /* Prevent a stale desc->threads_oneshot */
        irq_finalize_oneshot(desc, action, true);

        /*
         * Clear irqaction. Otherwise exit_irq_thread() would make
         * fuzz about an active irq thread going into nirvana.
         */
        current->irqaction = NULL;
        return 0;
}

/*
 * Called from do_exit()
 */
void exit_irq_thread(void)
{
        struct task_struct *tsk = current;
        struct irq_desc *desc;

        if (!tsk->irqaction)
                return;

        printk(KERN_ERR
               "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
               tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);

        desc = irq_to_desc(tsk->irqaction->irq);

        /*
         * Prevent a stale desc->threads_oneshot. Must be called
         * before setting the IRQTF_DIED flag.
         */
        irq_finalize_oneshot(desc, tsk->irqaction, true);

        /*
         * Set the THREAD DIED flag to prevent further wakeups of the
         * soon to be gone threaded handler. Note that IRQTF_DIED is a
         * bit in thread_flags, not in the IRQF_* flags word.
         */
        set_bit(IRQTF_DIED, &tsk->irqaction->thread_flags);
}

static void irq_setup_forced_threading(struct irqaction *new)
{
        if (!force_irqthreads)
                return;
        if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
                return;

        new->flags |= IRQF_ONESHOT;

        if (!new->thread_fn) {
                set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
                new->thread_fn = new->handler;
                new->handler = irq_default_primary_handler;
        }
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
        struct irqaction *old, **old_ptr;
        const char *old_name = NULL;
        unsigned long flags, thread_mask = 0;
        int ret, nested, shared = 0;
        cpumask_var_t mask;

        if (!desc)
                return -EINVAL;

        if (desc->irq_data.chip == &no_irq_chip)
                return -ENOSYS;

        /*
         * Some drivers like serial.c use request_irq() heavily,
         * so we have to be careful not to interfere with a
         * running system.
         */
        if (new->flags & IRQF_SAMPLE_RANDOM) {
                /*
                 * This function might sleep, we want to call it first,
                 * outside of the atomic block.
                 * Yes, this might clear the entropy pool if the wrong
                 * driver is attempted to be loaded, without actually
                 * installing a new handler, but is this really a
                 * problem, only the sysadmin is able to do this.
                 */
                rand_initialize_irq(irq);
        }

        /*
         * Check whether the interrupt nests into another interrupt
         * thread.
         */
        nested = irq_settings_is_nested_thread(desc);
        if (nested) {
                if (!new->thread_fn)
                        return -EINVAL;
                /*
                 * Replace the primary handler which was provided from
                 * the driver for non nested interrupt handling by the
                 * dummy function which warns when called.
                 */
                new->handler = irq_nested_primary_handler;
        } else {
                irq_setup_forced_threading(new);
        }

        /*
         * Create a handler thread when a thread function is supplied
         * and the interrupt does not nest into another interrupt
         * thread.
         */
        if (new->thread_fn && !nested) {
                struct task_struct *t;

                t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
                                   new->name);
                if (IS_ERR(t))
                        return PTR_ERR(t);
                /*
                 * We keep the reference to the task struct even if
                 * the thread dies to avoid that the interrupt code
                 * references an already freed task_struct.
                 */
                get_task_struct(t);
                new->thread = t;
        }

        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto out_thread;
        }

        /*
         * The following block of code has to be executed atomically
         */
        raw_spin_lock_irqsave(&desc->lock, flags);
        old_ptr = &desc->action;
        old = *old_ptr;
        if (old) {
                /*
                 * Can't share interrupts unless both agree to and are
                 * the same type (level, edge, polarity). So both flag
                 * fields must have IRQF_SHARED set and the bits which
                 * set the trigger type must match. Also all must
                 * agree on ONESHOT.
                 */
                if (!((old->flags & new->flags) & IRQF_SHARED) ||
                    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
                    ((old->flags ^ new->flags) & IRQF_ONESHOT)) {
                        old_name = old->name;
                        goto mismatch;
                }

                /* All handlers must agree on per-cpuness */
                if ((old->flags & IRQF_PERCPU) !=
                    (new->flags & IRQF_PERCPU))
                        goto mismatch;

                /* add new interrupt at end of irq queue */
                do {
                        thread_mask |= old->thread_mask;
                        old_ptr = &old->next;
                        old = *old_ptr;
                } while (old);
                shared = 1;
        }

        /*
         * Setup the thread mask for this irqaction. Unlikely to have
         * 32 resp 64 irqs sharing one line, but who knows.
         */
        if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
                ret = -EBUSY;
                goto out_mask;
        }
        new->thread_mask = 1 << ffz(thread_mask);

        if (!shared) {
                init_waitqueue_head(&desc->wait_for_threads);

                /* Setup the type (level, edge polarity) if configured: */
                if (new->flags & IRQF_TRIGGER_MASK) {
                        ret = __irq_set_trigger(desc, irq,
                                        new->flags & IRQF_TRIGGER_MASK);

                        if (ret)
                                goto out_mask;
                }

                desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
                                  IRQS_ONESHOT | IRQS_WAITING);
                irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

                if (new->flags & IRQF_PERCPU) {
                        irqd_set(&desc->irq_data, IRQD_PER_CPU);
                        irq_settings_set_per_cpu(desc);
                }

                if (new->flags & IRQF_ONESHOT)
                        desc->istate |= IRQS_ONESHOT;

                if (irq_settings_can_autoenable(desc))
                        irq_startup(desc);
                else
                        /* Undo nested disables: */
                        desc->depth = 1;

                /* Exclude IRQ from balancing if requested */
                if (new->flags & IRQF_NOBALANCING) {
                        irq_settings_set_no_balancing(desc);
                        irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
                }

                /* Set default affinity mask once everything is setup */
                setup_affinity(irq, desc, mask);

        } else if (new->flags & IRQF_TRIGGER_MASK) {
                unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
                unsigned int omsk = irq_settings_get_trigger_mask(desc);

                if (nmsk != omsk)
                        /* hope the handler works with current trigger mode */
                        pr_warning("IRQ %d uses trigger mode %u; requested %u\n",
                                   irq, nmsk, omsk);
        }

        new->irq = irq;
        *old_ptr = new;

        /* Reset broken irq detection when installing new handler */
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;

        /*
         * Check whether we disabled the irq via the spurious handler
         * before. Reenable it and give it another chance.
         */
        if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
                desc->istate &= ~IRQS_SPURIOUS_DISABLED;
                __enable_irq(desc, irq, false);
        }

        raw_spin_unlock_irqrestore(&desc->lock, flags);

        /*
         * Strictly no need to wake it up, but hung_task complains
         * when no hard interrupt wakes the thread up.
         */
        if (new->thread)
                wake_up_process(new->thread);

        register_irq_proc(irq, desc);
        new->dir = NULL;
        register_handler_proc(irq, new);
        free_cpumask_var(mask);

        return 0;

mismatch:
#ifdef CONFIG_DEBUG_SHIRQ
        if (!(new->flags & IRQF_PROBE_SHARED)) {
                printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
                if (old_name)
                        printk(KERN_ERR "current handler: %s\n", old_name);
                dump_stack();
        }
#endif
        ret = -EBUSY;

out_mask:
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        free_cpumask_var(mask);

out_thread:
        if (new->thread) {
                struct task_struct *t = new->thread;

                new->thread = NULL;
                if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
                        kthread_stop(t);
                put_task_struct(t);
        }
        return ret;
}

/**
 *      setup_irq - setup an interrupt
 *      @irq: Interrupt line to setup
 *      @act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
        int retval;
        struct irq_desc *desc = irq_to_desc(irq);

        chip_bus_lock(desc);
        retval = __setup_irq(irq, desc, act);
        chip_bus_sync_unlock(desc);

        return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);
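
/*
 * Usage sketch (hypothetical early-boot arch code, not part of this
 * file): legacy interrupts are wired up with a static irqaction
 * because the allocator is not available yet.
 *
 *      static struct irqaction timer_irqaction = {
 *              .handler = timer_interrupt,
 *              .flags   = IRQF_DISABLED | IRQF_TIMER,
 *              .name    = "timer",
 *      };
 *
 *      setup_irq(0, &timer_irqaction);
 */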

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action, **action_ptr;
        unsigned long flags;

        WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

        if (!desc)
                return NULL;

        raw_spin_lock_irqsave(&desc->lock, flags);

        /*
         * There can be multiple actions per IRQ descriptor, find the
         * right one based on the dev_id:
         */
        action_ptr = &desc->action;
        for (;;) {
                action = *action_ptr;

                if (!action) {
                        WARN(1, "Trying to free already-free IRQ %d\n", irq);
                        raw_spin_unlock_irqrestore(&desc->lock, flags);

                        return NULL;
                }

                if (action->dev_id == dev_id)
                        break;
                action_ptr = &action->next;
        }

        /* Found it - now remove it from the list of entries: */
        *action_ptr = action->next;

        /* Currently used only by UML, might disappear one day: */
#ifdef CONFIG_IRQ_RELEASE_METHOD
        if (desc->irq_data.chip->release)
                desc->irq_data.chip->release(irq, dev_id);
#endif

        /* If this was the last handler, shut down the IRQ line: */
        if (!desc->action)
                irq_shutdown(desc);

#ifdef CONFIG_SMP
        /* make sure affinity_hint is cleaned up */
        if (WARN_ON_ONCE(desc->affinity_hint))
                desc->affinity_hint = NULL;
#endif

        raw_spin_unlock_irqrestore(&desc->lock, flags);

        unregister_handler_proc(irq, action);

        /* Make sure it's not being used on another CPU: */
        synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
        /*
         * It's a shared IRQ -- the driver ought to be prepared for an
         * IRQ event to happen even now it's being freed, so let's make
         * sure that is so by doing an extra call to the handler ....
         *
         * ( We do this after actually deregistering it, to make sure
         *   that a 'real' IRQ doesn't run in parallel with our fake. )
         */
        if (action->flags & IRQF_SHARED) {
                local_irq_save(flags);
                action->handler(irq, dev_id);
                local_irq_restore(flags);
        }
#endif

        if (action->thread) {
                if (!test_bit(IRQTF_DIED, &action->thread_flags))
                        kthread_stop(action->thread);
                put_task_struct(action->thread);
        }

        return action;
}

/**
 *      remove_irq - free an interrupt
 *      @irq: Interrupt line to free
 *      @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
        __free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 *      free_irq - free an interrupt allocated with request_irq
 *      @irq: Interrupt line to free
 *      @dev_id: Device identity to free
 *
 *      Remove an interrupt handler. The handler is removed and if the
 *      interrupt line is no longer in use by any driver it is disabled.
 *      On a shared IRQ the caller must ensure the interrupt is disabled
 *      on the card it drives before calling this function. The function
 *      does not return until any executing interrupts for this IRQ
 *      have completed.
 *
 *      This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc)
                return;

#ifdef CONFIG_SMP
        if (WARN_ON(desc->affinity_notify))
                desc->affinity_notify = NULL;
#endif

        chip_bus_lock(desc);
        kfree(__free_irq(irq, dev_id));
        chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(free_irq);

/**
 *      request_threaded_irq - allocate an interrupt line
 *      @irq: Interrupt line to allocate
 *      @handler: Function to be called when the IRQ occurs.
 *                Primary handler for threaded interrupts.
 *                If NULL and thread_fn != NULL the default
 *                primary handler is installed.
 *      @thread_fn: Function called from the irq handler thread.
 *                  If NULL, no irq thread is created.
 *      @irqflags: Interrupt type flags
 *      @devname: An ascii name for the claiming device
 *      @dev_id: A cookie passed back to the handler function
 *
 *      This call allocates interrupt resources and enables the
 *      interrupt line and IRQ handling. From the point this
 *      call is made your handler function may be invoked. Since
 *      your handler function must clear any interrupt the board
 *      raises, you must take care both to initialise your hardware
 *      and to set up the interrupt handler in the right order.
 *
 *      If you want to set up a threaded irq handler for your device
 *      then you need to supply @handler and @thread_fn. @handler is
 *      still called in hard interrupt context and has to check
 *      whether the interrupt originates from the device. If yes it
 *      needs to disable the interrupt on the device and return
 *      IRQ_WAKE_THREAD which will wake up the handler thread and run
 *      @thread_fn. This split handler design is necessary to support
 *      shared interrupts.
 *
 *      Dev_id must be globally unique. Normally the address of the
 *      device data structure is used as the cookie. Since the handler
 *      receives this value it makes sense to use it.
 *
 *      If your interrupt is shared you must pass a non NULL dev_id
 *      as this is required when freeing the interrupt.
 *
 *      Flags:
 *
 *      IRQF_SHARED             Interrupt is shared
 *      IRQF_SAMPLE_RANDOM      The interrupt can be used for entropy
 *      IRQF_TRIGGER_*          Specify active edge(s) or level
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
                         irq_handler_t thread_fn, unsigned long irqflags,
                         const char *devname, void *dev_id)
{
        struct irqaction *action;
        struct irq_desc *desc;
        int retval;

        /*
         * Sanity-check: shared interrupts must pass in a real dev-ID,
         * otherwise we'll have trouble later trying to figure out
         * which interrupt is which (messes up the interrupt freeing
         * logic etc).
         */
        if ((irqflags & IRQF_SHARED) && !dev_id)
                return -EINVAL;

        desc = irq_to_desc(irq);
        if (!desc)
                return -EINVAL;

        if (!irq_settings_can_request(desc))
                return -EINVAL;

        if (!handler) {
                if (!thread_fn)
                        return -EINVAL;
                handler = irq_default_primary_handler;
        }

        action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
        if (!action)
                return -ENOMEM;

        action->handler = handler;
        action->thread_fn = thread_fn;
        action->flags = irqflags;
        action->name = devname;
        action->dev_id = dev_id;

        chip_bus_lock(desc);
        retval = __setup_irq(irq, desc, action);
        chip_bus_sync_unlock(desc);

        if (retval)
                kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
        if (!retval && (irqflags & IRQF_SHARED)) {
                /*
                 * It's a shared IRQ -- the driver ought to be
                 * prepared for it to happen immediately, so let's
                 * make sure....
                 * We disable the irq to make sure that a 'real' IRQ
                 * doesn't run in parallel with our fake.
                 */
                unsigned long flags;

                disable_irq(irq);
                local_irq_save(flags);

                handler(irq, dev_id);

                local_irq_restore(flags);
                enable_irq(irq);
        }
#endif
        return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
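
/*
 * Usage sketch (a hypothetical driver, not part of this file): the
 * primary handler checks and quiesces the device in hard irq context,
 * the threaded handler does the slow work in process context.
 *
 *      static irqreturn_t foo_quick_check(int irq, void *dev_id)
 *      {
 *              struct foo_dev *foo = dev_id;
 *
 *              if (!foo_irq_raised(foo))
 *                      return IRQ_NONE;        (shared line, not ours)
 *              foo_mask_device_irq(foo);
 *              return IRQ_WAKE_THREAD;
 *      }
 *
 *      static irqreturn_t foo_work(int irq, void *dev_id)
 *      {
 *              struct foo_dev *foo = dev_id;
 *
 *              foo_process_events(foo);        (may sleep here)
 *              foo_unmask_device_irq(foo);
 *              return IRQ_HANDLED;
 *      }
 *
 *      ret = request_threaded_irq(foo->irq, foo_quick_check, foo_work,
 *                                 IRQF_SHARED, "foo", foo);
 *      ...
 *      free_irq(foo->irq, foo);
 */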

/**
 *      request_any_context_irq - allocate an interrupt line
 *      @irq: Interrupt line to allocate
 *      @handler: Function to be called when the IRQ occurs.
 *                Threaded handler for threaded interrupts.
 *      @flags: Interrupt type flags
 *      @name: An ascii name for the claiming device
 *      @dev_id: A cookie passed back to the handler function
 *
 *      This call allocates interrupt resources and enables the
 *      interrupt line and IRQ handling. It selects either a
 *      hardirq or threaded handling method depending on the
 *      context.
 *
 *      On failure, it returns a negative value. On success,
 *      it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
                            unsigned long flags, const char *name, void *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int ret;

        if (!desc)
                return -EINVAL;

        if (irq_settings_is_nested_thread(desc)) {
                ret = request_threaded_irq(irq, NULL, handler,
                                           flags, name, dev_id);
                return !ret ? IRQC_IS_NESTED : ret;
        }

        ret = request_irq(irq, handler, flags, name, dev_id);
        return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
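
/*
 * Usage sketch (hypothetical, not part of this file): useful when the
 * same driver may sit behind either a hardirq-safe parent or a nested
 * threaded one (e.g. an irq demultiplexed by a slow-bus gpio expander).
 *
 *      ret = request_any_context_irq(gpio_to_irq(pin), button_isr,
 *                                    IRQF_TRIGGER_FALLING, "button", bdev);
 *      if (ret < 0)
 *              goto err;
 */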