/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */
#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/task_work.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

static void __synchronize_hardirq(struct irq_desc *desc)
{
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section.  This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 *	synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending hard IRQ handlers for this
 *	interrupt to complete before returning. If you use this
 *	function while holding a resource the IRQ handler may need you
 *	will deadlock. It does not take associated threaded handlers
 *	into account.
 *
 *	Do not use this for shutdown scenarios where you must be sure
 *	that all parts (hardirq and threaded handler) have completed.
 *
 *	Returns: false if a threaded handler is active.
 *
 *	This function may be called - with care - from IRQ context.
 */
bool synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		return !atomic_read(&desc->threads_active);
	}

	return true;
}
EXPORT_SYMBOL(synchronize_hardirq);

/**
 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function must not be called from IRQ context: it can sleep
 *	waiting for threaded handlers to finish.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);
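
/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * tearing down state that its handlers touch must first stop the
 * interrupt source, then wait for all handlers, hard and threaded, to
 * finish. The foo_dev structure and foo_stop_hw() are hypothetical.
 *
 *	static void foo_shutdown(struct foo_dev *foo)
 *	{
 *		foo_stop_hw(foo);		// no new interrupts raised
 *		synchronize_irq(foo->irq);	// wait out running handlers
 *		kfree(foo->ring);		// now safe to free
 *	}
 *
 * synchronize_irq() may sleep in wait_event() above, so this only
 * works from process context.
 */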

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

static bool __irq_can_set_affinity(struct irq_desc *desc)
{
	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return false;
	return true;
}

/**
 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 *	@irq:		Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	return __irq_can_set_affinity(irq_to_desc(irq));
}

/**
 * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
 * @irq:	Interrupt to check
 *
 * Like irq_can_set_affinity() above, but additionally checks that the
 * affinity is not managed by the kernel.
 */
bool irq_can_set_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __irq_can_set_affinity(desc) &&
		!irqd_affinity_is_managed(&desc->irq_data);
}

/**
 *	irq_set_thread_affinity - Notify irq threads to adjust affinity
 *	@desc:		irq descriptor which has affinity changed
 *
 *	We just set IRQTF_AFFINITY and delegate the affinity setting
 *	to the interrupt thread itself. We can not call
 *	set_cpus_allowed_ptr() here as we hold desc->lock and this
 *	code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action;

	for_each_action_of_desc(desc, action)
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return irqd_can_move_in_process_context(data);
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return irqd_is_setaffinity_pending(data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
#else
static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
static inline bool irq_move_pending(struct irq_data *data) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	ret = chip->irq_set_affinity(data, mask, force);
	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(desc->irq_common_data.affinity, mask);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data)) {
		ret = irq_do_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	/* set the initial affinity to avoid every interrupt being on CPU0 */
	if (m)
		__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
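
/*
 * Usage sketch (illustrative only): a multiqueue driver hinting one
 * vector per CPU so userspace irqbalance can follow the queues. The
 * foo->msix[] array and nvec are hypothetical driver state.
 *
 *	for (i = 0; i < nvec; i++)
 *		irq_set_affinity_hint(foo->msix[i].vector,
 *				      cpumask_of(i % num_online_cpus()));
 *
 * The hint must be reset to NULL before the vectors are freed,
 * otherwise __free_irq() warns about a dangling affinity_hint.
 */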

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_common_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 *	irq_set_affinity_notifier - control notification of IRQ affinity changes
 *	@irq:		Interrupt for which to enable/disable notification
 *	@notify:	Context for notification, or %NULL to disable
 *			notification.  Function pointers must be initialised;
 *			the other fields will be initialised by this function.
 *
 *	Must be called in process context.  Notification may only be enabled
 *	after the IRQ is allocated and must be disabled before the IRQ is
 *	freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
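
/*
 * Usage sketch (illustrative only): a caller embeds struct
 * irq_affinity_notify, supplies the two callbacks and manages lifetime
 * through the release hook. All foo_* names are hypothetical.
 *
 *	static void foo_notify(struct irq_affinity_notify *notify,
 *			       const cpumask_t *mask)
 *	{
 *		struct foo_dev *foo = container_of(notify, struct foo_dev,
 *						   affinity_notify);
 *		// re-steer queues/DMA to the new CPU mask
 *	}
 *
 *	static void foo_release(struct kref *ref)
 *	{
 *		// last reference to the embedded notify dropped
 *	}
 *
 *	foo->affinity_notify.notify = foo_notify;
 *	foo->affinity_notify.release = foo_release;
 *	irq_set_affinity_notifier(foo->irq, &foo->affinity_notify);
 *
 * Pass notify == NULL to unregister; this must happen before free_irq().
 */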

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int setup_affinity(struct irq_desc *desc, struct cpumask *mask)
{
	struct cpumask *set = irq_default_affinity;
	int node = irq_desc_get_node(desc);

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!__irq_can_set_affinity(desc))
		return 0;

	/*
	 * Preserve the managed affinity setting and a userspace affinity
	 * setup, but make sure that one of the targets is online.
	 */
	if (irqd_affinity_is_managed(&desc->irq_data) ||
	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_common_data.affinity,
				       cpu_online_mask))
			set = desc->irq_common_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(mask, nodemask))
			cpumask_and(mask, mask, nodemask);
	}
	irq_do_set_affinity(&desc->irq_data, mask, false);
	return 0;
}
#else
/* Wrapper for ALPHA specific affinity selector magic */
static inline int setup_affinity(struct irq_desc *d, struct cpumask *mask)
{
	return irq_select_affinity(irq_desc_get_irq(d));
}
#endif

/*
 * Called when a bogus affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(desc, mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

#else
static inline int
setup_affinity(struct irq_desc *desc, struct cpumask *mask)
{
	return 0;
}
#endif

/**
 *	irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 *	@irq: interrupt number to set affinity
 *	@vcpu_info: vCPU specific data
 *
 *	This function uses the vCPU specific data to set the vCPU
 *	affinity for an irq. The vCPU specific data is passed from
 *	outside, such as KVM. One example code path is as below:
 *	KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	chip = irq_data_get_irq_chip(data);
	if (chip && chip->irq_set_vcpu_affinity)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);

void __disable_irq(struct irq_desc *desc)
{
	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and Enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function must not be called from IRQ context, since it can
 *	sleep waiting for a threaded handler to finish.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

/**
 *	disable_hardirq - disables an irq and waits for hardirq completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending hard IRQ handlers for this
 *	interrupt to complete before returning. If you use this function while
 *	holding a resource the hard IRQ handler may need you will deadlock.
 *	It does not take associated threaded handlers into account.
 *
 *	Do not use this for shutdown scenarios where you must be sure
 *	that all parts (hardirq and threaded handler) have completed.
 *
 *	Returns: false if a threaded handler is active.
 *
 *	This function may be called - with care - from IRQ context.
 */
bool disable_hardirq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		return synchronize_hardirq(irq);

	return false;
}
EXPORT_SYMBOL_GPL(disable_hardirq);
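
/*
 * Usage sketch (illustrative only): disable_hardirq() suits callers
 * that cannot wait on threaded handlers the way disable_irq() does,
 * e.g. a netpoll-style path. foo_poll_controller() is hypothetical.
 *
 *	if (disable_hardirq(irq))
 *		foo_poll_controller(dev);	// hardirq handler quiesced
 *	enable_irq(irq);
 *
 * A true return means the hard IRQ handler has finished and no
 * threaded handler was active at that point.
 */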

void __enable_irq(struct irq_desc *desc)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
		     irq_desc_get_irq(desc));
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		irq_enable(desc);
		check_irq_resend(desc);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}

/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq().  If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context only when
 *	desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 *	irq_set_irq_wake - control irq power management wakeup
 *	@irq:	interrupt to control
 *	@on:	enable/disable power management wakeup
 *
 *	Enable/disable power management wakeup mode, which is
 *	disabled by default.  Enables and disables must match,
 *	just as they match for non-wakeup mode support.
 *
 *	Wakeup mode lets this IRQ wake the system from sleep
 *	states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/*
	 * wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
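
/*
 * Usage sketch (illustrative only): arming an interrupt as a wakeup
 * source from a driver's suspend callback and disarming it on resume,
 * keeping the enable/disable counts balanced. foo_* names are
 * hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			irq_set_irq_wake(foo->irq, 1);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			irq_set_irq_wake(foo->irq, 0);
 *		return 0;
 *	}
 */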

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n",
			 irq_desc_get_irq(desc),
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* Mask all flags except trigger mode */
	flags &= IRQ_TYPE_SENSE_MASK;
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_parent);
#endif

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
{
	WARN(1, "Secondary action handler called for irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler finished. unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT) ||
	    action->handler == irq_forced_secondary_handler)
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be we need to protect us against
	 * the following scenario:
	 *
	 * The thread is faster done than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and masks the line, leaves due
	 * to IRQS_INPROGRESS and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (desc->irq_common_data.affinity)
		cpumask_copy(mask, desc->irq_common_data.affinity);
	else
		valid = false;
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}

static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
{
	struct irqaction *secondary = action->secondary;

	if (WARN_ON_ONCE(!secondary))
		return;

	raw_spin_lock_irq(&desc->lock);
	__irq_wake_thread(desc, secondary);
	raw_spin_unlock_irq(&desc->lock);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
				  struct irqaction *action);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					 &action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, false);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_HANDLED)
			atomic_inc(&desc->threads_handled);
		if (action_ret == IRQ_WAKE_THREAD)
			irq_wake_secondary(desc, action);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set. We cannot verify that as we
	 * cannot touch the oneshot mask at this point anymore as
	 * __setup_irq() might have given out currents thread_mask
	 * again.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}

/**
 *	irq_wake_thread - wake the irq thread for the action identified by dev_id
 *	@irq:		Interrupt line
 *	@dev_id:	Device identity for which the thread should be woken
 *
 */
void irq_wake_thread(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if (action->dev_id == dev_id) {
			if (action->thread)
				__irq_wake_thread(desc, action);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);
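
/*
 * Usage sketch (illustrative only): a driver that notices stalled work
 * (say, from a watchdog) can kick its own threaded handler without a
 * hardware interrupt. foo_ring_has_work() and foo are hypothetical;
 * dev_id must be the same cookie that was passed to
 * request_threaded_irq().
 *
 *	if (foo_ring_has_work(foo))
 *		irq_wake_thread(foo->irq, foo);
 */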

static int irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return 0;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return 0;

	new->flags |= IRQF_ONESHOT;

	/*
	 * Handle the case where we have a real primary handler and a
	 * thread handler. We force thread them as well by creating a
	 * secondary action.
	 */
	if (new->handler != irq_default_primary_handler && new->thread_fn) {
		/* Allocate the secondary action */
		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
		if (!new->secondary)
			return -ENOMEM;
		new->secondary->handler = irq_forced_secondary_handler;
		new->secondary->thread_fn = new->thread_fn;
		new->secondary->dev_id = new->dev_id;
		new->secondary->irq = new->irq;
		new->secondary->name = new->name;
	}

	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
	new->thread_fn = new->handler;
	new->handler = irq_default_primary_handler;
	return 0;
}

static int irq_request_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
}

static void irq_release_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	if (c->irq_release_resources)
		c->irq_release_resources(d);
}

static int
setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
{
	struct task_struct *t;
	struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};

	if (!secondary) {
		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
	} else {
		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
				   new->name);
		param.sched_priority -= 1;
	}

	if (IS_ERR(t))
		return PTR_ERR(t);

	sched_setscheduler_nocheck(t, SCHED_FIFO, &param);

	/*
	 * We keep the reference to the task struct even if
	 * the thread dies to avoid that the interrupt code
	 * references an already freed task_struct.
	 */
	get_task_struct(t);
	new->thread = t;
	/*
	 * Tell the thread to set its affinity. This is
	 * important for shared interrupt handlers as we do
	 * not invoke setup_affinity() for the secondary
	 * handlers as everything might be affinity controlled
	 * by the previous handler. The thread adjusts its own
	 * affinity when it sees the IRQTF_AFFINITY flag.
	 */
	set_bit(IRQTF_AFFINITY, &new->thread_flags);
	return 0;
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;
	cpumask_var_t mask;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;

	new->irq = irq;

	/*
	 * If the trigger type is not specified by the caller,
	 * then use the default for this interrupt.
	 */
	if (!(new->flags & IRQF_TRIGGER_MASK))
		new->flags |= irqd_get_trigger_type(&desc->irq_data);

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc)) {
			ret = irq_setup_forced_threading(new);
			if (ret)
				goto out_mput;
		}
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		ret = setup_irq_thread(new, irq, false);
		if (ret)
			goto out_mput;
		if (new->secondary) {
			ret = setup_irq_thread(new->secondary, irq, true);
			if (ret)
				goto out_thread;
		}
	}

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto out_thread;
	}

	/*
	 * Drivers are often written to work w/o knowledge about the
	 * underlying irq chip implementation, so a request for a
	 * threaded irq without a primary hard irq context handler
	 * requires the ONESHOT flag to be set. Some irq chips like
	 * MSI based interrupts are per se one shot safe. Check the
	 * chip flags, so we can avoid the unmask dance at the end of
	 * the threaded handler for those.
	 */
	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
		new->flags &= ~IRQF_ONESHOT;

	/*
	 * The following block of code has to be executed atomically
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
			goto mismatch;

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 resp 64 irqs sharing one line,
		 * but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_mask;
		}
		/*
		 * The thread_mask bit of an action is or'ed into
		 * desc->threads_oneshot to indicate that its ONESHOT
		 * thread handler has been woken, but has not finished
		 * yet. The bit is dropped when the thread function did
		 * its job and the unmask was done in
		 * irq_finalize_oneshot(). A shared oneshot line is
		 * only unmasked when all bits in threads_oneshot are
		 * cleared, so pick the first zero bit in the or'ed
		 * mask of all existing actions as the thread_mask of
		 * the new action.
		 */
		new->thread_mask = 1 << ffz(thread_mask);

	} else if (new->handler == irq_default_primary_handler &&
		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is reenabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
		       irq);
		ret = -EINVAL;
		goto out_mask;
	}

	if (!shared) {
		ret = irq_request_resources(desc);
		if (ret) {
			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
			       new->name, irq, desc->irq_data.chip->name);
			goto out_mask;
		}

		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc,
						new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_mask;
		}

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		if (irq_settings_can_autoenable(desc))
			irq_startup(desc, true);
		else
			/* Undo nested disables: */
			desc->depth = 1;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		/* Set default affinity mask once everything is setup */
		setup_affinity(desc, mask);

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warn("irq %d uses trigger mode %u; requested %u\n",
				irq, omsk, nmsk);
	}

	*old_ptr = new;

	irq_pm_install_action(desc, new);

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	/*
	 * Strictly no need to wake the threads here, but hung_task
	 * complains about kthreads which are never woken up.
	 */
	if (new->thread)
		wake_up_process(new->thread);
	if (new->secondary)
		wake_up_process(new->secondary->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	free_cpumask_var(mask);

	return 0;

mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
		       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
	}
	ret = -EBUSY;

out_mask:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	free_cpumask_var(mask);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
	if (new->secondary && new->secondary->thread) {
		struct task_struct *t = new->secondary->thread;

		new->secondary->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}

/**
 *	setup_irq - setup an interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		return retval;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	if (retval)
		irq_chip_pm_put(&desc->irq_data);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);
			chip_bus_sync_unlock(desc);
			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	irq_pm_remove_action(desc, action);

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		irq_settings_clr_disable_unlazy(desc);
		irq_shutdown(desc);
		irq_release_resources(desc);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
		if (action->secondary && action->secondary->thread) {
			kthread_stop(action->secondary->thread);
			put_task_struct(action->secondary->thread);
		}
	}

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	kfree(action->secondary);
	return action;
}

/**
 *	remove_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		__free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 *	free_irq - free an interrupt allocated with request_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	kfree(__free_irq(irq, dev_id));
}
EXPORT_SYMBOL(free_irq);

/**
 *	request_threaded_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Primary handler for threaded interrupts
 *		  If NULL and thread_fn != NULL the default
 *		  primary handler is installed
 *	@thread_fn: Function called from the irq handler thread
 *		    If NULL, no irq thread is created
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	If you want to set up a threaded irq handler for your device
 *	then you need to supply @handler and @thread_fn. @handler is
 *	still called in hard interrupt context and has to check
 *	whether the interrupt originates from the device. If yes it
 *	needs to disable the interrupt on the device and return
 *	IRQ_WAKE_THREAD which will wake up the handler thread and run
 *	@thread_fn. This split handler design is necessary to support
 *	shared interrupts.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 *
	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
	 * it cannot be set along with IRQF_NO_SUSPEND.
	 */
	if (((irqflags & IRQF_SHARED) && !dev_id) ||
	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0) {
		kfree(action);
		return retval;
	}

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval) {
		irq_chip_pm_put(&desc->irq_data);
		kfree(action->secondary);
		kfree(action);
	}

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
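
/*
 * Usage sketch (illustrative only): the canonical split handler. The
 * primary handler runs in hard interrupt context, quiets the device
 * and defers the slow work; the thread function may sleep (e.g. for
 * I2C access). All foo_* names are hypothetical.
 *
 *	static irqreturn_t foo_quick_check(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))
 *			return IRQ_NONE;	// shared line, not ours
 *		foo_mask_device_irq(foo);
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t foo_thread(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		foo_process_events(foo);	// may sleep here
 *		foo_unmask_device_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(foo->irq, foo_quick_check, foo_thread,
 *				   IRQF_SHARED, "foo", foo);
 *
 * Passing handler == NULL instead installs irq_default_primary_handler
 * and then requires IRQF_ONESHOT, as enforced in __setup_irq().
 */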

/**
 *	request_any_context_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Threaded handler for threaded interrupts.
 *	@flags: Interrupt type flags
 *	@name: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. It selects either a
 *	hardirq or threaded handling method depending on the
 *	context.
 *
 *	On failure, it returns a negative value. On success,
 *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc;
	int ret;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
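
/*
 * Usage sketch (illustrative only): request_any_context_irq() for a
 * driver that can sit behind either a plain irqchip or a slow-bus
 * (e.g. I2C) GPIO expander whose interrupts are nested threads:
 *
 *	ret = request_any_context_irq(irq, foo_handler, IRQF_TRIGGER_LOW,
 *				      "foo", foo);
 *	if (ret < 0)
 *		return ret;
 *	foo->hardirq = (ret == IRQC_IS_HARDIRQ);	// context it got
 *
 * foo_handler must therefore be written to be safe in both contexts.
 */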

void enable_percpu_irq(unsigned int irq, unsigned int type)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	/*
	 * If the trigger type is not specified by the caller, then
	 * use the default for this interrupt.
	 */
	type &= IRQ_TYPE_SENSE_MASK;
	if (type == IRQ_TYPE_NONE)
		type = irqd_get_trigger_type(&desc->irq_data);

	if (type != IRQ_TYPE_NONE) {
		int ret;

		ret = __irq_set_trigger(desc, type);

		if (ret) {
			WARN(1, "failed to set type for IRQ%d\n", irq);
			goto out;
		}
	}

	irq_percpu_enable(desc, cpu);
out:
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(enable_percpu_irq);

/**
 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
 * @irq:	Linux irq number to check for
 *
 * Must be called from a non migratable context. Returns the enable
 * state of a per cpu interrupt on the current cpu.
 */
bool irq_percpu_is_enabled(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	struct irq_desc *desc;
	unsigned long flags;
	bool is_enabled;

	desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return false;

	is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
	irq_put_desc_unlock(desc, flags);

	return is_enabled;
}
EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);

void disable_percpu_irq(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	irq_percpu_disable(desc, cpu);
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(disable_percpu_irq);

/*
 * Internal function to unregister a percpu irqaction.
 */
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	action = desc->action;
	if (!action || action->percpu_dev_id != dev_id) {
		WARN(1, "Trying to free already-free IRQ %d\n", irq);
		goto bad;
	}

	if (!cpumask_empty(desc->percpu_enabled)) {
		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
		     irq, cpumask_first(desc->percpu_enabled));
		goto bad;
	}

	/* Found it - now remove it from the list of entries: */
	desc->action = NULL;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	return action;

bad:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return NULL;
}

/**
 *	remove_percpu_irq - free a per-cpu interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 *	Used to remove interrupts statically setup by the early boot process.
 */
void remove_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && irq_settings_is_per_cpu_devid(desc))
		__free_percpu_irq(irq, act->percpu_dev_id);
}

/**
 *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove a percpu interrupt handler. The handler is removed, but
 *	the interrupt line is not disabled. This must be done on each
 *	CPU before calling this function. The function does not return
 *	until any executing interrupts for this IRQ have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	chip_bus_lock(desc);
	kfree(__free_percpu_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL_GPL(free_percpu_irq);

/**
 *	setup_percpu_irq - setup a per-cpu interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 * Used to statically setup per-cpu interrupts in the early boot process.
 */
int setup_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int retval;

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		return retval;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	if (retval)
		irq_chip_pm_put(&desc->irq_data);

	return retval;
}

/**
 *	request_percpu_irq - allocate a percpu interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A percpu cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt on the local CPU. If the interrupt is supposed to be
 *	enabled on other CPUs, it has to be done on each CPU using
 *	enable_percpu_irq().
 *
 *	Dev_id must be globally unique. It is a percpu variable, and
 *	the handler gets called with the interrupted CPU's instance of
 *	that variable.
 */
int request_percpu_irq(unsigned int irq, irq_handler_t handler,
		       const char *devname, void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (!dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
	action->name = devname;
	action->percpu_dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0) {
		kfree(action);
		return retval;
	}

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval) {
		irq_chip_pm_put(&desc->irq_data);
		kfree(action);
	}

	return retval;
}
EXPORT_SYMBOL_GPL(request_percpu_irq);
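
/*
 * Usage sketch (illustrative only): per-CPU interrupts (timers, IPIs)
 * pass a percpu pointer as dev_id and each CPU enables its own copy;
 * the foo_* names are hypothetical.
 *
 *	static DEFINE_PER_CPU(struct foo_pcpu, foo_pcpu);
 *
 *	ret = request_percpu_irq(irq, foo_pcpu_handler, "foo", &foo_pcpu);
 *
 *	// later, on each CPU (e.g. from a hotplug callback):
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 *	...
 *	disable_percpu_irq(irq);
 *
 * The handler runs with the interrupted CPU's instance of foo_pcpu as
 * its dev_id.
 */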

/**
 *	irq_get_irqchip_state - returns the irqchip state of a interrupt.
 *	@irq: Interrupt line that is forwarded to a VM
 *	@which: One of IRQCHIP_STATE_* the caller wants to know about
 *	@state: a pointer to a boolean where the state is to be stored
 *
 *	This call snapshots the internal irqchip state of an
 *	interrupt, returning into @state the bit corresponding to
 *	stage @which
 *
 *	This function should be called with preemption disabled if the
 *	interrupt controller has per-cpu registers.
 */
int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool *state)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	do {
		chip = irq_data_get_irq_chip(data);
		if (chip->irq_get_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_get_irqchip_state(data, which, state);

	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_get_irqchip_state);

/**
 *	irq_set_irqchip_state - set the state of a forwarded interrupt.
 *	@irq: Interrupt line that is forwarded to a VM
 *	@which: State to be restored (one of IRQCHIP_STATE_*)
 *	@val: Value corresponding to @which
 *
 *	This call sets the internal irqchip state of an interrupt,
 *	depending on the value of @which.
 *
 *	This function should be called with preemption disabled if the
 *	interrupt controller has per-cpu registers.
 */
int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool val)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	do {
		chip = irq_data_get_irq_chip(data);
		if (chip->irq_set_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_set_irqchip_state(data, which, val);

	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
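
/*
 * Usage sketch (illustrative only): saving and restoring the pending
 * state of an interrupt across a hand-over, as a hypervisor/VFIO-style
 * user might:
 *
 *	bool pending;
 *
 *	if (!irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending) &&
 *	    pending)
 *		irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);
 *	...
 *	if (pending)	// replay it at the new owner
 *		irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true);
 */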