/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */
#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <uapi/linux/sched/types.h>
#include <linux/task_work.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;
EXPORT_SYMBOL_GPL(force_irqthreads);

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

static void __synchronize_hardirq(struct irq_desc *desc)
{
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section.  This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 *	synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending hard IRQ handlers for this
 *	interrupt to complete before returning. If you use this
 *	function while holding a resource the IRQ handler may need you
 *	will deadlock. It does not take associated threaded handlers
 *	into account.
 *
 *	Do not use this for shutdown scenarios where you must be sure
 *	that all parts (hardirq and threaded handler) have completed.
 *
 *	Returns: false if a threaded handler is active.
 *
 *	This function may be called - with care - from IRQ context.
 */
bool synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		return !atomic_read(&desc->threads_active);
	}

	return true;
}
EXPORT_SYMBOL(synchronize_hardirq);

/**
 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);

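/*
 * Illustrative sketch (not part of this file): a typical teardown
 * sequence where synchronize_irq() matters. The foo_* names are
 * hypothetical driver identifiers, used only for illustration.
 *
 *	disable_irq_nosync(foo->irq);
 *	synchronize_irq(foo->irq);
 *	// Both the hard handler and any threaded handler are
 *	// quiescent from here on, so resources they use can be
 *	// torn down safely.
 *	kfree(foo->dma_buf);
 *
 * This must not be done while holding a lock that the handlers
 * take, or it will deadlock.
 */
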
#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

static bool __irq_can_set_affinity(struct irq_desc *desc)
{
	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return false;
	return true;
}

/**
 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 *	@irq:		Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	return __irq_can_set_affinity(irq_to_desc(irq));
}

/**
 * irq_can_set_affinity_usr - Check if affinity of a irq can be set from user space
 * @irq:	Interrupt to check
 *
 * Like irq_can_set_affinity() above, but additionally checks for the
 * AFFINITY_MANAGED flag.
 */
bool irq_can_set_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __irq_can_set_affinity(desc) &&
		!irqd_affinity_is_managed(&desc->irq_data);
}

/**
 *	irq_set_thread_affinity - Notify irq threads to adjust affinity
 *	@desc:		irq descriptor which has affinity changed
 *
 *	We just set IRQTF_AFFINITY and delegate the affinity setting
 *	to the interrupt thread itself. We can not call
 *	set_cpus_allowed_ptr() here as we hold desc->lock and this
 *	code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action;

	for_each_action_of_desc(desc, action)
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
}

static void irq_validate_effective_affinity(struct irq_data *data)
{
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

	if (!cpumask_empty(m))
		return;
	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
		     chip->name, data->irq);
#endif
}

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	ret = chip->irq_set_affinity(data, mask, force);
	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(desc->irq_common_data.affinity, mask);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_validate_effective_affinity(data);
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	irqd_set_move_pending(data);
	irq_copy_pending(desc, dest);
	return 0;
}
#else
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	return -EBUSY;
}
#endif

static int irq_try_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	int ret = irq_do_set_affinity(data, dest, force);

	/*
	 * In case that the underlying vector management is busy and the
	 * architecture supports the generic pending mechanism then utilize
	 * this to avoid returning an error to user space.
	 */
	if (ret == -EBUSY && !force)
		ret = irq_set_affinity_pending(data, dest);
	return ret;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
		ret = irq_try_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	/* set the initial affinity to prevent every interrupt being on CPU0 */
	if (m)
		__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);

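/*
 * Illustrative sketch (hypothetical driver): publishing a per-queue
 * affinity hint, e.g. so user space irqbalance can honour it. The
 * queue/foo names are assumptions, not part of this API.
 *
 *	cpumask_set_cpu(queue->cpu, &queue->affinity_mask);
 *	irq_set_affinity_hint(queue->irq, &queue->affinity_mask);
 *	...
 *	// The mask must stay valid until the hint is cleared, and the
 *	// hint must be cleared before the interrupt is freed:
 *	irq_set_affinity_hint(queue->irq, NULL);
 *	free_irq(queue->irq, queue);
 */
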
static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_common_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 *	irq_set_affinity_notifier - control notification of IRQ affinity changes
 *	@irq:		Interrupt for which to enable/disable notification
 *	@notify:	Context for notification, or %NULL to disable
 *			notification.  Function pointers must be initialised;
 *			the other fields will be initialised by this function.
 *
 *	Must be called in process context.  Notification may only be enabled
 *	after the IRQ is allocated and must be disabled before the IRQ is
 *	freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);

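/*
 * Illustrative sketch (hypothetical driver): tracking affinity
 * changes with a notifier. The foo_* names are assumptions.
 *
 *	static void foo_irq_notify(struct irq_affinity_notify *notify,
 *				   const cpumask_t *mask)
 *	{
 *		// Runs in process context via the workqueue above.
 *	}
 *
 *	static void foo_irq_release(struct kref *ref)
 *	{
 *		// Last reference dropped; free the containing object.
 *	}
 *
 *	foo->notify.notify = foo_irq_notify;
 *	foo->notify.release = foo_irq_release;
 *	irq_set_affinity_notifier(foo->irq, &foo->notify);
 *	...
 *	irq_set_affinity_notifier(foo->irq, NULL);	// before free_irq()
 */
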
#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
int irq_setup_affinity(struct irq_desc *desc)
{
	struct cpumask *set = irq_default_affinity;
	int ret, node = irq_desc_get_node(desc);
	static DEFINE_RAW_SPINLOCK(mask_lock);
	static struct cpumask mask;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!__irq_can_set_affinity(desc))
		return 0;

	raw_spin_lock(&mask_lock);
	/*
	 * Preserve the managed affinity setting and a userspace affinity
	 * setup, but make sure that one of the targets is online.
	 */
	if (irqd_affinity_is_managed(&desc->irq_data) ||
	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_common_data.affinity,
				       cpu_online_mask))
			set = desc->irq_common_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(&mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(&mask, nodemask))
			cpumask_and(&mask, &mask, nodemask);
	}
	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
	raw_spin_unlock(&mask_lock);
	return ret;
}
#else
/* Wrapper for ALPHA specific affinity selector magic */
int irq_setup_affinity(struct irq_desc *desc)
{
	return irq_select_affinity(irq_desc_get_irq(desc));
}
#endif

/*
 * Called when a bogus affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_setup_affinity(desc);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
#endif

/**
 *	irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 *	@irq: interrupt number to set affinity
 *	@vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
 *	            specific data for percpu_devid interrupts
 *
 *	This function uses the vCPU specific data to set the vCPU
 *	affinity for an irq. The vCPU specific data is passed from
 *	outside, such as KVM. One example code path is as below:
 *	KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	do {
		chip = irq_data_get_irq_chip(data);
		if (chip && chip->irq_set_vcpu_affinity)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);

void __disable_irq(struct irq_desc *desc)
{
	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and Enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
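
/*
 * Illustrative sketch: pairing disable_irq()/enable_irq() around a
 * reconfiguration step, with hypothetical foo_* names. The disable
 * depth nests, so every disable_irq() needs a matching enable_irq().
 *
 *	disable_irq(foo->irq);		// waits for running handlers
 *	foo_reprogram_registers(foo);	// handlers cannot run here
 *	enable_irq(foo->irq);
 *
 * disable_irq() may sleep waiting for threaded handlers, so it must
 * not be called from hard interrupt context, nor while holding a
 * resource the handlers need.
 */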

/**
 *	disable_hardirq - disables an irq and waits for hardirq completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending hard IRQ handlers for this
 *	interrupt to complete before returning. If you use this function while
 *	holding a resource the hard IRQ handler may need you will deadlock.
 *
 *	When used to optimistically disable an interrupt from atomic context
 *	the return value must be checked.
 *
 *	Returns: false if a threaded handler is active.
 *
 *	This function may be called - with care - from IRQ context.
 */
bool disable_hardirq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		return synchronize_hardirq(irq);

	return false;
}
EXPORT_SYMBOL_GPL(disable_hardirq);

void __enable_irq(struct irq_desc *desc)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
		     irq_desc_get_irq(desc));
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		/*
		 * Call irq_startup() not irq_enable() here because the
		 * interrupt might be marked NOAUTOEN. So irq_startup()
		 * needs to be invoked when it gets enabled the first
		 * time. If it was already started up, then irq_startup()
		 * will invoke irq_enable() under the hood.
		 */
		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
		break;
	}
	default:
		desc->depth--;
	}
}

/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq().  If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context only when
 *	desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 *	irq_set_irq_wake - control irq power management wakeup
 *	@irq:	interrupt to control
 *	@on:	enable/disable power management wakeup
 *
 *	Enable/disable power management wakeup mode, which is
 *	disabled by default.  Enables and disables must match,
 *	just as they match for non-wakeup mode support.
 *
 *	Wakeup mode lets this IRQ wake the system from sleep
 *	states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
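
/*
 * Illustrative sketch (hypothetical driver suspend hook): arming an
 * interrupt as a wakeup source. The wake depth nests like the disable
 * depth, so the enable/disable calls must be balanced.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			irq_set_irq_wake(foo->irq, 1);
 *		return 0;
 *	}
 *
 * The matching resume hook would call irq_set_irq_wake(foo->irq, 0).
 */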

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n",
			 irq_desc_get_irq(desc),
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* Mask all flags except trigger mode */
	flags &= IRQ_TYPE_SENSE_MASK;
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
/*
 * Set the parent irq which is used by the software resend mechanism
 * for interrupts that nest into another interrupt thread.
 */
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_parent);
#endif

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
{
	WARN(1, "Secondary action handler called for irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			/* may need to run one last time */
			if (test_and_clear_bit(IRQTF_RUNTHREAD,
					       &action->thread_flags)) {
				__set_current_state(TASK_RUNNING);
				return 0;
			}
			__set_current_state(TASK_RUNNING);
			return -1;
		}

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
	}
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler finished. unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT) ||
	    action->handler == irq_forced_secondary_handler)
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be we need to protect us against
	 * the following scenario:
	 *
	 * The thread is faster done than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and masks the line, leaves due
	 * to IRQS_INPROGRESS and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (cpumask_available(desc->irq_common_data.affinity)) {
		const struct cpumask *m;

		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
		cpumask_copy(mask, m);
	} else {
		valid = false;
	}
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}

static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
{
	struct irqaction *secondary = action->secondary;

	if (WARN_ON_ONCE(!secondary))
		return;

	raw_spin_lock_irq(&desc->lock);
	__irq_wake_thread(desc, secondary);
	raw_spin_unlock_irq(&desc->lock);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
			struct irqaction *action);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					&action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, false);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_WAKE_THREAD)
			irq_wake_secondary(desc, action);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}

/**
 *	irq_wake_thread - wake the irq thread for the action identified by dev_id
 *	@irq:		Interrupt line
 *	@dev_id:	Device identity for which the thread should be woken
 *
 */
void irq_wake_thread(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if (action->dev_id == dev_id) {
			if (action->thread)
				__irq_wake_thread(desc, action);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);
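
/*
 * Illustrative sketch: irq_wake_thread() lets a driver kick its own
 * threaded handler from some other hard interrupt path, e.g. when a
 * summary/error interrupt discovers work for another line's thread.
 * The foo_* names are hypothetical.
 *
 *	static irqreturn_t foo_error_irq(int irq, void *dev_id)
 *	{
 *		struct foo *foo = dev_id;
 *
 *		if (foo_error_implies_rx_work(foo))
 *			irq_wake_thread(foo->rx_irq, foo->rx_queue);
 *		return IRQ_HANDLED;
 *	}
 */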

static int irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return 0;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return 0;

	/*
	 * No further action required for interrupts which are requested as
	 * threaded interrupts already
	 */
	if (new->handler == irq_default_primary_handler)
		return 0;

	new->flags |= IRQF_ONESHOT;

	/*
	 * Handle the case where we have a real primary handler and a
	 * thread handler. We force thread them as well by creating a
	 * secondary action.
	 */
	if (new->handler && new->thread_fn) {
		/* Allocate the secondary action */
		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
		if (!new->secondary)
			return -ENOMEM;
		new->secondary->handler = irq_forced_secondary_handler;
		new->secondary->thread_fn = new->thread_fn;
		new->secondary->dev_id = new->dev_id;
		new->secondary->irq = new->irq;
		new->secondary->name = new->name;
	}
	/* Deal with the primary handler */
	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
	new->thread_fn = new->handler;
	new->handler = irq_default_primary_handler;
	return 0;
}

static int irq_request_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
}

static void irq_release_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	if (c->irq_release_resources)
		c->irq_release_resources(d);
}

static int
setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
{
	struct task_struct *t;
	struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};

	if (!secondary) {
		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
	} else {
		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
				   new->name);
		param.sched_priority -= 1;
	}

	if (IS_ERR(t))
		return PTR_ERR(t);

	sched_setscheduler_nocheck(t, SCHED_FIFO, &param);

	/*
	 * We keep the reference to the task struct even if
	 * the thread dies to avoid that the interrupt code
	 * references an already freed task_struct.
	 */
	get_task_struct(t);
	new->thread = t;
	/*
	 * Tell the thread to set its affinity. This is
	 * important for shared interrupt handlers as we do
	 * not invoke setup_affinity() for the secondary
	 * handlers as everything is already set up. Even for
	 * interrupts marked with IRQF_NO_BALANCE this is
	 * correct as we want the thread to move to the cpu(s)
	 * on which the requesting code placed the interrupt.
	 */
	set_bit(IRQTF_AFFINITY, &new->thread_flags);
	return 0;
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 *
 * Locking rules:
 *
 * desc->request_mutex	Provides serialization against a concurrent free_irq()
 *   chip_bus_lock	Provides serialization for slow bus operations
 *     desc->lock	Provides serialization against hard interrupts
 *
 * chip_bus_lock and desc->lock are sufficient for all other management and
 * interrupt related functions. desc->request_mutex solely serializes
 * request/free_irq().
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;

	new->irq = irq;

	/*
	 * If the trigger type is not specified by the caller,
	 * then use the default for this interrupt.
	 */
	if (!(new->flags & IRQF_TRIGGER_MASK))
		new->flags |= irqd_get_trigger_type(&desc->irq_data);

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc)) {
			ret = irq_setup_forced_threading(new);
			if (ret)
				goto out_mput;
		}
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		ret = setup_irq_thread(new, irq, false);
		if (ret)
			goto out_mput;
		if (new->secondary) {
			ret = setup_irq_thread(new->secondary, irq, true);
			if (ret)
				goto out_thread;
		}
	}

	/*
	 * Drivers are often written to work w/o knowledge about the
	 * underlying irq chip implementation, so a request for a
	 * threaded irq without a primary hard irq context handler
	 * requires the ONESHOT flag to be set. Some irq chips like
	 * MSI based interrupts are per se one shot safe. Check the
	 * chip flags, so we can avoid the unmask dance at the end of
	 * the threaded handler for those.
	 */
	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
		new->flags &= ~IRQF_ONESHOT;

	/*
	 * Protects against a concurrent __free_irq() call which might wait
	 * for synchronize_hardirq() to complete without holding the optional
	 * chip bus lock and desc->lock. Also protects against handing out
	 * a recycled oneshot thread_mask bit while it's still in use by
	 * its previous owner.
	 */
	mutex_lock(&desc->request_mutex);

	/*
	 * Acquire bus lock as the irq_request_resources() callback below
	 * might rely on the serialization or the magic power management
	 * functions which are abusing the irq_bus_lock() callback,
	 */
	chip_bus_lock(desc);

	/* First installed action requests resources. */
	if (!desc->action) {
		ret = irq_request_resources(desc);
		if (ret) {
			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
			       new->name, irq, desc->irq_data.chip->name);
			goto out_bus_unlock;
		}
	}

	/*
	 * The following block of code has to be executed atomically
	 * protected against a concurrent interrupt and any of the other
	 * management calls which are not serialized via
	 * desc->request_mutex or the optional bus lock.
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 */
		unsigned int oldtype;

		/*
		 * If nobody did set the configuration before, inherit
		 * the one provided by the requester.
		 */
		if (irqd_trigger_type_was_set(&desc->irq_data)) {
			oldtype = irqd_get_trigger_type(&desc->irq_data);
		} else {
			oldtype = new->flags & IRQF_TRIGGER_MASK;
			irqd_set_trigger_type(&desc->irq_data, oldtype);
		}

		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
			goto mismatch;

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 resp 64 irqs sharing one line,
		 * but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_unlock;
		}
		/*
		 * The thread_mask for the action is or'ed into
		 * desc->threads_oneshot when the thread handler is
		 * woken and cleared again when it completes. The bits
		 * of all woken but not yet finished oneshot threads
		 * keep the line masked; only when the last bit is
		 * dropped in irq_finalize_oneshot() is the line
		 * unmasked again. So each action on a shared oneshot
		 * line needs its own bit: take the first zero bit in
		 * the accumulated mask.
		 */
		new->thread_mask = 1UL << ffz(thread_mask);

	} else if (new->handler == irq_default_primary_handler &&
		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not set the ONESHOT flag. In combination with
		 * level interrupts this is deadly, because the default
		 * primary handler just wakes the thread, then the irq
		 * line is reenabled, but the device still has the level
		 * irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
		       irq);
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!shared) {
		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc,
						new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_unlock;
		}

		/*
		 * Activate the interrupt. That activation must happen
		 * independently of IRQ_NOAUTOEN. request_irq() can fail
		 * and the callers are supposed to handle
		 * that. enable_irq() of an interrupt requested with
		 * IRQ_NOAUTOEN is not supposed to fail. The activation
		 * keeps it in shutdown mode, it merely associates
		 * resources if necessary and if possible.
		 */
		ret = irq_activate(desc);
		if (ret)
			goto out_unlock;

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		if (irq_settings_can_autoenable(desc)) {
			irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		} else {
			/*
			 * Shared interrupts do not go well with disabling
			 * auto enable. The sharing interrupt might request
			 * it while it's still disabled and then wait for
			 * interrupts forever.
			 */
			WARN_ON_ONCE(new->flags & IRQF_SHARED);
			/* Undo nested disables: */
			desc->depth = 1;
		}

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warn("irq %d uses trigger mode %u; requested %u\n",
				irq, omsk, nmsk);
	}

	*old_ptr = new;

	irq_pm_install_action(desc, new);

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
	mutex_unlock(&desc->request_mutex);

	irq_setup_timings(desc, new);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);
	if (new->secondary)
		wake_up_process(new->secondary->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	return 0;

mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
		       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
	}
	ret = -EBUSY;

out_unlock:
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (!desc->action)
		irq_release_resources(desc);
out_bus_unlock:
	chip_bus_sync_unlock(desc);
	mutex_unlock(&desc->request_mutex);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
	if (new->secondary && new->secondary->thread) {
		struct task_struct *t = new->secondary->thread;

		new->secondary->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}

/**
 *	setup_irq - setup an interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		return retval;

	retval = __setup_irq(irq, desc, act);

	if (retval)
		irq_chip_pm_put(&desc->irq_data);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
{
	unsigned irq = desc->irq_data.irq;
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	mutex_lock(&desc->request_mutex);
	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);
			chip_bus_sync_unlock(desc);
			mutex_unlock(&desc->request_mutex);
			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	irq_pm_remove_action(desc, action);

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		irq_settings_clr_disable_unlazy(desc);
		irq_shutdown(desc);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	/*
	 * Drop bus_lock here so the changes which were done in the chip
	 * callbacks above are synced out to the irq chips which hang
	 * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
	 *
	 * Aside of that the bus_lock can also be taken from the threaded
	 * handler in irq_finalize_oneshot() which results in a deadlock
	 * because kthread_stop() would wait forever for the thread to
	 * complete, which is blocked on the bus lock.
	 *
	 * The still held desc->request_mutex() protects against a
	 * concurrent request_irq() of this irq so the release of resources
	 * and timing data is properly serialized.
	 */
	chip_bus_sync_unlock(desc);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_hardirq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	/*
	 * The action has already been removed above, but the thread writes
	 * its oneshot mask bit when it completes. Though request_mutex is
	 * held across this which prevents __setup_irq() from handing out
	 * the same bit to a newly requested action.
	 */
	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
		if (action->secondary && action->secondary->thread) {
			kthread_stop(action->secondary->thread);
			put_task_struct(action->secondary->thread);
		}
	}

	/* Last action releases resources */
	if (!desc->action) {
		/*
		 * Reacquire bus lock as irq_release_resources() might
		 * require it to deallocate resources over the slow bus.
		 */
		chip_bus_lock(desc);
		irq_release_resources(desc);
		chip_bus_sync_unlock(desc);
		irq_remove_timings(desc);
	}

	mutex_unlock(&desc->request_mutex);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	kfree(action->secondary);
	return action;
}

/**
 *	remove_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		__free_irq(desc, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 *	free_irq - free an interrupt allocated with request_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 *
 *	Returns the devname argument passed to request_irq.
 */
const void *free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	const char *devname;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return NULL;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	action = __free_irq(desc, dev_id);

	if (!action)
		return NULL;

	devname = action->name;
	kfree(action);
	return devname;
}
EXPORT_SYMBOL(free_irq);
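
/*
 * Illustrative sketch: teardown ordering for a shared interrupt,
 * with hypothetical foo_* names. The device must be silenced first,
 * because on a shared line the other handlers keep running and the
 * freed handler may still be invoked until free_irq() returns.
 *
 *	foo_mask_device_irq(foo);	// stop the card raising the line
 *	free_irq(foo->irq, foo);	// waits for running handlers
 */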

/**
 *	request_threaded_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Primary handler for threaded interrupts
 *		  If NULL and thread_fn != NULL the default
 *		  primary handler is installed
 *	@thread_fn: Function called from the irq handler thread
 *		    If NULL, no irq thread is created
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	If you want to set up a threaded irq handler for your device
 *	then you need to supply @handler and @thread_fn. @handler is
 *	still called in hard interrupt context and has to check
 *	whether the interrupt originates from the device. If yes it
 *	needs to disable the interrupt on the device and return
 *	IRQ_WAKE_THREAD which will wake up the handler thread and run
 *	@thread_fn. This split handler design is necessary to support
 *	shared interrupts.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 *
	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
	 * it cannot be set along with IRQF_NO_SUSPEND.
	 */
	if (((irqflags & IRQF_SHARED) && !dev_id) ||
	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0) {
		kfree(action);
		return retval;
	}

	retval = __setup_irq(irq, desc, action);

	if (retval) {
		irq_chip_pm_put(&desc->irq_data);
		kfree(action->secondary);
		kfree(action);
	}

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
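
/*
 * Illustrative sketch of the split primary/threaded handler design
 * described in the kernel-doc above, using hypothetical foo_* names.
 *
 *	static irqreturn_t foo_quick_check(int irq, void *dev_id)
 *	{
 *		struct foo *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))
 *			return IRQ_NONE;	// shared line, not ours
 *		foo_mask_device_irq(foo);	// silence the device
 *		return IRQ_WAKE_THREAD;		// run foo_thread_fn
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		struct foo *foo = dev_id;
 *
 *		foo_process_events(foo);	// may sleep here
 *		foo_unmask_device_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(foo->irq, foo_quick_check,
 *				   foo_thread_fn, IRQF_SHARED,
 *				   "foo", foo);
 */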

/**
 *	request_any_context_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Threaded handler for threaded interrupts.
 *	@flags: Interrupt type flags
 *	@name: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. It selects either a
 *	hardirq or threaded handling method depending on the
 *	context.
 *
 *	On failure, it returns a negative value. On success,
 *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc;
	int ret;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);

void enable_percpu_irq(unsigned int irq, unsigned int type)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	/*
	 * If the trigger type is not specified by the caller, then
	 * use the default for this interrupt.
	 */
	type &= IRQ_TYPE_SENSE_MASK;
	if (type == IRQ_TYPE_NONE)
		type = irqd_get_trigger_type(&desc->irq_data);

	if (type != IRQ_TYPE_NONE) {
		int ret;

		ret = __irq_set_trigger(desc, type);

		if (ret) {
			WARN(1, "failed to set type for IRQ%d\n", irq);
			goto out;
		}
	}

	irq_percpu_enable(desc, cpu);
out:
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(enable_percpu_irq);

/**
 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
 * @irq:	Linux irq number to check for
 *
 * Must be called from a non migratable task. Returns the enable
 * state of a per cpu interrupt on the current cpu.
 */
bool irq_percpu_is_enabled(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	struct irq_desc *desc;
	unsigned long flags;
	bool is_enabled;

	desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return false;

	is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
	irq_put_desc_unlock(desc, flags);

	return is_enabled;
}
EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);

void disable_percpu_irq(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	irq_percpu_disable(desc, cpu);
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(disable_percpu_irq);

/*
 * Internal function to unregister a percpu irqaction.
 */
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	action = desc->action;
	if (!action || action->percpu_dev_id != dev_id) {
		WARN(1, "Trying to free already-free IRQ %d\n", irq);
		goto bad;
	}

	if (!cpumask_empty(desc->percpu_enabled)) {
		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
		     irq, cpumask_first(desc->percpu_enabled));
		goto bad;
	}

	/* Found it - now remove it from the list of entries: */
	desc->action = NULL;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	return action;

bad:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return NULL;
}

/**
 *	remove_percpu_irq - free a per-cpu interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && irq_settings_is_per_cpu_devid(desc))
		__free_percpu_irq(irq, act->percpu_dev_id);
}

/**
 *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove a percpu interrupt handler. The handler is removed, but
 *	the interrupt line is not disabled. This must be done on each
 *	CPU before calling this function. The function does not return
 *	until any executing interrupts for this IRQ have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	chip_bus_lock(desc);
	kfree(__free_percpu_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL_GPL(free_percpu_irq);

/**
 *	setup_percpu_irq - setup a per-cpu interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 * Used to statically setup per-cpu interrupts in the early boot process.
 */
int setup_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int retval;

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		return retval;

	retval = __setup_irq(irq, desc, act);

	if (retval)
		irq_chip_pm_put(&desc->irq_data);

	return retval;
}

/**
 *	__request_percpu_irq - allocate a percpu interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *	@flags: Interrupt type flags (IRQF_TIMER only)
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A percpu cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt on the local CPU. If the interrupt is supposed to be
 *	enabled on other CPUs, it has to be done on each CPU using
 *	enable_percpu_irq().
 *
 *	Dev_id must be globally unique. It is a per-cpu variable, and
 *	the handler gets called with the interrupted CPU's instance of
 *	that variable.
 */
int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
			 unsigned long flags, const char *devname,
			 void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (!dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	if (flags && flags != IRQF_TIMER)
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
	action->name = devname;
	action->percpu_dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0) {
		kfree(action);
		return retval;
	}

	retval = __setup_irq(irq, desc, action);

	if (retval) {
		irq_chip_pm_put(&desc->irq_data);
		kfree(action);
	}

	return retval;
}
EXPORT_SYMBOL_GPL(__request_percpu_irq);
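
/*
 * Illustrative sketch (hypothetical per-cpu timer driver): the
 * request only installs the action; each CPU must enable the line
 * locally, typically from a CPU hotplug callback or on_each_cpu().
 *
 *	static DEFINE_PER_CPU(struct foo_dev, foo_percpu_dev);
 *
 *	err = request_percpu_irq(foo_irq, foo_percpu_handler,
 *				 "foo_timer", &foo_percpu_dev);
 *	...
 *	static void foo_enable_local(void *unused)
 *	{
 *		enable_percpu_irq(foo_irq, IRQ_TYPE_NONE);
 *	}
 *	on_each_cpu(foo_enable_local, NULL, 1);
 */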

/**
 *	irq_get_irqchip_state - returns the irqchip state of a interrupt.
 *	@irq: Interrupt line that is forwarded to a VM
 *	@which: One of IRQCHIP_STATE_* the caller wants to know about
 *	@state: a pointer to a boolean where the state is to be stored
 *
 *	This call snapshots the internal irqchip state of an
 *	interrupt, returning into @state the bit corresponding to
 *	stage @which
 *
 *	This function should be called with preemption disabled if the
 *	interrupt controller has per-cpu registers.
 */
int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool *state)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	do {
		chip = irq_data_get_irq_chip(data);
		if (chip->irq_get_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_get_irqchip_state(data, which, state);

	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_get_irqchip_state);

/**
 *	irq_set_irqchip_state - set the state of a forwarded interrupt.
 *	@irq: Interrupt line that is forwarded to a VM
 *	@which: State to be restored (one of IRQCHIP_STATE_*)
 *	@val: Value corresponding to @which
 *
 *	This call sets the internal irqchip state of an interrupt,
 *	depending on the value of @which.
 *
 *	This function should be called with preemption disabled if the
 *	interrupt controller has per-cpu registers.
 */
int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool val)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	do {
		chip = irq_data_get_irq_chip(data);
		if (chip->irq_set_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_set_irqchip_state(data, which, val);

	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
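
/*
 * Illustrative sketch: saving and restoring the pending state of an
 * interrupt forwarded to a guest, as a VFIO/KVM style user of this
 * pair of APIs might do (names hypothetical).
 *
 *	bool pending;
 *
 *	irq_get_irqchip_state(host_irq, IRQCHIP_STATE_PENDING, &pending);
 *	...
 *	irq_set_irqchip_state(host_irq, IRQCHIP_STATE_PENDING, pending);
 */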