/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */
#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <uapi/linux/sched/types.h>
#include <linux/task_work.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;
EXPORT_SYMBOL_GPL(force_irqthreads);

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif
36
static void __synchronize_hardirq(struct irq_desc *desc)
{
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section.  This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 *	synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending hard IRQ handlers for this
 *	interrupt to complete before returning. If you use this
 *	function while holding a resource the IRQ handler may need you
 *	will deadlock. It does not take associated threaded handlers
 *	into account.
 *
 *	Do not use this for shutdown scenarios where you must be sure
 *	that all parts (hardirq and threaded handler) have completed.
 *
 *	Returns: false if a threaded handler is active.
 *
 *	This function may be called - with care - from IRQ context.
 */
bool synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		return !atomic_read(&desc->threads_active);
	}

	return true;
}
EXPORT_SYMBOL(synchronize_hardirq);

/**
 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	Can only be called from preemptible code as it might sleep when
 *	it waits for a threaded handler to complete.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);
116
#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;
119
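/*
 * Check whether the affinity of @desc can be changed at all: the
 * descriptor must exist, allow balancing, and its chip must provide an
 * irq_set_affinity() callback.
 */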
static bool __irq_can_set_affinity(struct irq_desc *desc)
{
	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return false;
	return true;
}
127
/**
 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 *	@irq:		Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	return __irq_can_set_affinity(irq_to_desc(irq));
}
137
/**
 * irq_can_set_affinity_usr - Check if affinity of a irq can be set from user space
 * @irq:	Interrupt to check
 *
 * Like irq_can_set_affinity() above, but additionally checks for the
 * AFFINITY_MANAGED flag.
 */
bool irq_can_set_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __irq_can_set_affinity(desc) &&
		!irqd_affinity_is_managed(&desc->irq_data);
}
152
/**
 *	irq_set_thread_affinity - Notify irq threads to adjust affinity
 *	@desc:		irq descriptor which has affinity changed
 *
 *	We just set IRQTF_AFFINITY and delegate the affinity setting
 *	to the interrupt thread itself. We can not call
 *	set_cpus_allowed_ptr() here as we hold desc->lock and this
 *	code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action;

	for_each_action_of_desc(desc, action)
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
}
170
static void irq_validate_effective_affinity(struct irq_data *data)
{
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

	if (!cpumask_empty(m))
		return;
	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
		     chip->name, data->irq);
#endif
}
183
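/*
 * Hand @mask to the irq chip's irq_set_affinity() callback and, on
 * success, update the common affinity copy and notify the irq threads.
 */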
int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	ret = chip->irq_set_affinity(data, mask, force);
	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(desc->irq_common_data.affinity, mask);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_validate_effective_affinity(data);
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}
207
#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	irqd_set_move_pending(data);
	irq_copy_pending(desc, dest);
	return 0;
}
#else
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	return -EBUSY;
}
#endif
225
static int irq_try_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	int ret = irq_do_set_affinity(data, dest, force);

	/*
	 * In case that the underlying vector management is busy and the
	 * architecture supports the generic pending mechanism then utilize
	 * this to avoid returning an error to user space.
	 */
	if (ret == -EBUSY && !force)
		ret = irq_set_affinity_pending(data, dest);
	return ret;
}
240
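/*
 * Set the affinity with desc->lock held. If the interrupt cannot be
 * moved in process context, the update is deferred to the pending
 * machinery; an affinity notification is scheduled in either case.
 */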
int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
		ret = irq_try_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}
266
int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
281
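/*
 * Store an affinity hint for userspace irq balancers (exposed via
 * /proc/irq/<n>/affinity_hint) and, if a mask is given, apply it as the
 * initial affinity.
 */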
int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	/* set the initial affinity to the hint */
	if (m)
		__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
297
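/*
 * Work item scheduled from irq_set_affinity_locked(): snapshots the
 * current (or pending) affinity mask and hands it to the registered
 * notifier callback.
 */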
static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_common_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}
322
/**
 *	irq_set_affinity_notifier - control notification of IRQ affinity changes
 *	@irq:		Interrupt for which to enable/disable notification
 *	@notify:	Context for notification, or %NULL to disable
 *			notification.  Function pointers must be initialised;
 *			the other fields will be initialised by this function.
 *
 *	Must be called in process context.  Notification may only be enabled
 *	after the IRQ is allocated and must be disabled before the IRQ is
 *	freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
365
#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
int irq_setup_affinity(struct irq_desc *desc)
{
	struct cpumask *set = irq_default_affinity;
	int ret, node = irq_desc_get_node(desc);
	static DEFINE_RAW_SPINLOCK(mask_lock);
	static struct cpumask mask;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!__irq_can_set_affinity(desc))
		return 0;

	raw_spin_lock(&mask_lock);
	/*
	 * Preserve the managed affinity setting and a userspace affinity
	 * setup, but make sure that one of the targets is online.
	 */
	if (irqd_affinity_is_managed(&desc->irq_data) ||
	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_common_data.affinity,
				       cpu_online_mask))
			set = desc->irq_common_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(&mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(&mask, nodemask))
			cpumask_and(&mask, &mask, nodemask);
	}
	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
	raw_spin_unlock(&mask_lock);
	return ret;
}
#else
/* The architecture provides its own affinity autoselection. */
int irq_setup_affinity(struct irq_desc *desc)
{
	return irq_select_affinity(irq_desc_get_irq(desc));
}
#endif
414
/*
 * Called when a bogus affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_setup_affinity(desc);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
#endif

/**
 *	irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 *	@irq: interrupt number to set affinity
 *	@vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
 *	            specific data for percpu_devid interrupts
 *
 *	This function uses the vCPU specific data to set the vCPU
 *	affinity for an irq. The vCPU specific data is passed from
 *	outside, such as KVM. One example code path is as below:
 *	KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	do {
		chip = irq_data_get_irq_chip(data);
		if (chip && chip->irq_set_vcpu_affinity)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
472
void __disable_irq(struct irq_desc *desc)
{
	if (!desc->depth++)
		irq_disable(desc);
}
478
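/*
 * Helper for disable_irq{,_nosync}(): takes the bus lock and bumps the
 * disable depth, which masks the interrupt on the first disable.
 */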
static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}
490
/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and Enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);
507
/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
526
/**
 *	disable_hardirq - disables an irq and waits for hardirq completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending hard IRQ handlers for this
 *	interrupt to complete before returning. If you use this function while
 *	holding a resource the hard IRQ handler may need you will deadlock.
 *
 *	When used to optimistically disable an interrupt from atomic context
 *	the return value must be checked.
 *
 *	Returns: false if a threaded handler is active.
 *
 *	This function may be called - with care - from IRQ context.
 */
bool disable_hardirq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		return synchronize_hardirq(irq);

	return false;
}
EXPORT_SYMBOL_GPL(disable_hardirq);
552
void __enable_irq(struct irq_desc *desc)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
		     irq_desc_get_irq(desc));
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		/*
		 * Call irq_startup() not irq_enable() here because the
		 * interrupt might be marked NOAUTOEN. So irq_startup()
		 * needs to be invoked when it gets enabled the first
		 * time. If it was already started up, then irq_startup()
		 * will invoke irq_enable() under the hood.
		 */
		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
		break;
	}
	default:
		desc->depth--;
	}
}
580
/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq().  If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context only when
 *	desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);
608
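/* Tell the irq chip to (de)configure this interrupt as a wakeup source. */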
static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}
622
/**
 *	irq_set_irq_wake - control irq power management wakeup
 *	@irq:	interrupt to control
 *	@on:	enable/disable power management wakeup
 *
 *	Enable/disable power management wakeup mode, which is
 *	disabled by default.  Enables and disables must match,
 *	just as they match for non-wakeup mode support.
 *
 *	Wakeup mode lets this IRQ wake the system from sleep
 *	states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
670
/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}
693
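/*
 * Program the trigger type (level/edge, polarity) into the irq chip,
 * honouring IRQCHIP_SET_TYPE_MASKED by masking around the callback.
 */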
int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n",
			 irq_desc_get_irq(desc),
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* Mask all flags except trigger mode */
	flags &= IRQ_TYPE_SENSE_MASK;
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}
747
#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_parent);
#endif
764
/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
{
	WARN(1, "Secondary action handler called for irq %d\n", irq);
	return IRQ_NONE;
}
790
static int irq_wait_for_interrupt(struct irqaction *action)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			/* may need to run one last time */
			if (test_and_clear_bit(IRQTF_RUNTHREAD,
					       &action->thread_flags)) {
				__set_current_state(TASK_RUNNING);
				return 0;
			}
			__set_current_state(TASK_RUNNING);
			return -1;
		}

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
	}
}
815
/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler finished. unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT) ||
	    action->handler == irq_forced_secondary_handler)
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be we need to protect us against
	 * the following scenario:
	 *
	 * The thread is faster done than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and masks the line, leaves due
	 * to IRQS_INPROGRESS and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}
870
#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (cpumask_available(desc->irq_common_data.affinity)) {
		const struct cpumask *m;

		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
		cpumask_copy(mask, m);
	} else {
		valid = false;
	}
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif
916
/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	return ret;
}
949
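/* Decrement threads_active and wake up the waiters in synchronize_irq(). */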
static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}
955
static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}
982
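/*
 * A forced-threaded primary handler returned IRQ_WAKE_THREAD: kick the
 * secondary action's thread, which carries the original thread function.
 */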
static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
{
	struct irqaction *secondary = action->secondary;

	if (WARN_ON_ONCE(!secondary))
		return;

	raw_spin_lock_irq(&desc->lock);
	__irq_wake_thread(desc, secondary);
	raw_spin_unlock_irq(&desc->lock);
}
994
/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
				  struct irqaction *action);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					 &action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, false);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_HANDLED)
			atomic_inc(&desc->threads_handled);
		if (action_ret == IRQ_WAKE_THREAD)
			irq_wake_secondary(desc, action);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}
1040
/**
 *	irq_wake_thread - wake the irq thread for the action identified by dev_id
 *	@irq:		Interrupt line
 *	@dev_id:	Device identity for which the thread should be woken
 *
 */
void irq_wake_thread(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if (action->dev_id == dev_id) {
			if (action->thread)
				__irq_wake_thread(desc, action);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);
1067
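/*
 * With the "threadirqs" boot option set, move the primary handler of a
 * hardirq request into a thread, unless the interrupt is marked
 * IRQF_NO_THREAD, IRQF_PERCPU or already IRQF_ONESHOT.
 */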
static int irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return 0;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return 0;

	/*
	 * No further action required for interrupts which are requested as
	 * threaded interrupts already
	 */
	if (new->handler == irq_default_primary_handler)
		return 0;

	new->flags |= IRQF_ONESHOT;

	/*
	 * Handle the case where we have a real primary handler and a
	 * thread handler. We force thread them as well by creating a
	 * secondary action.
	 */
	if (new->handler && new->thread_fn) {
		/* Allocate the secondary action */
		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
		if (!new->secondary)
			return -ENOMEM;
		new->secondary->handler = irq_forced_secondary_handler;
		new->secondary->thread_fn = new->thread_fn;
		new->secondary->dev_id = new->dev_id;
		new->secondary->irq = new->irq;
		new->secondary->name = new->name;
	}
	/* Deal with the primary handler */
	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
	new->thread_fn = new->handler;
	new->handler = irq_default_primary_handler;
	return 0;
}
1106
static int irq_request_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
}

static void irq_release_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	if (c->irq_release_resources)
		c->irq_release_resources(d);
}
1123
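/*
 * Create the SCHED_FIFO kthread that runs the (primary or secondary)
 * threaded handler for @irq and take a reference on its task struct.
 */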
static int
setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
{
	struct task_struct *t;
	struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};

	if (!secondary) {
		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
	} else {
		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
				   new->name);
		param.sched_priority -= 1;
	}

	if (IS_ERR(t))
		return PTR_ERR(t);

	sched_setscheduler_nocheck(t, SCHED_FIFO, &param);

	/*
	 * We keep the reference to the task struct even if
	 * the thread dies to avoid that the interrupt code
	 * references an already freed task_struct.
	 */
	get_task_struct(t);
	new->thread = t;
	/*
	 * Tell the thread to set its affinity. This is
	 * important for shared interrupt handlers as we do
	 * not invoke setup_affinity() for the secondary
	 * handlers as everything might be affinity controlled
	 * already. Tell the thread to set the affinity itself
	 * when it runs.
	 */
	set_bit(IRQTF_AFFINITY, &new->thread_flags);
	return 0;
}
1165
/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 *
 * Locking rules:
 *
 * desc->request_mutex	Provides serialization against a concurrent free_irq()
 *   chip_bus_lock	Provides serialization for slow bus operations
 *     desc->lock	Provides serialization against hard interrupts
 *
 * chip_bus_lock and desc->lock are sufficient for all other management and
 * interrupt related functions. desc->request_mutex solely serializes
 * request/free_irq().
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;

	new->irq = irq;

	/*
	 * If the trigger type is not specified by the caller,
	 * then use the default for this interrupt.
	 */
	if (!(new->flags & IRQF_TRIGGER_MASK))
		new->flags |= irqd_get_trigger_type(&desc->irq_data);

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc)) {
			ret = irq_setup_forced_threading(new);
			if (ret)
				goto out_mput;
		}
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		ret = setup_irq_thread(new, irq, false);
		if (ret)
			goto out_mput;
		if (new->secondary) {
			ret = setup_irq_thread(new->secondary, irq, true);
			if (ret)
				goto out_thread;
		}
	}

	/*
	 * Drivers are often written to work w/o knowledge about the
	 * underlying irq chip implementation, so a request for a
	 * threaded irq without a primary hard irq context handler
	 * requires the ONESHOT flag to be set. Some irq chips like
	 * MSI based interrupts are per se one shot safe. Check the
	 * chip flags, so we can avoid the unmask dance at the end of
	 * the threaded handler for those.
	 */
	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
		new->flags &= ~IRQF_ONESHOT;

	/*
	 * Protects against a concurrent __free_irq() call which might wait
	 * for synchronize_hardirq() to complete without holding the optional
	 * chip bus lock and desc->lock. Also protects against handing out
	 * a recycled oneshot thread_mask bit while it's still in use by
	 * its previous owner.
	 */
	mutex_lock(&desc->request_mutex);

	/*
	 * Acquire bus lock as the irq_request_resources() callback below
	 * might rely on the serialization or the magic power management
	 * functions which are abusing the irq_bus_lock() callback,
	 */
	chip_bus_lock(desc);

	/* First installed action requests resources. */
	if (!desc->action) {
		ret = irq_request_resources(desc);
		if (ret) {
			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
			       new->name, irq, desc->irq_data.chip->name);
			goto out_bus_unlock;
		}
	}

	/*
	 * The following block of code has to be executed atomically
	 * protected against a concurrent interrupt and any of the other
	 * management calls which are not serialized via
	 * desc->request_mutex or the optional bus lock.
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 */
		unsigned int oldtype;

		/*
		 * If nobody did set the configuration before, inherit
		 * the one provided by the requester.
		 */
		if (irqd_trigger_type_was_set(&desc->irq_data)) {
			oldtype = irqd_get_trigger_type(&desc->irq_data);
		} else {
			oldtype = new->flags & IRQF_TRIGGER_MASK;
			irqd_set_trigger_type(&desc->irq_data, oldtype);
		}

		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
			goto mismatch;

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 resp 64 irqs sharing one line,
		 * but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_unlock;
		}
		/*
		 * The thread_mask for the action is or'ed to
		 * desc->thread_active to indicate that the
		 * IRQF_ONESHOT thread handler has been woken, but not
		 * yet finished. The bit is cleared when a thread
		 * completes. When all threads of a shared interrupt
		 * line have completed desc->threads_active becomes
		 * zero and the interrupt line is unmasked. See
		 * handle.c:irq_wake_thread() for further information.
		 *
		 * If no thread is woken by primary (hard irq context)
		 * interrupt handlers, then the bit is not cleared and
		 * the unmask of the irq line happens in the interrupt
		 * handlers. See handle.c:handle_irq_event() for
		 * further information.
		 *
		 * ffz() returns the position of the first zero bit,
		 * i.e. the lowest thread_mask bit which is still free.
		 */
		new->thread_mask = 1UL << ffz(thread_mask);

	} else if (new->handler == irq_default_primary_handler &&
		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq lines is reenabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
		       irq);
		ret = -EINVAL;
		goto out_unlock;
	}
1394
	if (!shared) {
		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc,
						new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_unlock;
		}

		/*
		 * Activate the interrupt. That activation must happen
		 * independently of IRQ_NOAUTOEN. request_irq() can fail
		 * and the callers are supposed to handle that even if
		 * the interrupt was never requested before. The actual
		 * startup and enabling happens further down, depending
		 * on the autoenable settings.
		 */
		ret = irq_activate(desc);
		if (ret)
			goto out_unlock;

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		if (irq_settings_can_autoenable(desc)) {
			irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		} else {
			/*
			 * Shared interrupts do not go well with disabling
			 * auto enable. The sharing interrupt might request
			 * it while it's still disabled and then wait for
			 * interrupts forever.
			 */
			WARN_ON_ONCE(new->flags & IRQF_SHARED);
			/* Undo nested disables: */
			desc->depth = 1;
		}

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warn("irq %d uses trigger mode %u; requested %u\n",
				irq, omsk, nmsk);
	}
1463
	*old_ptr = new;

	irq_pm_install_action(desc, new);

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
	mutex_unlock(&desc->request_mutex);

	irq_setup_timings(desc, new);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);
	if (new->secondary)
		wake_up_process(new->secondary->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	return 0;

mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
		       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
	}
	ret = -EBUSY;

out_unlock:
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (!desc->action)
		irq_release_resources(desc);
out_bus_unlock:
	chip_bus_sync_unlock(desc);
	mutex_unlock(&desc->request_mutex);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
	if (new->secondary && new->secondary->thread) {
		struct task_struct *t = new->secondary->thread;

		new->secondary->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}
1539
/**
 *	setup_irq - setup an interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		return retval;

	retval = __setup_irq(irq, desc, act);

	if (retval)
		irq_chip_pm_put(&desc->irq_data);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);
1567
/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
{
	unsigned irq = desc->irq_data.irq;
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	mutex_lock(&desc->request_mutex);
	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);
			chip_bus_sync_unlock(desc);
			mutex_unlock(&desc->request_mutex);
			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	irq_pm_remove_action(desc, action);

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		irq_settings_clr_disable_unlazy(desc);
		irq_shutdown(desc);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	/*
	 * Drop bus_lock here so the changes which were done in the chip
	 * callbacks above are synced out to the irq chips which hang
	 * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
	 *
	 * Aside of that the bus_lock can also be taken from the threaded
	 * handler in irq_finalize_oneshot() which results in a deadlock
	 * because kthread_stop() would wait forever for the thread to
	 * complete, which is blocked on the bus lock.
	 *
	 * The still held desc->request_mutex() protects against a
	 * concurrent request_irq() of this irq so the release of resources
	 * and timing data is properly serialized.
	 */
	chip_bus_sync_unlock(desc);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_hardirq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an
	 * IRQ event to happen even now it's being freed, so let's make
	 * sure that is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	/*
	 * The action has already been removed above, but the thread writes
	 * its oneshot mask bit when it completes. Though request_mutex is
	 * held across this which prevents __setup_irq() from handing out
	 * the same bit to a newly requested action.
	 */
	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
		if (action->secondary && action->secondary->thread) {
			kthread_stop(action->secondary->thread);
			put_task_struct(action->secondary->thread);
		}
	}

	/* Last action releases resources */
	if (!desc->action) {
		/*
		 * Reacquire bus lock as irq_release_resources() might
		 * require it to deallocate resources over the slow bus.
		 */
		chip_bus_lock(desc);
		irq_release_resources(desc);
		chip_bus_sync_unlock(desc);
		irq_remove_timings(desc);
	}

	mutex_unlock(&desc->request_mutex);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	kfree(action->secondary);
	return action;
}
1694
/**
 *	remove_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		__free_irq(desc, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);
1710
/**
 *	free_irq - free an interrupt allocated with request_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 *
 *	Returns the devname argument passed to request_irq.
 */
const void *free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	const char *devname;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return NULL;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	action = __free_irq(desc, dev_id);

	if (!action)
		return NULL;

	devname = action->name;
	kfree(action);
	return devname;
}
EXPORT_SYMBOL(free_irq);
1751
/**
 *	request_threaded_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Primary handler for threaded interrupts
 *		  If NULL and thread_fn != NULL the default
 *		  primary handler is installed
 *	@thread_fn: Function called from the irq handler thread
 *		    If NULL, no irq thread is created
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	If you want to set up a threaded irq handler for your device
 *	then you need to supply @handler and @thread_fn. @handler is
 *	still called in hard interrupt context and has to check
 *	whether the interrupt originates from the device. If yes it
 *	needs to disable the interrupt on the device and return
 *	IRQ_WAKE_THREAD which will wake up the handler thread and run
 *	@thread_fn. This split handler design is necessary to support
 *	shared interrupts.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 *
	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
	 * it cannot be set along with IRQF_NO_SUSPEND.
	 */
	if (((irqflags & IRQF_SHARED) && !dev_id) ||
	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0) {
		kfree(action);
		return retval;
	}

	retval = __setup_irq(irq, desc, action);

	if (retval) {
		irq_chip_pm_put(&desc->irq_data);
		kfree(action->secondary);
		kfree(action);
	}

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
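/*
 * Typical request_threaded_irq() usage, as a sketch only (the "foo"
 * driver, its handlers and error label below are hypothetical):
 *
 *	ret = request_threaded_irq(client->irq, foo_hard_check,
 *				   foo_thread_fn, IRQF_ONESHOT,
 *				   "foo", foo);
 *	if (ret)
 *		goto err_unwind;
 *
 * foo_hard_check() runs in hard irq context, quiesces the device and
 * returns IRQ_WAKE_THREAD; foo_thread_fn() then does the sleeping work.
 */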
1879
/**
 *	request_any_context_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Threaded handler for threaded interrupts.
 *	@flags: Interrupt type flags
 *	@name: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. It selects either a
 *	hardirq or threaded handling method depending on the
 *	context.
 *
 *	On failure, it returns a negative value. On success,
 *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc;
	int ret;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
1920
void enable_percpu_irq(unsigned int irq, unsigned int type)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	/*
	 * If the trigger type is not specified by the caller, then
	 * use the default for this interrupt.
	 */
	type &= IRQ_TYPE_SENSE_MASK;
	if (type == IRQ_TYPE_NONE)
		type = irqd_get_trigger_type(&desc->irq_data);

	if (type != IRQ_TYPE_NONE) {
		int ret;

		ret = __irq_set_trigger(desc, type);

		if (ret) {
			WARN(1, "failed to set type for IRQ%d\n", irq);
			goto out;
		}
	}

	irq_percpu_enable(desc, cpu);
out:
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(enable_percpu_irq);
1954
/**
 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
 * @irq:	Linux irq number to check for
 *
 * Must be called from a non migratable task. Returns the enable
 * state of a per cpu interrupt on the current cpu.
 */
bool irq_percpu_is_enabled(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	struct irq_desc *desc;
	unsigned long flags;
	bool is_enabled;

	desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return false;

	is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
	irq_put_desc_unlock(desc, flags);

	return is_enabled;
}
EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
1979
void disable_percpu_irq(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	irq_percpu_disable(desc, cpu);
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(disable_percpu_irq);
1993
/*
 * Internal function to unregister a percpu irqaction.
 */
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	action = desc->action;
	if (!action || action->percpu_dev_id != dev_id) {
		WARN(1, "Trying to free already-free IRQ %d\n", irq);
		goto bad;
	}

	if (!cpumask_empty(desc->percpu_enabled)) {
		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
		     irq, cpumask_first(desc->percpu_enabled));
		goto bad;
	}

	/* Found it - now remove it from the list of entries: */
	desc->action = NULL;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	return action;

bad:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return NULL;
}
2037
/**
 *	remove_percpu_irq - free a per-cpu interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && irq_settings_is_per_cpu_devid(desc))
		__free_percpu_irq(irq, act->percpu_dev_id);
}
2052
/**
 *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove a percpu interrupt handler. The handler is removed, but
 *	the interrupt line is not disabled. This must be done on each
 *	CPU before calling this function. The function does not return
 *	until any executing interrupts for this IRQ have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	chip_bus_lock(desc);
	kfree(__free_percpu_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL_GPL(free_percpu_irq);
2077
/**
 *	setup_percpu_irq - setup a per-cpu interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 * Used to statically setup per-cpu interrupts in the early boot process.
 */
int setup_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int retval;

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		return retval;

	retval = __setup_irq(irq, desc, act);

	if (retval)
		irq_chip_pm_put(&desc->irq_data);

	return retval;
}
2104
/**
 *	__request_percpu_irq - allocate a percpu interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *	@flags: Interrupt type flags (IRQF_TIMER only)
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A percpu cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt on the local CPU. If the interrupt is supposed to be
 *	enabled on other CPUs, it has to be done on each CPU using
 *	enable_percpu_irq().
 *
 *	Dev_id must be globally unique. It is a per-cpu variable, and
 *	the handler gets called with the interrupted CPU's instance of
 *	that variable.
 */
int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
			 unsigned long flags, const char *devname,
			 void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (!dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	if (flags && flags != IRQF_TIMER)
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
	action->name = devname;
	action->percpu_dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0) {
		kfree(action);
		return retval;
	}

	retval = __setup_irq(irq, desc, action);

	if (retval) {
		irq_chip_pm_put(&desc->irq_data);
		kfree(action);
	}

	return retval;
}
EXPORT_SYMBOL_GPL(__request_percpu_irq);
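/*
 * Usage sketch only (the "foo" per-cpu driver state is hypothetical):
 * percpu interrupts are normally requested through the
 * request_percpu_irq() wrapper, which passes flags == 0:
 *
 *	foo_stats = alloc_percpu(struct foo_stats);
 *	ret = request_percpu_irq(irq, foo_percpu_handler, "foo-timer",
 *				 foo_stats);
 *
 * and each CPU then calls enable_percpu_irq(irq, IRQ_TYPE_NONE).
 */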
2166
/**
 *	irq_get_irqchip_state - returns the irqchip state of a interrupt.
 *	@irq: Interrupt line that is forwarded to a VM
 *	@which: One of IRQCHIP_STATE_* the caller wants to know about
 *	@state: a pointer to a boolean where the state is to be stored
 *
 *	This call snapshots the internal irqchip state of an
 *	interrupt, returning into @state the bit corresponding to
 *	stage @which
 *
 *	This function should be called with preemption disabled if the
 *	interrupt controller has per-cpu registers.
 */
int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool *state)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	do {
		chip = irq_data_get_irq_chip(data);
		if (chip->irq_get_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_get_irqchip_state(data, which, state);

	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
2213
/**
 *	irq_set_irqchip_state - set the state of a forwarded interrupt.
 *	@irq: Interrupt line that is forwarded to a VM
 *	@which: State to be restored (one of IRQCHIP_STATE_*)
 *	@val: Value corresponding to @which
 *
 *	This call sets the internal irqchip state of an interrupt,
 *	depending on the value of @which.
 *
 *	This function should be called with preemption disabled if the
 *	interrupt controller has per-cpu registers.
 */
int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool val)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	do {
		chip = irq_data_get_irq_chip(data);
		if (chip->irq_set_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_set_irqchip_state(data, which, val);

	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
2259