/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <uapi/linux/sched/types.h>
#include <linux/task_work.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;
EXPORT_SYMBOL_GPL(force_irqthreads);

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

static void __synchronize_hardirq(struct irq_desc *desc)
{
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section.  This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 *	synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending hard IRQ handlers for this
 *	interrupt to complete before returning. If you use this
 *	function while holding a resource the hard IRQ handler may need
 *	you will deadlock. It does not take associated threaded handlers
 *	into account.
 *
 *	Do not use this for shutdown scenarios where you must be sure
 *	that all parts (hardirq and threaded handler) have completed.
 *
 *	Returns: false if a threaded handler is active.
 *
 *	This function may be called - with care - from IRQ context.
 */
bool synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		return !atomic_read(&desc->threads_active);
	}

	return true;
}
EXPORT_SYMBOL(synchronize_hardirq);

/**
 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	Can only be called from preemptible code as it might sleep when
 *	an interrupt thread is associated to @irq.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

static bool __irq_can_set_affinity(struct irq_desc *desc)
{
	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return false;
	return true;
}

/**
 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 *	@irq:		Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	return __irq_can_set_affinity(irq_to_desc(irq));
}

/**
 * irq_can_set_affinity_usr - Check if the affinity of an irq can be set from user space
 * @irq:	Interrupt to check
 *
 * Like irq_can_set_affinity() above, but additionally checks for the
 * AFFINITY_MANAGED flag.
 */
bool irq_can_set_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __irq_can_set_affinity(desc) &&
		!irqd_affinity_is_managed(&desc->irq_data);
}

/**
 *	irq_set_thread_affinity - Notify irq threads to adjust affinity
 *	@desc:		irq descriptor which has affinity changed
 *
 *	We just set IRQTF_AFFINITY and delegate the affinity setting
 *	to the interrupt thread itself. We can not call
 *	set_cpus_allowed_ptr() here as we hold desc->lock and this
 *	code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action;

	for_each_action_of_desc(desc, action)
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
}

static void irq_validate_effective_affinity(struct irq_data *data)
{
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

	if (!cpumask_empty(m))
		return;
	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
		     chip->name, data->irq);
#endif
}

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	ret = chip->irq_set_affinity(data, mask, force);
	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(desc->irq_common_data.affinity, mask);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_validate_effective_affinity(data);
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	irqd_set_move_pending(data);
	irq_copy_pending(desc, dest);
	return 0;
}
#else
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	return -EBUSY;
}
#endif

static int irq_try_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	int ret = irq_do_set_affinity(data, dest, force);

	/*
	 * In case that the underlying vector management is busy and the
	 * architecture supports the generic pending mechanism then utilize
	 * this to avoid returning an error to user space.
	 */
	if (ret == -EBUSY && !force)
		ret = irq_set_affinity_pending(data, dest);
	return ret;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
		ret = irq_try_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	/* set the initial affinity to prevent every interrupt being on CPU0 */
	if (m)
		__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
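
/*
 * Example: a driver spreading its interrupt vectors across CPUs via the
 * affinity hint. This is a minimal sketch; the "foo" device, its vector
 * array and queue count are hypothetical and not part of this API.
 *
 *	static void foo_spread_irqs(struct foo_dev *foo)
 *	{
 *		int i;
 *
 *		for (i = 0; i < foo->num_queues; i++)
 *			irq_set_affinity_hint(foo->irqs[i],
 *					      cpumask_of(i % num_online_cpus()));
 *	}
 *
 * The hint must be cleared with irq_set_affinity_hint(irq, NULL) before
 * the interrupt is freed, otherwise __free_irq() below warns about a
 * stale affinity_hint.
 */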

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_common_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 *	irq_set_affinity_notifier - control notification of IRQ affinity changes
 *	@irq:		Interrupt for which to enable/disable notification
 *	@notify:	Context for notification, or %NULL to disable
 *			notification.  Function pointers must be initialised;
 *			the other fields will be initialised by this function.
 *
 *	Must be called in process context.  Notification may only be enabled
 *	after the IRQ is allocated and must be disabled before the IRQ is
 *	freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
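
/*
 * Example: registering an affinity notifier. A minimal sketch with a
 * hypothetical driver context "foo"; only the two callbacks below are
 * required by the API, everything else is driver-specific.
 *
 *	static void foo_irq_notify(struct irq_affinity_notify *notify,
 *				   const cpumask_t *mask)
 *	{
 *		struct foo_dev *foo = container_of(notify, struct foo_dev,
 *						   affinity_notify);
 *		// Re-target per-CPU resources (e.g. queue mappings) here.
 *	}
 *
 *	static void foo_irq_notify_release(struct kref *ref)
 *	{
 *		// Drop any reference taken when the notifier was installed.
 *	}
 *
 *	foo->affinity_notify.notify = foo_irq_notify;
 *	foo->affinity_notify.release = foo_irq_notify_release;
 *	irq_set_affinity_notifier(foo->irq, &foo->affinity_notify);
 *
 * Pass a NULL notify pointer to remove the notifier before free_irq().
 */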

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
int irq_setup_affinity(struct irq_desc *desc)
{
	struct cpumask *set = irq_default_affinity;
	int ret, node = irq_desc_get_node(desc);
	static DEFINE_RAW_SPINLOCK(mask_lock);
	static struct cpumask mask;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!__irq_can_set_affinity(desc))
		return 0;

	raw_spin_lock(&mask_lock);
	/*
	 * Preserve the managed affinity setting and a userspace affinity
	 * setup, but make sure that one of the targets is online.
	 */
	if (irqd_affinity_is_managed(&desc->irq_data) ||
	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_common_data.affinity,
				       cpu_online_mask))
			set = desc->irq_common_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(&mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(&mask, nodemask))
			cpumask_and(&mask, &mask, nodemask);
	}
	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
	raw_spin_unlock(&mask_lock);
	return ret;
}
#else
/* Wrapper for ALPHA specific affinity selector magic */
int irq_setup_affinity(struct irq_desc *desc)
{
	return irq_select_affinity(irq_desc_get_irq(desc));
}
#endif

/*
 * Called when a bogus affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_setup_affinity(desc);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
#endif

/**
 *	irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 *	@irq: interrupt number to set affinity
 *	@vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
 *	            specific data for percpu_devid interrupts
 *
 *	This function uses the vCPU specific data to set the vCPU
 *	affinity for an irq. The vCPU specific data is passed from
 *	outside, such as KVM. One example code path is as below:
 *	KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	do {
		chip = irq_data_get_irq_chip(data);
		if (chip && chip->irq_set_vcpu_affinity)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);

void __disable_irq(struct irq_desc *desc)
{
	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and Enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
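
/*
 * Example: the classic disable/enable bracket around a reconfiguration
 * that must not race with the handler. A minimal sketch; "foo" and its
 * helper are hypothetical.
 *
 *	disable_irq(foo->irq);		// waits for running handlers
 *	foo_rewrite_ring_buffers(foo);	// handler cannot observe this
 *	enable_irq(foo->irq);
 *
 * Because disable_irq() calls synchronize_irq(), it must not be used from
 * atomic context or from anything the handler (hard or threaded) could be
 * waiting on; use disable_irq_nosync() in those situations instead.
 */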

/**
 *	disable_hardirq - disables an irq and waits for hardirq completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending hard IRQ handlers for this
 *	interrupt to complete before returning. If you use this function while
 *	holding a resource the hard IRQ handler may need you will deadlock.
 *
 *	When used to optimistically disable an interrupt from atomic context
 *	the return value must be checked.
 *
 *	Returns: false if a threaded handler is active.
 *
 *	This function may be called - with care - from IRQ context.
 */
bool disable_hardirq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		return synchronize_hardirq(irq);

	return false;
}
EXPORT_SYMBOL_GPL(disable_hardirq);

void __enable_irq(struct irq_desc *desc)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
		     irq_desc_get_irq(desc));
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		/*
		 * Call irq_startup() not irq_enable() here because the
		 * interrupt might be marked NOAUTOEN. So irq_startup()
		 * needs to be invoked when it gets enabled the first
		 * time. If it was already started up, then irq_startup()
		 * will invoke irq_enable() under the hood.
		 */
		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
		break;
	}
	default:
		desc->depth--;
	}
}

/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq().  If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context only when
 *	desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 *	irq_set_irq_wake - control irq power management wakeup
 *	@irq:	interrupt to control
 *	@on:	enable/disable power management wakeup
 *
 *	Enable/disable power management wakeup mode, which is
 *	disabled by default.  Enables and disables must match,
 *	just as they match for non-wakeup mode support.
 *
 *	Wakeup mode lets this IRQ wake the system from sleep
 *	states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
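
/*
 * Example: marking an interrupt as a wakeup source across suspend. A
 * sketch with a hypothetical driver; typically done in the dev_pm_ops
 * suspend/resume callbacks.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			irq_set_irq_wake(foo->irq, 1);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			irq_set_irq_wake(foo->irq, 0);
 *		return 0;
 *	}
 *
 * Enables and disables nest via desc->wake_depth, so they must be balanced.
 */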

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n",
			 irq_desc_get_irq(desc),
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* Mask all flags except trigger mode */
	flags &= IRQ_TYPE_SENSE_MASK;
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_parent);
#endif

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
{
	WARN(1, "Secondary action handler called for irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler finished. unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT) ||
	    action->handler == irq_forced_secondary_handler)
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be we need to protect us against
	 * the following scenario:
	 *
	 * The thread is faster done than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and masks the line, leaves due
	 * to IRQS_INPROGRESS and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (cpumask_available(desc->irq_common_data.affinity)) {
		const struct cpumask *m;

		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
		cpumask_copy(mask, m);
	} else {
		valid = false;
	}
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}

static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
{
	struct irqaction *secondary = action->secondary;

	if (WARN_ON_ONCE(!secondary))
		return;

	raw_spin_lock_irq(&desc->lock);
	__irq_wake_thread(desc, secondary);
	raw_spin_unlock_irq(&desc->lock);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
				  struct irqaction *action);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					 &action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, false);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_HANDLED)
			atomic_inc(&desc->threads_handled);
		if (action_ret == IRQ_WAKE_THREAD)
			irq_wake_secondary(desc, action);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set. We cannot verify that as we
	 * cannot touch the oneshot mask at this point anymore as
	 * __setup_irq() might have given out currents thread_mask
	 * again.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}

/**
 *	irq_wake_thread - wake the irq thread for the action identified by dev_id
 *	@irq:		Interrupt line
 *	@dev_id:	Device identity for which the thread should be woken
 *
 */
void irq_wake_thread(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if (action->dev_id == dev_id) {
			if (action->thread)
				__irq_wake_thread(desc, action);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);
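
/*
 * Example: kicking the threaded handler without a hardware interrupt,
 * e.g. from a watchdog timer that detected lost interrupts. A sketch;
 * "foo" and its helpers are hypothetical.
 *
 *	static void foo_watchdog(struct timer_list *t)
 *	{
 *		struct foo_dev *foo = from_timer(foo, t, watchdog);
 *
 *		if (foo_has_pending_work(foo))
 *			irq_wake_thread(foo->irq, foo);
 *	}
 *
 * The dev_id must match the one passed to request_threaded_irq() so the
 * right action on a shared line is woken.
 */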

static int irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return 0;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return 0;

	/*
	 * No further action required for interrupts which are requested as
	 * threaded interrupts already
	 */
	if (new->handler == irq_default_primary_handler)
		return 0;

	new->flags |= IRQF_ONESHOT;

	/*
	 * Handle the case where we have a real primary handler and a
	 * thread handler. We force thread them as well by creating a
	 * secondary action.
	 */
	if (new->handler && new->thread_fn) {
		/* Allocate the secondary action */
		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
		if (!new->secondary)
			return -ENOMEM;
		new->secondary->handler = irq_forced_secondary_handler;
		new->secondary->thread_fn = new->thread_fn;
		new->secondary->dev_id = new->dev_id;
		new->secondary->irq = new->irq;
		new->secondary->name = new->name;
	}
	/* Deal with the primary handler */
	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
	new->thread_fn = new->handler;
	new->handler = irq_default_primary_handler;
	return 0;
}

static int irq_request_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
}

static void irq_release_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	if (c->irq_release_resources)
		c->irq_release_resources(d);
}

static int
setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
{
	struct task_struct *t;
	struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};

	if (!secondary) {
		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
	} else {
		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
				   new->name);
		param.sched_priority -= 1;
	}

	if (IS_ERR(t))
		return PTR_ERR(t);

	sched_setscheduler_nocheck(t, SCHED_FIFO, &param);

	/*
	 * We keep the reference to the task struct even if
	 * the thread dies to avoid that the interrupt code
	 * references an already freed task_struct.
	 */
	get_task_struct(t);
	new->thread = t;
	/*
	 * Tell the thread to set its affinity. This is
	 * important for shared interrupt handlers as we do
	 * not invoke setup_affinity() for the secondary
	 * handlers as everything is already set up. Even for
	 * interrupts marked with IRQF_NO_BALANCE this is
	 * correct as we want the thread to move to the cpu(s)
	 * on which the requesting code placed the interrupt.
	 */
	set_bit(IRQTF_AFFINITY, &new->thread_flags);
	return 0;
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 *
 * Locking rules:
 *
 * desc->request_mutex	Provides serialization against a concurrent free_irq()
 *   chip_bus_lock	Provides serialization for slow bus operations
 *     desc->lock	Provides serialization against hard interrupts
 *
 * chip_bus_lock and desc->lock are sufficient for all other management and
 * interrupt related functions. desc->request_mutex solely serializes
 * request/free_irq().
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;

	new->irq = irq;

	/*
	 * If the trigger type is not specified by the caller,
	 * then use the default for this interrupt.
	 */
	if (!(new->flags & IRQF_TRIGGER_MASK))
		new->flags |= irqd_get_trigger_type(&desc->irq_data);

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc)) {
			ret = irq_setup_forced_threading(new);
			if (ret)
				goto out_mput;
		}
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		ret = setup_irq_thread(new, irq, false);
		if (ret)
			goto out_mput;
		if (new->secondary) {
			ret = setup_irq_thread(new->secondary, irq, true);
			if (ret)
				goto out_thread;
		}
	}

	/*
	 * Drivers are often written to work w/o knowledge about the
	 * underlying irq chip implementation, so a request for a
	 * threaded irq without a primary hard irq context handler
	 * requires the ONESHOT flag to be set. Some irq chips like
	 * MSI based interrupts are per se one shot safe. Check the
	 * chip flags, so we can avoid the unmask dance at the end of
	 * the threaded handler for those.
	 */
	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
		new->flags &= ~IRQF_ONESHOT;

	/*
	 * Protects against a concurrent __free_irq() call which might wait
	 * for synchronize_irq() to complete without holding the optional
	 * chip bus lock and desc->lock.
	 */
	mutex_lock(&desc->request_mutex);

	/*
	 * Acquire bus lock as the irq_request_resources() callback below
	 * might rely on the serialization or the magic power management
	 * functions which are abusing the irq_bus_lock() callback,
	 */
	chip_bus_lock(desc);

	/* First installed action requests resources. */
	if (!desc->action) {
		ret = irq_request_resources(desc);
		if (ret) {
			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
			       new->name, irq, desc->irq_data.chip->name);
			goto out_bus_unlock;
		}
	}

	/*
	 * The following block of code has to be executed atomically
	 * protected against a concurrent interrupt and any of the other
	 * management calls which are not serialized via
	 * desc->request_mutex or the optional bus lock.
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 */
		unsigned int oldtype;

		/*
		 * If nobody did set the configuration before, inherit
		 * the one provided by the requester.
		 */
		if (irqd_trigger_type_was_set(&desc->irq_data)) {
			oldtype = irqd_get_trigger_type(&desc->irq_data);
		} else {
			oldtype = new->flags & IRQF_TRIGGER_MASK;
			irqd_set_trigger_type(&desc->irq_data, oldtype);
		}

		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
			goto mismatch;

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 resp 64 irqs sharing one line,
		 * but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_unlock;
		}
		/*
		 * The thread_mask for the action is or'ed to
		 * desc->threads_oneshot to indicate that the
		 * IRQF_ONESHOT thread handler has been woken, but not
		 * yet finished. The bit is dropped when the thread
		 * function did its job and the unmask of the interrupt
		 * line is possible. threads_oneshot contains the bits
		 * of all woken threads, so the unmask happens only
		 * when the last thread finished.
		 *
		 * Each action gets the first zero bit of the or'ed
		 * thread masks of all already installed actions as
		 * its private mask bit.
		 */
		new->thread_mask = 1UL << ffz(thread_mask);

	} else if (new->handler == irq_default_primary_handler &&
		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is reenabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
		       irq);
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!shared) {
		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc,
						new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_unlock;
		}

		/*
		 * Activate the interrupt. That activation must happen
		 * independently of IRQ_NOAUTOEN. request_irq() can fail
		 * and the callers are supposed to handle
		 * that. enable_irq() of an interrupt requested with
		 * IRQ_NOAUTOEN is not supposed to fail. The activation
		 * keeps it in shutdown mode, it merely associates
		 * resources if necessary and if that's not possible it
		 * fails. Interrupts which are in managed shutdown mode
		 * will simply ignore that activation request.
		 */
		ret = irq_activate(desc);
		if (ret)
			goto out_unlock;

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		if (irq_settings_can_autoenable(desc)) {
			irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		} else {
			/*
			 * Shared interrupts do not go well with disabling
			 * auto enable. The sharing interrupt might request
			 * it while it's still disabled and then wait for
			 * interrupts forever meanwhile the other device
			 * does not get any interrupts either...
			 */
			WARN_ON_ONCE(new->flags & IRQF_SHARED);
			/* Undo nested disables: */
			desc->depth = 1;
		}

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warn("irq %d uses trigger mode %u; requested %u\n",
				irq, omsk, nmsk);
	}

	*old_ptr = new;

	irq_pm_install_action(desc, new);

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
	mutex_unlock(&desc->request_mutex);

	irq_setup_timings(desc, new);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);
	if (new->secondary)
		wake_up_process(new->secondary->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	return 0;

mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
		       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
	}
	ret = -EBUSY;

out_unlock:
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (!desc->action)
		irq_release_resources(desc);
out_bus_unlock:
	chip_bus_sync_unlock(desc);
	mutex_unlock(&desc->request_mutex);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
	if (new->secondary && new->secondary->thread) {
		struct task_struct *t = new->secondary->thread;

		new->secondary->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}

/**
 *	setup_irq - setup an interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		return retval;

	retval = __setup_irq(irq, desc, act);

	if (retval)
		irq_chip_pm_put(&desc->irq_data);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
{
	unsigned irq = desc->irq_data.irq;
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	mutex_lock(&desc->request_mutex);
	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);
			chip_bus_sync_unlock(desc);
			mutex_unlock(&desc->request_mutex);
			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	irq_pm_remove_action(desc, action);

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		irq_settings_clr_disable_unlazy(desc);
		irq_shutdown(desc);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	/*
	 * Drop bus_lock here so the changes which were done in the chip
	 * callbacks above are synced out to the irq chips which hang
	 * behind a slow bus (I2C, SPI) before calling synchronize_irq().
	 *
	 * Aside of that the bus_lock can also be taken from the threaded
	 * handler in irq_finalize_oneshot() which results in a deadlock
	 * because kthread_stop() would wait forever for the thread to
	 * complete, which is blocked on the bus lock.
	 *
	 * The still held desc->request_mutex() protects against a
	 * concurrent request_irq() of this irq so the release of resources
	 * and timing data is properly serialized.
	 */
	chip_bus_sync_unlock(desc);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
		if (action->secondary && action->secondary->thread) {
			kthread_stop(action->secondary->thread);
			put_task_struct(action->secondary->thread);
		}
	}

	/* Last action releases resources */
	if (!desc->action) {
		/*
		 * Reacquire bus lock as irq_release_resources() might
		 * require it to deallocate resources over the slow bus.
		 */
		chip_bus_lock(desc);
		irq_release_resources(desc);
		chip_bus_sync_unlock(desc);
		irq_remove_timings(desc);
	}

	mutex_unlock(&desc->request_mutex);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	kfree(action->secondary);
	return action;
}

/**
 *	remove_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		__free_irq(desc, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 *	free_irq - free an interrupt allocated with request_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 *
 *	Returns the devname argument passed to request_irq.
 */
const void *free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	const char *devname;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return NULL;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	action = __free_irq(desc, dev_id);

	if (!action)
		return NULL;

	devname = action->name;
	kfree(action);
	return devname;
}
EXPORT_SYMBOL(free_irq);

/**
 *	request_threaded_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Primary handler for threaded interrupts
 *		  If NULL and thread_fn != NULL the default
 *		  primary handler is installed
 *	@thread_fn: Function called from the irq handler thread
 *		    If NULL, no irq thread is created
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	If you want to set up a threaded irq handler for your device
 *	then you need to supply @handler and @thread_fn. @handler is
 *	still called in hard interrupt context and has to check
 *	whether the interrupt originates from the device. If yes it
 *	needs to disable the interrupt on the device and return
 *	IRQ_WAKE_THREAD which will wake up the handler thread and run
 *	@thread_fn. This split handler design is necessary to support
 *	shared interrupts.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 *
	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
	 * it cannot be set along with IRQF_NO_SUSPEND.
	 */
	if (((irqflags & IRQF_SHARED) && !dev_id) ||
	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0) {
		kfree(action);
		return retval;
	}

	retval = __setup_irq(irq, desc, action);

	if (retval) {
		irq_chip_pm_put(&desc->irq_data);
		kfree(action->secondary);
		kfree(action);
	}

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
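
/*
 * Example: the canonical split primary/threaded request. A minimal
 * sketch; the "foo" device and its helper functions are hypothetical.
 *
 *	static irqreturn_t foo_hardirq(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		if (!foo_irq_is_mine(foo))	// shared line: check origin
 *			return IRQ_NONE;
 *		foo_mask_device_irq(foo);	// quiesce the device
 *		return IRQ_WAKE_THREAD;		// run foo_thread_fn()
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		foo_process_events(foo);	// may sleep here
 *		foo_unmask_device_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(foo->irq, foo_hardirq, foo_thread_fn,
 *				   IRQF_SHARED, "foo", foo);
 *	...
 *	free_irq(foo->irq, foo);
 *
 * With handler == NULL the core installs irq_default_primary_handler()
 * and requires IRQF_ONESHOT (see the check in __setup_irq() above).
 */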

/**
 *	request_any_context_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Threaded handler for threaded interrupts.
 *	@flags: Interrupt type flags
 *	@name: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. It selects either a
 *	hardirq or threaded handling method depending on the
 *	context.
 *
 *	On failure, it returns a negative value. On success,
 *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc;
	int ret;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
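
/*
 * Example: requesting a line that may be nested-threaded (e.g. behind an
 * I2C GPIO expander) or a real hardirq, without the caller caring which.
 * A sketch; "foo" is hypothetical.
 *
 *	ret = request_any_context_irq(foo->irq, foo_handler,
 *				      IRQF_TRIGGER_FALLING, "foo", foo);
 *	if (ret < 0)
 *		return ret;
 *	// ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED on success.
 */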

void enable_percpu_irq(unsigned int irq, unsigned int type)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	/*
	 * If the trigger type is not specified by the caller, then
	 * use the default for this interrupt.
	 */
	type &= IRQ_TYPE_SENSE_MASK;
	if (type == IRQ_TYPE_NONE)
		type = irqd_get_trigger_type(&desc->irq_data);

	if (type != IRQ_TYPE_NONE) {
		int ret;

		ret = __irq_set_trigger(desc, type);

		if (ret) {
			WARN(1, "failed to set type for IRQ%d\n", irq);
			goto out;
		}
	}

	irq_percpu_enable(desc, cpu);
out:
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(enable_percpu_irq);

/**
 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
 * @irq:	Linux irq number to check for
 *
 * Must be called from a non migratable task. Returns the enable
 * state of a per cpu interrupt on the current cpu.
 */
bool irq_percpu_is_enabled(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	struct irq_desc *desc;
	unsigned long flags;
	bool is_enabled;

	desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return false;

	is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
	irq_put_desc_unlock(desc, flags);

	return is_enabled;
}
EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);

void disable_percpu_irq(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	irq_percpu_disable(desc, cpu);
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(disable_percpu_irq);

/*
 * Internal function to unregister a percpu irqaction.
 */
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	action = desc->action;
	if (!action || action->percpu_dev_id != dev_id) {
		WARN(1, "Trying to free already-free IRQ %d\n", irq);
		goto bad;
	}

	if (!cpumask_empty(desc->percpu_enabled)) {
		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
		     irq, cpumask_first(desc->percpu_enabled));
		goto bad;
	}

	/* Found it - now remove it from the list of entries: */
	desc->action = NULL;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	return action;

bad:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return NULL;
}

/**
 *	remove_percpu_irq - free a per-cpu interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && irq_settings_is_per_cpu_devid(desc))
		__free_percpu_irq(irq, act->percpu_dev_id);
}

/**
 *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove a percpu interrupt handler. The handler is removed, but
 *	the interrupt line is not disabled. This must be done on each
 *	CPU before calling this function. The function does not return
 *	until any executing interrupts for this IRQ have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	chip_bus_lock(desc);
	kfree(__free_percpu_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL_GPL(free_percpu_irq);

/**
 *	setup_percpu_irq - setup a per-cpu interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 * Used to statically setup per-cpu interrupts in the early boot process.
 */
int setup_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int retval;

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		return retval;

	retval = __setup_irq(irq, desc, act);

	if (retval)
		irq_chip_pm_put(&desc->irq_data);

	return retval;
}

/**
 *	__request_percpu_irq - allocate a percpu interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *	@flags: Interrupt type flags (IRQF_TIMER only)
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A percpu cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt on the local CPU. If the interrupt is supposed to be
 *	enabled on other CPUs, it has to be done on each CPU using
 *	enable_percpu_irq().
 *
 *	Dev_id must be globally unique. It is a per-cpu variable, and
 *	the handler gets called with the interrupted CPU's instance of
 *	that variable.
 */
int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
			 unsigned long flags, const char *devname,
			 void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (!dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	if (flags && flags != IRQF_TIMER)
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
	action->name = devname;
	action->percpu_dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0) {
		kfree(action);
		return retval;
	}

	retval = __setup_irq(irq, desc, action);

	if (retval) {
		irq_chip_pm_put(&desc->irq_data);
		kfree(action);
	}

	return retval;
}
EXPORT_SYMBOL_GPL(__request_percpu_irq);
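
/*
 * Example: a per-cpu interrupt (e.g. a per-CPU timer or IPI-like source).
 * A sketch; "foo_evt" is a hypothetical percpu variable, and the enable
 * would normally run on each CPU from a CPU hotplug callback.
 *
 *	static DEFINE_PER_CPU(struct foo_evt, foo_evt);
 *
 *	ret = __request_percpu_irq(irq, foo_percpu_handler, 0,
 *				   "foo_percpu", &foo_evt);
 *	if (!ret)
 *		enable_percpu_irq(irq, IRQ_TYPE_NONE);	// on each CPU
 *
 * The handler receives the interrupted CPU's &foo_evt instance; the line
 * must be disabled on every CPU (disable_percpu_irq()) before
 * free_percpu_irq() is called.
 */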

/**
 *	irq_get_irqchip_state - returns the irqchip state of an interrupt.
 *	@irq: Interrupt line that is forwarded to a VM
 *	@which: One of IRQCHIP_STATE_* the caller wants to know about
 *	@state: a pointer to a boolean where the state is to be stored
 *
 *	This call snapshots the internal irqchip state of an
 *	interrupt, returning into @state the bit corresponding to
 *	stage @which
 *
 *	This function should be called with preemption disabled if the
 *	interrupt controller has per-cpu registers.
 */
int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool *state)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	do {
		chip = irq_data_get_irq_chip(data);
		if (chip->irq_get_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_get_irqchip_state(data, which, state);

	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
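
/*
 * Example: snapshotting and clearing the pending state of a forwarded
 * interrupt, in the style of a VFIO/KVM user. A sketch; error handling
 * is elided.
 *
 *	bool pending;
 *
 *	if (!irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending) &&
 *	    pending)
 *		irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);
 */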

/**
 *	irq_set_irqchip_state - set the state of a forwarded interrupt.
 *	@irq: Interrupt line that is forwarded to a VM
 *	@which: State to be restored (one of IRQCHIP_STATE_*)
 *	@val: Value corresponding to @which
 *
 *	This call sets the internal irqchip state of an interrupt,
 *	depending on the value of @which.
 *
 *	This function should be called with preemption disabled if the
 *	interrupt controller has per-cpu registers.
 */
int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool val)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	do {
		chip = irq_data_get_irq_chip(data);
		if (chip->irq_set_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_set_irqchip_state(data, which, val);

	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_set_irqchip_state);