/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <uapi/linux/sched/types.h>
#include <linux/task_work.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

static void __synchronize_hardirq(struct irq_desc *desc)
{
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section.  This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 *	synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending hard IRQ handlers for this
 *	interrupt to complete before returning. If you use this
 *	function while holding a resource the IRQ handler may need you
 *	will deadlock. It does not take associated threaded handlers
 *	into account.
 *
 *	Do not use this for shutdown scenarios where you must be sure
 *	that all parts (hardirq and threaded handler) have completed.
 *
 *	Returns: false if a threaded handler is active.
 *
 *	This function may be called - with care - from IRQ context.
 */
bool synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		return !atomic_read(&desc->threads_active);
	}

	return true;
}
EXPORT_SYMBOL(synchronize_hardirq);

/**
 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);
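
/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * clearing a flag which its handler checks. "dev", "dev->irq" and
 * "shutting_down" are hypothetical driver-side names.
 *
 *	dev->shutting_down = true;
 *	synchronize_irq(dev->irq);
 *	(any handler invocation started after this point is guaranteed
 *	 to observe shutting_down == true)
 */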

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

static bool __irq_can_set_affinity(struct irq_desc *desc)
{
	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return false;
	return true;
}

/**
 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 *	@irq:		Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	return __irq_can_set_affinity(irq_to_desc(irq));
}

/**
 * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
 * @irq:	Interrupt to check
 *
 * Like irq_can_set_affinity() above, but additionally checks for the
 * AFFINITY_MANAGED flag.
 */
bool irq_can_set_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __irq_can_set_affinity(desc) &&
		!irqd_affinity_is_managed(&desc->irq_data);
}

/**
 *	irq_set_thread_affinity - Notify irq threads to adjust affinity
 *	@desc:		irq descriptor which has affinity changed
 *
 *	We just set IRQTF_AFFINITY and delegate the affinity setting
 *	to the interrupt thread itself. We can not call
 *	set_cpus_allowed_ptr() here as we hold desc->lock and this
 *	code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action;

	for_each_action_of_desc(desc, action)
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
}

static void irq_validate_effective_affinity(struct irq_data *data)
{
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

	if (!cpumask_empty(m))
		return;
	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
		     chip->name, data->irq);
#endif
}

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	ret = chip->irq_set_affinity(data, mask, force);
	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(desc->irq_common_data.affinity, mask);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_validate_effective_affinity(data);
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data)) {
		ret = irq_do_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	/* set the initial affinity to prevent every interrupt being on CPU0 */
	if (m)
		__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
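
/*
 * Usage sketch (illustrative only): a multiqueue driver hinting that
 * each of its vectors should be serviced by a different CPU. "nvec"
 * and "vecs[i].irq" are hypothetical.
 *
 *	for (i = 0; i < nvec; i++)
 *		irq_set_affinity_hint(vecs[i].irq,
 *				      cpumask_of(i % num_online_cpus()));
 *
 * The hint must be cleared with irq_set_affinity_hint(irq, NULL)
 * before the interrupt is freed; __free_irq() below warns if it is
 * still set.
 */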

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_common_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 *	irq_set_affinity_notifier - control notification of IRQ affinity changes
 *	@irq:		Interrupt for which to enable/disable notification
 *	@notify:	Context for notification, or %NULL to disable
 *			notification.  Function pointers must be initialised;
 *			the other fields will be initialised by this function.
 *
 *	Must be called in process context.  Notification may only be enabled
 *	after the IRQ is allocated and must be disabled before the IRQ is
 *	freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
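
/*
 * Usage sketch (illustrative only): registering for affinity change
 * notifications. The embedding structure and the callbacks
 * (foo_notify(), foo_release()) are hypothetical; the callback
 * signatures follow struct irq_affinity_notify.
 *
 *	static void foo_notify(struct irq_affinity_notify *notify,
 *			       const cpumask_t *mask)
 *	{
 *		(requeue per-cpu resources to follow the new mask)
 *	}
 *
 *	static void foo_release(struct kref *ref)
 *	{
 *		(drop the reference taken for the workqueue callback)
 *	}
 *
 *	foo->notify.notify = foo_notify;
 *	foo->notify.release = foo_release;
 *	irq_set_affinity_notifier(foo->irq, &foo->notify);
 */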

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
int irq_setup_affinity(struct irq_desc *desc)
{
	struct cpumask *set = irq_default_affinity;
	int ret, node = irq_desc_get_node(desc);
	static DEFINE_RAW_SPINLOCK(mask_lock);
	static struct cpumask mask;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!__irq_can_set_affinity(desc))
		return 0;

	raw_spin_lock(&mask_lock);
	/*
	 * Preserve the managed affinity setting and a userspace affinity
	 * setup, but make sure that one of the targets is online.
	 */
	if (irqd_affinity_is_managed(&desc->irq_data) ||
	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_common_data.affinity,
				       cpu_online_mask))
			set = desc->irq_common_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(&mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(&mask, nodemask))
			cpumask_and(&mask, &mask, nodemask);
	}
	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
	raw_spin_unlock(&mask_lock);
	return ret;
}
#else
/* Wrapper for ALPHA specific affinity selector magic */
int irq_setup_affinity(struct irq_desc *desc)
{
	return irq_select_affinity(irq_desc_get_irq(desc));
}
#endif

/*
 * Called when a bogus affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_setup_affinity(desc);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
#endif

/**
 *	irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 *	@irq: interrupt number to set affinity
 *	@vcpu_info: vCPU specific data
 *
 *	This function uses the vCPU specific data to set the vCPU
 *	affinity for an irq. The vCPU specific data is passed from
 *	outside, such as KVM. One example code path is as below:
 *	KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	do {
		chip = irq_data_get_irq_chip(data);
		if (chip && chip->irq_set_vcpu_affinity)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);

void __disable_irq(struct irq_desc *desc)
{
	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and Enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

/**
 *	disable_hardirq - disables an irq and waits for hardirq completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending hard IRQ handlers for this
 *	interrupt to complete before returning. If you use this function while
 *	holding a resource the hard IRQ handler may need you will deadlock.
 *
 *	When used to optimistically disable an interrupt from atomic context
 *	the return value must be checked.
 *
 *	Returns: false if a threaded handler is active.
 *
 *	This function may be called - with care - from IRQ context.
 */
bool disable_hardirq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		return synchronize_hardirq(irq);

	return false;
}
EXPORT_SYMBOL_GPL(disable_hardirq);
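
/*
 * Usage sketch (illustrative only): disable_hardirq() suits callers
 * which may run in atomic context and therefore cannot use
 * disable_irq():
 *
 *	if (disable_hardirq(irq))
 *		(no hard IRQ handler runs anywhere and no threaded
 *		 handler is active; safe to poke the device directly)
 *	enable_irq(irq);
 */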

void __enable_irq(struct irq_desc *desc)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
		     irq_desc_get_irq(desc));
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		/*
		 * Call irq_startup() not irq_enable() here because the
		 * interrupt might be marked NOAUTOEN. So irq_startup()
		 * needs to be invoked when it gets enabled the first
		 * time. If it was already started up, then irq_startup()
		 * just enables the interrupt line again.
		 */
		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		break;
	}
	default:
		desc->depth--;
	}
}

/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq().  If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context only when
 *	desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);
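
/*
 * Note that disable_irq()/enable_irq() calls nest via desc->depth, so
 * in a (hypothetical) sequence like the following the line is only
 * unmasked again by the last enable_irq():
 *
 *	disable_irq(irq);	depth 0 -> 1, line disabled
 *	disable_irq(irq);	depth 1 -> 2
 *	enable_irq(irq);	depth 2 -> 1, still disabled
 *	enable_irq(irq);	depth 1 -> 0, line enabled again
 */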

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 *	irq_set_irq_wake - control irq power management wakeup
 *	@irq:	interrupt to control
 *	@on:	enable/disable power management wakeup
 *
 *	Enable/disable power management wakeup mode, which is
 *	disabled by default.  Enables and disables must match,
 *	just as they match for non-wakeup mode support.
 *
 *	Wakeup mode lets this IRQ wake the system from sleep
 *	states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
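
/*
 * Usage sketch (illustrative only): marking an interrupt as a wakeup
 * source from a driver's suspend callback. foo_suspend(), struct
 * foo_dev and foo->irq are hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			irq_set_irq_wake(foo->irq, 1);
 *		return 0;
 *	}
 *
 * The resume callback performs the matching irq_set_irq_wake(foo->irq, 0);
 * enables and disables must balance, as enforced via desc->wake_depth
 * above.
 */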

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n",
			 irq_desc_get_irq(desc),
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* Mask all flags except trigger mode */
	flags &= IRQ_TYPE_SENSE_MASK;
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_parent);
#endif

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
{
	WARN(1, "Secondary action handler called for irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler finished. unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT) ||
	    action->handler == irq_forced_secondary_handler)
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be we need to protect us against
	 * the following scenario:
	 *
	 * The thread is faster done than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and masks the line, leaves due
	 * to IRQS_INPROGRESS and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (cpumask_available(desc->irq_common_data.affinity))
		cpumask_copy(mask, desc->irq_common_data.affinity);
	else
		valid = false;
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);


	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}

static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
{
	struct irqaction *secondary = action->secondary;

	if (WARN_ON_ONCE(!secondary))
		return;

	raw_spin_lock_irq(&desc->lock);
	__irq_wake_thread(desc, secondary);
	raw_spin_unlock_irq(&desc->lock);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
				  struct irqaction *action);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					 &action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, false);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_HANDLED)
			atomic_inc(&desc->threads_handled);
		if (action_ret == IRQ_WAKE_THREAD)
			irq_wake_secondary(desc, action);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set. We cannot verify that as we
	 * cannot touch the oneshot mask at this point anymore as
	 * __setup_irq() might have given out currents thread_mask
	 * again.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}

/**
 *	irq_wake_thread - wake the irq thread for the action identified by dev_id
 *	@irq:		Interrupt line
 *	@dev_id:	Device identity for which the thread should be woken
 *
 */
void irq_wake_thread(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if (action->dev_id == dev_id) {
			if (action->thread)
				__irq_wake_thread(desc, action);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);

static int irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return 0;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return 0;

	new->flags |= IRQF_ONESHOT;

	/*
	 * Handle the case where we have a real primary handler and a
	 * thread handler. We force thread them as well by creating a
	 * secondary action.
	 */
	if (new->handler != irq_default_primary_handler && new->thread_fn) {
		/* Allocate the secondary action */
		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
		if (!new->secondary)
			return -ENOMEM;
		new->secondary->handler = irq_forced_secondary_handler;
		new->secondary->thread_fn = new->thread_fn;
		new->secondary->dev_id = new->dev_id;
		new->secondary->irq = new->irq;
		new->secondary->name = new->name;
	}
	/* Deal with the primary handler */
	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
	new->thread_fn = new->handler;
	new->handler = irq_default_primary_handler;
	return 0;
}

static int irq_request_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
}

static void irq_release_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	if (c->irq_release_resources)
		c->irq_release_resources(d);
}

static int
setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
{
	struct task_struct *t;
	struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};

	if (!secondary) {
		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
	} else {
		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
				   new->name);
		param.sched_priority -= 1;
	}

	if (IS_ERR(t))
		return PTR_ERR(t);

	sched_setscheduler_nocheck(t, SCHED_FIFO, &param);

	/*
	 * We keep the reference to the task struct even if
	 * the thread dies to avoid that the interrupt code
	 * references an already freed task_struct.
	 */
	get_task_struct(t);
	new->thread = t;
	/*
	 * Tell the thread to set its affinity. This is
	 * important for shared interrupt handlers as we do
	 * not invoke setup_affinity() for the secondary
	 * handlers as everything is already set up. Even for
	 * interrupts marked with IRQF_NO_BALANCE this is
	 * correct as we want the thread to move to the cpu(s)
	 * on which the requesting code placed the interrupt.
	 */
	set_bit(IRQTF_AFFINITY, &new->thread_flags);
	return 0;
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 *
 * Locking rules:
 *
 * desc->request_mutex	Provides serialization against a concurrent free_irq()
 *   chip_bus_lock	Provides serialization for slow bus operations
 *     desc->lock	Provides serialization against hard interrupts
 *
 * chip_bus_lock and desc->lock are sufficient for all other management and
 * interrupt related functions. desc->request_mutex solely serializes
 * request/free_irq().
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;

	new->irq = irq;

	/*
	 * If the trigger type is not specified by the caller,
	 * then use the default for this interrupt.
	 */
	if (!(new->flags & IRQF_TRIGGER_MASK))
		new->flags |= irqd_get_trigger_type(&desc->irq_data);

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc)) {
			ret = irq_setup_forced_threading(new);
			if (ret)
				goto out_mput;
		}
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		ret = setup_irq_thread(new, irq, false);
		if (ret)
			goto out_mput;
		if (new->secondary) {
			ret = setup_irq_thread(new->secondary, irq, true);
			if (ret)
				goto out_thread;
		}
	}

	/*
	 * Drivers are often written to work w/o knowledge about the
	 * underlying irq chip implementation, so a request for a
	 * threaded irq without a primary hard irq context handler
	 * requires the ONESHOT flag to be set. Some irq chips like
	 * MSI based interrupts are per se one shot safe. Check the
	 * chip flags, so we can avoid the unmask dance at the end of
	 * the threaded handler for those.
	 */
	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
		new->flags &= ~IRQF_ONESHOT;

	/*
	 * Protects against a concurrent __free_irq() call which might wait
	 * for synchronize_irq() to complete without holding the optional
	 * chip bus lock and desc->lock.
	 */
	mutex_lock(&desc->request_mutex);

	/*
	 * Acquire bus lock as the irq_request_resources() callback below
	 * might rely on the serialization or the magic power management
	 * functions which are abusing the irq_bus_lock() callback,
	 */
	chip_bus_lock(desc);

	/* First installed action requests resources. */
	if (!desc->action) {
		ret = irq_request_resources(desc);
		if (ret) {
			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
			       new->name, irq, desc->irq_data.chip->name);
			goto out_bus_unlock;
		}
	}

	/*
	 * The following block of code has to be executed atomically
	 * protected against a concurrent interrupt and any of the other
	 * management calls which are not serialized via
	 * desc->request_mutex or the optional bus lock.
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 */
		unsigned int oldtype = irqd_get_trigger_type(&desc->irq_data);

		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
			goto mismatch;

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 resp 64 irqs sharing one line,
		 * but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_unlock;
		}
		/*
		 * The thread_mask for the action is or'ed to
		 * desc->thread_active to indicate that the
		 * IRQF_ONESHOT thread handler has been woken, but not
		 * yet finished. The bit is cleared when a thread
		 * completes. When all threads of a shared interrupt
		 * line have completed desc->threads_active becomes
		 * zero and the interrupt line is unmasked. See
		 * handle.c:irq_wake_thread() for further information.
		 *
		 * If no thread is woken by primary (hard irq context)
		 * interrupt handlers, then desc->threads_active is
		 * also checked for zero to unmask the irq line in the
		 * affected hard irq flow handlers
		 * (handle_[fasteoi|level]_irq).
		 *
		 * The new action gets the first zero bit of
		 * thread_mask assigned. See the loop above which or's
		 * all existing action->thread_mask bits.
		 */
		new->thread_mask = 1 << ffz(thread_mask);

	} else if (new->handler == irq_default_primary_handler &&
		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq lines is reenabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
		       irq);
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!shared) {
		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc,
						new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_unlock;
		}

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		if (irq_settings_can_autoenable(desc)) {
			irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		} else {
			/*
			 * Shared interrupts do not go well with disabling
			 * auto enable. The sharing interrupt might request
			 * it while it's still disabled and then wait for
			 * interrupts forever.
			 */
			WARN_ON_ONCE(new->flags & IRQF_SHARED);
			/* Undo nested disables: */
			desc->depth = 1;
		}

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warn("irq %d uses trigger mode %u; requested %u\n",
				irq, omsk, nmsk);
	}

	*old_ptr = new;

	irq_pm_install_action(desc, new);

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
	mutex_unlock(&desc->request_mutex);

	irq_setup_timings(desc, new);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);
	if (new->secondary)
		wake_up_process(new->secondary->thread);

	register_irq_proc(irq, desc);
	irq_add_debugfs_entry(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	return 0;

mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
		       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
	}
	ret = -EBUSY;

out_unlock:
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (!desc->action)
		irq_release_resources(desc);
out_bus_unlock:
	chip_bus_sync_unlock(desc);
	mutex_unlock(&desc->request_mutex);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
	if (new->secondary && new->secondary->thread) {
		struct task_struct *t = new->secondary->thread;

		new->secondary->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}

/**
 *	setup_irq - setup an interrupt line
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		return retval;

	retval = __setup_irq(irq, desc, act);

	if (retval)
		irq_chip_pm_put(&desc->irq_data);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	mutex_lock(&desc->request_mutex);
	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);
			chip_bus_sync_unlock(desc);
			mutex_unlock(&desc->request_mutex);
			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	irq_pm_remove_action(desc, action);

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		irq_settings_clr_disable_unlazy(desc);
		irq_shutdown(desc);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	/*
	 * Drop bus_lock here so the changes which were done in the chip
	 * callbacks above are synced out to the irq chips which hang
	 * behind a slow bus (I2C, SPI) before calling synchronize_irq().
	 *
	 * Aside of that the bus_lock can also be taken from the threaded
	 * handler in irq_finalize_oneshot() which results in a deadlock
	 * because kthread_stop() would wait forever for the thread to
	 * complete, which is blocked on the bus lock.
	 *
	 * The still held desc->request_mutex() protects against a
	 * concurrent request_irq() of this irq so the release of resources
	 * and timing data is properly serialized.
	 */
	chip_bus_sync_unlock(desc);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
		if (action->secondary && action->secondary->thread) {
			kthread_stop(action->secondary->thread);
			put_task_struct(action->secondary->thread);
		}
	}

	/* Last action releases resources */
	if (!desc->action) {
		/*
		 * Reacquire bus lock as irq_release_resources() might
		 * require it to deallocate resources over the slow bus.
		 */
		chip_bus_lock(desc);
		irq_release_resources(desc);
		chip_bus_sync_unlock(desc);
		irq_remove_timings(desc);
	}

	mutex_unlock(&desc->request_mutex);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	kfree(action->secondary);
	return action;
}

/**
 *	remove_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		__free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 *	free_irq - free an interrupt allocated with request_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 *
 *	Returns the devname argument passed to request_irq.
 */
const void *free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	const char *devname;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return NULL;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	action = __free_irq(irq, dev_id);

	if (!action)
		return NULL;

	devname = action->name;
	kfree(action);
	return devname;
}
EXPORT_SYMBOL(free_irq);
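
/*
 * Teardown sketch (illustrative only): the usual driver-side order is
 * to clear the affinity hint and notifier first, then free the
 * interrupt ("dev" and "dev->irq" are hypothetical):
 *
 *	irq_set_affinity_hint(dev->irq, NULL);
 *	irq_set_affinity_notifier(dev->irq, NULL);
 *	free_irq(dev->irq, dev);
 *
 * Otherwise the WARN_ONs in free_irq()/__free_irq() above trigger.
 */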

/**
 *	request_threaded_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Primary handler for threaded interrupts
 *		  If NULL and thread_fn != NULL the default
 *		  primary handler is installed
 *	@thread_fn: Function called from the irq handler thread
 *		    If NULL, no irq thread is created
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	If you want to set up a threaded irq handler for your device
 *	then you need to supply @handler and @thread_fn. @handler is
 *	still called in hard interrupt context and has to check
 *	whether the interrupt originates from the device. If yes it
 *	needs to disable the interrupt on the device and return
 *	IRQ_WAKE_THREAD which will wake up the handler thread and run
 *	@thread_fn. This split handler design is necessary to support
 *	shared interrupts.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 *
	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
	 * it cannot be set along with IRQF_NO_SUSPEND.
	 */
	if (((irqflags & IRQF_SHARED) && !dev_id) ||
	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0) {
		kfree(action);
		return retval;
	}

	retval = __setup_irq(irq, desc, action);

	if (retval) {
		irq_chip_pm_put(&desc->irq_data);
		kfree(action->secondary);
		kfree(action);
	}

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
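
/*
 * Usage sketch (illustrative only): the canonical split handler setup.
 * All driver-side names (struct foo_dev, foo_irq_pending(),
 * foo_process_irq(), foo->irq) are hypothetical.
 *
 *	static irqreturn_t foo_hardirq(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))
 *			return IRQ_NONE;	(not our interrupt)
 *		return IRQ_WAKE_THREAD;		(run foo_threadfn)
 *	}
 *
 *	static irqreturn_t foo_threadfn(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		foo_process_irq(foo);		(may sleep here)
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(foo->irq, foo_hardirq, foo_threadfn,
 *				   IRQF_SHARED, "foo", foo);
 */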

/**
 *	request_any_context_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Threaded handler for threaded interrupts.
 *	@flags: Interrupt type flags
 *	@name: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. It selects either a
 *	hardirq or threaded handling method depending on the
 *	context.
 *
 *	On failure, it returns a negative value. On success,
 *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc;
	int ret;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
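
/*
 * Usage sketch (illustrative only): a caller which may sit behind a
 * nested threaded parent (e.g. a gpio expander on a slow bus) checks
 * the return value to learn which context was selected. foo_handler
 * and foo are hypothetical:
 *
 *	ret = request_any_context_irq(irq, foo_handler, 0, "foo", foo);
 *	if (ret < 0)
 *		return ret;
 *	(ret is now IRQC_IS_HARDIRQ or IRQC_IS_NESTED)
 */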

void enable_percpu_irq(unsigned int irq, unsigned int type)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	/*
	 * If the trigger type is not specified by the caller, then
	 * use the default for this interrupt.
	 */
	type &= IRQ_TYPE_SENSE_MASK;
	if (type == IRQ_TYPE_NONE)
		type = irqd_get_trigger_type(&desc->irq_data);

	if (type != IRQ_TYPE_NONE) {
		int ret;

		ret = __irq_set_trigger(desc, type);

		if (ret) {
			WARN(1, "failed to set type for IRQ%d\n", irq);
			goto out;
		}
	}

	irq_percpu_enable(desc, cpu);
out:
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(enable_percpu_irq);

/**
 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
 * @irq:	linux irq number to check for
 *
 * Must be called from a non migratable task. Returns the enable
 * state of a per cpu interrupt on the current cpu.
 */
bool irq_percpu_is_enabled(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	struct irq_desc *desc;
	unsigned long flags;
	bool is_enabled;

	desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return false;

	is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
	irq_put_desc_unlock(desc, flags);

	return is_enabled;
}
EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);

void disable_percpu_irq(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	irq_percpu_disable(desc, cpu);
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(disable_percpu_irq);

/*
 * Internal function to unregister a percpu irqaction.
 */
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	action = desc->action;
	if (!action || action->percpu_dev_id != dev_id) {
		WARN(1, "Trying to free already-free IRQ %d\n", irq);
		goto bad;
	}

	if (!cpumask_empty(desc->percpu_enabled)) {
		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
		     irq, cpumask_first(desc->percpu_enabled));
		goto bad;
	}

	/* Found it - now remove it from the list of entries: */
	desc->action = NULL;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	return action;

bad:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return NULL;
}

/**
 *	remove_percpu_irq - free a per-cpu interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 *	Used to remove interrupts statically setup by the early boot process.
 */
void remove_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && irq_settings_is_per_cpu_devid(desc))
		__free_percpu_irq(irq, act->percpu_dev_id);
}

/**
 *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove a percpu interrupt handler. The handler is removed, but
 *	the interrupt line is not disabled. This must be done on each
 *	CPU before calling this function. The function does not return
 *	until any executing interrupts for this IRQ have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	chip_bus_lock(desc);
	kfree(__free_percpu_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL_GPL(free_percpu_irq);

/**
 *	setup_percpu_irq - setup a per-cpu interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 *	Used to statically setup per-cpu interrupts in the early boot process.
 */
int setup_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int retval;

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		return retval;

	retval = __setup_irq(irq, desc, act);

	if (retval)
		irq_chip_pm_put(&desc->irq_data);

	return retval;
}

/**
 *	__request_percpu_irq - allocate a percpu interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *	@flags: Interrupt type flags (IRQF_TIMER only)
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A percpu cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt on the local CPU. If the interrupt is supposed to be
 *	enabled on other CPUs, it has to be done on each CPU using
 *	enable_percpu_irq().
 *
 *	Dev_id must be globally unique. It is a per-cpu variable, and
 *	the handler gets called with the interrupted CPU's instance of
 *	that variable.
 */
int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
			 unsigned long flags, const char *devname,
			 void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (!dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	if (flags && flags != IRQF_TIMER)
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
	action->name = devname;
	action->percpu_dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0) {
		kfree(action);
		return retval;
	}

	retval = __setup_irq(irq, desc, action);

	if (retval) {
		irq_chip_pm_put(&desc->irq_data);
		kfree(action);
	}

	return retval;
}
EXPORT_SYMBOL_GPL(__request_percpu_irq);
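
/*
 * Usage sketch (illustrative only): per-cpu interrupts (arch timers
 * and the like) pass a percpu cookie and are then enabled on each CPU
 * separately. The names (foo_pcpu, foo_percpu_handler) are
 * hypothetical; request_percpu_irq() is the wrapper around
 * __request_percpu_irq() with flags = 0.
 *
 *	static DEFINE_PER_CPU(struct foo_pcpu, foo_pcpu);
 *
 *	ret = request_percpu_irq(irq, foo_percpu_handler, "foo",
 *				 &foo_pcpu);
 *
 * and later, on each CPU that should receive it:
 *
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 */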

/**
 *	irq_get_irqchip_state - returns the irqchip state of an interrupt.
 *	@irq: Interrupt line that is forwarded to a VM
 *	@which: One of IRQCHIP_STATE_* the caller wants to know about
 *	@state: a pointer to a boolean where the state is to be stored
 *
 *	This call snapshots the internal irqchip state of an
 *	interrupt, returning into @state the bit corresponding to
 *	stage @which
 *
 *	This function should be called with preemption disabled if the
 *	interrupt controller has per-cpu registers.
 */
int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool *state)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	do {
		chip = irq_data_get_irq_chip(data);
		if (chip->irq_get_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_get_irqchip_state(data, which, state);

	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_get_irqchip_state);

/**
 *	irq_set_irqchip_state - set the state of a forwarded interrupt.
 *	@irq: Interrupt line that is forwarded to a VM
 *	@which: State to be restored (one of IRQCHIP_STATE_*)
 *	@val: Value corresponding to @which
 *
 *	This call sets the internal irqchip state of an interrupt,
 *	depending on the value of @which.
 *
 *	This function should be called with preemption disabled if the
 *	interrupt controller has per-cpu registers.
 */
int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool val)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	do {
		chip = irq_data_get_irq_chip(data);
		if (chip->irq_set_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_set_irqchip_state(data, which, val);

	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
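
/*
 * Usage sketch (illustrative only): an interrupt forwarding path
 * saving and restoring the pending state of a line:
 *
 *	bool pending;
 *
 *	irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
 *	(hand the line to the guest, take it back later)
 *	irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, pending);
 */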