/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/core-api/genericirq.rst
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>

#include <trace/events/irq.h>

#include "internals.h"

static irqreturn_t bad_chained_irq(int irq, void *dev_id)
{
	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
	return IRQ_NONE;
}

/*
 * Chained handlers should never call an action on their IRQ. This default
 * action will emit a warning if that ever happens.
 */
struct irqaction chained_action = {
	.handler = bad_chained_irq,
};

/**
 *	irq_set_chip - set the irq chip for an irq
 *	@irq:	irq number
 *	@chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs.
	 */
	irq_mark_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);
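
/*
 * Usage sketch (illustrative, not part of this file): an irqchip driver's
 * domain ->map() callback typically pairs irq_set_chip() with a flow
 * handler. "foo_chip" below is a hypothetical driver-provided irq_chip:
 *
 *	static int foo_irq_map(struct irq_domain *d, unsigned int virq,
 *			       irq_hw_number_t hw)
 *	{
 *		irq_set_chip(virq, &foo_chip);
 *		irq_set_handler(virq, handle_level_irq);
 *		return 0;
 *	}
 *
 * Most drivers use the irq_set_chip_and_handler() helper instead, which
 * combines both calls.
 */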

/**
 *	irq_set_irq_type - set the irq trigger type for an irq
 *	@irq:	irq number
 *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags,
						     IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	ret = __irq_set_trigger(desc, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
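
/*
 * Usage sketch (illustrative): board or driver code that knows the line's
 * electrical properties can set the trigger before requesting the irq:
 *
 *	if (irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING))
 *		pr_err("could not configure irq %u as rising-edge\n", irq);
 *
 * In practice most consumers pass the trigger via request_irq() flags or
 * firmware (DT/ACPI) data instead of calling this directly.
 */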

/**
 *	irq_set_handler_data - set irq handler data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to interrupt specific data
 *
 *	Set the hardware irq controller data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 *	irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 *	@irq_base:	Interrupt number base
 *	@irq_offset:	Interrupt number offset
 *	@entry:		Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags,
						  IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 *	irq_set_msi_desc - set MSI descriptor data for an irq
 *	@irq:	Interrupt number
 *	@entry:	Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}

/**
 *	irq_set_chip_data - set irq chip data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to chip specific data
 *
 *	Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
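
/*
 * Usage sketch (illustrative): chip_data typically carries a pointer to the
 * driver's per-controller state so the irq_chip callbacks can reach it.
 * "foo_priv" and FOO_MASK_SET are hypothetical driver names:
 *
 *	irq_set_chip_data(virq, priv);
 *
 *	static void foo_mask(struct irq_data *d)
 *	{
 *		struct foo_priv *priv = irq_data_get_irq_chip_data(d);
 *
 *		writel(BIT(d->hwirq), priv->base + FOO_MASK_SET);
 *	}
 */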

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_clr_started(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
}

static void irq_state_set_started(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
}

enum {
	IRQ_STARTUP_NORMAL,
	IRQ_STARTUP_MANAGED,
	IRQ_STARTUP_ABORT,
};

#ifdef CONFIG_SMP
static int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return IRQ_STARTUP_NORMAL;

	irqd_clr_managed_shutdown(d);

	if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * Catch code which fiddles with enable_irq() on a managed
		 * and potentially shutdown IRQ. Chained interrupt
		 * installment or irq auto probing should not happen on
		 * managed irqs either.
		 */
		if (WARN_ON_ONCE(force))
			return IRQ_STARTUP_ABORT;
		/*
		 * The interrupt was requested, but there is no online CPU
		 * in its affinity mask. Put it into managed shutdown
		 * state and let the cpu hotplug mechanism start it up once
		 * a CPU in the mask becomes available.
		 */
		return IRQ_STARTUP_ABORT;
	}
	/*
	 * Managed interrupts have reserved resources, so this should not
	 * happen.
	 */
	if (WARN_ON(irq_domain_activate_irq(d, false)))
		return IRQ_STARTUP_ABORT;
	return IRQ_STARTUP_MANAGED;
}
#else
static __always_inline int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
	return IRQ_STARTUP_NORMAL;
}
#endif

static int __irq_startup(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	int ret = 0;

	/* Warn if this interrupt is not activated but try nevertheless */
	WARN_ON_ONCE(!irqd_is_activated(d));

	if (d->chip->irq_startup) {
		ret = d->chip->irq_startup(d);
		irq_state_clr_disabled(desc);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	irq_state_set_started(desc);
	return ret;
}

int irq_startup(struct irq_desc *desc, bool resend, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct cpumask *aff = irq_data_get_affinity_mask(d);
	int ret = 0;

	desc->depth = 0;

	if (irqd_is_started(d)) {
		irq_enable(desc);
	} else {
		switch (__irq_startup_managed(desc, aff, force)) {
		case IRQ_STARTUP_NORMAL:
			ret = __irq_startup(desc);
			irq_setup_affinity(desc);
			break;
		case IRQ_STARTUP_MANAGED:
			irq_do_set_affinity(d, aff, false);
			ret = __irq_startup(desc);
			break;
		case IRQ_STARTUP_ABORT:
			irqd_set_managed_shutdown(d);
			return 0;
		}
	}
	if (resend)
		check_irq_resend(desc);

	return ret;
}

int irq_activate(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return irq_domain_activate_irq(d, false);
	return 0;
}

int irq_activate_and_startup(struct irq_desc *desc, bool resend)
{
	if (WARN_ON(irq_activate(desc)))
		return 0;
	return irq_startup(desc, resend, IRQ_START_FORCE);
}

static void __irq_disable(struct irq_desc *desc, bool mask);

void irq_shutdown(struct irq_desc *desc)
{
	if (irqd_is_started(&desc->irq_data)) {
		desc->depth = 1;
		if (desc->irq_data.chip->irq_shutdown) {
			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
			irq_state_set_disabled(desc);
			irq_state_set_masked(desc);
		} else {
			__irq_disable(desc, true);
		}
		irq_state_clr_started(desc);
	}
	/*
	 * This must be called even if the interrupt was never started up,
	 * because the activation can happen before the interrupt is
	 * available for request/startup. It has its own state tracking so
	 * it's safe to call it unconditionally.
	 */
	irq_domain_deactivate_irq(&desc->irq_data);
}

void irq_enable(struct irq_desc *desc)
{
	if (!irqd_irq_disabled(&desc->irq_data)) {
		unmask_irq(desc);
	} else {
		irq_state_clr_disabled(desc);
		if (desc->irq_data.chip->irq_enable) {
			desc->irq_data.chip->irq_enable(&desc->irq_data);
			irq_state_clr_masked(desc);
		} else {
			unmask_irq(desc);
		}
	}
}

static void __irq_disable(struct irq_desc *desc, bool mask)
{
	if (irqd_irq_disabled(&desc->irq_data)) {
		if (mask)
			mask_irq(desc);
	} else {
		irq_state_set_disabled(desc);
		if (desc->irq_data.chip->irq_disable) {
			desc->irq_data.chip->irq_disable(&desc->irq_data);
			irq_state_set_masked(desc);
		} else if (mask) {
			mask_irq(desc);
		}
	}
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 *
 * If the interrupt chip does not implement the irq_disable callback,
 * a driver can disable the lazy approach for a particular irq line by
 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 * be used for devices which cannot disable the interrupt at the
 * device level under certain circumstances and have to use
 * disable_irq[_nosync] instead.
 */
void irq_disable(struct irq_desc *desc)
{
	__irq_disable(desc, irq_settings_disable_unlazy(desc));
}

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack) {
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
		irq_state_set_masked(desc);
	} else {
		mask_irq(desc);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
}

void mask_irq(struct irq_desc *desc)
{
	if (irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (!irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	unmask_irq(desc);
}

/*
 *	handle_nested_irq - Handle a nested irq from a irq thread
 *	@irq:	the interrupt number
 *
 *	Handle interrupts which are nested into a threaded interrupt
 *	handler. The handler function is called inside the calling
 *	thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = IRQ_NONE;
	for_each_action_of_desc(desc, action)
		action_ret |= action->thread_fn(action->irq, action->dev_id);

	if (!noirqdebug)
		note_interrupt(desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
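
/*
 * Usage sketch (illustrative): an I2C GPIO expander driver whose demux runs
 * in a threaded handler (because the bus sleeps) forwards child interrupts
 * with handle_nested_irq(). "foo_" names are hypothetical:
 *
 *	static irqreturn_t foo_demux_thread(int irq, void *data)
 *	{
 *		struct foo_priv *priv = data;
 *		unsigned long pending = foo_read_status(priv);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, priv->ngpio)
 *			handle_nested_irq(irq_find_mapping(priv->domain, bit));
 *		return IRQ_HANDLED;
 *	}
 *
 * The child interrupts must have been set up as nested threaded irqs
 * (irq_set_nested_thread()) for this to be valid.
 */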

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

static bool irq_may_run(struct irq_desc *desc)
{
	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(&desc->irq_data, mask))
		return true;

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (irq_pm_check_wakeup(desc))
		return false;

	/*
	 * Handle a potential concurrent poll on a different core.
	 */
	return irq_check_poll(desc);
}

/**
 *	handle_simple_irq - Simple and software-decoded IRQs.
 *	@desc:	the interrupt description structure for this irq
 *
 *	Simple interrupts are either sent from a demultiplexing interrupt
 *	handler or come from hardware, where no interrupt hardware control
 *	is necessary.
 *
 *	Note: The caller is expected to handle the ack, clear, mask and
 *	unmask issues if necessary.
 */
void handle_simple_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);

/**
 *	handle_untracked_irq - Simple and software-decoded IRQs.
 *	@desc:	the interrupt description structure for this irq
 *
 *	Untracked interrupts are sent from a demultiplexing interrupt
 *	handler when the demultiplexer does not know which device in its
 *	multiplexed irq domain generated the interrupt. IRQs handled
 *	through here are not subjected to stats tracking, randomness, or
 *	spurious interrupt detection.
 *
 *	Note: Like handle_simple_irq, the caller is expected to handle
 *	the ack, clear, mask and unmask issues if necessary.
 */
void handle_untracked_irq(struct irq_desc *desc)
{
	unsigned int flags = 0;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	desc->istate &= ~IRQS_PENDING;
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock(&desc->lock);

	__handle_irq_event_percpu(desc, &flags);

	raw_spin_lock(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_untracked_irq);

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   disabled irq source or because the primary handler
	 *   completed the work without waking the thread).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 *	handle_level_irq - Level type irq handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require to mask the interrupt and unmask
 *	it after the associated handler has acknowledged the device, so the
 *	interrupt line is back to inactive.
 */
void handle_level_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
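
/*
 * Usage sketch (illustrative): a memory-mapped controller with level
 * semantics would typically be wired up as:
 *
 *	irq_set_chip_and_handler(virq, &foo_chip, handle_level_irq);
 *
 * where the hypothetical foo_chip supplies at least irq_mask and
 * irq_unmask (and ideally irq_mask_ack), so that mask_ack_irq() and
 * cond_unmask_irq() above can drive the line through the
 * mask/handle/unmask sequence.
 */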

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   disabled irq source or because the primary handler
	 *   completed the work without waking the thread).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}

/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void handle_fasteoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);

/**
 *	handle_edge_irq - edge type IRQ handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Interrupt occurs on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be reenabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires to reenable the interrupt inside
 *	of the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void handle_edge_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/*
	 * If it's disabled or no action is available, then mask it
	 * and get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Reenable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);
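
/*
 * Usage sketch (illustrative): edge handlers require a working ->irq_ack()
 * in the chip, since the flow above acks the latch before running the
 * actions. A hypothetical wiring:
 *
 *	irq_set_chip_and_handler(virq, &foo_edge_chip, handle_edge_irq);
 *
 * with foo_edge_chip providing irq_ack, irq_mask and irq_unmask so the
 * replay loop can re-enable a line that was masked while a new edge
 * arrived.
 */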

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Similar to the above handle_edge_irq, but using eoi and w/o the
 *	mask/unmask logic.
 */
void handle_edge_eoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/*
	 * If it's disabled or no action is available, mark it
	 * pending and get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 *	handle_percpu_irq - Per CPU local irq handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements
 */
void handle_percpu_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	/*
	 * PER CPU interrupts are not serialized. Do not touch
	 * desc->tot_count.
	 */
	__kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	/*
	 * PER CPU interrupts are not serialized. Do not touch
	 * desc->tot_count.
	 */
	__kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	if (likely(action)) {
		trace_irq_handler_entry(irq, action);
		res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
		trace_irq_handler_exit(irq, action, res);
	} else {
		unsigned int cpu = smp_processor_id();
		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);

		if (enabled)
			irq_percpu_disable(desc, cpu);

		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
			    enabled ? " and unmasked" : "", irq, cpu);
	}

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
		     int is_chained, const char *name)
{
	if (!handle) {
		handle = handle_bad_irq;
	} else {
		struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		/*
		 * With hierarchical domains we might run into a
		 * situation where the outermost chip is not yet set
		 * up, but the inner chips are there.  Instead of
		 * bailing we install the handler, but obviously we
		 * cannot enable/startup the interrupt at this point.
		 */
		while (irq_data) {
			if (irq_data->chip != &no_irq_chip)
				break;
			/*
			 * Bail out if the outer chip is not set up
			 * and the interrupt is supposed to be started
			 * right away.
			 */
			if (WARN_ON(is_chained))
				return;
			/* Try the parent */
			irq_data = irq_data->parent_data;
		}
#endif
		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
			return;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		if (is_chained)
			desc->action = NULL;
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		unsigned int type = irqd_get_trigger_type(&desc->irq_data);

		/*
		 * We're about to start this interrupt immediately,
		 * hence the need to set the trigger configuration.
		 * But the .set_type callback may have overridden the
		 * flow handler, ignoring that we're dealing with a
		 * chained interrupt. Reset the flow handler immediately
		 * in that case.
		 */
		if (type != IRQ_TYPE_NONE) {
			__irq_set_trigger(desc, type);
			desc->handle_irq = handle;
		}

		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		desc->action = &chained_action;
		irq_activate_and_startup(desc, IRQ_RESEND);
	}
}

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	__irq_do_set_handler(desc, handle, is_chained, name);
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
				 void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	desc->irq_common_data.handler_data = data;
	__irq_do_set_handler(desc, handle, 1, NULL);

	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);
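
/*
 * Usage sketch (illustrative): a cascaded GPIO controller installs a chained
 * flow handler on its upstream parent line; the handler demuxes in hard irq
 * context and the parent line must not be requested like a normal irq.
 * "foo_" names and FOO_PENDING are hypothetical:
 *
 *	static void foo_gpio_demux(struct irq_desc *desc)
 *	{
 *		struct foo_priv *priv = irq_desc_get_handler_data(desc);
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		unsigned long pending;
 *		int bit;
 *
 *		chained_irq_enter(chip, desc);
 *		pending = readl(priv->base + FOO_PENDING);
 *		for_each_set_bit(bit, &pending, 32)
 *			generic_handle_irq(irq_find_mapping(priv->domain, bit));
 *		chained_irq_exit(chip, desc);
 *	}
 *
 *	irq_set_chained_handler_and_data(parent_irq, foo_gpio_demux, priv);
 */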

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags, trigger, tmp;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;

	/*
	 * Warn when a driver sets the no autoenable flag on an already
	 * active interrupt.
	 */
	WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));

	irq_settings_clr_and_set(desc, clr, set);

	trigger = irqd_get_trigger_type(&desc->irq_data);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	tmp = irq_settings_get_trigger_mask(desc);
	if (tmp != IRQ_TYPE_NONE)
		trigger = tmp;

	irqd_set(&desc->irq_data, trigger);

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
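
/*
 * Usage sketch (illustrative): most callers reach this through the
 * irq_set_status_flags() / irq_clear_status_flags() wrappers, e.g. to keep
 * an irq from being auto-enabled at request time and start it explicitly
 * later:
 *
 *	irq_set_status_flags(virq, IRQ_NOAUTOEN);
 *	ret = request_irq(virq, foo_handler, 0, "foo", priv);
 *	...
 *	enable_irq(virq);
 */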

/**
 *	irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_online()
 *	for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 *	irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_offline()
 *	for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY

#ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
/**
 *	handle_fasteoi_ack_irq - irq handler for edge hierarchy
 *	stacked on transparent controllers
 *
 *	@desc:	the interrupt description structure for this irq
 *
 *	Like handle_fasteoi_irq(), but for use with hierarchy where
 *	the irq_chip also needs to have its ->irq_ack() function
 *	called.
 */
void handle_fasteoi_ack_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);

/**
 *	handle_fasteoi_mask_irq - irq handler for level hierarchy
 *	stacked on transparent controllers
 *
 *	@desc:	the interrupt description structure for this irq
 *
 *	Like handle_fasteoi_irq(), but for use with hierarchy where
 *	the irq_chip also needs to have its ->irq_mask_ack() function
 *	called.
 */
void handle_fasteoi_mask_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);

#endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */

/**
 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
 * NULL)
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_enable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_enable)
		data->chip->irq_enable(data);
	else
		data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_enable_parent);
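
/*
 * Usage sketch (illustrative): a chip in a hierarchical domain commonly
 * forwards most operations to its parent via the irq_chip_*_parent()
 * helpers defined here. A hypothetical child chip:
 *
 *	static struct irq_chip foo_msi_chip = {
 *		.name			= "foo-msi",
 *		.irq_enable		= irq_chip_enable_parent,
 *		.irq_disable		= irq_chip_disable_parent,
 *		.irq_mask		= irq_chip_mask_parent,
 *		.irq_unmask		= irq_chip_unmask_parent,
 *		.irq_eoi		= irq_chip_eoi_parent,
 *		.irq_set_affinity	= irq_chip_set_affinity_parent,
 *	};
 */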

/**
 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
 * NULL)
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_disable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_disable)
		data->chip->irq_disable(data);
	else
		data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_disable_parent);

/**
 * irq_chip_ack_parent - Acknowledge the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_ack_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_ack(data);
}
EXPORT_SYMBOL_GPL(irq_chip_ack_parent);

/**
 * irq_chip_mask_parent - Mask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_mask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_parent);

/**
 * irq_chip_unmask_parent - Unmask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_unmask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);

/**
 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_eoi_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_eoi(data);
}
EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);

/**
 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @dest:	The affinity mask to set
 * @force:	Flag to enforce setting (disable online checks)
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_affinity_parent(struct irq_data *data,
				 const struct cpumask *dest, bool force)
{
	data = data->parent_data;
	if (data->chip->irq_set_affinity)
		return data->chip->irq_set_affinity(data, dest, force);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent);

/**
 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
{
	data = data->parent_data;

	if (data->chip->irq_set_type)
		return data->chip->irq_set_type(data, type);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);

/**
 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
 * @data:	Pointer to interrupt specific data
 *
 * Iterate through the domain hierarchy of the interrupt and check
 * whether a hardware retrigger function exists. If yes, invoke it.
 */
int irq_chip_retrigger_hierarchy(struct irq_data *data)
{
	for (data = data->parent_data; data; data = data->parent_data)
		if (data->chip && data->chip->irq_retrigger)
			return data->chip->irq_retrigger(data);

	return 0;
}

/**
 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @vcpu_info:	The vcpu affinity information
 */
int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
{
	data = data->parent_data;
	if (data->chip->irq_set_vcpu_affinity)
		return data->chip->irq_set_vcpu_affinity(data, vcpu_info);

	return -ENOSYS;
}

/**
 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @on:		Whether to set or reset the wake-up capability of this irq
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
{
	data = data->parent_data;
	if (data->chip->irq_set_wake)
		return data->chip->irq_set_wake(data, on);

	return -ENOSYS;
}
#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */

/**
 * irq_chip_compose_msi_msg - Compose msi message for a irq chip
 * @data:	Pointer to interrupt specific data
 * @msg:	Pointer to the MSI message
 *
 * For hierarchical domains we find the first chip in the hierarchy
 * which implements the irq_compose_msi_msg callback. For non
 * hierarchical we use the top level chip.
 */
int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct irq_data *pos = NULL;

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	for (; data; data = data->parent_data)
#endif
		if (data->chip && data->chip->irq_compose_msi_msg)
			pos = data;
	if (!pos)
		return -ENOSYS;

	pos->chip->irq_compose_msi_msg(pos, msg);

	return 0;
}

/**
 * irq_chip_pm_get - Enable power for an IRQ chip
 * @data:	Pointer to interrupt specific data
 *
 * Enable the power to the IRQ chip referenced by the interrupt data
 * structure.
 */
int irq_chip_pm_get(struct irq_data *data)
{
	int retval;

	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) {
		retval = pm_runtime_get_sync(data->chip->parent_device);
		if (retval < 0) {
			pm_runtime_put_noidle(data->chip->parent_device);
			return retval;
		}
	}

	return 0;
}
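
/*
 * Usage sketch (illustrative): the core calls irq_chip_pm_get() on the
 * request path and irq_chip_pm_put() on the free path, so an irqchip whose
 * registers sit behind a power domain only needs to point
 * chip->parent_device at its device and enable runtime PM, e.g. in a
 * hypothetical platform driver probe:
 *
 *	pm_runtime_enable(&pdev->dev);
 *	foo_chip.parent_device = &pdev->dev;
 */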

/**
 * irq_chip_pm_put - Disable power for an IRQ chip
 * @data:	Pointer to interrupt specific data
 *
 * Disable the power to the IRQ chip referenced by the interrupt data
 * structure. Note that power will only be disabled once this function
 * has been called for all IRQs that have called irq_chip_pm_get().
 */
int irq_chip_pm_put(struct irq_data *data)
{
	int retval = 0;

	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device)
		retval = pm_runtime_put(data->chip->parent_device);

	return (retval < 0) ? retval : 0;
}