/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/core-api/genericirq.rst
 */
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>

#include <trace/events/irq.h>

#include "internals.h"

static irqreturn_t bad_chained_irq(int irq, void *dev_id)
{
	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
	return IRQ_NONE;
}

/*
 * Chained handlers should never call action on their IRQ. This default
 * action will emit a warning if that ever happens.
 */
struct irqaction chained_action = {
	.handler = bad_chained_irq,
};

/**
 *	irq_set_chip - set the irq chip for an irq
 *	@irq:	irq number
 *	@chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs.
	 */
	irq_mark_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);

/**
 *	irq_set_irq_type - set the irq trigger type for an irq
 *	@irq:	irq number
 *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	ret = __irq_set_trigger(desc, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);

/**
 *	irq_set_handler_data - set irq handler data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to interrupt specific data
 *
 *	Set the hardware irq controller data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 *	irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 *	@irq_base:	Interrupt number base
 *	@irq_offset:	Interrupt number offset
 *	@entry:		Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 *	irq_set_msi_desc - set MSI descriptor data for an irq
 *	@irq:	Interrupt number
 *	@entry:	Pointer to MSI descriptor data
 *
 *	Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}

/**
 *	irq_set_chip_data - set irq chip data for an irq
 *	@irq:	Interrupt number
 *	@data:	Pointer to chip specific data
 *
 *	Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);

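/**
 *	irq_get_irq_data - get irq_data for an irq
 *	@irq:	irq number
 *
 *	Returns the irq_data pointer associated with irq.
 */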
struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

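/*
 * Small helpers to update the IRQD_IRQ_DISABLED, IRQD_IRQ_MASKED and
 * IRQD_IRQ_STARTED state bits of an interrupt descriptor.
 */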
static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_clr_started(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
}

static void irq_state_set_started(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
}

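/* Return values of __irq_startup_managed() */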
enum {
	IRQ_STARTUP_NORMAL,
	IRQ_STARTUP_MANAGED,
	IRQ_STARTUP_ABORT,
};

#ifdef CONFIG_SMP
static int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return IRQ_STARTUP_NORMAL;

	irqd_clr_managed_shutdown(d);

	if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * Catch code which fiddles with enable_irq() on a managed
		 * and potentially shutdown IRQ. Chained interrupt
		 * installment or irq auto probing should not happen on
		 * managed irqs either.
		 */
		if (WARN_ON_ONCE(force))
			return IRQ_STARTUP_NORMAL;

		/*
		 * The interrupt was requested, but there is no online CPU
		 * in its affinity mask. Put it into managed shutdown
		 * state and let the cpu hotplug mechanism start it up once
		 * a CPU in the mask becomes available.
		 */
		irqd_set_managed_shutdown(d);
		return IRQ_STARTUP_ABORT;
	}
	return IRQ_STARTUP_MANAGED;
}
#else
static __always_inline int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
	return IRQ_STARTUP_NORMAL;
}
#endif

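/*
 * Activate the interrupt in the hardware domain hierarchy, then let the
 * chip's irq_startup() callback (if any) or irq_enable() unmask it.
 */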
static int __irq_startup(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	int ret = 0;

	irq_domain_activate_irq(d);
	if (d->chip->irq_startup) {
		ret = d->chip->irq_startup(d);
		irq_state_clr_disabled(desc);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	irq_state_set_started(desc);
	return ret;
}

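/*
 * Start up an interrupt. For managed interrupts the affinity is applied
 * before the startup so the interrupt cannot end up on an offline CPU,
 * and startup is aborted when no CPU in a managed affinity mask is online.
 */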
int irq_startup(struct irq_desc *desc, bool resend, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct cpumask *aff = irq_data_get_affinity_mask(d);
	int ret = 0;

	desc->depth = 0;

	if (irqd_is_started(d)) {
		irq_enable(desc);
	} else {
		switch (__irq_startup_managed(desc, aff, force)) {
		case IRQ_STARTUP_NORMAL:
			ret = __irq_startup(desc);
			irq_setup_affinity(desc);
			break;
		case IRQ_STARTUP_MANAGED:
			irq_do_set_affinity(d, aff, false);
			ret = __irq_startup(desc);
			break;
		case IRQ_STARTUP_ABORT:
			return 0;
		}
	}
	if (resend)
		check_irq_resend(desc);

	return ret;
}

static void __irq_disable(struct irq_desc *desc, bool mask);

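/*
 * Shut down an interrupt: disable and mask it, via the chip's
 * irq_shutdown() callback when available, and deactivate the domain
 * hierarchy.
 */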
void irq_shutdown(struct irq_desc *desc)
{
	if (irqd_is_started(&desc->irq_data)) {
		desc->depth = 1;
		if (desc->irq_data.chip->irq_shutdown) {
			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
			irq_state_set_disabled(desc);
			irq_state_set_masked(desc);
		} else {
			__irq_disable(desc, true);
		}
		irq_state_clr_started(desc);
	}

	/*
	 * This must be called even if the interrupt was never started up,
	 * because the activation can happen before the interrupt is
	 * available for request/startup. It has its own state tracking so
	 * it's safe to call it unconditionally.
	 */
	irq_domain_deactivate_irq(&desc->irq_data);
}

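/*
 * Enable the interrupt: clear the disabled state and unmask it, using
 * the chip's irq_enable() callback when one is provided.
 */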
void irq_enable(struct irq_desc *desc)
{
	if (!irqd_irq_disabled(&desc->irq_data)) {
		unmask_irq(desc);
	} else {
		irq_state_clr_disabled(desc);
		if (desc->irq_data.chip->irq_enable) {
			desc->irq_data.chip->irq_enable(&desc->irq_data);
			irq_state_clr_masked(desc);
		} else {
			unmask_irq(desc);
		}
	}
}

static void __irq_disable(struct irq_desc *desc, bool mask)
{
	if (irqd_irq_disabled(&desc->irq_data)) {
		if (mask)
			mask_irq(desc);
	} else {
		irq_state_set_disabled(desc);
		if (desc->irq_data.chip->irq_disable) {
			desc->irq_data.chip->irq_disable(&desc->irq_data);
			irq_state_set_masked(desc);
		} else if (mask) {
			mask_irq(desc);
		}
	}
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 *
 * If the interrupt chip does not implement the irq_disable callback,
 * a driver can disable the lazy approach for a particular irq line by
 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 * be used for devices which cannot disable the interrupt at the
 * device level under certain circumstances and have to use
 * disable_irq[_nosync] instead.
 */
void irq_disable(struct irq_desc *desc)
{
	__irq_disable(desc, irq_settings_disable_unlazy(desc));
}

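/*
 * Enable a per-CPU interrupt for @cpu and record it in the descriptor's
 * percpu_enabled mask.
 */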
void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

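/*
 * Disable a per-CPU interrupt for @cpu and clear it from the descriptor's
 * percpu_enabled mask.
 */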
void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

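/*
 * Mask and acknowledge the interrupt in one go when the chip provides
 * irq_mask_ack(), otherwise fall back to separate mask and ack calls.
 */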
static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack) {
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
		irq_state_set_masked(desc);
	} else {
		mask_irq(desc);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
}

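/* Mask the interrupt line if it is not already masked. */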
void mask_irq(struct irq_desc *desc)
{
	if (irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

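/* Unmask the interrupt line if it is currently masked. */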
void unmask_irq(struct irq_desc *desc)
{
	if (!irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

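/*
 * Unmask after the threaded handler has run. Chips flagged with
 * IRQCHIP_EOI_THREADED get their irq_eoi() callback invoked first.
 */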
void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	unmask_irq(desc);
}

/**
 *	handle_nested_irq - Handle a nested irq from an irq thread
 *	@irq:	the interrupt number
 *
 *	Handle interrupts which are nested into a threaded interrupt
 *	handler. The handler function is called inside the calling
 *	thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = IRQ_NONE;
	for_each_action_of_desc(desc, action)
		action_ret |= action->thread_fn(action->irq, action->dev_id);

	if (!noirqdebug)
		note_interrupt(desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);

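/*
 * If a spurious-interrupt poll is in progress on this line, wait for it
 * to finish and tell the caller whether the interrupt may be handled.
 */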
static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

static bool irq_may_run(struct irq_desc *desc)
{
	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(&desc->irq_data, mask))
		return true;

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (irq_pm_check_wakeup(desc))
		return false;

	/* Handle a potential concurrent poll on a different core */
	return irq_check_poll(desc);
}

/**
 *	handle_simple_irq - Simple and software-decoded IRQs.
 *	@desc:	the interrupt description structure for this irq
 *
 *	Simple interrupts are either sent from a demultiplexing interrupt
 *	handler or come from hardware, where no interrupt hardware control
 *	is necessary.
 *
 *	Note: The caller is expected to handle the ack, clear, mask and
 *	unmask issues if necessary.
 */
void handle_simple_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);

/**
 *	handle_untracked_irq - Simple and software-decoded IRQs.
 *	@desc:	the interrupt description structure for this irq
 *
 *	Untracked interrupts are sent from a demultiplexing interrupt
 *	handler when the demultiplexer does not know which device in its
 *	multiplexed irq domain generated the interrupt. IRQs handled
 *	through here are not subjected to stats tracking, randomness, or
 *	spurious interrupt detection.
 *
 *	Note: Like handle_simple_irq, the caller is expected to handle
 *	the ack, clear, mask and unmask issues if necessary.
 */
void handle_untracked_irq(struct irq_desc *desc)
{
	unsigned int flags = 0;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	desc->istate &= ~IRQS_PENDING;
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock(&desc->lock);

	__handle_irq_event_percpu(desc, &flags);

	raw_spin_lock(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_untracked_irq);

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 *	handle_level_irq - Level type irq handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Level type interrupts are active as long as the hardware line has
 *	the active level. This may require masking the interrupt and
 *	unmasking it after the associated handler has acknowledged the
 *	device, so the interrupt line is back to inactive.
 */
void handle_level_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it is disabled or no action is available, keep it masked
	 * and get out of here.
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}

	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}

/**
 *	handle_fasteoi_irq - irq handler for transparent controllers
 *	@desc:	the interrupt description structure for this irq
 *
 *	Only a single callback will be issued to the chip: an ->eoi()
 *	call when the interrupt has been serviced. This enables support
 *	for modern forms of interrupt handlers, which handle the flow
 *	details in hardware, transparently.
 */
void handle_fasteoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it is disabled or no action is available, then mask it
	 * and get out of here.
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);

/**
 *	handle_edge_irq - edge type IRQ handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Interrupt occurs on the falling and/or rising edge of a hardware
 *	signal. The occurrence is latched into the irq controller hardware
 *	and must be acked in order to be reenabled. After the ack another
 *	interrupt can happen on the same source even before the first one
 *	is handled by the associated event handler. If this happens it
 *	might be necessary to disable (mask) the interrupt depending on the
 *	controller hardware. This requires reenabling the interrupt inside
 *	of the loop which handles the interrupts which have arrived while
 *	the handler was running. If all pending interrupts are handled, the
 *	loop is left.
 */
void handle_edge_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/*
	 * If it is disabled or no action is available, then mask it
	 * and get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Reenable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Similar to handle_edge_irq() above, but uses eoi and has no
 *	mask/unmask handling.
 */
void handle_edge_eoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/*
	 * If it is disabled or no action is available, then mark it
	 * pending and get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 *	handle_percpu_irq - Per CPU local irq handler
 *	@desc:	the interrupt description structure for this irq
 *
 *	Per CPU interrupts on SMP machines without locking requirements
 */
void handle_percpu_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	if (likely(action)) {
		trace_irq_handler_entry(irq, action);
		res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
		trace_irq_handler_exit(irq, action, res);
	} else {
		unsigned int cpu = smp_processor_id();
		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);

		if (enabled)
			irq_percpu_disable(desc, cpu);

		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
			    enabled ? " and unmasked" : "", irq, cpu);
	}

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

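/*
 * Install a flow handler on the descriptor. A NULL handler uninstalls
 * the current one; chained handlers additionally mark the interrupt as
 * unrequestable and start it up immediately.
 */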
static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
		     int is_chained, const char *name)
{
	if (!handle) {
		handle = handle_bad_irq;
	} else {
		struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		/*
		 * With hierarchical domains we might run into a
		 * situation where the outermost chip is not yet set
		 * up, but the inner chips are there. Instead of
		 * bailing we install the handler, but obviously we
		 * cannot enable/startup the interrupt at this point.
		 */
		while (irq_data) {
			if (irq_data->chip != &no_irq_chip)
				break;
			/*
			 * Bail out if the outer chip is not set up
			 * and the interrupt is supposed to be started
			 * right away.
			 */
			if (WARN_ON(is_chained))
				return;
			/* Try the parent */
			irq_data = irq_data->parent_data;
		}
#endif
		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
			return;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		if (is_chained)
			desc->action = NULL;
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		unsigned int type = irqd_get_trigger_type(&desc->irq_data);

		/*
		 * We're about to start this interrupt immediately,
		 * hence the need to set the trigger configuration.
		 * But the .set_type callback may have overridden the
		 * flow handler, ignoring that we're dealing with a
		 * chained interrupt. Reset it immediately because we
		 * do know better.
		 */
		if (type != IRQ_TYPE_NONE) {
			__irq_set_trigger(desc, type);
			desc->handle_irq = handle;
		}

		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		desc->action = &chained_action;
		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
	}
}

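/*
 * Set the flow handler for an interrupt. Most callers use the
 * irq_set_handler() / irq_set_chained_handler() wrappers from
 * <linux/irq.h> rather than calling this directly.
 */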
void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	__irq_do_set_handler(desc, handle, is_chained, name);
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

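/*
 * Install a chained flow handler and its handler data in one go, e.g.
 * from a GPIO or other demultiplexing driver. A minimal sketch with
 * hypothetical names, for illustration only:
 *
 *	irq_set_chained_handler_and_data(parent_irq,
 *					 my_gpio_demux_handler, my_chip);
 *
 * where my_gpio_demux_handler() reads the hardware status and invokes
 * generic_handle_irq() for each pending child interrupt.
 */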
void
irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
				 void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	desc->irq_common_data.handler_data = data;
	__irq_do_set_handler(desc, handle, 1, NULL);

	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);

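/*
 * Convenience helper: set the chip and the flow handler (with a name)
 * for an interrupt in one call.
 */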
void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);

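/*
 * Clear and set status flags for an interrupt and propagate the
 * resulting settings into the irq_data state bits.
 */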
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags, trigger, tmp;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;

	/*
	 * Warn when a driver sets the no autoenable flag on an already
	 * active interrupt.
	 */
	WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));

	irq_settings_clr_and_set(desc, clr, set);

	trigger = irqd_get_trigger_type(&desc->irq_data);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	tmp = irq_settings_get_trigger_mask(desc);
	if (tmp != IRQ_TYPE_NONE)
		trigger = tmp;

	irqd_set(&desc->irq_data, trigger);

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);

/**
 *	irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_online()
 *	for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 *	irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 *	Iterate through all irqs and invoke the chip.irq_cpu_offline()
 *	for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY

#ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
/**
 *	handle_fasteoi_ack_irq - irq handler for edge hierarchy
 *	stacked on transparent controllers
 *
 *	@desc:	the interrupt description structure for this irq
 *
 *	Like handle_fasteoi_irq(), but for use with hierarchy where
 *	the irq_chip also needs to have its ->irq_ack() function
 *	called.
 */
void handle_fasteoi_ack_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it is disabled or no action is available, then mask it
	 * and get out of here.
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);

/**
 *	handle_fasteoi_mask_irq - irq handler for level hierarchy
 *	stacked on transparent controllers
 *
 *	@desc:	the interrupt description structure for this irq
 *
 *	Like handle_fasteoi_irq(), but for use with hierarchy where
 *	the irq_chip also needs to have its ->irq_mask_ack() function
 *	called.
 */
void handle_fasteoi_mask_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it is disabled or no action is available, then mask it
	 * and get out of here.
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);

#endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */

/**
 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
 * NULL)
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_enable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_enable)
		data->chip->irq_enable(data);
	else
		data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_enable_parent);

/**
 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
 * NULL)
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_disable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_disable)
		data->chip->irq_disable(data);
	else
		data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_disable_parent);

/**
 * irq_chip_ack_parent - Acknowledge the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_ack_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_ack(data);
}
EXPORT_SYMBOL_GPL(irq_chip_ack_parent);

/**
 * irq_chip_mask_parent - Mask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_mask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_parent);

/**
 * irq_chip_unmask_parent - Unmask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_unmask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);

/**
 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_eoi_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_eoi(data);
}
EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);

/**
 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @dest:	The affinity mask to set
 * @force:	Flag to enforce setting (disable online checks)
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_affinity_parent(struct irq_data *data,
				 const struct cpumask *dest, bool force)
{
	data = data->parent_data;
	if (data->chip->irq_set_affinity)
		return data->chip->irq_set_affinity(data, dest, force);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent);

/**
 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
{
	data = data->parent_data;

	if (data->chip->irq_set_type)
		return data->chip->irq_set_type(data, type);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);

/**
 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
 * @data:	Pointer to interrupt specific data
 *
 * Iterate through the domain hierarchy of the interrupt and check
 * whether a hw retrigger function exists. If yes, invoke it.
 */
int irq_chip_retrigger_hierarchy(struct irq_data *data)
{
	for (data = data->parent_data; data; data = data->parent_data)
		if (data->chip && data->chip->irq_retrigger)
			return data->chip->irq_retrigger(data);

	return 0;
}

/**
 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @vcpu_info:	The vcpu affinity information
 */
int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
{
	data = data->parent_data;
	if (data->chip->irq_set_vcpu_affinity)
		return data->chip->irq_set_vcpu_affinity(data, vcpu_info);

	return -ENOSYS;
}

/**
 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @on:		Whether to set or reset the wake-up capability of this irq
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
{
	data = data->parent_data;
	if (data->chip->irq_set_wake)
		return data->chip->irq_set_wake(data, on);

	return -ENOSYS;
}
#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */

/**
 * irq_chip_compose_msi_msg - Compose an MSI message for an irq chip
 * @data:	Pointer to interrupt specific data
 * @msg:	Pointer to the MSI message
 *
 * For hierarchical domains we find the first chip in the hierarchy
 * which implements the irq_compose_msi_msg callback. For non
 * hierarchical domains we use the top level chip.
 */
int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct irq_data *pos = NULL;

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	for (; data; data = data->parent_data)
#endif
		if (data->chip && data->chip->irq_compose_msi_msg)
			pos = data;
	if (!pos)
		return -ENOSYS;

	pos->chip->irq_compose_msi_msg(pos, msg);

	return 0;
}

/**
 * irq_chip_pm_get - Enable power for an IRQ chip
 * @data:	Pointer to interrupt specific data
 *
 * Enable the power to the IRQ chip referenced by the interrupt data
 * structure.
 */
int irq_chip_pm_get(struct irq_data *data)
{
	int retval;

	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) {
		retval = pm_runtime_get_sync(data->chip->parent_device);
		if (retval < 0) {
			pm_runtime_put_noidle(data->chip->parent_device);
			return retval;
		}
	}

	return 0;
}

/**
 * irq_chip_pm_put - Disable power for an IRQ chip
 * @data:	Pointer to interrupt specific data
 *
 * Disable the power to the IRQ chip referenced by the interrupt data
 * structure. Note that power will only be disabled once this function
 * has been called for all IRQs that have called irq_chip_pm_get().
 */
int irq_chip_pm_put(struct irq_data *data)
{
	int retval = 0;

	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device)
		retval = pm_runtime_put(data->chip->parent_device);

	return (retval < 0) ? retval : 0;
}