/* SPDX-License-Identifier: GPL-2.0 */
/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/jump_label.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/sections.h>
/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour.  When
 * requesting an interrupt without specifying a IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010

/*
 * These flags are used by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as a timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend.  Does not guarantee
 *                   that this interrupt will wake the system from a suspended
 *                   state.  See Documentation/power/suspend-and-interrupts.rst
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                resume time.
 * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
 *                interrupt handler after suspending interrupts. For system
 *                wakeup devices users need to implement wakeup detection in
 *                their interrupt handlers.
 * IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it.
 *                Users will enable it explicitly by enable_irq() or enable_nmi()
 *                later.
 * IRQF_NO_DEBUG - Exclude from runaway detection for IPI and similar handlers,
 *                depends on IRQF_PERCPU.
 */
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000
#define IRQF_EARLY_RESUME	0x00020000
#define IRQF_COND_SUSPEND	0x00040000
#define IRQF_NO_AUTOEN		0x00080000
#define IRQF_NO_DEBUG		0x00100000

#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)

/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @flags:	flags (see IRQF_* above)
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @secondary:	pointer to secondary irqaction (force threading)
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @name:	name of the device
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t		handler;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	struct irqaction	*secondary;
	unsigned int		irq;
	unsigned int		flags;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
	struct proc_dir_entry	*dir;
} ____cacheline_internodealigned_in_smp;

extern irqreturn_t no_action(int cpl, void *dev_id);

/*
 * If a (PCI) device interrupt is not connected we set dev->irq to
 * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
 * can distinguish that case from other error returns.
 *
 * 0x80000000 is guaranteed to be outside the available range of interrupts
 * and easy to distinguish from other possible incorrect values.
 */
#define IRQ_NOTCONNECTED	(1U << 31)

extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

/**
 * request_irq - Add a handler for an interrupt line
 * @irq:	The interrupt line to allocate
 * @handler:	Function to be called when the IRQ occurs.
 *		Primary handler for threaded interrupts.
 *		If NULL, the default primary handler is installed.
 * @flags:	Handling flags
 * @name:	Name of the device generating this interrupt
 * @dev:	A cookie passed to the handler function
 *
 * This call allocates an interrupt and establishes a handler; see
 * the documentation for request_threaded_irq() for details.
 */
static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}
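
/*
 * Usage sketch (hypothetical driver, not part of this header): request a
 * threaded interrupt where the hard handler only checks whether the device
 * raised the interrupt and defers the heavy lifting to the handler thread.
 * All "foo" names below are illustrative, not real kernel symbols.
 *
 *	static irqreturn_t foo_hardirq(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		if (!foo_irq_is_mine(foo))	// hypothetical helper
 *			return IRQ_NONE;
 *		return IRQ_WAKE_THREAD;		// run foo_thread_fn()
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		foo_process_events(dev_id);	// may sleep here
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(irq, foo_hardirq, foo_thread_fn,
 *				   IRQF_ONESHOT, "foo", foo);
 *
 * IRQF_ONESHOT keeps the line masked until the thread has finished.
 */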

extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern int __must_check
__request_percpu_irq(unsigned int irq, irq_handler_t handler,
		     unsigned long flags, const char *devname,
		     void __percpu *percpu_dev_id);

extern int __must_check
request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev);

static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id)
{
	return __request_percpu_irq(irq, handler, 0,
				    devname, percpu_dev_id);
}

extern int __must_check
request_percpu_nmi(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *dev);

extern const void *free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);

extern const void *free_nmi(unsigned int irq, void *dev_id);
extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}
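
/*
 * Device-managed sketch (hypothetical probe function): the IRQ is released
 * automatically when "dev" is unbound, so no explicit free_irq() is needed
 * on the error or remove paths.  foo_hardirq/foo are illustrative.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int irq = platform_get_irq(pdev, 0);
 *
 *		if (irq < 0)
 *			return irq;
 *		return devm_request_irq(&pdev->dev, irq, foo_hardirq,
 *					IRQF_SHARED, "foo", foo);
 *	}
 */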

extern int __must_check
devm_request_any_context_irq(struct device *dev, unsigned int irq,
			     irq_handler_t handler, unsigned long irqflags,
			     const char *devname, void *dev_id);

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);

bool irq_has_action(unsigned int irq);
extern void disable_irq_nosync(unsigned int irq);
extern bool disable_hardirq(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern bool irq_percpu_is_enabled(unsigned int irq);
extern void irq_wake_thread(unsigned int irq, void *dev_id);

extern void disable_nmi_nosync(unsigned int irq);
extern void disable_percpu_nmi(unsigned int irq);
extern void enable_nmi(unsigned int irq);
extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
extern int prepare_percpu_nmi(unsigned int irq);
extern void teardown_percpu_nmi(unsigned int irq);

extern int irq_inject_interrupt(unsigned int irq);

/* The following three functions are for the core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
extern void rearm_wake_irq(unsigned int irq);

/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:		Interrupt to which notification applies
 * @kref:		Reference count, for internal use
 * @work:		Work item, for internal use
 * @notify:		Function to be called on change.  This will be
 *			called in process context.
 * @release:		Function to be called on release.  This will be
 *			called in process context.  Once registered, the
 *			structure must only be freed when this function is
 *			called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};

#define	IRQ_AFFINITY_MAX_SETS	4

/**
 * struct irq_affinity - Description for automatic irq affinity assignments
 * @pre_vectors:	Don't apply affinity to @pre_vectors at beginning of
 *			the MSI(-X) vector space
 * @post_vectors:	Don't apply affinity to @post_vectors at end of
 *			the MSI(-X) vector space
 * @nr_sets:		The number of interrupt sets for which affinity
 *			spreading is required
 * @set_size:		Array holding the size of each interrupt set
 * @calc_sets:		Callback for calculating the number and size
 *			of interrupt sets
 * @priv:		Private data for usage by @calc_sets, usually a
 *			pointer to driver/device specific data.
 */
struct irq_affinity {
	unsigned int	pre_vectors;
	unsigned int	post_vectors;
	unsigned int	nr_sets;
	unsigned int	set_size[IRQ_AFFINITY_MAX_SETS];
	void		(*calc_sets)(struct irq_affinity *, unsigned int nvecs);
	void		*priv;
};
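
/*
 * Sketch (assumed PCI MSI-X consumer): reserve one vector at each end of
 * the vector space for non-queue purposes and let the core spread the
 * remaining queue vectors across CPUs.  Such a descriptor is typically
 * passed to an allocator like pci_alloc_irq_vectors_affinity().
 *
 *	struct irq_affinity affd = {
 *		.pre_vectors	= 1,	// e.g. admin/config interrupt
 *		.post_vectors	= 1,	// e.g. error notification interrupt
 *	};
 */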

/**
 * struct irq_affinity_desc - Interrupt affinity descriptor
 * @mask:	cpumask to hold the affinity assignment
 * @is_managed: 1 if the interrupt is managed internally
 */
struct irq_affinity_desc {
	struct cpumask	mask;
	unsigned int	is_managed : 1;
};

#if defined(CONFIG_SMP)

extern cpumask_var_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask);

extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
				     bool setaffinity);

/**
 * irq_update_affinity_hint - Update the affinity hint
 * @irq:	Interrupt to update
 * @m:		cpumask pointer (NULL to clear the hint)
 *
 * Updates the affinity hint, but does not change the affinity of the
 * interrupt.
 */
static inline int
irq_update_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	return __irq_apply_affinity_hint(irq, m, false);
}

/**
 * irq_set_affinity_and_hint - Update the affinity hint and apply the provided
 *			       cpumask to the interrupt
 * @irq:	Interrupt to update
 * @m:		cpumask pointer (NULL to clear the hint)
 *
 * Updates the affinity hint and if @m is not NULL it applies it as the
 * affinity of that interrupt.
 */
static inline int
irq_set_affinity_and_hint(unsigned int irq, const struct cpumask *m)
{
	return __irq_apply_affinity_hint(irq, m, true);
}

/*
 * Deprecated. Use irq_update_affinity_hint() or irq_set_affinity_and_hint()
 * instead.
 */
static inline int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	return irq_set_affinity_and_hint(irq, m);
}
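
/*
 * Sketch (hypothetical multi-queue driver): steer each queue interrupt so
 * that queue i prefers CPU i, and record that preference as the hint that
 * irqbalance and /proc/irq/<n>/affinity_hint can observe.  queue_irq[] and
 * nr_queues are illustrative.
 *
 *	for (i = 0; i < nr_queues; i++)
 *		irq_set_affinity_and_hint(queue_irq[i],
 *					  cpumask_of(i % num_online_cpus()));
 */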

extern int irq_update_affinity_desc(unsigned int irq,
				    struct irq_affinity_desc *affinity);

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);

unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
				       const struct irq_affinity *affd);

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return 0;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq)  { return 0; }

static inline int irq_update_affinity_hint(unsigned int irq,
					   const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_set_affinity_and_hint(unsigned int irq,
					    const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_update_affinity_desc(unsigned int irq,
					   struct irq_affinity_desc *affinity)
{
	return -EINVAL;
}

static inline int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	return 0;
}

static inline struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
{
	return NULL;
}

static inline unsigned int
irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
			  const struct irq_affinity *affd)
{
	return maxvec;
}

#endif /* CONFIG_SMP */

/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that
 * know that a particular irq context which is disabled,
 * and which is the only irq-context user of a lock,
 * that it's safe to take the lock in the irq-disabled
 * section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}
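
/*
 * Sketch (hypothetical driver PM callbacks): arm the interrupt as a system
 * wakeup source across suspend and disarm it again on resume.  "foo" is
 * illustrative; device_may_wakeup() is the usual gate for this.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(foo->irq);
 *		return 0;
 *	}
 */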

/*
 * irq_get_irqchip_state/irq_set_irqchip_state specific flags
 */
enum irqchip_irq_state {
	IRQCHIP_STATE_PENDING,		/* Is interrupt pending? */
	IRQCHIP_STATE_ACTIVE,		/* Is interrupt in progress? */
	IRQCHIP_STATE_MASKED,		/* Is interrupt masked? */
	IRQCHIP_STATE_LINE_LEVEL,	/* Is IRQ line high? */
};

extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool *state);
extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool state);
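
/*
 * Sketch: query whether an interrupt is pending at the irqchip level and
 * clear it before re-enabling the line, e.g. after reconfiguring a device.
 * Error handling is elided; both calls return 0 on success.
 *
 *	bool pending;
 *
 *	irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
 *	if (pending)
 *		irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);
 */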

#ifdef CONFIG_IRQ_FORCED_THREADING
# ifdef CONFIG_PREEMPT_RT
# define force_irqthreads()	(true)
# else
DECLARE_STATIC_KEY_FALSE(force_irqthreads_key);
# define force_irqthreads()	(static_branch_unlikely(&force_irqthreads_key))
# endif
#else
#define force_irqthreads()	(false)
#endif

#ifndef local_softirq_pending

#ifndef local_softirq_pending_ref
#define local_softirq_pending_ref irq_stat.__softirq_pending
#endif

#define local_softirq_pending()	(__this_cpu_read(local_softirq_pending_ref))
#define set_softirq_pending(x)	(__this_cpu_write(local_softirq_pending_ref, (x)))
#define or_softirq_pending(x)	(__this_cpu_or(local_softirq_pending_ref, (x)))

#endif /* local_softirq_pending */

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while(0)
#endif

/*
 * PLEASE, avoid allocating new softirqs unless you really need very
 * high frequency threaded job scheduling. For almost all purposes
 * tasklets are more than enough; e.g. all serial device BHs et al.
 * should be converted to tasklets, not to softirqs.
 */
enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	IRQ_POLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferable RCU should always be the last softirq */

	NR_SOFTIRQS
};

/*
 * The following vectors can be safely ignored after ksoftirqd is parked:
 * RCU migrates its callback queue and reports the final quiescent states
 * via CPU hotplug callbacks, and IRQ_POLL migrates its queue through
 * irq_poll_cpu_dead().
 */
#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(RCU_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ))

/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern const char * const softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KEEP THEM AS THEY ARE.
 */
struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);

#ifdef CONFIG_PREEMPT_RT
extern void do_softirq_post_smp_call_flush(unsigned int was_pending);
#else
static inline void do_softirq_post_smp_call_flush(unsigned int unused)
{
	do_softirq();
}
#endif

extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);

DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}

/* Tasklets --- multithreaded analogue of BHs.
 *
 * This API is deprecated. Please consider using threaded IRQs instead.
 *
 * Unlike generic softirqs, a given tasklet runs on only one CPU at a
 * time; unlike old-style BHs, different tasklets may run simultaneously
 * on different CPUs.
 *
 * Properties:
 * - If tasklet_schedule() is called, the tasklet is guaranteed to be
 *   executed on some CPU at least once afterwards.
 * - If the tasklet is already scheduled but has not yet started to run,
 *   it will be executed only once.
 * - If the tasklet is already running on another CPU (or if schedule is
 *   called from the tasklet itself), it is rescheduled for later.
 * - A tasklet is strictly serialized with respect to itself, but not
 *   with respect to other tasklets; callers needing cross-tasklet
 *   synchronization must provide it themselves, e.g. with spinlocks.
 */
struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	bool use_callback;
	union {
		void (*func)(unsigned long data);
		void (*callback)(struct tasklet_struct *t);
	};
	unsigned long data;
};

#define DECLARE_TASKLET(name, _callback)		\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(0),			\
	.callback = _callback,				\
	.use_callback = true,				\
}

#define DECLARE_TASKLET_DISABLED(name, _callback)	\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(1),			\
	.callback = _callback,				\
	.use_callback = true,				\
}

#define from_tasklet(var, callback_tasklet, tasklet_fieldname)	\
	container_of(callback_tasklet, typeof(*var), tasklet_fieldname)

#define DECLARE_TASKLET_OLD(name, _func)		\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(0),			\
	.func = _func,					\
}

#define DECLARE_TASKLET_DISABLED_OLD(name, _func)	\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(1),			\
	.func = _func,					\
}

enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

void tasklet_unlock(struct tasklet_struct *t);
void tasklet_unlock_wait(struct tasklet_struct *t);
void tasklet_unlock_spin_wait(struct tasklet_struct *t);

#else
static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
static inline void tasklet_unlock(struct tasklet_struct *t) { }
static inline void tasklet_unlock_wait(struct tasklet_struct *t) { }
static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { }
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic();
}

/*
 * Do not use in new code. Disabling tasklets from atomic contexts is
 * error prone and should be avoided.
 */
static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_spin_wait(t);
	smp_mb();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
extern void tasklet_setup(struct tasklet_struct *t,
			  void (*callback)(struct tasklet_struct *));
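
/*
 * Usage sketch (hypothetical driver, modern callback style): initialise a
 * tasklet embedded in a device structure, recover the device with
 * from_tasklet() in the callback, and tear it down with tasklet_kill()
 * before the structure is freed.  struct foo_device and foo_bh are
 * illustrative names.
 *
 *	struct foo_device {
 *		struct tasklet_struct bh;
 *		// ... other fields ...
 *	};
 *
 *	static void foo_bh(struct tasklet_struct *t)
 *	{
 *		struct foo_device *foo = from_tasklet(foo, t, bh);
 *
 *		foo_push_completions(foo);	// runs in softirq context
 *	}
 *
 *	tasklet_setup(&foo->bh, foo_bh);	// at init time
 *	tasklet_schedule(&foo->bh);		// from the hard irq handler
 *	tasklet_kill(&foo->bh);			// before freeing foo
 */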

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irqs = probe_irq_off(irqs); // get IRQ number of detected device
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irq's.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */

#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

#ifdef CONFIG_IRQ_TIMINGS
void irq_timings_enable(void);
void irq_timings_disable(void);
u64 irq_timings_next_event(u64 now);
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

/*
 * We want to know which function is an entrypoint of a hardirq or a softirq.
 */
#ifndef __irq_entry
# define __irq_entry	 __section(".irqentry.text")
#endif

#define __softirq_entry  __section(".softirqentry.text")

#endif /* _LINUX_INTERRUPT_H */