/* SPDX-License-Identifier: GPL-2.0 */
/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/jump_label.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/sections.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in linux/ioport.h
 * and select the interrupt line behaviour. When requesting an
 * interrupt without specifying an IRQF_TRIGGER, the setting should be
 * assumed to be "as already configured", which may be as per machine
 * or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010

/*
 * These flags are used by the IRQ request/handling routines:
 *
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * __IRQF_TIMER - Flag to mark this interrupt as a timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
 *                   that this interrupt will wake the system from a suspended
 *                   state.
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                resume time.
 * IRQF_COND_SUSPEND - If the IRQ shares with a NO_SUSPEND user, execute this
 *                interrupt handler after suspending interrupts. For system
 *                wakeup devices, users need to implement wakeup detection in
 *                their interrupt handlers.
 * IRQF_NO_AUTOEN - Don't enable the IRQ or NMI automatically when it is
 *                requested. Users enable it explicitly via enable_irq() or
 *                enable_nmi() later.
 * IRQF_NO_DEBUG - Exclude from runaway detection for IPI and similar handlers,
 *                depends on IRQF_PERCPU.
 */
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000
#define IRQF_EARLY_RESUME	0x00020000
#define IRQF_COND_SUSPEND	0x00040000
#define IRQF_NO_AUTOEN		0x00080000
#define IRQF_NO_DEBUG		0x00100000

#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)

/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @secondary:	pointer to secondary irqaction (force threading)
 * @irq:	interrupt number
 * @flags:	flags (see IRQF_* above)
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @name:	name of the device
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t		handler;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	struct irqaction	*secondary;
	unsigned int		irq;
	unsigned int		flags;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
	struct proc_dir_entry	*dir;
} ____cacheline_internodealigned_in_smp;

extern irqreturn_t no_action(int cpl, void *dev_id);

/*
 * If a (PCI) device interrupt is not connected we set dev->irq to
 * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so
 * we can distinguish that case from other error returns.
 *
 * 0x80000000 is guaranteed to be outside the available range of
 * interrupts and easy to distinguish from other possible incorrect values.
 */
#define IRQ_NOTCONNECTED	(1U << 31)

extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

/**
 * request_irq - Add a handler for an interrupt line
 * @irq:	The interrupt line to allocate
 * @handler:	Function to be called when the IRQ occurs.
 *		Primary handler for threaded interrupts.
 *		If NULL, the default primary handler is installed.
 * @flags:	Handling flags
 * @name:	Name of the device generating this interrupt
 * @dev:	A cookie passed back to the handler function
 *
 * This call allocates an interrupt and establishes a handler; see
 * the documentation for request_threaded_irq() for details.
 */
static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}
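
/*
 * Example (illustrative sketch, not part of the kernel sources): a driver
 * installing a threaded handler with request_threaded_irq(). The names
 * "my_dev", "my_dev_irq_pending", "my_quick_check" and "my_thread_fn" are
 * hypothetical.
 *
 *	static irqreturn_t my_quick_check(int irq, void *dev)
 *	{
 *		struct my_dev *md = dev;
 *
 *		if (!my_dev_irq_pending(md))	// hypothetical helper
 *			return IRQ_NONE;	// not our interrupt
 *		return IRQ_WAKE_THREAD;		// defer to my_thread_fn()
 *	}
 *
 *	static irqreturn_t my_thread_fn(int irq, void *dev)
 *	{
 *		// process context: sleeping is allowed here
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(irq, my_quick_check, my_thread_fn,
 *				   IRQF_ONESHOT, "my_dev", md);
 */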

extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern int __must_check
__request_percpu_irq(unsigned int irq, irq_handler_t handler,
		     unsigned long flags, const char *devname,
		     void __percpu *percpu_dev_id);

extern int __must_check
request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev);

static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id)
{
	return __request_percpu_irq(irq, handler, 0,
				    devname, percpu_dev_id);
}

extern int __must_check
request_percpu_nmi(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *dev);

extern const void *free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);

extern const void *free_nmi(unsigned int irq, void *dev_id);
extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}

extern int __must_check
devm_request_any_context_irq(struct device *dev, unsigned int irq,
			     irq_handler_t handler, unsigned long irqflags,
			     const char *devname, void *dev_id);

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
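
/*
 * Example (sketch): the device-managed variant in a probe() path. The IRQ
 * is released automatically when the device is unbound, so no free_irq()
 * is needed in the error or remove paths. "my_handler" and "my_data" are
 * illustrative.
 *
 *	err = devm_request_irq(&pdev->dev, irq, my_handler, 0,
 *			       dev_name(&pdev->dev), my_data);
 *	if (err)
 *		return err;
 */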

/*
 * With lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * ifdeffed out in the driver).
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif

bool irq_has_action(unsigned int irq);
extern void disable_irq_nosync(unsigned int irq);
extern bool disable_hardirq(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern bool irq_percpu_is_enabled(unsigned int irq);
extern void irq_wake_thread(unsigned int irq, void *dev_id);

extern void disable_nmi_nosync(unsigned int irq);
extern void disable_percpu_nmi(unsigned int irq);
extern void enable_nmi(unsigned int irq);
extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
extern int prepare_percpu_nmi(unsigned int irq);
extern void teardown_percpu_nmi(unsigned int irq);

extern int irq_inject_interrupt(unsigned int irq);

/* The following three functions are for the core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
extern void rearm_wake_irq(unsigned int irq);

/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:		Interrupt to which notification applies
 * @kref:		Reference count, for internal use
 * @work:		Work item, for internal use
 * @notify:		Function to be called on change.  This will be
 *			called in process context.
 * @release:		Function to be called on release.  This will be
 *			called in process context.  Once registered, the
 *			structure must only be freed when this function is
 *			called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};

#define IRQ_AFFINITY_MAX_SETS	4

/**
 * struct irq_affinity - Description for automatic irq affinity assignments
 * @pre_vectors:	Don't apply affinity to @pre_vectors at beginning of
 *			the MSI(-X) vector space
 * @post_vectors:	Don't apply affinity to @post_vectors at end of
 *			the MSI(-X) vector space
 * @nr_sets:		The number of interrupt sets for which affinity
 *			spreading is required
 * @set_size:		Array holding the size of each interrupt set
 * @calc_sets:		Callback for calculating the number and the size of
 *			interrupt sets
 * @priv:		Private data for usage by @calc_sets, usually a
 *			pointer to driver/device specific data.
 */
struct irq_affinity {
	unsigned int	pre_vectors;
	unsigned int	post_vectors;
	unsigned int	nr_sets;
	unsigned int	set_size[IRQ_AFFINITY_MAX_SETS];
	void		(*calc_sets)(struct irq_affinity *, unsigned int nvecs);
	void		*priv;
};
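
/*
 * Example (sketch): reserving one unmanaged pre-vector for a control
 * interrupt and letting the core spread the remaining MSI-X vectors across
 * CPUs. pci_alloc_irq_vectors_affinity() consumes this descriptor; names
 * other than the struct fields are illustrative.
 *
 *	struct irq_affinity affd = {
 *		.pre_vectors = 1,	// vector 0: admin/control IRQ
 *	};
 *	int nvecs;
 *
 *	nvecs = pci_alloc_irq_vectors_affinity(pdev, 2, max_vecs,
 *					PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
 *					&affd);
 */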

/**
 * struct irq_affinity_desc - Interrupt affinity descriptor
 * @mask:	cpumask to hold the affinity assignment
 * @is_managed: 1 if the interrupt is managed internally
 */
struct irq_affinity_desc {
	struct cpumask	mask;
	unsigned int	is_managed : 1;
};

#if defined(CONFIG_SMP)

extern cpumask_var_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask);

extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
				     bool setaffinity);

/**
 * irq_update_affinity_hint - Update the affinity hint
 * @irq:	Interrupt to update
 * @m:		cpumask pointer (NULL to clear the hint)
 *
 * Updates the affinity hint, but does not change the affinity of the
 * interrupt.
 */
static inline int
irq_update_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	return __irq_apply_affinity_hint(irq, m, false);
}

/**
 * irq_set_affinity_and_hint - Update the affinity hint and apply the provided
 *			       cpumask to the interrupt
 * @irq:	Interrupt to update
 * @m:		cpumask pointer (NULL to clear the hint)
 *
 * Updates the affinity hint and, if @m is not NULL, applies it as the
 * affinity of that interrupt.
 */
static inline int
irq_set_affinity_and_hint(unsigned int irq, const struct cpumask *m)
{
	return __irq_apply_affinity_hint(irq, m, true);
}

/*
 * Deprecated. Use irq_update_affinity_hint() or irq_set_affinity_and_hint()
 * instead.
 */
static inline int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	return irq_set_affinity_and_hint(irq, m);
}
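
/*
 * Example (sketch): a multi-queue driver hinting that each queue's
 * interrupt should be serviced on the queue's home CPU, without forcing
 * the affinity itself; "queue" is an illustrative driver structure.
 *
 *	irq_update_affinity_hint(queue->irq, cpumask_of(queue->cpu));
 *	...
 *	irq_update_affinity_hint(queue->irq, NULL);	// clear on teardown
 */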

extern int irq_update_affinity_desc(unsigned int irq,
				    struct irq_affinity_desc *affinity);

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);

unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
				       const struct irq_affinity *affd);

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return 0;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq)  { return 0; }

static inline int irq_update_affinity_hint(unsigned int irq,
					   const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_set_affinity_and_hint(unsigned int irq,
					    const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_update_affinity_desc(unsigned int irq,
					   struct irq_affinity_desc *affinity)
{
	return -EINVAL;
}

static inline int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	return 0;
}

static inline struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
{
	return NULL;
}

static inline unsigned int
irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
			  const struct irq_affinity *affd)
{
	return maxvec;
}

#endif /* CONFIG_SMP */

/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used by locking constructs which know that a
 * particular irq context is disabled and is the only irq-context
 * user of a lock, so that it is safe to take the lock in the
 * irq-disabled section:
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}
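
/*
 * Example (sketch): arming a wakeup-capable interrupt across suspend from
 * a driver's PM callbacks; the "my_*" names are illustrative.
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		struct my_dev *md = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(md->irq);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *dev)
 *	{
 *		struct my_dev *md = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(md->irq);
 *		return 0;
 *	}
 */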

/*
 * irq_get_irqchip_state/irq_set_irqchip_state specific flags
 */
enum irqchip_irq_state {
	IRQCHIP_STATE_PENDING,		/* Is interrupt pending? */
	IRQCHIP_STATE_ACTIVE,		/* Is interrupt in progress? */
	IRQCHIP_STATE_MASKED,		/* Is interrupt masked? */
	IRQCHIP_STATE_LINE_LEVEL,	/* Is IRQ line high? */
};

extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool *state);
extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool state);
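
/*
 * Example (sketch): querying and clearing a pending interrupt at the
 * irqchip level, as e.g. hypervisors do when saving/restoring line state.
 *
 *	bool pending;
 *
 *	if (!irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending) &&
 *	    pending)
 *		irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);
 */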

#ifdef CONFIG_IRQ_FORCED_THREADING
# ifdef CONFIG_PREEMPT_RT
#  define force_irqthreads()	(true)
# else
DECLARE_STATIC_KEY_FALSE(force_irqthreads_key);
#  define force_irqthreads()	(static_branch_unlikely(&force_irqthreads_key))
# endif
#else
#define force_irqthreads()	(false)
#endif

#ifndef local_softirq_pending

#ifndef local_softirq_pending_ref
#define local_softirq_pending_ref irq_stat.__softirq_pending
#endif

#define local_softirq_pending()	(__this_cpu_read(local_softirq_pending_ref))
#define set_softirq_pending(x)	(__this_cpu_write(local_softirq_pending_ref, (x)))
#define or_softirq_pending(x)	(__this_cpu_or(local_softirq_pending_ref, (x)))

#endif /* local_softirq_pending */

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while(0)
#endif

/* PLEASE avoid allocating new softirqs unless you really need very
 * high-frequency threaded job scheduling. For almost all purposes
 * tasklets (or, better, threaded interrupts and workqueues) are more
 * than enough.
 */
enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	IRQ_POLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferably RCU should always be the last softirq */

	NR_SOFTIRQS
};

#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))

/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern const char * const softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KEEP THEM SAME :-)
 */
struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);

extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);

DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}
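
/*
 * Example (sketch): how core kernel code wires up and raises a softirq
 * vector. Softirq vectors are fixed at compile time (see the enum above);
 * drivers must not add new ones. "MY_SOFTIRQ" stands in for one of the
 * existing vectors and "my_softirq_action" is hypothetical.
 *
 *	static void my_softirq_action(struct softirq_action *h)
 *	{
 *		// bounded work; runs with bottom halves disabled
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_softirq_action);	// once, at boot
 *	...
 *	raise_softirq(MY_SOFTIRQ);	// mark vector pending on this CPU
 */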

/* Tasklets --- the multithreaded analogue of BHs.

   This API is deprecated. Please consider using threaded IRQs instead.

   The main feature distinguishing them from generic softirqs: a given
   tasklet runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, the tasklet is guaranteed
     to be executed on some CPU at least once afterwards.
   * If the tasklet is already scheduled but its execution has not yet
     started, it will be executed only once.
   * If the tasklet is already running on another CPU (or if schedule is
     called from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized with respect to itself, but not
     with respect to other tasklets. If the client needs inter-tasklet
     synchronization, it must provide it with spinlocks.
 */
struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	bool use_callback;
	union {
		void (*func)(unsigned long data);
		void (*callback)(struct tasklet_struct *t);
	};
	unsigned long data;
};

#define DECLARE_TASKLET(name, _callback)		\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(0),			\
	.callback = _callback,				\
	.use_callback = true,				\
}

#define DECLARE_TASKLET_DISABLED(name, _callback)	\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(1),			\
	.callback = _callback,				\
	.use_callback = true,				\
}

#define from_tasklet(var, callback_tasklet, tasklet_fieldname)	\
	container_of(callback_tasklet, typeof(*var), tasklet_fieldname)

#define DECLARE_TASKLET_OLD(name, _func)		\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(0),			\
	.func = _func,					\
}

#define DECLARE_TASKLET_DISABLED_OLD(name, _func)	\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(1),			\
	.func = _func,					\
}

enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

void tasklet_unlock(struct tasklet_struct *t);
void tasklet_unlock_wait(struct tasklet_struct *t);
void tasklet_unlock_spin_wait(struct tasklet_struct *t);

#else
static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
static inline void tasklet_unlock(struct tasklet_struct *t) { }
static inline void tasklet_unlock_wait(struct tasklet_struct *t) { }
static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { }
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic();
}

/*
 * Do not use in new code. Disabling tasklets from atomic contexts is
 * error prone and should be avoided.
 */
static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_spin_wait(t);
	smp_mb();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
extern void tasklet_setup(struct tasklet_struct *t,
			  void (*callback)(struct tasklet_struct *));
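
/*
 * Example (sketch): current-style tasklet usage with tasklet_setup() and
 * from_tasklet(). "my_dev" and "my_tasklet_fn" are illustrative, and the
 * API as a whole is deprecated (see the comment above) in favour of
 * threaded interrupts.
 *
 *	static void my_tasklet_fn(struct tasklet_struct *t)
 *	{
 *		struct my_dev *md = from_tasklet(md, t, tasklet);
 *
 *		// atomic (softirq) context: sleeping is not allowed
 *	}
 *
 *	tasklet_setup(&md->tasklet, my_tasklet_fn);	// at init
 *	tasklet_schedule(&md->tasklet);			// from the hardirq handler
 *	tasklet_kill(&md->tasklet);			// at teardown
 */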

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irqs = probe_irq_off(irqs); // get IRQ number of device.
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irq's.
 *
 * (If no IRQs reserved by the device at step 1 become detectable,
 * probe_irq_off() returns 0 -- otherwise it returns the number of the
 * irq that was detected, or a negative irq number if more than one
 * interrupt was detected.)
 */
#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

#ifdef CONFIG_IRQ_TIMINGS
void irq_timings_enable(void);
void irq_timings_disable(void);
u64 irq_timings_next_event(u64 now);
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

/*
 * We want to know which function is an entry point of a hardirq or a softirq.
 */
#ifndef __irq_entry
# define __irq_entry __section(".irqentry.text")
#endif

#define __softirq_entry  __section(".softirqentry.text")

#endif /* _LINUX_INTERRUPT_H */