1
2#ifndef _LINUX_INTERRUPT_H
3#define _LINUX_INTERRUPT_H
4
5#include <linux/kernel.h>
6#include <linux/linkage.h>
7#include <linux/bitops.h>
8#include <linux/preempt.h>
9#include <linux/cpumask.h>
10#include <linux/irqreturn.h>
11#include <linux/irqnr.h>
12#include <linux/hardirq.h>
13#include <linux/irqflags.h>
14#include <linux/smp.h>
15#include <linux/percpu.h>
16#include <linux/hrtimer.h>
17#include <linux/kref.h>
18#include <linux/workqueue.h>
19
20#include <linux/atomic.h>
21#include <asm/ptrace.h>
22#include <asm/system.h>
23#include <trace/events/irq.h>
24
25
26
27
28
29
30
31
/*
 * IRQ line trigger types.
 *
 * IRQF_TRIGGER_NONE	- no trigger specified (line assumed preconfigured)
 * IRQF_TRIGGER_RISING	- rising edge triggered
 * IRQF_TRIGGER_FALLING	- falling edge triggered
 * IRQF_TRIGGER_HIGH	- level triggered, active high
 * IRQF_TRIGGER_LOW	- level triggered, active low
 * IRQF_TRIGGER_MASK	- all edge/level selection bits
 * IRQF_TRIGGER_PROBE	- trigger type is autodetected by probing
 */
#define IRQF_TRIGGER_NONE 0x00000000
#define IRQF_TRIGGER_RISING 0x00000001
#define IRQF_TRIGGER_FALLING 0x00000002
#define IRQF_TRIGGER_HIGH 0x00000004
#define IRQF_TRIGGER_LOW 0x00000008
#define IRQF_TRIGGER_MASK (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE 0x00000010
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
/*
 * Flags for request_irq() / request_threaded_irq().
 *
 * IRQF_DISABLED	- keep irqs disabled when calling the action handler
 *			  (historic; NOTE(review): deprecated upstream)
 * IRQF_SAMPLE_RANDOM	- irq is used to feed the random generator
 * IRQF_SHARED		- allow sharing the irq among several devices
 * IRQF_PROBE_SHARED	- set by callers when they expect sharing mismatches
 * __IRQF_TIMER		- internal timer-interrupt marker; use IRQF_TIMER
 * IRQF_PERCPU		- interrupt is per cpu
 * IRQF_NOBALANCING	- exclude this interrupt from irq balancing
 * IRQF_IRQPOLL		- interrupt is used for polling
 * IRQF_ONESHOT		- irq line stays disabled after the hardirq handler
 *			  until the threaded handler has run
 * IRQF_NO_SUSPEND	- do not disable this irq during suspend
 * IRQF_FORCE_RESUME	- force enable it on resume even with IRQF_NO_SUSPEND
 * IRQF_NO_THREAD	- interrupt cannot be threaded
 * IRQF_EARLY_RESUME	- resume irq early (syscore) instead of at device resume
 */
#define IRQF_DISABLED 0x00000020
#define IRQF_SAMPLE_RANDOM 0x00000040
#define IRQF_SHARED 0x00000080
#define IRQF_PROBE_SHARED 0x00000100
#define __IRQF_TIMER 0x00000200
#define IRQF_PERCPU 0x00000400
#define IRQF_NOBALANCING 0x00000800
#define IRQF_IRQPOLL 0x00001000
#define IRQF_ONESHOT 0x00002000
#define IRQF_NO_SUSPEND 0x00004000
#define IRQF_FORCE_RESUME 0x00008000
#define IRQF_NO_THREAD 0x00010000
#define IRQF_EARLY_RESUME 0x00020000

/* Timer interrupts: never suspended, never threaded. */
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
80
81
82
83
84
85
86
87
/*
 * Interrupt context kinds, as reported by request_any_context_irq():
 * IRQC_IS_HARDIRQ - handler will run in hard interrupt context
 * IRQC_IS_NESTED  - handler will run in a nested (threaded) context
 */
enum {
	IRQC_IS_HARDIRQ = 0,
	IRQC_IS_NESTED,
};

/* Interrupt handler prototype: (irq number, dev cookie) -> irqreturn_t. */
typedef irqreturn_t (*irq_handler_t)(int, void *);
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @flags:	flags (see IRQF_* above)
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	cookie to identify the device (per-cpu variant)
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @name:	name of the device
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t handler;
	unsigned long flags;
	void *dev_id;
	void __percpu *percpu_dev_id;
	struct irqaction *next;
	int irq;
	irq_handler_t thread_fn;
	struct task_struct *thread;
	unsigned long thread_flags;
	unsigned long thread_mask;
	const char *name;
	struct proc_dir_entry *dir;
} ____cacheline_internodealigned_in_smp;
124
125extern irqreturn_t no_action(int cpl, void *dev_id);
126
127#ifdef CONFIG_GENERIC_HARDIRQS
128extern int __must_check
129request_threaded_irq(unsigned int irq, irq_handler_t handler,
130 irq_handler_t thread_fn,
131 unsigned long flags, const char *name, void *dev);
132
133static inline int __must_check
134request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
135 const char *name, void *dev)
136{
137 return request_threaded_irq(irq, handler, NULL, flags, name, dev);
138}
139
140extern int __must_check
141request_any_context_irq(unsigned int irq, irq_handler_t handler,
142 unsigned long flags, const char *name, void *dev_id);
143
144extern int __must_check
145request_percpu_irq(unsigned int irq, irq_handler_t handler,
146 const char *devname, void __percpu *percpu_dev_id);
147
148extern void exit_irq_thread(void);
149#else
150
151extern int __must_check
152request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
153 const char *name, void *dev);
154
155
156
157
158
159
160
/*
 * Fallbacks for !CONFIG_GENERIC_HARDIRQS: there is no threaded-irq
 * support, so the threaded variants ignore @thread_fn and forward to
 * the architecture's request_irq().
 *
 * NOTE(review): threaded-handler semantics (primary handler == NULL,
 * IRQF_ONESHOT) are silently lost on such architectures — confirm
 * callers always pass a usable primary handler here.
 */
static inline int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev)
{
	return request_irq(irq, handler, flags, name, dev);
}

/* Degrades to a plain request_irq(); handlers always run in hardirq context. */
static inline int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id)
{
	return request_irq(irq, handler, flags, name, dev_id);
}

/* Per-cpu irqs degrade to a plain request_irq() with no flags. */
static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id)
{
	return request_irq(irq, handler, 0, devname, percpu_dev_id);
}

/* No irq threads exist without generic hardirqs; nothing to clean up. */
static inline void exit_irq_thread(void) { }
184#endif
185
186extern void free_irq(unsigned int, void *);
187extern void free_percpu_irq(unsigned int, void __percpu *);
188
189struct device;
190
191extern int __must_check
192devm_request_threaded_irq(struct device *dev, unsigned int irq,
193 irq_handler_t handler, irq_handler_t thread_fn,
194 unsigned long irqflags, const char *devname,
195 void *dev_id);
196
197static inline int __must_check
198devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
199 unsigned long irqflags, const char *devname, void *dev_id)
200{
201 return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
202 devname, dev_id);
203}
204
205extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
206
207
208
209
210
211
212
213
214
215
216
217
218
/*
 * With lockdep enabled this is a no-op: re-enabling interrupts inside a
 * hardirq handler is compiled away.  NOTE(review): presumably because
 * lockdep requires hardirq handlers to run with irqs disabled to keep
 * its irq-state tracking sound — confirm against lockdep documentation.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq() do { } while (0)
#else
# define local_irq_enable_in_hardirq() local_irq_enable()
#endif
224
/*
 * Core irq line control.  NOTE(review): by upstream convention
 * disable_irq() additionally waits for running handlers to complete
 * while disable_irq_nosync() returns immediately — confirm in
 * kernel/irq/manage.c.
 */
extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
230
231
232#ifdef CONFIG_GENERIC_HARDIRQS
233extern void suspend_device_irqs(void);
234extern void resume_device_irqs(void);
235#ifdef CONFIG_PM_SLEEP
236extern int check_wakeup_irqs(void);
237#else
238static inline int check_wakeup_irqs(void) { return 0; }
239#endif
240#else
/*
 * Stubs for !CONFIG_GENERIC_HARDIRQS: PM irq suspend/resume is a no-op
 * and there are never pending wakeup irqs.
 *
 * Fix: dropped the stray ';' after the '{ }' bodies — a semicolon after
 * a function definition is an empty declaration, which is invalid ISO C
 * at file scope and triggers -pedantic warnings.
 */
static inline void suspend_device_irqs(void) { }
static inline void resume_device_irqs(void) { }
static inline int check_wakeup_irqs(void) { return 0; }
244#endif
245
246#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
247
248extern cpumask_var_t irq_default_affinity;
249
250extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
251extern int irq_can_set_affinity(unsigned int irq);
252extern int irq_select_affinity(unsigned int irq);
253
254extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
255
256
257
258
259
260
261
262
263
264
265
266
267
/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:	interrupt to which notification applies
 * @kref:	reference count, for internal use
 * @work:	work item, for internal use
 * @notify:	function called on affinity change, in process context
 * @release:	function called when the structure may be freed; once
 *		registered, free the structure only from this callback
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

/* Drain pending affinity notifications (they run via the system workqueue). */
static inline void irq_run_affinity_notifiers(void)
{
	flush_scheduled_work();
}
283
284#else
285
/*
 * !SMP (or no generic hardirqs) stubs: with a single cpu, irq affinity
 * cannot be set (-EINVAL) and there is nothing to select or balance.
 */
static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

/* Affinity hints for userspace balancers are likewise unsupported. */
static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}
303#endif
304
305#ifdef CONFIG_GENERIC_HARDIRQS
306
307
308
309
310
311
312
313
314
315
316
/*
 * Special lockdep variants of irq disabling/enabling.  Under
 * CONFIG_LOCKDEP they additionally disable local interrupts so lockdep
 * sees the section as irq-safe; without lockdep they are identical to
 * the plain calls.  Statement order is deliberate: the irq line is
 * masked before local interrupts are touched, and re-enabled after.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

/* As above, but saves the local irq state into *flags (lockdep only). */
static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

/* Synchronous disable plus (under lockdep) local irq disable. */
static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

/* Re-enable local interrupts first (lockdep), then unmask the irq line. */
static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

/* Restore the state saved by disable_irq_nosync_lockdep_irqsave(). */
static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}
356
357
/* IRQ wakeup control: whether @irq may wake the system from suspend. */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

/* Allow @irq to wake the system; returns 0 on success or negative errno. */
static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

/* Undo enable_irq_wake(); returns 0 on success or negative errno. */
static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}
369
370#else
371
372
373
374
375
/*
 * Without generic hardirqs the lockdep wrappers fall back to the plain
 * irq calls; the irqsave/irqrestore variants simply drop the flags
 * argument.  (These macros are only provided when lockdep is off.)
 */
#ifndef CONFIG_LOCKDEP
# define disable_irq_nosync_lockdep(irq) disable_irq_nosync(irq)
# define disable_irq_nosync_lockdep_irqsave(irq, flags) \
 disable_irq_nosync(irq)
# define disable_irq_lockdep(irq) disable_irq(irq)
# define enable_irq_lockdep(irq) enable_irq(irq)
# define enable_irq_lockdep_irqrestore(irq, flags) \
 enable_irq(irq)
# endif

/* No irq wake support here; report success so generic callers proceed. */
static inline int enable_irq_wake(unsigned int irq)
{
	return 0;
}

static inline int disable_irq_wake(unsigned int irq)
{
	return 0;
}
395#endif
396
397
/*
 * force_irqthreads: when forced irq threading is configured, a runtime
 * flag selects whether handlers are pushed into threads; otherwise it
 * constant-folds to 0 so the checks compile away.
 */
#ifdef CONFIG_IRQ_FORCED_THREADING
extern bool force_irqthreads;
#else
#define force_irqthreads (0)
#endif

/*
 * Accessors for the per-cpu softirq pending mask.  Architectures may
 * override them (__ARCH_SET_SOFTIRQ_PENDING); the defaults treat
 * local_softirq_pending() as an assignable lvalue.
 */
#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x) (local_softirq_pending() |= (x))
#endif

/*
 * hard_irq_disable: hook for architectures that implement lazy irq
 * disabling and need to force the hardware off; default is a no-op.
 */
#ifndef hard_irq_disable
#define hard_irq_disable() do { } while(0)
#endif
418
419
420
421
422
423
424
/*
 * The softirq vectors, in priority order (lowest number runs first).
 * PLEASE avoid allocating new softirqs unless you really need very
 * high-frequency deferred work — for almost all purposes tasklets
 * (which multiplex over TASKLET_SOFTIRQ/HI_SOFTIRQ) are enough.
 */
enum
{
	HI_SOFTIRQ=0,		/* high-priority tasklets */
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	BLOCK_IOPOLL_SOFTIRQ,
	TASKLET_SOFTIRQ,	/* normal tasklets */
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,		/* NOTE(review): kept last by convention */

	NR_SOFTIRQS
};
440
441
442
443
444extern char *softirq_to_name[NR_SOFTIRQS];
445
446
447
448
449
/* A softirq entry: just the action callback, invoked by __do_softirq(). */
struct softirq_action
{
	void (*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
/* Register @action as the handler for softirq vector @nr (enum above). */
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
/*
 * Mark softirq @nr pending on this cpu.  The _irqoff suffix means the
 * caller must have interrupts disabled.  This only sets the pending
 * bit; use raise_softirq_irqoff()/raise_softirq() when the softirq may
 * also need to be kicked off (e.g. from non-irq context).
 */
static inline void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
464
465extern void raise_softirq_irqoff(unsigned int nr);
466extern void raise_softirq(unsigned int nr);
467
468
469
470
471
472
473
474
/* Per-cpu pending lists used by the remote-softirq machinery below. */
DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);

/* Per-cpu softirq daemon task. */
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

/* Return this cpu's ksoftirqd thread. */
static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}

/*
 * Raise @softirq on @cpu using @cp for the cross-cpu call.
 * NOTE(review): upstream falls back to queueing on the local cpu when
 * the remote raise cannot be done — confirm in kernel/softirq.c.
 */
extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);

/*
 * Lower-level variant taking the caller's cpu explicitly.
 * NOTE(review): presumably must be called with interrupts disabled —
 * confirm at the definition site.
 */
extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
				  int this_cpu, int softirq);
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
/*
 * Tasklets: softirq-driven deferred work.  A given tasklet runs on at
 * most one cpu at a time; scheduling is tracked with the single
 * TASKLET_STATE_SCHED bit so multiple schedules before it runs
 * collapse into one execution.
 */
struct tasklet_struct
{
	struct tasklet_struct *next;	/* next tasklet on the per-cpu list */
	unsigned long state;		/* TASKLET_STATE_* bits */
	atomic_t count;			/* 0 = enabled, >0 = disabled */
	void (*func)(unsigned long);	/* callback */
	unsigned long data;		/* cookie passed to @func */
};

/* Statically define an enabled tasklet (count 0). */
#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

/* Statically define a disabled tasklet (count 1); tasklet_enable() arms it. */
#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }


enum
{
	TASKLET_STATE_SCHED,	/* tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* tasklet is running (SMP only) */
};
536
537#ifdef CONFIG_SMP
538static inline int tasklet_trylock(struct tasklet_struct *t)
539{
540 return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
541}
542
/*
 * Release the RUN lock.  The barrier orders the tasklet body's stores
 * before the bit clear, so a waiter that sees RUN cleared also sees
 * the tasklet's side effects.
 */
static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

/* Spin until a concurrently-running instance of @t has finished. */
static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
553#else
554#define tasklet_trylock(t) 1
555#define tasklet_unlock_wait(t) do { } while (0)
556#define tasklet_unlock(t) do { } while (0)
557#endif
558
559extern void __tasklet_schedule(struct tasklet_struct *t);
560
561static inline void tasklet_schedule(struct tasklet_struct *t)
562{
563 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
564 __tasklet_schedule(t);
565}
566
567extern void __tasklet_hi_schedule(struct tasklet_struct *t);
568
569static inline void tasklet_hi_schedule(struct tasklet_struct *t)
570{
571 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
572 __tasklet_hi_schedule(t);
573}
574
575extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);
576
577
578
579
580
581
582
/*
 * Like tasklet_hi_schedule(), but inserts @t at the head of the
 * high-priority list without touching other tasklets.  NOTE(review):
 * upstream restricts this to special callers (it avoids walking the
 * list); prefer tasklet_hi_schedule() unless you know you need this.
 */
static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule_first(t);
}
588
589
/* Disable @t without waiting: it may still be running when this returns. */
static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

/*
 * Disable @t and wait for any running instance to finish (via the RUN
 * bit), so on return the callback is guaranteed not to be executing.
 */
static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

/* Undo one tasklet_disable*(); @t runs again once count drops to 0. */
static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

/* Identical to tasklet_enable(); separate name kept for hi tasklets. */
static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}
614
615extern void tasklet_kill(struct tasklet_struct *t);
616extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
617extern void tasklet_init(struct tasklet_struct *t,
618 void (*func)(unsigned long), unsigned long data);
619
/*
 * A tasklet_hrtimer defers its @function to tasklet (softirq) context
 * after the underlying hrtimer fires, instead of running it in hard
 * interrupt context.
 */
struct tasklet_hrtimer {
	struct hrtimer timer;		/* the underlying hrtimer */
	struct tasklet_struct tasklet;	/* runs @function in softirq context */
	enum hrtimer_restart (*function)(struct hrtimer *);
};

extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
		     enum hrtimer_restart (*function)(struct hrtimer *),
		     clockid_t which_clock, enum hrtimer_mode mode);

/* Arm the underlying hrtimer; @mode is relative/absolute per hrtimer_start(). */
static inline
int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			  const enum hrtimer_mode mode)
{
	return hrtimer_start(&ttimer->timer, time, mode);
}

/* Cancel the timer first, then kill the tasklet, so neither can fire after. */
static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
/*
 * IRQ autoprobing (probe_irq_on/off/mask).  When the generic hardirq
 * layer is used without CONFIG_GENERIC_IRQ_PROBE, probing is
 * unsupported and the stubs report "nothing found" (0).
 */
#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);
extern int probe_irq_off(unsigned long);
extern unsigned int probe_irq_mask(unsigned long);
#endif
691
#ifdef CONFIG_PROC_FS
/* Set up the /proc/irq/ hierarchy; no-op without procfs. */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

struct seq_file;
/* /proc/interrupts output; arch_show_interrupts() adds arch-specific rows. */
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

/* Early-boot irq setup hooks (arch_* are overridable per architecture). */
extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
708
709#endif
710