1
2#ifndef _LINUX_INTERRUPT_H
3#define _LINUX_INTERRUPT_H
4
5#include <linux/kernel.h>
6#include <linux/linkage.h>
7#include <linux/bitops.h>
8#include <linux/preempt.h>
9#include <linux/cpumask.h>
10#include <linux/irqreturn.h>
11#include <linux/irqnr.h>
12#include <linux/hardirq.h>
13#include <linux/irqflags.h>
14#include <linux/hrtimer.h>
15#include <linux/kref.h>
16#include <linux/workqueue.h>
17
18#include <linux/atomic.h>
19#include <asm/ptrace.h>
20#include <asm/irq.h>
21
22
23
24
25
26
27
28
/*
 * Interrupt line trigger types, usable by drivers that need to request
 * an interrupt with a specific trigger mode in the IRQF_* flags.
 * IRQF_TRIGGER_NONE means "keep the trigger as already configured".
 */
#define IRQF_TRIGGER_NONE 0x00000000
#define IRQF_TRIGGER_RISING 0x00000001	/* rising edge */
#define IRQF_TRIGGER_FALLING 0x00000002	/* falling edge */
#define IRQF_TRIGGER_HIGH 0x00000004	/* high level */
#define IRQF_TRIGGER_LOW 0x00000008	/* low level */
#define IRQF_TRIGGER_MASK (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE 0x00000010	/* trigger to be detected by autoprobing */
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
/*
 * Flags used by request_irq() and friends:
 *
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * __IRQF_TIMER - Flag to mark this interrupt as timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                resume time.
 * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
 *                interrupt handler after suspending interrupts. For system
 *                wakeup devices users need to implement wakeup detection in
 *                their interrupt handlers.
 */
#define IRQF_SHARED 0x00000080
#define IRQF_PROBE_SHARED 0x00000100
#define __IRQF_TIMER 0x00000200
#define IRQF_PERCPU 0x00000400
#define IRQF_NOBALANCING 0x00000800
#define IRQF_IRQPOLL 0x00001000
#define IRQF_ONESHOT 0x00002000
#define IRQF_NO_SUSPEND 0x00004000
#define IRQF_FORCE_RESUME 0x00008000
#define IRQF_NO_THREAD 0x00010000
#define IRQF_EARLY_RESUME 0x00020000
#define IRQF_COND_SUSPEND 0x00040000

/* Timer interrupts are never suspended and never threaded. */
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
79
80
81
82
83
84
85
86
/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 */
enum {
	IRQC_IS_HARDIRQ = 0,	/* handler runs in hard interrupt context */
	IRQC_IS_NESTED,		/* handler runs nested/threaded */
};

/* Prototype for all interrupt handlers: (irq number, dev_id cookie). */
typedef irqreturn_t (*irq_handler_t)(int, void *);
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	cookie to identify the device (per-cpu variant)
 * @next:	pointer to the next irqaction for shared interrupts
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @irq:	interrupt number
 * @flags:	flags (see IRQF_* above)
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @name:	name of the device
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t handler;
	void *dev_id;
	void __percpu *percpu_dev_id;
	struct irqaction *next;
	irq_handler_t thread_fn;
	struct task_struct *thread;
	unsigned int irq;
	unsigned int flags;
	unsigned long thread_flags;
	unsigned long thread_mask;
	const char *name;
	struct proc_dir_entry *dir;
} ____cacheline_internodealigned_in_smp;
123
/* Dummy handler for interrupts that need no action. */
extern irqreturn_t no_action(int cpl, void *dev_id);

/*
 * Allocate an interrupt line. @handler runs in hard interrupt context;
 * @thread_fn (may be NULL) runs in a dedicated kernel thread.
 */
extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);
130
/*
 * request_irq - allocate an interrupt line with a hardirq-only handler.
 * A plain request is a threaded request without a thread function.
 */
static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}
137
/*
 * Like request_irq(), but the handler may end up running in either hardirq
 * or threaded context; returns IRQC_IS_HARDIRQ or IRQC_IS_NESTED on success.
 */
extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

/* Allocate a per-cpu interrupt line; @percpu_dev_id must be per-cpu data. */
extern int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id);

/* Release an interrupt line; the dev_id cookie selects the action to free. */
extern void free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);
148
struct device;

/*
 * Device-managed variant of request_threaded_irq(): the irq is freed
 * automatically when @dev is unbound (devres), so no explicit free_irq()
 * is needed on the normal teardown path.
 */
extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

/* Device-managed request_irq(): threaded variant with no thread function. */
static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}
164
/* Device-managed variant of request_any_context_irq(). */
extern int __must_check
devm_request_any_context_irq(struct device *dev, unsigned int irq,
			     irq_handler_t handler, unsigned long irqflags,
			     const char *devname, void *dev_id);

/* Explicitly free a devres-managed irq before device teardown. */
extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
171
172
173
174
175
176
177
178
179
180
181
182
183
/*
 * With lockdep enabled, enabling interrupts while in hard interrupt
 * context is disallowed (it would confuse lock-state tracking), so this
 * becomes a no-op there; without lockdep it is a plain local_irq_enable().
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq() do { } while (0)
#else
# define local_irq_enable_in_hardirq() local_irq_enable()
#endif

/* Disable/enable a specific interrupt line ("nosync" does not wait for
 * in-flight handlers to complete; plain disable_irq() does). */
extern void disable_irq_nosync(unsigned int irq);
extern bool disable_hardirq(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern void irq_wake_thread(unsigned int irq, void *dev_id);

/* PM: disable/re-enable device interrupts across system suspend/resume. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
201
202
203
204
205
206
207
208
209
210
211
212
213
/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:		Interrupt to which notification applies
 * @kref:		Reference count, for internal use
 * @work:		Work item, for internal use
 * @notify:		Function to be called on change.  This will be
 *			called in process context.
 * @release:		Function to be called on release.  This will be
 *			called in process context.  Once registered, the
 *			structure must only be freed when this function is
 *			called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};
221
#if defined(CONFIG_SMP)

/* Default affinity mask applied to newly set up interrupts. */
extern cpumask_var_t irq_default_affinity;

/* Internal implementation; use irq_set_affinity()/irq_force_affinity(). */
extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
			      bool force);

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Fails if cpumask does not contain an online CPU
 */
static inline int
irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, false);
}

/**
 * irq_force_affinity - Force the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Same as irq_set_affinity, but without checking the mask against
 * online cpus.
 *
 * Solely for low level cpu hotplug code, where we need to make per
 * cpu interrupts affine before the cpu becomes online.
 */
static inline int
irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, true);
}

extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

/* Hint to userspace (irqbalance) about the preferred affinity. */
extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);

/* Register/unregister (notify == NULL) an affinity-change notifier. */
extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

#else /* CONFIG_SMP */

/* UP stubs: there is only one CPU, so affinity cannot be set. */
static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return 0;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}

static inline int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	return 0;
}
#endif /* CONFIG_SMP */
299
300
301
302
303
304
305
306
307
308
309
310
/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that
 * know that a particular irq context which is disabled,
 * and which is the only irq-context user of a lock,
 * that it's safe to take the lock in the irq-disabled
 * section:
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	/* also disable local irqs so lockdep sees a consistent irq state */
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	/* as above, but preserving the previous irq-flags state in *flags */
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	/* re-enable local irqs before re-enabling the line */
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}
350
351
/* Configure whether @irq may wake the system from sleep states. */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}
363
364
365
366
/*
 * irq_get_irqchip_state/irq_set_irqchip_state specific interface state,
 * as seen/forced at the interrupt controller level:
 */
enum irqchip_irq_state {
	IRQCHIP_STATE_PENDING,		/* Is interrupt pending? */
	IRQCHIP_STATE_ACTIVE,		/* Is interrupt in progress? */
	IRQCHIP_STATE_MASKED,		/* Is interrupt masked? */
	IRQCHIP_STATE_LINE_LEVEL,	/* Is IRQ line high? */
};

extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool *state);
extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool state);
378
/* Non-zero when all (non-IRQF_NO_THREAD) handlers are forced into threads. */
#ifdef CONFIG_IRQ_FORCED_THREADING
extern bool force_irqthreads;
#else
#define force_irqthreads (0)
#endif

/*
 * Default accessors for the per-cpu softirq pending mask; architectures
 * with their own representation define __ARCH_SET_SOFTIRQ_PENDING.
 */
#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x) (local_softirq_pending() |= (x))
#endif

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable() do { } while(0)
#endif
399
400
401
402
403
404
405
/* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
   frequency threaded job scheduling. For almost all the purposes
   tasklets are more than enough. F.e. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */
enum
{
	HI_SOFTIRQ=0,		/* high-priority tasklets */
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	BLOCK_IOPOLL_SOFTIRQ,
	TASKLET_SOFTIRQ,	/* normal tasklets */
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,		/* Preferable RCU should always be the last softirq */

	NR_SOFTIRQS
};
421
/* Softirqs (all but RCU) that, when pending, keep the CPU out of idle. */
#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))

/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern const char * const softirq_to_name[NR_SOFTIRQS];
428
429
430
431
432
/* softirq action descriptor: one per NR_SOFTIRQS entry, holding the
 * function executed when that softirq is raised. */
struct softirq_action
{
	void (*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);

/*
 * Architectures with a dedicated softirq stack (__ARCH_HAS_DO_SOFTIRQ)
 * provide their own entry; otherwise run softirqs on the current stack.
 */
#ifdef __ARCH_HAS_DO_SOFTIRQ
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
{
	__do_softirq();
}
#endif
449
/* Register the handler for softirq @nr (boot-time only; see enum above). */
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

/* Mark softirq @nr pending on this CPU (the _irqoff variant requires
 * interrupts to already be disabled by the caller). */
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);

DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

/* Return this CPU's ksoftirqd thread. */
static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
/* Tasklets --- multithreaded analogue of BHs.

   Main feature differing them of generic softirqs: a tasklet
   is running only on one CPU simultaneously.

   Properties:
   * If tasklet_schedule() is called, then tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution is still not
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from tasklet itself), it is rescheduled for later.
   * Tasklet is strictly serialized wrt itself, but not
     wrt another tasklets. If client needs some intertask synchronization,
     he makes it with spinlocks.
 */
struct tasklet_struct
{
	struct tasklet_struct *next;	/* singly-linked per-cpu queue */
	unsigned long state;		/* TASKLET_STATE_* bits */
	atomic_t count;			/* 0 == enabled, >0 == disabled */
	void (*func)(unsigned long);
	unsigned long data;
};

/* Static initializers; the DISABLED variant starts with count == 1. */
#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
498
499
enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};
505
/*
 * On SMP the RUN bit serializes a tasklet against itself across CPUs.
 * On UP that serialization is implicit, so these collapse to no-ops.
 */
#ifdef CONFIG_SMP
/* Claim the RUN bit; non-zero on success. */
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	/* order prior stores before releasing the RUN bit */
	smp_mb__before_atomic();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

/* Spin until a concurrently running instance of @t has finished. */
static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif
527
528extern void __tasklet_schedule(struct tasklet_struct *t);
529
530static inline void tasklet_schedule(struct tasklet_struct *t)
531{
532 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
533 __tasklet_schedule(t);
534}
535
536extern void __tasklet_hi_schedule(struct tasklet_struct *t);
537
538static inline void tasklet_hi_schedule(struct tasklet_struct *t)
539{
540 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
541 __tasklet_hi_schedule(t);
542}
543
544extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);
545
546
547
548
549
550
551
552static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
553{
554 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
555 __tasklet_hi_schedule_first(t);
556}
557
558
/* Disable @t without waiting for a running instance to finish. */
static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	/* make the disable count visible before subsequent accesses */
	smp_mb__after_atomic();
}

/* Disable @t and wait until any running instance has completed. */
static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

/* Re-enable @t; pairs with tasklet_disable{,_nosync}(). */
static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	atomic_dec(&t->count);
}
577
/* Wait for @t to stop being scheduled/running, then kill it. Must not be
 * called from interrupt context. */
extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);

/*
 * An hrtimer whose expiry function is deferred to tasklet (softirq)
 * context: @timer fires in hard interrupt context and schedules
 * @tasklet, which then invokes @function.
 */
struct tasklet_hrtimer {
	struct hrtimer timer;
	struct tasklet_struct tasklet;
	enum hrtimer_restart (*function)(struct hrtimer *);
};
588
/* Initialize @ttimer; @function will run in tasklet (softirq) context. */
extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
		     enum hrtimer_restart (*function)(struct hrtimer *),
		     clockid_t which_clock, enum hrtimer_mode mode);

/* Arm the underlying hrtimer for expiry @time in the given @mode. */
static inline
int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			  const enum hrtimer_mode mode)
{
	return hrtimer_start(&ttimer->timer, time, mode);
}

/* Cancel the timer and kill the deferral tasklet (waits for it). */
static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
/*
 * Autoprobing for irqs: probe_irq_on() returns a mask of candidate
 * lines; after triggering the device, probe_irq_off(mask) reports the
 * line that fired. Stubbed out when the generic probing code is not
 * built (CONFIG_GENERIC_IRQ_PROBE unset).
 */
#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif
654
#ifdef CONFIG_PROC_FS
/* Create the /proc/irq/ hierarchy; no-op without procfs. */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

struct seq_file;
/* Render /proc/interrupts; arch_show_interrupts() adds the arch rows. */
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

/* Early-boot irq subsystem setup hooks (weak arch overrides). */
extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
671
672#endif
673