/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/irq.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour.  When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010

/*
 * These flags are used by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler.
 *                 DEPRECATED. This flag is a NOOP and scheduled to be removed.
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as a timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first on a shared line is considered, for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume the IRQ early, during syscore resume, instead of
 *                at device resume time
 */
#define IRQF_DISABLED		0x00000020
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000
#define IRQF_EARLY_RESUME	0x00020000

#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)

/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	cookie to identify the device (per-cpu variant)
 * @next:	pointer to the next irqaction for shared interrupts
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @irq:	interrupt number
 * @flags:	flags (see IRQF_* above)
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @name:	name of the device
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t		handler;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	unsigned int		irq;
	unsigned int		flags;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
	struct proc_dir_entry	*dir;
} ____cacheline_internodealigned_in_smp;

extern irqreturn_t no_action(int cpl, void *dev_id);

extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}
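
/*
 * Illustrative sketch (not part of this header): requesting a shared,
 * threaded interrupt and freeing it again.  The mydev_* helpers, struct
 * mydev and MYDEV_IRQ below are hypothetical placeholders, not kernel API.
 *
 *	static irqreturn_t mydev_hardirq(int irq, void *dev_id)
 *	{
 *		struct mydev *md = dev_id;
 *
 *		if (!mydev_irq_pending(md))
 *			return IRQ_NONE;	// not ours (line is shared)
 *		mydev_mask_irq(md);
 *		return IRQ_WAKE_THREAD;		// run mydev_thread_fn()
 *	}
 *
 *	static irqreturn_t mydev_thread_fn(int irq, void *dev_id)
 *	{
 *		struct mydev *md = dev_id;
 *
 *		mydev_handle_events(md);	// process context, may sleep
 *		mydev_unmask_irq(md);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(MYDEV_IRQ, mydev_hardirq, mydev_thread_fn,
 *				   IRQF_SHARED, "mydev", md);
 *	...
 *	free_irq(MYDEV_IRQ, md);	// same dev_id as passed when requesting
 */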

extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id);

extern void free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}

extern int __must_check
devm_request_any_context_irq(struct device *dev, unsigned int irq,
			     irq_handler_t handler, unsigned long irqflags,
			     const char *devname, void *dev_id);

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);

/*
 * With lockdep enabled we do not want hardirqs re-enabled while still in
 * hardirq context, so local_irq_enable_in_hardirq() becomes a no-op there.
 * Without lockdep it is a plain local_irq_enable().  It annotates the few
 * places (mostly old or very slow hardware) that have to re-enable
 * interrupts from within a hard interrupt handler.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif

extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);

/* The following three functions are for the core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
#ifdef CONFIG_PM_SLEEP
extern int check_wakeup_irqs(void);
#else
static inline int check_wakeup_irqs(void) { return 0; }
#endif

#if defined(CONFIG_SMP)

extern cpumask_var_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);

/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:		Interrupt to which the notification applies
 * @kref:		Reference count, for internal use
 * @work:		Work item, for internal use
 * @notify:		Function to be called on change.  This will be
 *			called in process context.
 * @release:		Function to be called on release.  This will be
 *			called in process context.  Once registered, the
 *			structure must only be freed when this function is
 *			called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

#else

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}
#endif
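
/*
 * Illustrative sketch (hypothetical driver code, not kernel API): a driver
 * can publish a preferred CPU for its interrupt so that user space
 * irqbalance may honour it.  MYDEV_IRQ and md->home_cpu are placeholders.
 *
 *	err = irq_set_affinity_hint(MYDEV_IRQ, cpumask_of(md->home_cpu));
 *	...
 *	irq_set_affinity_hint(MYDEV_IRQ, NULL);	// clear the hint before free_irq()
 */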

/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that
 * know that a particular irq context which is disabled,
 * and which is the only irq-context user of a lock,
 * that it's safe to take the lock in the irq-disabled
 * section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}

#ifdef CONFIG_IRQ_FORCED_THREADING
extern bool force_irqthreads;
#else
#define force_irqthreads	(0)
#endif

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while(0)
#endif

/* Please avoid allocating new softirqs unless you really need very
 * high-frequency threaded job scheduling.  For almost all purposes
 * tasklets are more than enough; for example, all serial device
 * bottom halves should be converted to tasklets, not to softirqs.
 */
enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	BLOCK_IOPOLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,

	NR_SOFTIRQS
};

#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))

/* map softirq index to softirq name. 'nr' can be 0 to NR_SOFTIRQS - 1 */
extern const char * const softirq_to_name[NR_SOFTIRQS];

/*
 * A softirq action: one entry of the softirq vector, registered with
 * open_softirq() for one of the NR_SOFTIRQS indices above.
 */
struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);

#ifdef __ARCH_HAS_DO_SOFTIRQ
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
{
	__do_softirq();
}
#endif

extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);

DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}
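
/*
 * Illustrative sketch of how a softirq is wired up (this is the pattern
 * kernel/softirq.c uses for the entries of the NR_SOFTIRQS enum above).
 * MY_SOFTIRQ and my_softirq_action are hypothetical; new softirqs should
 * normally not be added - see the tasklet comment below.
 *
 *	static void my_softirq_action(struct softirq_action *h)
 *	{
 *		// softirq context: interrupts enabled, must not sleep
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_softirq_action);	// once, at init time
 *
 *	// later, typically from a hardirq handler:
 *	raise_softirq(MY_SOFTIRQ);
 *	// or, when interrupts are already known to be disabled:
 *	raise_softirq_irqoff(MY_SOFTIRQ);
 */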

/* Tasklets --- the multithreaded analogue of BHs.

   The main feature differentiating them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   The main feature differentiating them from BHs: different tasklets
   may run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, the tasklet is guaranteed to be
     executed on some CPU at least once afterwards.
   * If the tasklet is already scheduled but has not started executing,
     it will be executed only once.
   * If the tasklet is already running on another CPU (or if schedule is
     called from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized with respect to itself, but not with
     respect to other tasklets.  If the client needs inter-tasklet
     synchronization, it must provide it with spinlocks.
 */
struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }

enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);

/*
 * This version avoids touching any other tasklets: it queues the tasklet
 * at the head of the high-priority list without looking at the rest of
 * the queue.  The caller must have interrupts disabled.
 */
static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule_first(t);
}

static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
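
/*
 * Illustrative sketch (hypothetical struct mydev and mydev_* helpers):
 * deferring work from a hard interrupt handler to a tasklet.
 *
 *	static void mydev_tasklet_fn(unsigned long data)
 *	{
 *		struct mydev *md = (struct mydev *)data;
 *
 *		mydev_process_events(md);	// softirq context, must not sleep
 *	}
 *
 *	// setup, e.g. at probe time:
 *	tasklet_init(&md->tasklet, mydev_tasklet_fn, (unsigned long)md);
 *	// (a static tasklet could instead use DECLARE_TASKLET())
 *
 *	// from the hardirq handler:
 *	tasklet_schedule(&md->tasklet);
 *
 *	// teardown, once no new interrupts can arrive:
 *	tasklet_kill(&md->tasklet);
 */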

struct tasklet_hrtimer {
	struct hrtimer		timer;
	struct tasklet_struct	tasklet;
	enum hrtimer_restart	(*function)(struct hrtimer *);
};

extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
		     enum hrtimer_restart (*function)(struct hrtimer *),
		     clockid_t which_clock, enum hrtimer_mode mode);

static inline
int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			  const enum hrtimer_mode mode)
{
	return hrtimer_start(&ttimer->timer, time, mode);
}

static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}
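
/*
 * Illustrative sketch (hypothetical struct mydev with a tasklet_hrtimer
 * member "th" and a mydev_poll() helper): the hrtimer fires in hard
 * interrupt context and the callback below then runs from the associated
 * tasklet, i.e. in softirq context.
 *
 *	static enum hrtimer_restart mydev_timer_fn(struct hrtimer *t)
 *	{
 *		struct mydev *md = container_of(t, struct mydev, th.timer);
 *
 *		mydev_poll(md);			// tasklet context, must not sleep
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&md->th, mydev_timer_fn,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&md->th, ktime_set(0, NSEC_PER_MSEC),
 *			      HRTIMER_MODE_REL);
 *	...
 *	tasklet_hrtimer_cancel(&md->th);
 */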

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives for IRQ
 * probing during kernel initialization.  They are reasonably simple to
 * use, are not "fooled" by spurious interrupts, and, unlike other
 * attempts at IRQ probing, they do not get hung on stuck interrupts.
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. enable local interrupts.
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get the IRQ number of the device
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of snatched-up, previously unassigned irqs.
 * More than one irq may be taken over during probing; the returned mask
 * must be passed unchanged to probe_irq_off().
 *
 * probe_irq_off() takes that mask and returns the irq number on which the
 * device reported an interrupt, 0 if no interrupt was seen, or a negative
 * number if more than one irq fired.
 *
 * probe_irq_mask() returns the subset of the mask whose irqs actually fired.
 */
#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);
extern int probe_irq_off(unsigned long);
extern unsigned int probe_irq_mask(unsigned long);
#endif

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

#endif