1
2#ifndef _LINUX_INTERRUPT_H
3#define _LINUX_INTERRUPT_H
4
5#include <linux/kernel.h>
6#include <linux/linkage.h>
7#include <linux/bitops.h>
8#include <linux/preempt.h>
9#include <linux/cpumask.h>
10#include <linux/irqreturn.h>
11#include <linux/irqnr.h>
12#include <linux/hardirq.h>
13#include <linux/irqflags.h>
14#include <linux/hrtimer.h>
15#include <linux/kref.h>
16#include <linux/workqueue.h>
17
18#include <linux/atomic.h>
19#include <asm/ptrace.h>
20#include <asm/irq.h>
21
22
23
24
25
26
27
28
/*
 * Interrupt line trigger types.
 *
 * These select the electrical behaviour of the interrupt line.  When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which may
 * be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE 0x00000000
#define IRQF_TRIGGER_RISING 0x00000001
#define IRQF_TRIGGER_FALLING 0x00000002
#define IRQF_TRIGGER_HIGH 0x00000004
#define IRQF_TRIGGER_LOW 0x00000008
#define IRQF_TRIGGER_MASK (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
/* Used by drivers requesting probing of a free falling/rising edge line. */
#define IRQF_TRIGGER_PROBE 0x00000010
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
/*
 * Flags used by drivers when requesting an interrupt line:
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler.
 *                 DEPRECATED: this flag is a NOOP, scheduled for removal.
 * IRQF_SHARED - allow sharing the irq line among several devices.
 * IRQF_PROBE_SHARED - set by callers that expect sharing mismatches to occur.
 * __IRQF_TIMER - internal marker for timer interrupts; use IRQF_TIMER.
 * IRQF_PERCPU - interrupt is per cpu.
 * IRQF_NOBALANCING - exclude this interrupt from irq balancing.
 * IRQF_IRQPOLL - interrupt is used for polling.
 * IRQF_ONESHOT - the irq line is not re-enabled after the hardirq handler
 *                finishes; used by threaded interrupts that need the line
 *                kept disabled until the threaded handler has run.
 * IRQF_NO_SUSPEND - do not disable this IRQ during suspend.
 * IRQF_FORCE_RESUME - force-enable on resume even if IRQF_NO_SUSPEND is set.
 * IRQF_NO_THREAD - interrupt cannot be threaded.
 * IRQF_EARLY_RESUME - resume the IRQ early (syscore) instead of at device
 *                resume time.
 */
#define IRQF_DISABLED 0x00000020
#define IRQF_SHARED 0x00000080
#define IRQF_PROBE_SHARED 0x00000100
#define __IRQF_TIMER 0x00000200
#define IRQF_PERCPU 0x00000400
#define IRQF_NOBALANCING 0x00000800
#define IRQF_IRQPOLL 0x00001000
#define IRQF_ONESHOT 0x00002000
#define IRQF_NO_SUSPEND 0x00004000
#define IRQF_FORCE_RESUME 0x00008000
#define IRQF_NO_THREAD 0x00010000
#define IRQF_EARLY_RESUME 0x00020000

/* Convenience combination used by architecture timer interrupts. */
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
75
76
77
78
79
80
81
82
/*
 * Interrupt context returned by request_any_context_irq():
 *
 * IRQC_IS_HARDIRQ - the handler runs in hard interrupt context
 * IRQC_IS_NESTED  - the handler runs as a nested threaded handler
 */
enum {
	IRQC_IS_HARDIRQ = 0,
	IRQC_IS_NESTED,
};

/*
 * Interrupt handler prototype: (irq number, per-device cookie) ->
 * irqreturn_t telling the core whether the interrupt was handled.
 */
typedef irqreturn_t (*irq_handler_t)(int, void *);
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	cookie to identify the device (per-cpu interrupts)
 * @next:	pointer to the next irqaction for shared interrupts
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @irq:	interrupt number
 * @flags:	flags (see the IRQF_* definitions above)
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @name:	name of the device
 * @dir:	pointer to the proc/irq/NN/name entry
 *
 * Internode-cacheline-aligned to avoid false sharing between nodes.
 */
struct irqaction {
	irq_handler_t handler;
	void *dev_id;
	void __percpu *percpu_dev_id;
	struct irqaction *next;
	irq_handler_t thread_fn;
	struct task_struct *thread;
	unsigned int irq;
	unsigned int flags;
	unsigned long thread_flags;
	unsigned long thread_mask;
	const char *name;
	struct proc_dir_entry *dir;
} ____cacheline_internodealigned_in_smp;
119
/* Dummy handler that performs no action (used to reserve an irq line). */
extern irqreturn_t no_action(int cpl, void *dev_id);

/*
 * request_threaded_irq - allocate an interrupt line
 * @irq:	interrupt number to allocate
 * @handler:	primary handler, runs in hard irq context; may return
 *		IRQ_WAKE_THREAD to defer work to @thread_fn
 * @thread_fn:	optional handler run from an irq thread in process context
 * @flags:	IRQF_* flags (see above)
 * @name:	an ascii name for the claiming device
 * @dev:	a cookie passed back to the handler and used on free_irq()
 */
extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

/* Classic request_irq(): primary handler only, no threaded part. */
static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}
133
/*
 * Like request_irq(), but installs the handler as a nested threaded
 * handler if the irq line requires it.  On success returns
 * IRQC_IS_HARDIRQ or IRQC_IS_NESTED to report which context was chosen.
 */
extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

/* Request a per-cpu interrupt; @percpu_dev_id is the per-cpu cookie. */
extern int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id);

/* Release an interrupt line; the cookie identifies the action to remove. */
extern void free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);

struct device;

/*
 * Device-managed (devres) variant of request_threaded_irq(): the irq is
 * released automatically when @dev is unbound from its driver.
 */
extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);
152
/* Device-managed request_irq(): no threaded handler. */
static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}

/* Device-managed request_any_context_irq(). */
extern int __must_check
devm_request_any_context_irq(struct device *dev, unsigned int irq,
			     irq_handler_t handler, unsigned long irqflags,
			     const char *devname, void *dev_id);

/* Explicitly release a device-managed irq before automatic cleanup. */
extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
167
168
169
170
171
172
173
174
175
176
177
178
179
/*
 * Under lockdep, hardirq handlers run with interrupts kept disabled so
 * the validator's view stays consistent; re-enabling inside one is
 * therefore a no-op there.  Otherwise it maps to local_irq_enable().
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq() do { } while (0)
#else
# define local_irq_enable_in_hardirq() local_irq_enable()
#endif

/* Disable an irq line without (nosync) / with waiting for running handlers. */
extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
/* Wake the irq thread associated with @dev_id on line @irq. */
extern void irq_wake_thread(unsigned int irq, void *dev_id);

/* Hooks used by the power-management core around system suspend/resume. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
196
197
198
199
200
201
202
203
204
205
206
207
208
/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:	Interrupt to which notification applies
 * @kref:	Reference count, for internal use
 * @work:	Work item, for internal use
 * @notify:	Function to be called on change.  This will be
 *		called in process context.
 * @release:	Function to be called on release.  This will be
 *		called in process context.  Once registered, the
 *		structure must only be freed when this function is
 *		called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};
216
#if defined(CONFIG_SMP)

/* Default affinity mask applied to newly set up interrupts. */
extern cpumask_var_t irq_default_affinity;

/* Internal worker; use irq_set_affinity() / irq_force_affinity() instead. */
extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
			      bool force);
224
225
226
227
228
229
230
231
/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Fails if cpumask does not contain an online CPU
 */
static inline int
irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, false);
}
237
238
239
240
241
242
243
244
245
246
247
248
/**
 * irq_force_affinity - Force the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @cpumask:	cpumask
 *
 * Same as irq_set_affinity, but without checking the mask against
 * online cpus.
 */
static inline int
irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return __irq_set_affinity(irq, cpumask, true);
}
254
/* Can the affinity of @irq be changed at all? */
extern int irq_can_set_affinity(unsigned int irq);
/* Choose and apply a default affinity target for @irq. */
extern int irq_select_affinity(unsigned int irq);

/* Advisory affinity hint for user-space irq balancers; NULL clears it. */
extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);

/*
 * Register @notify to be called (in process context, via its work item)
 * when the affinity of @irq changes; NULL unregisters.
 */
extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
262
#else /* !CONFIG_SMP */

/*
 * UP stubs: with a single CPU there is no affinity to manage.
 * irq_set_affinity() and irq_set_affinity_hint() report -EINVAL;
 * the remaining helpers succeed trivially.
 */
static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return 0;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}

static inline int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	return 0;
}
#endif /* CONFIG_SMP */
294
295
296
297
298
299
300
301
302
303
304
305
/*
 * Lockdep-aware irq line disable/enable helpers: on lockdep kernels
 * they additionally disable/enable local interrupts so the dependency
 * validator sees the same irq state the calling code assumes.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

/* As above, but saves the irq flags into *flags (lockdep builds only). */
static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

/* Synchronous disable_irq() plus local irq disable under lockdep. */
static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

/* Counterpart of disable_irq_lockdep() / disable_irq_nosync_lockdep(). */
static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

/* Counterpart of disable_irq_nosync_lockdep_irqsave(). */
static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}
345
346
/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

/* Allow @irq to wake the system from a sleep state. */
static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

/* Revert enable_irq_wake(). */
static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}
358
359
#ifdef CONFIG_IRQ_FORCED_THREADING
/* True when interrupt handlers are forced into threads ("threadirqs"). */
extern bool force_irqthreads;
#else
#define force_irqthreads (0)
#endif

/* Default accessors for the per-cpu pending-softirq mask; an arch may
 * override them by defining __ARCH_SET_SOFTIRQ_PENDING. */
#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x) (local_softirq_pending() |= (x))
#endif

/*
 * Some architectures disable interrupts lazily; hard_irq_disable()
 * forces a real hardware disable there.  Architectures that always
 * hard-disable leave it a no-op.
 */
#ifndef hard_irq_disable
#define hard_irq_disable() do { } while(0)
#endif
380
381
382
383
384
385
386
/*
 * PLEASE, avoid to allocate new softirqs, if you need not _really_ high
 * frequency threaded job scheduling.  For almost all the purposes
 * tasklets are more than enough.
 */
enum
{
	HI_SOFTIRQ=0,		/* high-priority tasklets */
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	BLOCK_IOPOLL_SOFTIRQ,
	TASKLET_SOFTIRQ,	/* normal tasklets */
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,		/* preferably keep RCU as the last softirq */

	NR_SOFTIRQS
};

/* Softirqs that keep a CPU out of idle; RCU_SOFTIRQ is excluded since
 * the RCU core copes with idle CPUs itself. */
#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
404
405
406
407
/* Human-readable softirq names, indexed by the softirq enum above. */
extern const char * const softirq_to_name[NR_SOFTIRQS];

/*
 * Softirq vector entry: 'action' runs in softirq (bottom-half) context
 * when the corresponding pending bit is raised.  Registered with
 * open_softirq().
 */
struct softirq_action
{
	void (*action)(struct softirq_action *);
};
418
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);

#ifdef __ARCH_HAS_DO_SOFTIRQ
/* The architecture supplies its own stack switch for softirq processing. */
void do_softirq_own_stack(void);
#else
/* Generic fallback: process softirqs on the current stack. */
static inline void do_softirq_own_stack(void)
{
	__do_softirq();
}
#endif

/* Register @action as the handler for softirq vector @nr. */
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
/* _irqoff variants: the caller must already have interrupts disabled. */
extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
437
/* Per-cpu kernel thread that runs pending softirqs in process context. */
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

/* Return the ksoftirqd task of the current CPU. */
static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
/*
 * Tasklets --- multithreaded analogue of BHs.
 *
 * Properties:
 * - a tasklet runs on at most one CPU at a time;
 * - once scheduled it is guaranteed to run at least once afterwards;
 * - if scheduled again before it starts, it still runs only once;
 * - a disabled tasklet (count != 0) may be scheduled but will not run
 *   until it is enabled again.
 */
struct tasklet_struct
{
	struct tasklet_struct *next;	/* next tasklet in the per-cpu list */
	unsigned long state;		/* TASKLET_STATE_* bits */
	atomic_t count;			/* 0 == enabled, > 0 == disabled */
	void (*func)(unsigned long);	/* payload, invoked with 'data' */
	unsigned long data;
};

/* Static initializers: enabled (count 0) and disabled (count 1) tasklets. */
#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
479
480
enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
/* Try to take the per-tasklet run "lock"; nonzero on success. */
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	/* Order the payload's stores before publishing "not running". */
	smp_mb__before_atomic();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

/* Spin until a concurrently running instance of @t has finished. */
static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
/* UP: a tasklet can never run concurrently with this CPU. */
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif
508
509extern void __tasklet_schedule(struct tasklet_struct *t);
510
511static inline void tasklet_schedule(struct tasklet_struct *t)
512{
513 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
514 __tasklet_schedule(t);
515}
516
517extern void __tasklet_hi_schedule(struct tasklet_struct *t);
518
519static inline void tasklet_hi_schedule(struct tasklet_struct *t)
520{
521 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
522 __tasklet_hi_schedule(t);
523}
524
525extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);
526
527
528
529
530
531
532
533static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
534{
535 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
536 __tasklet_hi_schedule_first(t);
537}
538
539
/* Disable @t without waiting for a running instance to finish. */
static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	/* Make the raised count visible before subsequent state reads. */
	smp_mb__after_atomic();
}

/* Disable @t and wait until any currently running instance has finished. */
static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);	/* spins on TASKLET_STATE_RUN (SMP only) */
	smp_mb();
}

/* Re-enable @t: undoes one tasklet_disable{,_nosync}() call. */
static inline void tasklet_enable(struct tasklet_struct *t)
{
	/* Order prior stores before the count is seen to drop. */
	smp_mb__before_atomic();
	atomic_dec(&t->count);
}
558
/* Wait for @t to finish and ensure it will not run again. */
extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);

/*
 * Tasklet-based hrtimer: the hrtimer fires in hard irq context and the
 * user's 'function' is deferred to 'tasklet' (softirq) context.
 */
struct tasklet_hrtimer {
	struct hrtimer timer;
	struct tasklet_struct tasklet;
	enum hrtimer_restart (*function)(struct hrtimer *);
};
569
/* Initialize @ttimer so @function runs in tasklet context on expiry. */
extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
		     enum hrtimer_restart (*function)(struct hrtimer *),
		     clockid_t which_clock, enum hrtimer_mode mode);

/**
 * tasklet_hrtimer_start - start a tasklet hrtimer
 * @ttimer:	tasklet_hrtimer to start
 * @time:	expiry time, absolute or relative per @mode
 * @mode:	hrtimer mode (e.g. HRTIMER_MODE_ABS / HRTIMER_MODE_REL)
 */
static inline
int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			  const enum hrtimer_mode mode)
{
	return hrtimer_start(&ttimer->timer, time, mode);
}
581
/**
 * tasklet_hrtimer_cancel - cancel a tasklet hrtimer
 * @ttimer:	tasklet_hrtimer to cancel
 *
 * Cancels the pending timer first, then kills the deferring tasklet,
 * so neither can fire afterwards.
 */
static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
/*
 * Autoprobing for irqs.  Without CONFIG_GENERIC_IRQ_PROBE the stubs
 * behave as if no interrupt was ever detected.
 */
#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif
635
#ifdef CONFIG_PROC_FS
/* Create the /proc/irq/ hierarchy. */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

struct seq_file;
/* Render interrupt statistics; the arch hook adds arch-specific rows. */
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

/* Early boot-time initialization hooks for the irq subsystem. */
extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
652
653#endif
654