1
2#ifndef _LINUX_INTERRUPT_H
3#define _LINUX_INTERRUPT_H
4
5#include <linux/kernel.h>
6#include <linux/linkage.h>
7#include <linux/bitops.h>
8#include <linux/preempt.h>
9#include <linux/cpumask.h>
10#include <linux/irqreturn.h>
11#include <linux/irqnr.h>
12#include <linux/hardirq.h>
13#include <linux/irqflags.h>
14#include <linux/hrtimer.h>
15#include <linux/kref.h>
16#include <linux/workqueue.h>
17
18#include <linux/atomic.h>
19#include <asm/ptrace.h>
20#include <asm/irq.h>
21
22
23
24
25
26
27
28
/*
 * Trigger-type flags for request_irq() and friends: they select the
 * electrical signalling on the interrupt line.  The values form a bitmask
 * so edge/level selections can be combined and tested via IRQF_TRIGGER_MASK.
 */
#define IRQF_TRIGGER_NONE 0x00000000	/* leave the trigger type as-is */
#define IRQF_TRIGGER_RISING 0x00000001	/* rising edge triggered */
#define IRQF_TRIGGER_FALLING 0x00000002	/* falling edge triggered */
#define IRQF_TRIGGER_HIGH 0x00000004	/* high level triggered */
#define IRQF_TRIGGER_LOW 0x00000008	/* low level triggered */
#define IRQF_TRIGGER_MASK (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE 0x00000010	/* trigger probing in progress */
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
/*
 * Flags used by drivers when requesting an interrupt line:
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler.
 *                 DEPRECATED: this is the default behaviour now, so the
 *                 flag is effectively a NOOP.
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches
 * __IRQF_TIMER - internal marker for timer interrupts; use IRQF_TIMER
 * IRQF_PERCPU - interrupt is per cpu
 * IRQF_NOBALANCING - exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - interrupt is used for polling
 * IRQF_ONESHOT - interrupt is not reenabled after the hardirq handler
 *                finished; used by threaded interrupts which need the
 *                irq line kept disabled until the thread handler has run
 * IRQF_NO_SUSPEND - do not disable this IRQ during suspend
 * IRQF_FORCE_RESUME - force enable it on resume even if IRQF_NO_SUSPEND set
 * IRQF_NO_THREAD - interrupt cannot be threaded
 * IRQF_EARLY_RESUME - resume IRQ early during syscore instead of at device
 *                     resume time
 */
#define IRQF_DISABLED 0x00000020
#define IRQF_SHARED 0x00000080
#define IRQF_PROBE_SHARED 0x00000100
#define __IRQF_TIMER 0x00000200
#define IRQF_PERCPU 0x00000400
#define IRQF_NOBALANCING 0x00000800
#define IRQF_IRQPOLL 0x00001000
#define IRQF_ONESHOT 0x00002000
#define IRQF_NO_SUSPEND 0x00004000
#define IRQF_FORCE_RESUME 0x00008000
#define IRQF_NO_THREAD 0x00010000
#define IRQF_EARLY_RESUME 0x00020000

/* Convenience combination for timer interrupts: never threaded or suspended. */
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
75
76
77
78
79
80
81
82
/*
 * Interrupt-context classification, e.g. as returned by
 * request_any_context_irq():
 *
 * IRQC_IS_HARDIRQ - handler runs in hardirq context
 * IRQC_IS_NESTED  - handler runs in a nested (threaded) context
 */
enum {
	IRQC_IS_HARDIRQ = 0,
	IRQC_IS_NESTED,
};

/*
 * irq_handler_t - prototype of an interrupt handler
 *
 * First argument is the irq number, second is the @dev_id cookie supplied
 * at request time.  Returns an irqreturn_t (IRQ_HANDLED / IRQ_NONE / ...).
 */
typedef irqreturn_t (*irq_handler_t)(int, void *);
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function (hardirq context)
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	cookie to identify the device (percpu variant)
 * @next:	pointer to the next irqaction for shared interrupts
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @irq:	interrupt number
 * @flags:	flags (see IRQF_* above)
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @name:	name of the device
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t handler;
	void *dev_id;
	void __percpu *percpu_dev_id;
	struct irqaction *next;
	irq_handler_t thread_fn;
	struct task_struct *thread;
	unsigned int irq;
	unsigned int flags;
	unsigned long thread_flags;
	unsigned long thread_mask;
	const char *name;
	struct proc_dir_entry *dir;
} ____cacheline_internodealigned_in_smp;
119
/* Dummy handler that does nothing; defined in the irq core. */
extern irqreturn_t no_action(int cpl, void *dev_id);

/*
 * request_threaded_irq - allocate an interrupt line
 * @irq: interrupt line to allocate
 * @handler: primary handler, called in hardirq context (may be NULL if
 *           @thread_fn is given)
 * @thread_fn: handler run from the dedicated irq thread; NULL for none
 * @flags: IRQF_* flags above
 * @name: ascii name of the claiming device
 * @dev: cookie passed back to the handlers; must be unique for shared irqs
 */
extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);
126
127static inline int __must_check
128request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
129 const char *name, void *dev)
130{
131 return request_threaded_irq(irq, handler, NULL, flags, name, dev);
132}
133
/* Like request_irq(), but may fall back to a threaded (nested) handler;
 * returns an IRQC_IS_* value on success. */
extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

/* Allocate a per-cpu interrupt line; @percpu_dev_id must be a percpu cookie. */
extern int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id);

/* Release an interrupt line; the dev_id cookie selects the action to remove. */
extern void free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);
144
struct device;	/* forward declaration; full definition not needed here */

/*
 * Device-managed variant of request_threaded_irq(): the irq is
 * automatically released when @dev is unbound.
 */
extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);
152
153static inline int __must_check
154devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
155 unsigned long irqflags, const char *devname, void *dev_id)
156{
157 return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
158 devname, dev_id);
159}
160
/* Device-managed variant of request_any_context_irq(). */
extern int __must_check
devm_request_any_context_irq(struct device *dev, unsigned int irq,
			     irq_handler_t handler, unsigned long irqflags,
			     const char *devname, void *dev_id);

/* Explicitly release a device-managed irq before @dev goes away. */
extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
167
168
169
170
171
172
173
174
175
176
177
178
179
/*
 * With lockdep enabled, running handlers with irqs enabled is treated as
 * a bug, so re-enabling interrupts inside a hardirq handler is compiled
 * out to a no-op.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq() do { } while (0)
#else
# define local_irq_enable_in_hardirq() local_irq_enable()
#endif

/*
 * Disable/enable an interrupt line.  The _nosync variant returns without
 * waiting; disable_irq() also waits for running handlers to complete, so
 * it must not be called from irq context.
 */
extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
/* Wake the irq thread for the action matching @dev_id. */
extern void irq_wake_thread(unsigned int irq, void *dev_id);

/* Power management: bulk disable/re-enable of device irqs around suspend. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
#ifdef CONFIG_PM_SLEEP
extern int check_wakeup_irqs(void);
#else
static inline int check_wakeup_irqs(void) { return 0; }
#endif
201
#if defined(CONFIG_SMP)

/* Default affinity mask applied to newly set up interrupts. */
extern cpumask_var_t irq_default_affinity;

/* Internal worker; callers use irq_set_affinity()/irq_force_affinity(). */
extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
			      bool force);
209
210
211
212
213
214
215
216
217static inline int
218irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
219{
220 return __irq_set_affinity(irq, cpumask, false);
221}
222
223
224
225
226
227
228
229
230
231
232
233
234static inline int
235irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
236{
237 return __irq_set_affinity(irq, cpumask, true);
238}
239
/* Non-zero when the affinity of @irq may be changed. */
extern int irq_can_set_affinity(unsigned int irq);
/* Pick an initial affinity for @irq. */
extern int irq_select_affinity(unsigned int irq);

/* Record an affinity hint for @irq (advice for irq balancers, not a
 * hard setting); NULL clears the hint. */
extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);

/**
 * struct irq_affinity_notify - context for affinity-change notification
 * @irq:	interrupt to which the notification applies
 * @kref:	reference count, used internally
 * @work:	work item, used internally to run @notify in process context
 * @notify:	called whenever the affinity of @irq changes
 * @release:	called on kref_put(), typically frees the structure
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};

/* Register (or unregister, with NULL @notify) an affinity notifier. */
extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
267
268#else
269
/* On UP there is nowhere to steer interrupts: affinity calls are stubs. */
static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return 0;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}
292#endif
293
294
295
296
297
298
299
300
301
302
303
304
/*
 * Lockdep-aware wrappers around disable_irq()/enable_irq().  Under
 * CONFIG_LOCKDEP they additionally toggle the *local* cpu interrupt
 * state, so lockdep sees a consistent irq-off section around code that
 * relies on a specific device interrupt being masked.
 */

/* Disable @irq without waiting; under lockdep also disable local irqs. */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

/* As above, but saves the local irq state into *@flags (lockdep only). */
static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

/* Synchronous disable; under lockdep also disable local irqs. */
static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

/* Re-enable local irqs first (lockdep only), then the device irq. */
static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

/* Restore the saved local irq state (lockdep only), then enable @irq. */
static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}
344
345
/* Control whether @irq may wake the system from suspend (on = 1/0). */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);
347
/* Mark @irq as a wakeup source; wrapper for irq_set_irq_wake(irq, 1). */
static inline int enable_irq_wake(unsigned int irq)
{
	unsigned int on = 1;

	return irq_set_irq_wake(irq, on);
}
352
/* Drop the wakeup-source status of @irq; wrapper for irq_set_irq_wake(irq, 0). */
static inline int disable_irq_wake(unsigned int irq)
{
	unsigned int off = 0;

	return irq_set_irq_wake(irq, off);
}
357
358
/* True when forced irq threading is active (handlers pushed into threads). */
#ifdef CONFIG_IRQ_FORCED_THREADING
extern bool force_irqthreads;
#else
#define force_irqthreads (0)
#endif

/* Architectures may override the per-cpu softirq-pending accessors. */
#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x) (local_softirq_pending() |= (x))
#endif

/*
 * Arch hook to really mask interrupts at the hardware level, for
 * architectures that use lazy irq disabling; default is a no-op.
 */
#ifndef hard_irq_disable
#define hard_irq_disable() do { } while(0)
#endif
379
380
381
382
383
384
385
/*
 * Softirq vectors, served in ascending order (lower number = higher
 * priority).  Softirqs are statically allocated; please think twice
 * before adding a new one — tasklets are usually sufficient.
 */
enum
{
	HI_SOFTIRQ=0,		/* high-priority tasklets */
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	BLOCK_IOPOLL_SOFTIRQ,
	TASKLET_SOFTIRQ,	/* normal tasklets */
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,		/* preferably kept as the last softirq */

	NR_SOFTIRQS
};

/* All softirqs except RCU keep the idle tick going while they are pending. */
#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
403
404
405
406
/* Human-readable names of the softirq vectors, indexed by the enum above. */
extern const char * const softirq_to_name[NR_SOFTIRQS];

/*
 * struct softirq_action - per-softirq handler slot
 * @action: function run when the corresponding softirq is raised
 */
struct softirq_action
{
	void (*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);

/*
 * Run pending softirqs on a dedicated stack if the architecture provides
 * one (__ARCH_HAS_DO_SOFTIRQ); otherwise just run them inline.
 */
#ifdef __ARCH_HAS_DO_SOFTIRQ
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
{
	__do_softirq();
}
#endif

/* Install @action as the handler for softirq vector @nr. */
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
/* Raise softirq @nr; callers of the _irqoff variants must have irqs off. */
extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);

/* Per-cpu ksoftirqd kernel thread, which runs overlong softirq work. */
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

/* Return this cpu's ksoftirqd task. */
static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
/*
 * Tasklets: deferred work run in softirq context.  A given tasklet runs
 * on at most one CPU at a time; scheduling an already-scheduled tasklet
 * is a no-op (see TASKLET_STATE_SCHED below).
 *
 * @next:  singly-linked list link, managed by the tasklet core
 * @state: TASKLET_STATE_* bits
 * @count: disable count; the tasklet only runs when this is zero
 * @func:  callback invoked with @data as its argument
 * @data:  opaque cookie passed to @func
 */
struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

/* Define an enabled tasklet (count = 0). */
#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

/* Define a disabled tasklet (count = 1); enable with tasklet_enable(). */
#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
478
479
/*
 * Bits in tasklet_struct.state:
 * TASKLET_STATE_SCHED - tasklet is scheduled for execution
 * TASKLET_STATE_RUN   - tasklet is running (SMP only)
 */
enum
{
	TASKLET_STATE_SCHED,
	TASKLET_STATE_RUN
};
485
#ifdef CONFIG_SMP
/* Try to mark the tasklet as running; returns non-zero on success. */
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

/* Clear the RUN bit; the barrier orders prior stores before the release. */
static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

/* Busy-wait until the tasklet stops running on another CPU. */
static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
/* On UP a tasklet can never run concurrently with us: all no-ops. */
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif
507
508extern void __tasklet_schedule(struct tasklet_struct *t);
509
510static inline void tasklet_schedule(struct tasklet_struct *t)
511{
512 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
513 __tasklet_schedule(t);
514}
515
516extern void __tasklet_hi_schedule(struct tasklet_struct *t);
517
518static inline void tasklet_hi_schedule(struct tasklet_struct *t)
519{
520 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
521 __tasklet_hi_schedule(t);
522}
523
524extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);
525
526
527
528
529
530
531
532static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
533{
534 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
535 __tasklet_hi_schedule_first(t);
536}
537
538
/* Bump the disable count without waiting; the barrier makes the increment
 * visible before any subsequent accesses. */
static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

/* Disable @t and wait for a currently-running instance to finish. */
static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

/* Drop one disable reference; the tasklet may run again once count hits 0. */
static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

/* Identical to tasklet_enable(); kept as the hi-tasklet counterpart. */
static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}
563
/* Wait for a scheduled tasklet to finish and prevent rescheduling. */
extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
/* Initialise @t with callback @func and cookie @data. */
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);

/*
 * struct tasklet_hrtimer - hrtimer whose expiry callback runs as a tasklet
 * @timer:	the underlying hrtimer
 * @tasklet:	tasklet that defers the expiry work out of hard irq context
 * @function:	user callback, invoked from the tasklet
 */
struct tasklet_hrtimer {
	struct hrtimer timer;
	struct tasklet_struct tasklet;
	enum hrtimer_restart (*function)(struct hrtimer *);
};

extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
		     enum hrtimer_restart (*function)(struct hrtimer *),
		     clockid_t which_clock, enum hrtimer_mode mode);
579
580static inline
581int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
582 const enum hrtimer_mode mode)
583{
584 return hrtimer_start(&ttimer->timer, time, mode);
585}
586
/*
 * tasklet_hrtimer_cancel - shut down a tasklet_hrtimer
 * @ttimer: tasklet_hrtimer to cancel
 *
 * Order matters: the timer is cancelled first so it cannot re-schedule
 * the tasklet while tasklet_kill() is waiting for it to finish.
 */
static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
/*
 * Interrupt autoprobing: probe_irq_on() arms probing, probe_irq_off()/
 * probe_irq_mask() report which line(s) fired.  When the generic probing
 * helpers are not configured, the stubs below report "nothing found".
 */
#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);
extern int probe_irq_off(unsigned long);
extern unsigned int probe_irq_mask(unsigned long);
#endif

/* Set up /proc/irq entries; no-op without procfs support. */
#ifdef CONFIG_PROC_FS

extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

struct seq_file;
/* Render /proc/interrupts; arch_show_interrupts() adds arch-specific rows. */
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

/* Early-boot hooks for irq infrastructure setup. */
extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
657
658#endif
659