1
2#ifndef _LINUX_INTERRUPT_H
3#define _LINUX_INTERRUPT_H
4
5#include <linux/kernel.h>
6#include <linux/linkage.h>
7#include <linux/bitops.h>
8#include <linux/preempt.h>
9#include <linux/cpumask.h>
10#include <linux/irqreturn.h>
11#include <linux/irqnr.h>
12#include <linux/hardirq.h>
13#include <linux/irqflags.h>
14#include <linux/smp.h>
15#include <linux/percpu.h>
16#include <linux/hrtimer.h>
17
18#include <asm/atomic.h>
19#include <asm/ptrace.h>
20#include <asm/system.h>
21#include <trace/events/irq.h>
22
23
24
25
26
27
28
29
/*
 * These correspond to the IORESOURCE_IRQ_* defines in linux/ioport.h
 * and select the interrupt line trigger behaviour.  Requesting an
 * interrupt without specifying any IRQF_TRIGGER bit means "as already
 * configured", i.e. as set up by machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE 0x00000000
#define IRQF_TRIGGER_RISING 0x00000001
#define IRQF_TRIGGER_FALLING 0x00000002
#define IRQF_TRIGGER_HIGH 0x00000004
#define IRQF_TRIGGER_LOW 0x00000008
#define IRQF_TRIGGER_MASK (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
/* Used by drivers which perform their own irq trigger probing. */
#define IRQF_TRIGGER_PROBE 0x00000010
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
/*
 * Flags used by the irq handling routines (request_irq() and friends):
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler
 * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
 */
#define IRQF_DISABLED 0x00000020
#define IRQF_SAMPLE_RANDOM 0x00000040
#define IRQF_SHARED 0x00000080
#define IRQF_PROBE_SHARED 0x00000100
#define __IRQF_TIMER 0x00000200
#define IRQF_PERCPU 0x00000400
#define IRQF_NOBALANCING 0x00000800
#define IRQF_IRQPOLL 0x00001000
#define IRQF_ONESHOT 0x00002000
#define IRQF_NO_SUSPEND 0x00004000

/* Timer interrupts are implicitly exempt from suspend-time disabling. */
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND)
72
73
74
75
76
77
78
79
/*
 * Bits in irqaction->thread_flags (see struct irqaction below):
 *
 * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
 * IRQTF_DIED - handler thread died
 * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
 * IRQTF_AFFINITY - irq thread is requested to adjust its affinity
 */
enum {
 IRQTF_RUNTHREAD,
 IRQTF_DIED,
 IRQTF_WARNED,
 IRQTF_AFFINITY,
};
86
87
88
89
90
91
92
93
/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
 IRQC_IS_HARDIRQ = 0,
 IRQC_IS_NESTED,
};

/* Prototype of an interrupt service routine: (irq number, dev_id cookie). */
typedef irqreturn_t (*irq_handler_t)(int, void *);
100
101
102
103
104
105
106
107
108
109
110
111
112
113
/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @flags:	flags (see IRQF_* above)
 * @dev_id:	cookie to identify the device / the sharer on free_irq()
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @thread_flags:	flags related to @thread (IRQTF_* bits above)
 * @name:	name of the device
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
 irq_handler_t handler;
 unsigned long flags;
 void *dev_id;
 struct irqaction *next;
 int irq;
 irq_handler_t thread_fn;
 struct task_struct *thread;
 unsigned long thread_flags;
 const char *name;
 struct proc_dir_entry *dir;
} ____cacheline_internodealigned_in_smp;

/* Handler that performs no action; for irqs that need a placeholder. */
extern irqreturn_t no_action(int cpl, void *dev_id);
128
#ifdef CONFIG_GENERIC_HARDIRQS
/*
 * Allocate an interrupt line with an optional threaded handler:
 * @handler runs in hard interrupt context and may return
 * IRQ_WAKE_THREAD to defer to @thread_fn, which runs in a dedicated
 * kernel thread.  @dev is the cookie passed back to the handlers and
 * used to identify the sharer on free_irq().
 */
extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
 irq_handler_t thread_fn,
 unsigned long flags, const char *name, void *dev);

/* Classic non-threaded request: all handling done in @handler. */
static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
 const char *name, void *dev)
{
 return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}

/*
 * Like request_irq(), but usable with irq controllers that require a
 * nested threaded handler; on success it reports the chosen context
 * as an IRQC_IS_* value (see the enum above).
 */
extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
 unsigned long flags, const char *name, void *dev_id);

/* Called on task exit to clean up a dying irq handler thread. */
extern void exit_irq_thread(void);
#else

extern int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
 const char *name, void *dev);

/*
 * Stub so common code (e.g. kernel/irq/devres.c) builds on
 * !CONFIG_GENERIC_HARDIRQS architectures: @thread_fn is ignored and
 * the request degrades to a plain request_irq().
 */
static inline int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
 irq_handler_t thread_fn,
 unsigned long flags, const char *name, void *dev)
{
 return request_irq(irq, handler, flags, name, dev);
}

/* Without the generic irq layer the context is always plain request_irq(). */
static inline int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
 unsigned long flags, const char *name, void *dev_id)
{
 return request_irq(irq, handler, flags, name, dev_id);
}

static inline void exit_irq_thread(void) { }
#endif
176
extern void free_irq(unsigned int, void *);

struct device;

/*
 * Device-managed (devres) variants: the irq is automatically freed
 * when the owning device is released, so no explicit free_irq() is
 * needed on driver detach.
 */
extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
 irq_handler_t handler, irq_handler_t thread_fn,
 unsigned long irqflags, const char *devname,
 void *dev_id);

/* Non-threaded device-managed request (thread_fn == NULL). */
static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
 unsigned long irqflags, const char *devname, void *dev_id)
{
 return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
 devname, dev_id);
}

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
196
197
198
199
200
201
202
203
204
205
206
207
208
/*
 * With lockdep enabled we do not want handlers to re-enable hardirqs
 * while running in hardirq context, so this annotation compiles to a
 * no-op there; otherwise it is a plain local_irq_enable().
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq() do { } while (0)
#else
# define local_irq_enable_in_hardirq() local_irq_enable()
#endif

/* Disable (without / with waiting for running handlers) and re-enable an irq. */
extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
218
219
/*
 * Power-management helpers: disable/re-enable device interrupts around
 * system suspend, and check for pending wakeup irqs before sleeping
 * (check_wakeup_irqs() returns 0 when nothing is pending).
 */
#ifdef CONFIG_GENERIC_HARDIRQS
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
#ifdef CONFIG_PM_SLEEP
extern int check_wakeup_irqs(void);
#else
static inline int check_wakeup_irqs(void) { return 0; }
#endif
#else
/*
 * No generic hardirq layer: all three are no-ops.  The stray ';' that
 * used to follow the two void inline bodies was dropped — an extra
 * semicolon at file scope is not valid ISO C.
 */
static inline void suspend_device_irqs(void) { }
static inline void resume_device_irqs(void) { }
static inline int check_wakeup_irqs(void) { return 0; }
#endif
233
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)

/* Affinity mask applied to irqs by default. */
extern cpumask_var_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
#else

/* UP or no generic hardirqs: irq affinity cannot be controlled. */
static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
 return -EINVAL;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
 return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

static inline int irq_set_affinity_hint(unsigned int irq,
 const struct cpumask *m)
{
 return -EINVAL;
}
#endif
263
#ifdef CONFIG_GENERIC_HARDIRQS

/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that know that a
 * particular irq context which is disabled, and which is the only
 * irq-context user of a lock, that it's safe to take the lock in the
 * irq-disabled section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal irq
 * disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
 disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
 local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
 disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
 local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
 disable_irq(irq);
#ifdef CONFIG_LOCKDEP
 local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
 local_irq_enable();
#endif
 enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
 local_irq_restore(*flags);
#endif
 enable_irq(irq);
}

/* Configure whether this irq may wake the system from suspend. */
extern int set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
 return set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
 return set_irq_wake(irq, 0);
}
328
#else /* !CONFIG_GENERIC_HARDIRQS */

/*
 * Without the generic irq layer the lockdep variants map straight to
 * the plain disable/enable calls.  NOTE(review): with CONFIG_LOCKDEP
 * set these fallbacks are left undefined — presumably so any user on
 * such a configuration fails to build; confirm before relying on it.
 */
#ifndef CONFIG_LOCKDEP
# define disable_irq_nosync_lockdep(irq) disable_irq_nosync(irq)
# define disable_irq_nosync_lockdep_irqsave(irq, flags) \
 disable_irq_nosync(irq)
# define disable_irq_lockdep(irq) disable_irq(irq)
# define enable_irq_lockdep(irq) enable_irq(irq)
# define enable_irq_lockdep_irqrestore(irq, flags) \
 enable_irq(irq)
# endif

/* No wakeup-source management without the generic irq layer. */
static inline int enable_irq_wake(unsigned int irq)
{
 return 0;
}

static inline int disable_irq_wake(unsigned int irq)
{
 return 0;
}
#endif
355
#ifndef __ARCH_SET_SOFTIRQ_PENDING
/* Default accessors for the per-cpu pending-softirq bitmask. */
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x) (local_softirq_pending() |= (x))
#endif

/*
 * Some architectures implement lazy irq disabling: local_irq_disable()
 * only soft-disables delivery.  hard_irq_disable() really disables
 * interrupts at the CPU on such architectures; everywhere else it is
 * a no-op.
 */
#ifndef hard_irq_disable
#define hard_irq_disable() do { } while(0)
#endif
370
371
372
373
374
375
376
/*
 * The softirq vector numbers, in priority order (HI first).
 *
 * PLEASE, avoid allocating new softirqs: unless you need really
 * high-frequency threaded job scheduling, tasklets are more than
 * enough for almost all purposes.
 */
enum
{
 HI_SOFTIRQ=0,
 TIMER_SOFTIRQ,
 NET_TX_SOFTIRQ,
 NET_RX_SOFTIRQ,
 BLOCK_SOFTIRQ,
 BLOCK_IOPOLL_SOFTIRQ,
 TASKLET_SOFTIRQ,
 SCHED_SOFTIRQ,
 HRTIMER_SOFTIRQ,
 RCU_SOFTIRQ,

 NR_SOFTIRQS
};
392
393
394
395
/*
 * Map softirq index to softirq name; update softirq_to_name in
 * kernel/softirq.c when adding a new softirq.
 */
extern char *softirq_to_name[NR_SOFTIRQS];

/*
 * Per-softirq handler descriptor; the softirq mask/active fields live
 * in irq_cpustat_t (asm/hardirq.h) for better cache usage.
 */
struct softirq_action
{
 void (*action)(struct softirq_action *);
};
406
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
/* Register @action as the handler for softirq slot @nr. */
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
/* Mark softirq @nr pending; caller must have hardirqs disabled. */
static inline void __raise_softirq_irqoff(unsigned int nr)
{
 trace_softirq_raise(nr);
 or_softirq_pending(1UL << nr);
}

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
419
420
421
422
423
424
425
426
/* The worklist that queues up per-cpu softirq work. */
DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);

/*
 * Try to send a softirq to a remote cpu.  If this cannot be done, the
 * work is queued on the local cpu instead.
 */
extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);

/*
 * Like send_remote_softirq(), but the caller must have local irqs
 * disabled and pass the current cpu as @this_cpu.
 */
extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
 int this_cpu, int softirq);
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
/*
 * Tasklets --- multithreaded analogue of BHs.
 *
 * Properties:
 * - once scheduled, a tasklet is guaranteed to run at least once;
 * - if scheduled again before it starts, it still runs only once;
 * - a tasklet is strictly serialized with respect to itself (never
 *   runs on two cpus at once), but not with respect to other tasklets.
 */
struct tasklet_struct
{
 struct tasklet_struct *next; /* next tasklet on the per-cpu list */
 unsigned long state; /* TASKLET_STATE_* bits */
 atomic_t count; /* non-zero means disabled */
 void (*func)(unsigned long); /* callback */
 unsigned long data; /* argument passed to @func */
};
468
/* Define an enabled tasklet (count = 0). */
#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

/* Same, but initially disabled (count = 1); enable with tasklet_enable(). */
#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }


enum
{
 TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
 TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
};
481
#ifdef CONFIG_SMP
/* Claim the RUN bit; non-zero on success.  Serializes execution per tasklet. */
static inline int tasklet_trylock(struct tasklet_struct *t)
{
 return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

/* Release the RUN bit; the barrier orders the tasklet body before release. */
static inline void tasklet_unlock(struct tasklet_struct *t)
{
 smp_mb__before_clear_bit();
 clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

/* Spin until the tasklet finishes running on another cpu. */
static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
 while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
/* UP: a tasklet can never be running concurrently with us. */
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif
503
extern void __tasklet_schedule(struct tasklet_struct *t);

/*
 * Schedule on the normal-priority (TASKLET_SOFTIRQ) queue; the SCHED
 * bit guarantees the tasklet is enqueued at most once.
 */
static inline void tasklet_schedule(struct tasklet_struct *t)
{
 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
 __tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

/* Schedule on the high-priority (HI_SOFTIRQ) queue. */
static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
 __tasklet_hi_schedule(t);
}

extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);

/*
 * Schedule at the head of the high-priority queue without touching any
 * other tasklets.  NOTE(review): upstream added this for callers that
 * must not take page faults while enqueueing — confirm before reuse.
 */
static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
{
 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
 __tasklet_hi_schedule_first(t);
}
533
534
/* Disable without waiting: the tasklet may still be running on return. */
static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
 atomic_inc(&t->count);
 smp_mb__after_atomic_inc();
}

/* Disable and wait for a currently-running instance to finish. */
static inline void tasklet_disable(struct tasklet_struct *t)
{
 tasklet_disable_nosync(t);
 tasklet_unlock_wait(t);
 smp_mb();
}

/* Re-enable; pairs with tasklet_disable*() (count drops toward zero). */
static inline void tasklet_enable(struct tasklet_struct *t)
{
 smp_mb__before_atomic_dec();
 atomic_dec(&t->count);
}

/* Identical to tasklet_enable(); kept as a separate name for hi-tasklets. */
static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
 smp_mb__before_atomic_dec();
 atomic_dec(&t->count);
}
559
extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
 void (*func)(unsigned long), unsigned long data);

/*
 * A tasklet_hrtimer couples an hrtimer with a tasklet so that the
 * timer callback @function can be deferred out of hard interrupt
 * context into the tasklet.
 */
struct tasklet_hrtimer {
 struct hrtimer timer;
 struct tasklet_struct tasklet;
 enum hrtimer_restart (*function)(struct hrtimer *);
};

extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
 enum hrtimer_restart (*function)(struct hrtimer *),
 clockid_t which_clock, enum hrtimer_mode mode);
575
/* (Re)arm the embedded hrtimer for @time in the given @mode. */
static inline
int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
 const enum hrtimer_mode mode)
{
 return hrtimer_start(&ttimer->timer, time, mode);
}

/* Cancel the timer and kill the tasklet so neither is pending afterwards. */
static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
 hrtimer_cancel(&ttimer->timer);
 tasklet_kill(&ttimer->tasklet);
}
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
/*
 * Autoprobing for irqs: probe_irq_on() starts a probe and returns a
 * cookie for probe_irq_off()/probe_irq_mask().  On genirq builds
 * without CONFIG_GENERIC_IRQ_PROBE the stubs below simply report
 * "nothing found"; otherwise the arch/genirq implementation is used.
 */
#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
 return 0;
}
static inline int probe_irq_off(unsigned long val)
{
 return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
 return 0;
}
#else
extern unsigned long probe_irq_on(void);
extern int probe_irq_off(unsigned long);
extern unsigned int probe_irq_mask(unsigned long);
#endif
636
#ifdef CONFIG_PROC_FS
/* Initialize the /proc/irq/ hierarchy. */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

struct seq_file;
/* Emit the /proc/interrupts table into @p. */
int show_interrupts(struct seq_file *p, void *v);

/* Early boot hooks for setting up the irq subsystem. */
extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
652
653#endif
654