// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains spurious interrupt handling.
 */

#include <linux/jiffies.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>

#include "internals.h"

static int irqfixup __read_mostly;

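/*
 * Once an IRQ line has been disabled as spurious, this timer retries
 * its handlers every POLL_SPURIOUS_IRQ_INTERVAL jiffies (HZ/10, i.e.
 * 100ms). irq_poll_cpu and irq_poll_active serialize polling across
 * CPUs so that only one poller runs at a time.
 */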
#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
static void poll_spurious_irqs(struct timer_list *unused);
static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs);
static int irq_poll_cpu;
static atomic_t irq_poll_active;

/*
 * We wait here for a poller to finish.
 *
 * If the poll runs on this CPU, then we yell loudly and return
 * false. That will leave the interrupt line disabled in the worst
 * case, but it should never happen.
 *
 * We wait until the poller is done and then recheck whether the
 * interrupt was disabled or the action removed in the meantime. Only
 * if it is still active do we return true and let the handler run.
 */
bool irq_wait_for_poll(struct irq_desc *desc)
	__must_hold(&desc->lock)
{
	if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
		      "irq poll in progress on cpu %d for irq %d\n",
		      smp_processor_id(), desc->irq_data.irq))
		return false;

#ifdef CONFIG_SMP
	do {
		raw_spin_unlock(&desc->lock);
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();
		raw_spin_lock(&desc->lock);
	} while (irqd_irq_inprogress(&desc->irq_data));

	/* Might have been disabled in the meantime */
	return !irqd_irq_disabled(&desc->irq_data) && desc->action;
#else
	return false;
#endif
}

/*
 * Recovery handler for misrouted interrupts.
 */
static int try_one_irq(struct irq_desc *desc, bool force)
{
	irqreturn_t ret = IRQ_NONE;
	struct irqaction *action;

	raw_spin_lock(&desc->lock);

	/*
	 * PER_CPU, nested thread interrupts and interrupts explicitly
	 * marked polled are excluded from polling.
	 */
	if (irq_settings_is_per_cpu(desc) ||
	    irq_settings_is_nested_thread(desc) ||
	    irq_settings_is_polled(desc))
		goto out;

	/*
	 * Do not poll disabled interrupts unless the spurious
	 * disabled poller asks explicitly.
	 */
	if (irqd_irq_disabled(&desc->irq_data) && !force)
		goto out;

	/*
	 * All handlers must agree on IRQF_SHARED, so we test just the
	 * first.
	 */
	action = desc->action;
	if (!action || !(action->flags & IRQF_SHARED) ||
	    (action->flags & __IRQF_TIMER))
		goto out;

	/* Already running on another processor */
	if (irqd_irq_inprogress(&desc->irq_data)) {
		/*
		 * Already running: If it is shared get the other
		 * CPU to go looking for our mystery interrupt too.
		 */
		desc->istate |= IRQS_PENDING;
		goto out;
	}

	/* Mark it poll in progress */
	desc->istate |= IRQS_POLL_INPROGRESS;
	do {
		if (handle_irq_event(desc) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
		/* Make sure that there is still a valid action */
		action = desc->action;
	} while ((desc->istate & IRQS_PENDING) && action);
	desc->istate &= ~IRQS_POLL_INPROGRESS;
out:
	raw_spin_unlock(&desc->lock);
	return ret == IRQ_HANDLED;
}
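/*
 * Polling invokes every handler on a shared line even when the device
 * did not assert the interrupt, which is why try_one_irq() only
 * touches IRQF_SHARED actions: shared handlers must already check
 * their device and return IRQ_NONE when it is idle. A minimal sketch
 * of such a handler (hypothetical driver; the mydev_irq name and
 * MYDEV_* registers are assumptions, not kernel API):
 *
 *	static irqreturn_t mydev_irq(int irq, void *dev_id)
 *	{
 *		struct mydev *md = dev_id;
 *
 *		if (!(readl(md->regs + MYDEV_STATUS) & MYDEV_IRQ))
 *			return IRQ_NONE;	// not ours, or a poll
 *		writel(MYDEV_IRQ, md->regs + MYDEV_ACK);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_irq(irq, mydev_irq, IRQF_SHARED, "mydev", md);
 */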

static int misrouted_irq(int irq)
{
	struct irq_desc *desc;
	int i, ok = 0;

	if (atomic_inc_return(&irq_poll_active) != 1)
		goto out;

	irq_poll_cpu = smp_processor_id();

	for_each_irq_desc(i, desc) {
		if (!i)
			continue;

		if (i == irq)	/* Already tried */
			continue;

		if (try_one_irq(desc, false))
			ok = 1;
	}
out:
	atomic_dec(&irq_poll_active);
	/* So the caller can adjust the irq error counts */
	return ok;
}

static void poll_spurious_irqs(struct timer_list *unused)
{
	struct irq_desc *desc;
	int i;

	if (atomic_inc_return(&irq_poll_active) != 1)
		goto out;
	irq_poll_cpu = smp_processor_id();

	for_each_irq_desc(i, desc) {
		unsigned int state;

		if (!i)
			continue;

		/* Racy but it doesn't matter */
		state = desc->istate;
		barrier();
		if (!(state & IRQS_SPURIOUS_DISABLED))
			continue;

		local_irq_disable();
		try_one_irq(desc, true);
		local_irq_enable();
	}
out:
	atomic_dec(&irq_poll_active);
	mod_timer(&poll_spurious_irq_timer,
		  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}

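/*
 * Valid handler results are combinations of IRQ_NONE (0),
 * IRQ_HANDLED (1) and IRQ_WAKE_THREAD (2); anything above
 * (IRQ_HANDLED | IRQ_WAKE_THREAD) == 3 is a bogus return value.
 */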
static inline int bad_action_ret(irqreturn_t action_ret)
{
	unsigned int r = action_ret;

	if (likely(r <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
		return 0;
	return 1;
}

/*
 * If 99,900 of the previous 100,000 interrupts have not been handled
 * then assume that the IRQ is stuck in some manner. Drop a diagnostic
 * and try to turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have been a correctly
 *  functioning device sharing an IRQ with the failing one.)
 */
static void __report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
{
	unsigned int irq = irq_desc_get_irq(desc);
	struct irqaction *action;
	unsigned long flags;

	if (bad_action_ret(action_ret)) {
		printk(KERN_ERR "irq event %d: bogus return value %x\n",
				irq, action_ret);
	} else {
		printk(KERN_ERR "irq %d: nobody cared (try booting with "
				"the \"irqpoll\" option)\n", irq);
	}
	dump_stack();
	printk(KERN_ERR "handlers:\n");

	/*
	 * We need to take desc->lock here. note_interrupt() is called
	 * w/o desc->lock held, but IRQ_PROGRESS set. We might race
	 * with something else removing an action. It's ok to take
	 * desc->lock here. See synchronize_irq().
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		printk(KERN_ERR "[<%p>] %ps", action->handler, action->handler);
		if (action->thread_fn)
			printk(KERN_CONT " threaded [<%p>] %ps",
					action->thread_fn, action->thread_fn);
		printk(KERN_CONT "\n");
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

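/* Report at most 100 bad interrupt events over the system lifetime. */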
static void report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
{
	static int count = 100;

	if (count > 0) {
		count--;
		__report_bad_irq(desc, action_ret);
	}
}

static inline int
try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
		  irqreturn_t action_ret)
{
	struct irqaction *action;

	if (!irqfixup)
		return 0;

	/* We didn't actually handle the IRQ - see if it was misrouted? */
	if (action_ret == IRQ_NONE)
		return 1;

	/*
	 * But for 'irqfixup == 2' we also do it for handled interrupts if
	 * they are marked as IRQF_IRQPOLL (or for irq zero, which is the
	 * traditional PC timer interrupt... legacy).
	 */
	if (irqfixup < 2)
		return 0;

	if (!irq)
		return 1;

	/*
	 * Since we don't get the descriptor lock, "action" can
	 * change under us. We don't really care, but we don't
	 * want to follow a NULL pointer. So tell the compiler to
	 * just load it once by using a barrier.
	 */
	action = desc->action;
	barrier();
	return action && (action->flags & IRQF_IRQPOLL);
}

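/*
 * Bit 31 of threads_handled_last flags that spurious detection has
 * been deferred to the next hardware interrupt; see the
 * IRQ_WAKE_THREAD handling in note_interrupt() below.
 */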
#define SPURIOUS_DEFERRED	0x80000000

void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret)
{
	unsigned int irq;

	if (desc->istate & IRQS_POLL_INPROGRESS ||
	    irq_settings_is_polled(desc))
		return;

	if (bad_action_ret(action_ret)) {
		report_bad_irq(desc, action_ret);
		return;
	}

	/*
	 * We cannot call note_interrupt from the threaded handler
	 * because we need to look at the compound of all handlers
	 * (primary and threaded). Aside of that in the threaded
	 * shared case we have no serialization against an incoming
	 * hardware interrupt while we are dealing with a threaded
	 * result.
	 *
	 * So in case a thread is woken, we just note the fact and
	 * defer the analysis to the next hardware interrupt.
	 *
	 * The threaded handlers store whether they successfully
	 * handled an interrupt and we check whether that number
	 * changed versus the last invocation.
	 *
	 * We could handle all interrupts with the delayed-by-one
	 * mechanism, but for the non-threaded case we'd just waste
	 * cycles.
	 */
	if (action_ret & IRQ_WAKE_THREAD) {
		/*
		 * There is a thread woken. Check whether one of the
		 * shared primary handlers returned IRQ_HANDLED. If
		 * not we defer the spurious detection to the next
		 * interrupt.
		 */
		if (action_ret == IRQ_WAKE_THREAD) {
			int handled;
			/*
			 * We use bit 31 of threads_handled_last to
			 * denote the deferred spurious detection
			 * active. No locking necessary as
			 * threads_handled_last is only accessed here
			 * and we have the guarantee that hard
			 * interrupts are not reentrant.
			 */
			if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
				desc->threads_handled_last |= SPURIOUS_DEFERRED;
				return;
			}
			/*
			 * Check whether one of the threaded handlers
			 * returned IRQ_HANDLED since the last
			 * interrupt happened.
			 *
			 * For simplicity we just set bit 31, as it is
			 * set in threads_handled_last as well. So we
			 * avoid extra masking. And we really do not
			 * care about the high bits of the handled
			 * count. We just care about the count being
			 * different than the one we saw before.
			 */
			handled = atomic_read(&desc->threads_handled);
			handled |= SPURIOUS_DEFERRED;
			if (handled != desc->threads_handled_last) {
				action_ret = IRQ_HANDLED;
				/*
				 * Note: We keep the SPURIOUS_DEFERRED
				 * bit set. We are handling the
				 * previous invocation right now.
				 * Keep it for the current one, so the
				 * next hardware interrupt will
				 * account for it.
				 */
				desc->threads_handled_last = handled;
			} else {
				/*
				 * None of the threaded handlers felt
				 * responsible for the last interrupt.
				 *
				 * We keep the SPURIOUS_DEFERRED bit
				 * set in threads_handled_last as we
				 * need to account for the current
				 * interrupt as well.
				 */
				action_ret = IRQ_NONE;
			}
		} else {
			/*
			 * One of the primary handlers returned
			 * IRQ_HANDLED. So we don't care about the
			 * threaded handlers on the same line. Clear
			 * the deferred detection bit.
			 *
			 * In theory we could/should check whether the
			 * deferred bit is set and take the result of
			 * the previous run into account here as
			 * well. But it's really not worth the
			 * trouble. If every other interrupt is
			 * handled we never trigger the spurious
			 * detector. And if this is just the one out
			 * of 100k unhandled ones which is handled
			 * then we merely delay the spurious detection
			 * by one hard interrupt. Not a real problem.
			 */
			desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
		}
	}

	if (unlikely(action_ret == IRQ_NONE)) {
		/*
		 * If we are seeing only the odd spurious IRQ caused by
		 * bus asynchronicity then don't eventually trigger an error,
		 * otherwise the counter becomes a doomsday timer for otherwise
		 * working systems.
		 */
		if (time_after(jiffies, desc->last_unhandled + HZ/10))
			desc->irqs_unhandled = 1;
		else
			desc->irqs_unhandled++;
		desc->last_unhandled = jiffies;
	}

	irq = irq_desc_get_irq(desc);
	if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
		int ok = misrouted_irq(irq);
		if (action_ret == IRQ_NONE)
			desc->irqs_unhandled -= ok;
	}

	desc->irq_count++;
	if (likely(desc->irq_count < 100000))
		return;

	desc->irq_count = 0;
	if (unlikely(desc->irqs_unhandled > 99900)) {
		/*
		 * The interrupt is stuck
		 */
		__report_bad_irq(desc, action_ret);
		/*
		 * Now kill the IRQ
		 */
		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
		desc->istate |= IRQS_SPURIOUS_DISABLED;
		desc->depth++;
		irq_disable(desc);

		mod_timer(&poll_spurious_irq_timer,
			  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
	}
	desc->irqs_unhandled = 0;
}

bool noirqdebug __read_mostly;

int noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	printk(KERN_INFO "IRQ lockup detection disabled\n");

	return 1;
}

__setup("noirqdebug", noirqdebug_setup);
module_param(noirqdebug, bool, 0644);
MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");

static int __init irqfixup_setup(char *str)
{
	irqfixup = 1;
	printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
	printk(KERN_WARNING "This may impact system performance.\n");

	return 1;
}

__setup("irqfixup", irqfixup_setup);
module_param(irqfixup, int, 0644);

static int __init irqpoll_setup(char *str)
{
	irqfixup = 2;
	printk(KERN_WARNING "Misrouted IRQ fixup and polling support enabled\n");
	printk(KERN_WARNING "This may significantly impact system performance\n");
	return 1;
}

__setup("irqpoll", irqpoll_setup);
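
/*
 * Example usage (kernel command line):
 *
 *	noirqdebug	disable unhandled/spurious IRQ detection
 *	irqfixup	when an IRQ goes unhandled, poll all other
 *			handlers for it
 *	irqpoll		as irqfixup, and additionally poll all handlers
 *			on each timer interrupt (for badly broken
 *			firmware)
 */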