1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31#undef DEBUG
32
33#include <linux/export.h>
34#include <linux/threads.h>
35#include <linux/kernel_stat.h>
36#include <linux/signal.h>
37#include <linux/sched.h>
38#include <linux/ptrace.h>
39#include <linux/ioport.h>
40#include <linux/interrupt.h>
41#include <linux/timex.h>
42#include <linux/init.h>
43#include <linux/slab.h>
44#include <linux/delay.h>
45#include <linux/irq.h>
46#include <linux/seq_file.h>
47#include <linux/cpumask.h>
48#include <linux/profile.h>
49#include <linux/bitops.h>
50#include <linux/list.h>
51#include <linux/radix-tree.h>
52#include <linux/mutex.h>
53#include <linux/bootmem.h>
54#include <linux/pci.h>
55#include <linux/debugfs.h>
56#include <linux/of.h>
57#include <linux/of_irq.h>
58
59#include <asm/uaccess.h>
60#include <asm/io.h>
61#include <asm/pgtable.h>
62#include <asm/irq.h>
63#include <asm/cache.h>
64#include <asm/prom.h>
65#include <asm/ptrace.h>
66#include <asm/machdep.h>
67#include <asm/udbg.h>
68#include <asm/smp.h>
69#include <asm/debug.h>
70
71#ifdef CONFIG_PPC64
72#include <asm/paca.h>
73#include <asm/firmware.h>
74#include <asm/lv1call.h>
75#endif
76#define CREATE_TRACE_POINTS
77#include <asm/trace.h>
78
/* Per-CPU interrupt statistics (timer/spurious/pmu/mce/doorbell counts). */
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

/* Offset applied when mapping IRQ numbers for per-CPU accounting. */
int __irq_offset_value;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
/* Interrupts seen while (soft-)disabled on ppc32. */
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
/* Thermal Assist Unit state/counters -- defined elsewhere, presumably
 * in the TAU driver (tau_6xx.c); TODO confirm. */
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif

#ifdef CONFIG_PPC64

/* When non-zero, device interrupts are distributed across CPUs;
 * cleared by the "noirqdistrib" boot parameter (see setup_noirqdistrib). */
int distribute_irqs = 1;
97
/*
 * Read paca->irq_happened with a single lbz through r13 (which holds the
 * current CPU's paca pointer on ppc64, as the offsetof() operand shows).
 * Raw asm + notrace keep this callable from the low-level irq
 * enable/disable paths without tracer recursion.
 */
static inline notrace unsigned long get_irq_happened(void)
{
	unsigned long happened;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));

	return happened;
}
107
/*
 * Store the lazy (soft) interrupt-enable state into paca->soft_enabled
 * with a single stb through r13, bypassing the normal accessors so this
 * is safe in the irq enable/disable fast paths.
 */
static inline notrace void set_soft_enabled(unsigned long enable)
{
	__asm__ __volatile__("stb %0,%1(13)"
	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}
113
114static inline notrace int decrementer_check_overflow(void)
115{
116 u64 now = get_tb_or_rtc();
117 u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
118
119 return now >= *next_tb;
120}
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
/*
 * Called after hard-disabling (see arch_local_irq_restore()) to find out
 * whether an interrupt was latched while we were soft-disabled. Returns
 * the trap vector of the highest-priority pending event (0x900
 * decrementer, 0x500 external, 0x280/0xa00/0xe80 doorbell) or 0 if
 * nothing is pending, consuming the corresponding paca->irq_happened bit
 * as it goes.
 */
notrace unsigned int __check_irq_replay(void)
{
	/*
	 * Use local_paca directly rather than get_paca() to avoid the
	 * debug_smp_processor_id() checks: we run hard-disabled here.
	 */
	unsigned char happened = local_paca->irq_happened;

	/* Clear the hard-disable marker, which nothing below consumes. */
	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

	/*
	 * On PS3, poke the hypervisor; NOTE(review): the version info
	 * itself is discarded, so this call appears to be made purely
	 * for its side effect of forcing delivery of pending
	 * soft-disabled interrupts from the HV -- confirm.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp, tmp2;
		lv1_get_version_info(&tmp, &tmp2);
	}

	/*
	 * Decrementer first: replay if one was latched OR if the next
	 * timer event has already passed. The pending bit is cleared
	 * before the test so the replayed handler can latch a new event.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_DEC;
	if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
		return 0x900;

	/* External interrupt pending -> replay vector 0x500. */
	local_paca->irq_happened &= ~PACA_IRQ_EE;
	if (happened & PACA_IRQ_EE)
		return 0x500;

#ifdef CONFIG_PPC_BOOK3E
	/*
	 * BookE latches edge-triggered externals in a separate bit;
	 * NOTE(review): presumably so a lost edge is not dropped across
	 * a soft-disabled section -- confirm. Replays on the same 0x500.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
	if (happened & PACA_IRQ_EE_EDGE)
		return 0x500;

	/* BookE doorbell, vector 0x280. */
	local_paca->irq_happened &= ~PACA_IRQ_DBELL;
	if (happened & PACA_IRQ_DBELL)
		return 0x280;
#else
	/* Server doorbell: hypervisor (0xe80) or guest (0xa00) vector. */
	local_paca->irq_happened &= ~PACA_IRQ_DBELL;
	if (happened & PACA_IRQ_DBELL) {
		if (cpu_has_feature(CPU_FTR_HVMODE))
			return 0xe80;
		return 0xa00;
	}
#endif

	/* Every pending bit must have been consumed above. */
	BUG_ON(local_paca->irq_happened != 0);

	return 0;
}
197
/*
 * Re-enable interrupts that were lazily (soft-)disabled. Any interrupt
 * that arrived while soft-disabled was latched into paca->irq_happened
 * by the low-level handlers; replay it via __check_irq_replay() /
 * __replay_interrupt() before truly enabling.
 */
notrace void arch_local_irq_restore(unsigned long en)
{
	unsigned char irq_happened;
	unsigned int replay;

	/* Write the new soft-enabled value; disabling needs nothing more. */
	set_soft_enabled(en);
	if (!en)
		return;

	/*
	 * From here we could take interrupts again -- unless something
	 * was latched while we were soft-disabled, in which case we are
	 * still hard-disabled and must replay it.
	 *
	 * irq_happened can be read without ordering concerns: only an
	 * interrupt on this CPU can set it, and those are (at least
	 * lazily) off right now.
	 */
	irq_happened = get_irq_happened();
	if (!irq_happened)
		return;

	/*
	 * Hard-disable so that irq_happened is stable while we consume
	 * it. If only PACA_IRQ_HARD_DIS is set we should already be
	 * hard-disabled, so skip the (expensive) mtmsr in that case.
	 */
	if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
		__hard_irq_disable();
#ifdef CONFIG_TRACE_IRQFLAGS
	else {
		/*
		 * We should already be hard-disabled here. There have
		 * been bugs where that wasn't true, so double-check
		 * MSR[EE] and force hard-disable if needed.
		 */
		if (WARN_ON(mfmsr() & MSR_EE))
			__hard_irq_disable();
	}
#endif

	/* Go back to soft-disabled while consuming the latched events. */
	set_soft_enabled(0);

	/*
	 * Find out what (if anything) must be re-emitted. This also
	 * clears the PACA_IRQ_HARD_DIS flag.
	 */
	replay = __check_irq_replay();

	/* We can soft-enable now. */
	set_soft_enabled(1);

	/*
	 * Replay if needed; __replay_interrupt() returns with
	 * interrupts hard-enabled.
	 */
	if (replay) {
		__replay_interrupt(replay);
		return;
	}

	/* Nothing to replay: make sure we are hard-enabled. */
	__hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
275
276
277
278
279
280
281
282
283
284
285void notrace restore_interrupts(void)
286{
287 if (irqs_disabled()) {
288 local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
289 local_irq_enable();
290 } else
291 __hard_irq_enable();
292}
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
/*
 * Called before entering a low-power state. Returns true when it is
 * safe to enter idle (interrupts left logically enabled but MSR[EE]
 * off, to be turned on as a side effect of the sleep instruction);
 * false when an interrupt is already pending and idle must be aborted.
 */
bool prep_irq_for_idle(void)
{
	/*
	 * First hard-disable, so no interrupt can slip in between the
	 * pending check below and actually entering the low-power state.
	 */
	hard_irq_disable();

	/*
	 * If anything was latched while we were soft-disabled, bail out
	 * now instead of entering the low-power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to (effectively) re-enable. */
	trace_hardirqs_on();

	/*
	 * Mark interrupts as soft-enabled and clear PACA_IRQ_HARD_DIS
	 * from the pending mask, since entering the low-power state
	 * will hard-enable as a side effect. Order matters: the trace
	 * call above must precede this state change.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
	local_paca->soft_enabled = 1;

	/* Tell the caller to go ahead and enter the low-power state. */
	return true;
}
339
340#endif
341
342int arch_show_interrupts(struct seq_file *p, int prec)
343{
344 int j;
345
346#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
347 if (tau_initialized) {
348 seq_printf(p, "%*s: ", prec, "TAU");
349 for_each_online_cpu(j)
350 seq_printf(p, "%10u ", tau_interrupts(j));
351 seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
352 }
353#endif
354
355 seq_printf(p, "%*s: ", prec, "LOC");
356 for_each_online_cpu(j)
357 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
358 seq_printf(p, " Local timer interrupts\n");
359
360 seq_printf(p, "%*s: ", prec, "SPU");
361 for_each_online_cpu(j)
362 seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
363 seq_printf(p, " Spurious interrupts\n");
364
365 seq_printf(p, "%*s: ", prec, "PMI");
366 for_each_online_cpu(j)
367 seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
368 seq_printf(p, " Performance monitoring interrupts\n");
369
370 seq_printf(p, "%*s: ", prec, "MCE");
371 for_each_online_cpu(j)
372 seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
373 seq_printf(p, " Machine check exceptions\n");
374
375#ifdef CONFIG_PPC_DOORBELL
376 if (cpu_has_feature(CPU_FTR_DBELL)) {
377 seq_printf(p, "%*s: ", prec, "DBL");
378 for_each_online_cpu(j)
379 seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
380 seq_printf(p, " Doorbell interrupts\n");
381 }
382#endif
383
384 return 0;
385}
386
387
388
389
390u64 arch_irq_stat_cpu(unsigned int cpu)
391{
392 u64 sum = per_cpu(irq_stat, cpu).timer_irqs;
393
394 sum += per_cpu(irq_stat, cpu).pmu_irqs;
395 sum += per_cpu(irq_stat, cpu).mce_exceptions;
396 sum += per_cpu(irq_stat, cpu).spurious_irqs;
397#ifdef CONFIG_PPC_DOORBELL
398 sum += per_cpu(irq_stat, cpu).doorbell_irqs;
399#endif
400
401 return sum;
402}
403
404#ifdef CONFIG_HOTPLUG_CPU
405void migrate_irqs(void)
406{
407 struct irq_desc *desc;
408 unsigned int irq;
409 static int warned;
410 cpumask_var_t mask;
411 const struct cpumask *map = cpu_online_mask;
412
413 alloc_cpumask_var(&mask, GFP_KERNEL);
414
415 for_each_irq_desc(irq, desc) {
416 struct irq_data *data;
417 struct irq_chip *chip;
418
419 data = irq_desc_get_irq_data(desc);
420 if (irqd_is_per_cpu(data))
421 continue;
422
423 chip = irq_data_get_irq_chip(data);
424
425 cpumask_and(mask, data->affinity, map);
426 if (cpumask_any(mask) >= nr_cpu_ids) {
427 printk("Breaking affinity for irq %i\n", irq);
428 cpumask_copy(mask, map);
429 }
430 if (chip->irq_set_affinity)
431 chip->irq_set_affinity(data, mask, true);
432 else if (desc->action && !(warned++))
433 printk("Cannot set affinity for irq %i\n", irq);
434 }
435
436 free_cpumask_var(mask);
437
438 local_irq_enable();
439 mdelay(1);
440 local_irq_disable();
441}
442#endif
443
/*
 * Warn with a backtrace when less than 2KB of kernel stack remains
 * above the thread_info at the stack base. Compiles to nothing unless
 * CONFIG_DEBUG_STACKOVERFLOW is enabled.
 */
static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Offset of the stack pointer within the stack region. */
	long remaining = __get_SP() & (THREAD_SIZE-1);

	/* Less than 2KB of headroom above the thread_info? */
	if (unlikely(remaining < (sizeof(struct thread_info) + 2048))) {
		printk("do_IRQ: stack overflow: %ld\n",
			remaining - sizeof(struct thread_info));
		dump_stack();
	}
#endif
}
459
/*
 * Core external-interrupt handling, entered on the current stack.
 * Queries the platform PIC for the interrupt source and dispatches to
 * its flow handler, accounting spurious interrupts.
 */
void __do_irq(struct pt_regs *regs)
{
	struct irq_desc *desc;
	unsigned int irq;

	irq_enter();

	trace_irq_entry(regs);

	check_stack_overflow();

	/*
	 * Query the platform PIC for the interrupt & ack it. This
	 * must happen before re-enabling hard interrupts below, since
	 * it typically lowers the interrupt line to the CPU.
	 */
	irq = ppc_md.get_irq();

	/* We can hard-enable now, e.g. to allow perf interrupts. */
	may_hard_irq_enable();

	/* And finally process the interrupt. */
	if (unlikely(irq == NO_IRQ))
		__get_cpu_var(irq_stat).spurious_irqs++;
	else {
		desc = irq_to_desc(irq);
		if (likely(desc))
			desc->handle_irq(irq, desc);
	}

	trace_irq_exit(regs);

	irq_exit();
}
494
/*
 * External interrupt entry point: switch to the per-CPU hardirq stack
 * (unless we are already on an irq stack) and run __do_irq() there.
 */
void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct thread_info *curtp, *irqtp, *sirqtp;

	/* Locate current and per-CPU irq/softirq stack thread_infos. */
	curtp = current_thread_info();
	irqtp = hardirq_ctx[raw_smp_processor_id()];
	sirqtp = softirq_ctx[raw_smp_processor_id()];

	/* Already on an irq stack? Handle in place, no switch needed. */
	if (unlikely(curtp == irqtp || curtp == sirqtp)) {
		__do_irq(regs);
		set_irq_regs(old_regs);
		return;
	}

	/* Prepare the thread_info at the base of the irq stack. */
	irqtp->task = curtp->task;
	irqtp->flags = 0;

	/* Copy preempt_count so handlers see the correct nesting. */
	irqtp->preempt_count = curtp->preempt_count;

	/* Switch to the irq stack and call __do_irq() (asm helper). */
	call_do_irq(regs, irqtp);

	/* Done with the irq stack; drop the borrowed task pointer. */
	irqtp->task = NULL;

	/* Propagate any flags set while on the irq stack back to us. */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);

	set_irq_regs(old_regs);
}
531
532void __init init_IRQ(void)
533{
534 if (ppc_md.init_IRQ)
535 ppc_md.init_IRQ();
536
537 exc_lvl_ctx_init();
538
539 irq_ctx_init();
540}
541
542#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
/* Per-CPU stacks for critical, debug and machine-check exception levels
 * (BookE/40x); the backing memory is allocated during early setup. */
struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;
546
/*
 * Initialize the thread_info at the base of each exception-level stack.
 * NOTE(review): the ctx pointers must already point at allocated
 * THREAD_SIZE regions before this runs, or the memsets below fault --
 * allocation happens in earlier boot code.
 */
void exc_lvl_ctx_init(void)
{
	struct thread_info *tp;
	int i, cpu_nr;

	for_each_possible_cpu(i) {
#ifdef CONFIG_PPC64
		cpu_nr = i;
#else
		/* On ppc32 these arrays are indexed by hard CPU id. */
		cpu_nr = get_hard_smp_processor_id(i);
#endif
		memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = critirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
		memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = dbgirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

		memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = mcheckirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		/* Machine check runs as if already in hardirq context. */
		tp->preempt_count = HARDIRQ_OFFSET;
#endif
	}
}
576#endif
577
/* Per-CPU softirq and hardirq stacks, initialized by irq_ctx_init(). */
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;
580
581void irq_ctx_init(void)
582{
583 struct thread_info *tp;
584 int i;
585
586 for_each_possible_cpu(i) {
587 memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
588 tp = softirq_ctx[i];
589 tp->cpu = i;
590
591 memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
592 tp = hardirq_ctx[i];
593 tp->cpu = i;
594 }
595}
596
/*
 * Run pending softirqs on the dedicated per-CPU softirq stack. The
 * task pointer and flags are handed to the alternate stack's
 * thread_info before the switch and any flags set over there are
 * merged back afterwards -- the statement order is significant.
 */
static inline void do_softirq_onstack(void)
{
	struct thread_info *curtp, *irqtp;

	curtp = current_thread_info();
	irqtp = softirq_ctx[smp_processor_id()];
	irqtp->task = curtp->task;
	irqtp->flags = 0;
	call_do_softirq(irqtp);
	irqtp->task = NULL;

	/* Propagate any flag that was set while on the alternate stack. */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}
614
/*
 * Arch do_softirq(): run pending softirqs on the softirq stack with
 * interrupts disabled. A no-op when already in interrupt context --
 * softirqs are then handled on irq exit instead.
 */
void do_softirq(void)
{
	unsigned long irqflags;

	if (in_interrupt())
		return;

	local_irq_save(irqflags);

	if (local_softirq_pending())
		do_softirq_onstack();

	local_irq_restore(irqflags);
}
629
630irq_hw_number_t virq_to_hw(unsigned int virq)
631{
632 struct irq_data *irq_data = irq_get_irq_data(virq);
633 return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
634}
635EXPORT_SYMBOL_GPL(virq_to_hw);
636
637#ifdef CONFIG_SMP
638int irq_choose_cpu(const struct cpumask *mask)
639{
640 int cpuid;
641
642 if (cpumask_equal(mask, cpu_online_mask)) {
643 static int irq_rover;
644 static DEFINE_RAW_SPINLOCK(irq_rover_lock);
645 unsigned long flags;
646
647
648do_round_robin:
649 raw_spin_lock_irqsave(&irq_rover_lock, flags);
650
651 irq_rover = cpumask_next(irq_rover, cpu_online_mask);
652 if (irq_rover >= nr_cpu_ids)
653 irq_rover = cpumask_first(cpu_online_mask);
654
655 cpuid = irq_rover;
656
657 raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
658 } else {
659 cpuid = cpumask_first_and(mask, cpu_online_mask);
660 if (cpuid >= nr_cpu_ids)
661 goto do_round_robin;
662 }
663
664 return get_hard_smp_processor_id(cpuid);
665}
666#else
/* Uniprocessor build: every interrupt goes to the only CPU. */
int irq_choose_cpu(const struct cpumask *mask)
{
	return hard_smp_processor_id();
}
671#endif
672
/* Generic-irq early-init hook: nothing to do on powerpc. */
int arch_early_irq_init(void)
{
	return 0;
}
677
678#ifdef CONFIG_PPC64
/*
 * "noirqdistrib" boot parameter: disable distribution of device
 * interrupts across CPUs (clears distribute_irqs). Returns 1 to mark
 * the option as handled.
 */
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
686#endif
687