1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31#undef DEBUG
32
33#include <linux/export.h>
34#include <linux/threads.h>
35#include <linux/kernel_stat.h>
36#include <linux/signal.h>
37#include <linux/sched.h>
38#include <linux/ptrace.h>
39#include <linux/ioport.h>
40#include <linux/interrupt.h>
41#include <linux/timex.h>
42#include <linux/init.h>
43#include <linux/slab.h>
44#include <linux/delay.h>
45#include <linux/irq.h>
46#include <linux/seq_file.h>
47#include <linux/cpumask.h>
48#include <linux/profile.h>
49#include <linux/bitops.h>
50#include <linux/list.h>
51#include <linux/radix-tree.h>
52#include <linux/mutex.h>
53#include <linux/pci.h>
54#include <linux/debugfs.h>
55#include <linux/of.h>
56#include <linux/of_irq.h>
57
58#include <linux/uaccess.h>
59#include <asm/io.h>
60#include <asm/pgtable.h>
61#include <asm/irq.h>
62#include <asm/cache.h>
63#include <asm/prom.h>
64#include <asm/ptrace.h>
65#include <asm/machdep.h>
66#include <asm/udbg.h>
67#include <asm/smp.h>
68#include <asm/livepatch.h>
69#include <asm/asm-prototypes.h>
70#include <asm/hw_irq.h>
71
72#ifdef CONFIG_PPC64
73#include <asm/paca.h>
74#include <asm/firmware.h>
75#include <asm/lv1call.h>
76#endif
77#define CREATE_TRACE_POINTS
78#include <asm/trace.h>
79#include <asm/cpu_has_feature.h>
80
/* Per-CPU interrupt statistics shown in /proc/interrupts and summed
 * by arch_irq_stat_cpu() below. */
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

int __irq_offset_value;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
/* Thermal Assist Unit interrupt accounting (32-bit only). */
extern int tau_initialized;
u32 tau_interrupts(unsigned long cpu);
#endif
#endif

#ifdef CONFIG_PPC64

/* Spread device interrupts across CPUs; cleared by "noirqdistrib". */
int distribute_irqs = 1;
99
/*
 * Read paca->irq_happened (the lazily-latched pending-interrupt byte)
 * directly through r13, the paca pointer register.  Hand-written asm
 * and notrace keep this usable in the interrupt enable/disable fast
 * path without any tracing or debug overhead.
 */
static inline notrace unsigned long get_irq_happened(void)
{
	unsigned long happened;

	/* lbz: load the irq_happened byte at its paca offset off r13 */
	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));

	return happened;
}
109
110static inline notrace int decrementer_check_overflow(void)
111{
112 u64 now = get_tb_or_rtc();
113 u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
114
115 return now >= *next_tb;
116}
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
/*
 * Called when interrupts are being re-enabled after a soft-disabled
 * section.  Decides which (if any) interrupt that was "lazily"
 * latched in paca->irq_happened needs replaying, clears its latch
 * bit, and returns the exception vector to replay -- or 0 when
 * nothing is pending.
 */
notrace unsigned int __check_irq_replay(void)
{
	/*
	 * Use local_paca directly (rather than get_paca()) to avoid
	 * debug_smp_processor_id() overhead in this low level function.
	 */
	unsigned char happened = local_paca->irq_happened;

	/*
	 * Lockdep/tracing bookkeeping only: account irqs-off latency as
	 * ending here, since we are effectively responding to the next
	 * interrupt now.
	 */
	trace_hardirqs_on();
	trace_hardirqs_off();

	/*
	 * If we were hard-disabled, clear that flag (it wouldn't be
	 * cleared otherwise) and check for a decrementer tick missed
	 * while EE was off: the decrementer latches no state of its
	 * own, so compare the timebase against the next event.
	 */
	if (happened & PACA_IRQ_HARD_DIS) {
		local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

		if (!(happened & PACA_IRQ_DEC)) {
			if (decrementer_check_overflow()) {
				local_paca->irq_happened |= PACA_IRQ_DEC;
				happened |= PACA_IRQ_DEC;
			}
		}
	}

	/*
	 * PS3: issue a cheap lv1 hypercall, discarding its result.
	 * NOTE(review): presumably the hypercall's side effect is to
	 * force delivery of pending hypervisor-routed interrupts --
	 * only the FW_FEATURE_PS3_LV1 guard is visible here; confirm
	 * against the PS3 platform code.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp, tmp2;
		lv1_get_version_info(&tmp, &tmp2);
	}

	/*
	 * Replay one pending interrupt, highest priority first; each
	 * branch clears its latch bit and returns the trap vector.
	 *
	 * Hypervisor Maintenance Interrupt is checked first.
	 */
	if (happened & PACA_IRQ_HMI) {
		local_paca->irq_happened &= ~PACA_IRQ_HMI;
		return 0xe60;
	}

	if (happened & PACA_IRQ_DEC) {
		local_paca->irq_happened &= ~PACA_IRQ_DEC;
		return 0x900;
	}

	if (happened & PACA_IRQ_PMI) {
		local_paca->irq_happened &= ~PACA_IRQ_PMI;
		return 0xf00;
	}

	if (happened & PACA_IRQ_EE) {
		local_paca->irq_happened &= ~PACA_IRQ_EE;
		return 0x500;
	}

#ifdef CONFIG_PPC_BOOK3E
	/*
	 * Book3E keeps edge-latched external interrupts in a separate
	 * flag, but they replay through the same 0x500 vector; the
	 * doorbell vector differs between Book3E (0x280) and
	 * Book3S (0xa00) below.
	 */
	if (happened & PACA_IRQ_EE_EDGE) {
		local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
		return 0x500;
	}

	if (happened & PACA_IRQ_DBELL) {
		local_paca->irq_happened &= ~PACA_IRQ_DBELL;
		return 0x280;
	}
#else
	if (happened & PACA_IRQ_DBELL) {
		local_paca->irq_happened &= ~PACA_IRQ_DBELL;
		return 0xa00;
	}
#endif

	/* Nothing pending: every latch bit must now be clear. */
	BUG_ON(local_paca->irq_happened != 0);

	return 0;
}
238
/*
 * Restore the interrupt soft-mask to @mask.  When this fully
 * re-enables interrupts (mask == IRQS_ENABLED), replay any interrupt
 * that was lazily latched in paca->irq_happened while we were
 * soft-disabled.
 */
notrace void arch_local_irq_restore(unsigned long mask)
{
	unsigned char irq_happened;
	unsigned int replay;

	/* Write the new soft-enabled/disabled state first. */
	irq_soft_mask_set(mask);
	if (mask)
		return;

	/*
	 * From this point onward we can take interrupts again (unless
	 * we are still hard-disabled).  Check whether anything was
	 * latched while we were soft-disabled.
	 */
	irq_happened = get_irq_happened();
	if (!irq_happened) {
		/* Nothing pending: fast path out. */
		return;
	}

	/*
	 * We need to be hard-disabled to replay.  If the latch says we
	 * already are, we're done; otherwise EE should still be on and
	 * we turn it off here.  With SOFT_MASK_DEBUG, also verify that
	 * the MSR agrees with the PACA_IRQ_HARD_DIS flag.
	 */
	if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
		WARN_ON(!(mfmsr() & MSR_EE));
#endif
		__hard_irq_disable();
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	} else {
		/*
		 * PACA_IRQ_HARD_DIS claims EE is already off; warn and
		 * recover by hard-disabling if the MSR disagrees.
		 */
		if (WARN_ON(mfmsr() & MSR_EE))
			__hard_irq_disable();
#endif
	}

	/* Go back to fully soft-disabled while deciding what to replay. */
	irq_soft_mask_set(IRQS_ALL_DISABLED);
	trace_hardirqs_off();

	/*
	 * __check_irq_replay() picks the highest-priority pending
	 * interrupt, clears its paca->irq_happened latch bit and
	 * returns its vector (or 0).
	 */
	replay = __check_irq_replay();

	/* We can soft-enable now. */
	trace_hardirqs_on();
	irq_soft_mask_set(IRQS_ENABLED);

	/*
	 * Replay if we have to.  NOTE(review): __replay_interrupt() is
	 * expected to return with interrupts hard-enabled, which is why
	 * __hard_irq_enable() is skipped on this path -- confirm
	 * against its definition.
	 */
	if (replay) {
		__replay_interrupt(replay);
		return;
	}

	/* Nothing to replay: just make sure we are hard-enabled. */
	__hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
328
329
330
331
332
333
334
335
336
337
338void notrace restore_interrupts(void)
339{
340 if (irqs_disabled()) {
341 local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
342 local_irq_enable();
343 } else
344 __hard_irq_enable();
345}
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
/*
 * Prepare to enter a low power state with interrupts soft-enabled but
 * hard-disabled.  Returns false when a lazily-latched interrupt is
 * already pending, in which case the caller must abort idle entry so
 * it can be serviced.
 */
bool prep_irq_for_idle(void)
{
	/*
	 * First hard-disable so no interrupt can slip in between the
	 * pending check below and actually entering the low power
	 * state, and latch that fact in irq_happened.
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/*
	 * If anything was latched while we were soft-disabled, bail
	 * out now rather than entering the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable. */
	trace_hardirqs_on();

	/*
	 * Mark interrupts as soft-enabled and clear PACA_IRQ_HARD_DIS:
	 * entering the low power state is expected to hard-enable as a
	 * side effect, so the latch must not claim otherwise.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
	irq_soft_mask_set(IRQS_ENABLED);

	/* Tell the caller it may enter the low power state. */
	return true;
}
393
394#ifdef CONFIG_PPC_BOOK3S
395
396
397
398
399
400
/*
 * Like prep_irq_for_idle(), but for idle sequences entered with
 * interrupts already disabled and which keep them off.  Tells the irq
 * tracer interrupts are "on" for the idle duration without changing
 * the soft mask.  NOTE(review): presumably a fini_* counterpart
 * undoes the tracing state on exit -- not visible in this file.
 */
bool prep_irq_for_idle_irqsoff(void)
{
	/* Caller contract: interrupts must already be (soft-)disabled. */
	WARN_ON(!irqs_disabled());

	/*
	 * Hard-disable so no interrupt can arrive between the pending
	 * check below and entering the low power state, and latch that
	 * fact in irq_happened.
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/*
	 * If anything was latched while we were soft-disabled, bail
	 * out now rather than entering the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable. */
	trace_hardirqs_on();

	return true;
}
424
425
426
427
428
429
430
431
432
433
434
/*
 * Map the SRR1 wake-reason field (extracted by
 * irq_set_pending_from_srr1() below) to a paca->irq_happened latch
 * bit.  IRQ_SYSTEM_RESET is a sentinel for the one reason that
 * cannot be latched lazily and must be replayed immediately;
 * 0 entries are wake reasons with nothing to latch.
 */
#define IRQ_SYSTEM_RESET 0xff

static const u8 srr1_to_lazyirq[0x10] = {
	0, 0, 0,
	PACA_IRQ_DBELL,		/* idx 0x3 */
	IRQ_SYSTEM_RESET,	/* idx 0x4 */
	PACA_IRQ_DBELL,		/* idx 0x5 */
	PACA_IRQ_DEC,		/* idx 0x6 */
	0,			/* idx 0x7 */
	PACA_IRQ_EE,		/* idx 0x8 */
	PACA_IRQ_EE,		/* idx 0x9 */
	PACA_IRQ_HMI,		/* idx 0xa */
	0, 0, 0, 0, 0 };	/* idx 0xb-0xf */
448
449void replay_system_reset(void)
450{
451 struct pt_regs regs;
452
453 ppc_save_regs(®s);
454 regs.trap = 0x100;
455 get_paca()->in_nmi = 1;
456 system_reset_exception(®s);
457 get_paca()->in_nmi = 0;
458}
459EXPORT_SYMBOL_GPL(replay_system_reset);
460
/*
 * After waking from a power-saving state, convert the wake reason
 * encoded in SRR1 into a lazily-latched interrupt in
 * paca->irq_happened, to be replayed when interrupts are next
 * soft-enabled.  System reset cannot be deferred and is replayed
 * immediately.
 */
void irq_set_pending_from_srr1(unsigned long srr1)
{
	/* Extract the SRR1_WAKEMASK_P8 wake-reason field. */
	unsigned int idx = (srr1 & SRR1_WAKEMASK_P8) >> 18;
	u8 reason = srr1_to_lazyirq[idx];

	/*
	 * A system reset is NMI-class: it cannot sit in irq_happened
	 * waiting for a soft-enable, so handle it right away.
	 */
	if (unlikely(reason == IRQ_SYSTEM_RESET)) {
		replay_system_reset();
		return;
	}

	/*
	 * Unrecognized wake reasons map to 0 in the table, making the
	 * OR below a no-op -- they are silently ignored here.
	 * NOTE(review): presumably such wakeups are dealt with (or
	 * warned about) by the callers; confirm.
	 */
	local_paca->irq_happened |= reason;
}
487#endif
488
489
490
491
/*
 * Force a replay of the external interrupt handler (0x500) the next
 * time interrupts are soft-enabled, by latching PACA_IRQ_EE.  Must be
 * called with interrupts disabled.
 */
void force_external_irq_replay(void)
{
	/* Caller contract: interrupts must already be disabled. */
	WARN_ON(!arch_irqs_disabled());

	/*
	 * Interrupts must always be hard disabled before irq_happened
	 * is modified (to prevent a lost update if an interrupt fires
	 * between the read-modify-write), so hard-disable and record
	 * that fact in the latch too.
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/* Indicate in the PACA that we have an interrupt to replay. */
	local_paca->irq_happened |= PACA_IRQ_EE;
}
511
512#endif
513
514int arch_show_interrupts(struct seq_file *p, int prec)
515{
516 int j;
517
518#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
519 if (tau_initialized) {
520 seq_printf(p, "%*s: ", prec, "TAU");
521 for_each_online_cpu(j)
522 seq_printf(p, "%10u ", tau_interrupts(j));
523 seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
524 }
525#endif
526
527 seq_printf(p, "%*s: ", prec, "LOC");
528 for_each_online_cpu(j)
529 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
530 seq_printf(p, " Local timer interrupts for timer event device\n");
531
532 seq_printf(p, "%*s: ", prec, "BCT");
533 for_each_online_cpu(j)
534 seq_printf(p, "%10u ", per_cpu(irq_stat, j).broadcast_irqs_event);
535 seq_printf(p, " Broadcast timer interrupts for timer event device\n");
536
537 seq_printf(p, "%*s: ", prec, "LOC");
538 for_each_online_cpu(j)
539 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
540 seq_printf(p, " Local timer interrupts for others\n");
541
542 seq_printf(p, "%*s: ", prec, "SPU");
543 for_each_online_cpu(j)
544 seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
545 seq_printf(p, " Spurious interrupts\n");
546
547 seq_printf(p, "%*s: ", prec, "PMI");
548 for_each_online_cpu(j)
549 seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
550 seq_printf(p, " Performance monitoring interrupts\n");
551
552 seq_printf(p, "%*s: ", prec, "MCE");
553 for_each_online_cpu(j)
554 seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
555 seq_printf(p, " Machine check exceptions\n");
556
557 if (cpu_has_feature(CPU_FTR_HVMODE)) {
558 seq_printf(p, "%*s: ", prec, "HMI");
559 for_each_online_cpu(j)
560 seq_printf(p, "%10u ",
561 per_cpu(irq_stat, j).hmi_exceptions);
562 seq_printf(p, " Hypervisor Maintenance Interrupts\n");
563 }
564
565 seq_printf(p, "%*s: ", prec, "NMI");
566 for_each_online_cpu(j)
567 seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs);
568 seq_printf(p, " System Reset interrupts\n");
569
570#ifdef CONFIG_PPC_WATCHDOG
571 seq_printf(p, "%*s: ", prec, "WDG");
572 for_each_online_cpu(j)
573 seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs);
574 seq_printf(p, " Watchdog soft-NMI interrupts\n");
575#endif
576
577#ifdef CONFIG_PPC_DOORBELL
578 if (cpu_has_feature(CPU_FTR_DBELL)) {
579 seq_printf(p, "%*s: ", prec, "DBL");
580 for_each_online_cpu(j)
581 seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
582 seq_printf(p, " Doorbell interrupts\n");
583 }
584#endif
585
586 return 0;
587}
588
589
590
591
592u64 arch_irq_stat_cpu(unsigned int cpu)
593{
594 u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;
595
596 sum += per_cpu(irq_stat, cpu).broadcast_irqs_event;
597 sum += per_cpu(irq_stat, cpu).pmu_irqs;
598 sum += per_cpu(irq_stat, cpu).mce_exceptions;
599 sum += per_cpu(irq_stat, cpu).spurious_irqs;
600 sum += per_cpu(irq_stat, cpu).timer_irqs_others;
601 sum += per_cpu(irq_stat, cpu).hmi_exceptions;
602 sum += per_cpu(irq_stat, cpu).sreset_irqs;
603#ifdef CONFIG_PPC_WATCHDOG
604 sum += per_cpu(irq_stat, cpu).soft_nmi_irqs;
605#endif
606#ifdef CONFIG_PPC_DOORBELL
607 sum += per_cpu(irq_stat, cpu).doorbell_irqs;
608#endif
609
610 return sum;
611}
612
/*
 * Debug aid: complain when less than 2KB of the current kernel stack
 * remains free.  Compiled to a no-op unless
 * CONFIG_DEBUG_STACKOVERFLOW is set.
 */
static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	long sp;

	/* Offset of the stack pointer within its THREAD_SIZE stack. */
	sp = current_stack_pointer() & (THREAD_SIZE-1);

	/* Check for stack overflow: is there less than 2KB free? */
	if (unlikely(sp < 2048)) {
		pr_err("do_IRQ: stack overflow: %ld\n", sp);
		dump_stack();
	}
#endif
}
627
/*
 * Core external-interrupt dispatch.  Runs on the hard IRQ stack
 * (do_IRQ switches to it) or on the current stack if we are already
 * on an IRQ stack.
 */
void __do_irq(struct pt_regs *regs)
{
	unsigned int irq;

	irq_enter();

	trace_irq_entry(regs);

	check_stack_overflow();

	/*
	 * Query the platform PIC for the interrupt & ack it.  This
	 * must happen before re-enabling hard interrupts below.
	 */
	irq = ppc_md.get_irq();

	/* We can hard enable interrupts now to allow perf interrupts */
	may_hard_irq_enable();

	/* And finally process it: 0 from the PIC means spurious. */
	if (unlikely(!irq))
		__this_cpu_inc(irq_stat.spurious_irqs);
	else
		generic_handle_irq(irq);

	trace_irq_exit(regs);

	irq_exit();
}
658
/*
 * External interrupt entry point: switch to this CPU's hard IRQ
 * stack (unless we are already on an IRQ stack) and dispatch via
 * __do_irq().
 */
void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	void *cursp, *irqsp, *sirqsp;

	/* Base address of the stack we are currently running on. */
	cursp = (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1));
	irqsp = hardirq_ctx[raw_smp_processor_id()];
	sirqsp = softirq_ctx[raw_smp_processor_id()];

	/* Already on an IRQ stack? Then dispatch in place. */
	if (unlikely(cursp == irqsp || cursp == sirqsp)) {
		__do_irq(regs);
		set_irq_regs(old_regs);
		return;
	}

	/* Otherwise switch to the hard IRQ stack and call __do_irq(). */
	call_do_irq(regs, irqsp);

	set_irq_regs(old_regs);
}
680
/* Arch-level IRQ init: delegate to the platform's init_IRQ hook. */
void __init init_IRQ(void)
{
	if (ppc_md.init_IRQ)
		ppc_md.init_IRQ();
}
686
/* Per-CPU exception stacks for the extra BookE/40x interrupt levels
 * (critical, debug, machine check). */
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
void *critirq_ctx[NR_CPUS] __read_mostly;
void *dbgirq_ctx[NR_CPUS] __read_mostly;
void *mcheckirq_ctx[NR_CPUS] __read_mostly;
#endif

/* Per-CPU soft/hard IRQ stacks used by do_IRQ() and
 * do_softirq_own_stack(). */
void *softirq_ctx[NR_CPUS] __read_mostly;
void *hardirq_ctx[NR_CPUS] __read_mostly;
695
696void do_softirq_own_stack(void)
697{
698 call_do_softirq(softirq_ctx[smp_processor_id()]);
699}
700
701irq_hw_number_t virq_to_hw(unsigned int virq)
702{
703 struct irq_data *irq_data = irq_get_irq_data(virq);
704 return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
705}
706EXPORT_SYMBOL_GPL(virq_to_hw);
707
#ifdef CONFIG_SMP
/*
 * Pick a CPU to direct an interrupt at, honouring @mask.  When the
 * mask covers all online CPUs, distribute round-robin; when a
 * specific affinity mask contains no online CPU, fall back to
 * round-robin too.  Returns the *hard* (physical) CPU id.
 */
int irq_choose_cpu(const struct cpumask *mask)
{
	int cpuid;

	if (cpumask_equal(mask, cpu_online_mask)) {
		static int irq_rover;
		static DEFINE_RAW_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
do_round_robin:
		raw_spin_lock_irqsave(&irq_rover_lock, flags);

		/* Advance the rover, wrapping to the first online CPU. */
		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
		if (irq_rover >= nr_cpu_ids)
			irq_rover = cpumask_first(cpu_online_mask);

		cpuid = irq_rover;

		raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpuid = cpumask_first_and(mask, cpu_online_mask);
		/*
		 * No online CPU in the requested mask: jump back into
		 * the round-robin path above (a legal C goto into the
		 * if-block's scope; the statics persist regardless).
		 */
		if (cpuid >= nr_cpu_ids)
			goto do_round_robin;
	}

	return get_hard_smp_processor_id(cpuid);
}
#else
/* Uniprocessor: there is only one CPU to choose from. */
int irq_choose_cpu(const struct cpumask *mask)
{
	return hard_smp_processor_id();
}
#endif
743
#ifdef CONFIG_PPC64
/* "noirqdistrib" on the kernel command line disables spreading device
 * interrupts across CPUs (clears distribute_irqs, defined above). */
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;	/* consume the option */
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif
753