/*
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/types.h>
#include <asm/io.h>

#include <asm/smp.h>
#include <asm/ldcw.h>

#undef PARISC_IRQ_CR16_COUNTS

extern irqreturn_t timer_interrupt(int, void *);
extern irqreturn_t ipi_interrupt(int, void *);

#define EIEM_MASK(irq)	(1UL << (CPU_IRQ_MAX - (irq)))
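
/*
 * Illustration (an assumption based on the usual asm/irq.h layout, where
 * TIMER_IRQ == CPU_IRQ_BASE and CPU_IRQ_MAX == CPU_IRQ_BASE +
 * BITS_PER_LONG - 1): EIEM_MASK(TIMER_IRQ) is the most significant
 * EIEM/EIRR bit and EIEM_MASK(CPU_IRQ_MAX) is bit 0, i.e. the external
 * interrupt bits are numbered big-endian.
 */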

/* Bits in EIEM correlate with cpu_irq_action[].
** Numbered *Big* Endian! (ie bit 0 is MSB)
*/
static volatile unsigned long cpu_eiem = 0;

/*
** Local (per-CPU) "ack" bitmap: a bit is cleared between cpu_ack_irq()
** and cpu_eoi_irq() so the interrupt stays masked on this CPU while it
** is being serviced.  ~0UL means nothing is in flight.
*/
static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;

static void cpu_mask_irq(struct irq_data *d)
{
	unsigned long eirr_bit = EIEM_MASK(d->irq);

	cpu_eiem &= ~eirr_bit;
	/* Do nothing on the other CPUs.  If they get this interrupt,
	 * the & cpu_eiem in do_cpu_irq_mask() ensures they won't
	 * handle it, and the set_eiem() at the bottom will ensure it
	 * then gets disabled */
}

static void __cpu_unmask_irq(unsigned int irq)
{
	unsigned long eirr_bit = EIEM_MASK(irq);

	cpu_eiem |= eirr_bit;

	/* This is just a simple NOP IPI.  But what it does is cause
	 * all the other CPUs to do a set_eiem(cpu_eiem) at the end
	 * of the interrupt handler */
	smp_send_all_nop();
}

static void cpu_unmask_irq(struct irq_data *d)
{
	__cpu_unmask_irq(d->irq);
}

void cpu_ack_irq(struct irq_data *d)
{
	unsigned long mask = EIEM_MASK(d->irq);
	int cpu = smp_processor_id();

	/* Clear in EIEM so we can no longer process */
	per_cpu(local_ack_eiem, cpu) &= ~mask;

	/* disable the interrupt */
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));

	/* and now ack it (EIRR is write-1-to-clear) */
	mtctl(mask, 23);
}

void cpu_eoi_irq(struct irq_data *d)
{
	unsigned long mask = EIEM_MASK(d->irq);
	int cpu = smp_processor_id();

	/* set it in the eiems---it's no longer in process */
	per_cpu(local_ack_eiem, cpu) |= mask;

	/* enable the interrupt */
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
}
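
/*
 * Lifecycle sketch: do_cpu_irq_mask() reads the pending bits from the
 * EIRR (CR23); the flow handler calls cpu_ack_irq(), which clears the
 * bit in local_ack_eiem, narrows the live EIEM, and write-1-clears the
 * EIRR bit; once the action has run, cpu_eoi_irq() restores the bit so
 * set_eiem() re-enables delivery of that interrupt on this CPU.
 */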

#ifdef CONFIG_SMP
int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
{
	int cpu_dest;

	/* timer and ipi have to always be received on all CPUs */
	if (irqd_is_per_cpu(d))
		return -EINVAL;

	/* whatever mask they set, we just allow one CPU */
	cpu_dest = cpumask_first_and(dest, cpu_online_mask);
	if (cpu_dest >= nr_cpu_ids)
		cpu_dest = cpumask_first(cpu_online_mask);

	return cpu_dest;
}

static int cpu_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
				bool force)
{
	int cpu_dest;

	cpu_dest = cpu_check_affinity(d, dest);
	if (cpu_dest < 0)
		return -1;

	cpumask_copy(irq_data_get_affinity_mask(d), dest);

	return 0;
}
#endif

static struct irq_chip cpu_interrupt_type = {
	.name			= "CPU",
	.irq_mask		= cpu_mask_irq,
	.irq_unmask		= cpu_unmask_irq,
	.irq_ack		= cpu_ack_irq,
	.irq_eoi		= cpu_eoi_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= cpu_set_affinity_irq,
#endif
	/* XXX: Needs to be written.  We managed without it so far, but
	 * we really ought to write it.
	 */
	.irq_retrigger		= NULL,
};

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
#define irq_stats(x)	(&per_cpu(irq_stat, x))

/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	seq_printf(p, "%*s: ", prec, "STK");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
	seq_puts(p, "  Kernel stack usage\n");
# ifdef CONFIG_IRQSTACKS
	seq_printf(p, "%*s: ", prec, "IST");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
	seq_puts(p, "  Interrupt stack usage\n");
# endif
#endif
#ifdef CONFIG_SMP
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_puts(p, "  Rescheduling interrupts\n");
#endif
	seq_printf(p, "%*s: ", prec, "UAH");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_unaligned_count);
	seq_puts(p, "  Unaligned access handler traps\n");
	seq_printf(p, "%*s: ", prec, "FPA");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_fpassist_count);
	seq_puts(p, "  Floating point assist traps\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_puts(p, "  TLB shootdowns\n");
	return 0;
}

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	unsigned long flags;

	if (i == 0) {
		seq_puts(p, "    ");
		for_each_online_cpu(j)
			seq_printf(p, "       CPU%d", j);

#ifdef PARISC_IRQ_CR16_COUNTS
		seq_printf(p, " [min/avg/max] (CPU cycle counts)");
#endif
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		struct irq_desc *desc = irq_to_desc(i);
		struct irqaction *action;

		raw_spin_lock_irqsave(&desc->lock, flags);
		action = desc->action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#else
		seq_printf(p, "%10u ", kstat_irqs(i));
#endif

		seq_printf(p, " %14s", irq_desc_get_chip(desc)->name);
#ifndef PARISC_IRQ_CR16_COUNTS
		seq_printf(p, "  %s", action->name);

		while ((action = action->next))
			seq_printf(p, ", %s", action->name);
#else
		for ( ; action; action = action->next) {
			unsigned int k, avg, min, max;

			min = max = action->cr16_hist[0];

			for (avg = k = 0; k < PARISC_CR16_HIST_SIZE; k++) {
				int hist = action->cr16_hist[k];

				if (hist) {
					avg += hist;
				} else
					break;

				if (hist > max)
					max = hist;
				if (hist < min)
					min = hist;
			}

			/* don't divide by zero if no samples were taken */
			if (k)
				avg /= k;
			seq_printf(p, " %s[%d/%d/%d]", action->name,
				   min, avg, max);
		}
#endif

		seq_putc(p, '\n');
 skip:
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	if (i == NR_IRQS)
		arch_show_interrupts(p, 3);

	return 0;
}

/*
** The following form a "set": Virtual IRQ, Transaction Address, Trans Data.
** Respectively, these map to IRQ region+EIRR, Processor HPA, EIRR bit.
**
** To use the txn_XXX() interfaces, get a Virtual IRQ first.
** Then use that to get the Transaction address and data.
*/
int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
{
	if (irq_has_action(irq))
		return -EBUSY;
	if (irq_get_chip(irq) != &cpu_interrupt_type)
		return -EBUSY;

	/* for iosapic interrupts */
	if (type) {
		irq_set_chip_and_handler(irq, type, handle_percpu_irq);
		irq_set_chip_data(irq, data);
		__cpu_unmask_irq(irq);
	}
	return 0;
}

int txn_claim_irq(int irq)
{
	return cpu_claim_irq(irq, NULL, NULL) ? -1 : irq;
}

/*
** The alloc process needs to accept a parameter to accommodate limitations
** of the HW/SW which use these bits:
** Legacy PA I/O (GSC/NIO): 5 bits (architected EIM register)
** V-class (EPIC):          6 bits
** N/L/A-class (iosapic):   8 bits
** PCI 2.2 MSI:            16 bits
** PCI 2.3 MSI:            32 bits
**
** On the service provider side:
** o PA 1.1 (and PA2.0 narrow mode)     5-bits (width of EIR register)
** o PA 2.0 wide mode                   6-bits (per processor)
**
** So a Legacy PA I/O device on a PA 2.0 box can't use all the bits
** supported by the processor, and the N/L-class I/O subsystem supports
** more bits than PA2.0 has. The first case is the problem.
*/
int txn_alloc_irq(unsigned int bits_wide)
{
	int irq;

	/* never return the timer irq (CPU_IRQ_BASE), start just past it */
	for (irq = CPU_IRQ_BASE + 1; irq <= CPU_IRQ_MAX; irq++) {
		if (cpu_claim_irq(irq, NULL, NULL) < 0)
			continue;
		if ((irq - CPU_IRQ_BASE) >= (1 << bits_wide))
			continue;
		return irq;
	}

	/* unlikely, but be prepared */
	return -1;
}

unsigned long txn_affinity_addr(unsigned int irq, int cpu)
{
#ifdef CONFIG_SMP
	struct irq_data *d = irq_get_irq_data(irq);
	cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(cpu));
#endif

	return per_cpu(cpu_data, cpu).txn_addr;
}

unsigned long txn_alloc_addr(unsigned int virt_irq)
{
	static int next_cpu = -1;

	next_cpu++; /* assign to "next" CPU we want this bugger on */

	/* validate entry */
	while ((next_cpu < nr_cpu_ids) &&
	       (!per_cpu(cpu_data, next_cpu).txn_addr ||
		!cpu_online(next_cpu)))
		next_cpu++;

	if (next_cpu >= nr_cpu_ids)
		next_cpu = 0;	/* nothing else, assign monarch */

	return txn_affinity_addr(virt_irq, next_cpu);
}

unsigned int txn_alloc_data(unsigned int virt_irq)
{
	return virt_irq - CPU_IRQ_BASE;
}
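
/*
 * Illustrative use of the txn_*() set by a driver (a sketch, not code
 * from any real driver; "my_chip" and "my_data" are hypothetical):
 *
 *	int virt_irq = txn_alloc_irq(8);	// e.g. 8 EIM bits (iosapic)
 *	unsigned long addr = txn_alloc_addr(virt_irq);
 *	unsigned int data = txn_alloc_data(virt_irq);
 *	// check virt_irq < 0 for failure, program addr/data into the
 *	// device's EIM register, then register the handler:
 *	cpu_claim_irq(virt_irq, &my_chip, &my_data);
 */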

static inline int eirr_to_irq(unsigned long eirr)
{
	int bit = fls_long(eirr);
	return (BITS_PER_LONG - bit) + TIMER_IRQ;
}
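
/*
 * Worked example (64-bit kernel, assuming the asm/irq.h layout noted
 * above): if only the MSB of the EIRR is set, fls_long() returns 64 and
 * eirr_to_irq() yields TIMER_IRQ; bit 62 set gives fls_long() == 63,
 * i.e. TIMER_IRQ + 1 (the IPI).  This is the inverse of the big-endian
 * numbering in EIEM_MASK().
 */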

#ifdef CONFIG_IRQSTACKS
/*
 * IRQ STACK - used for irq handler
 */
#define IRQ_STACK_SIZE      (4096 << 3) /* 32k irq stack size */

union irq_stack_union {
	unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
	/* 16 bytes so __ldcw_align() can always find the 16-byte
	 * aligned word that __ldcw() requires */
	volatile unsigned int slock[4];
	volatile unsigned int lock[1];
};

DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
		.slock = { 1, 1, 1, 1 },
	};
#endif

/* panic on overflow by default; set to -1 once a report has fired */
int sysctl_panic_on_stackoverflow = 1;

static inline void stack_overflow_check(struct pt_regs *regs)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	#define STACK_MARGIN	(256*6)

	/* Our stack starts directly behind the thread_info struct. */
	unsigned long stack_start = (unsigned long) current_thread_info();
	unsigned long sp = regs->gr[30];
	unsigned long stack_usage;
	unsigned int *last_usage;
	int cpu = smp_processor_id();

	/* if sr7 != 0, we interrupted a userspace process which we do not
	 * want to check for stack overflow. We will only check the kernel
	 * stack. */
	if (regs->sr[7])
		return;

	/* exit if already in panic */
	if (sysctl_panic_on_stackoverflow < 0)
		return;

	/* calculate kernel stack usage */
	stack_usage = sp - stack_start;
#ifdef CONFIG_IRQSTACKS
	if (likely(stack_usage <= THREAD_SIZE))
		goto check_kernel_stack; /* found kernel stack */

	/* check irq stack usage */
	stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
	stack_usage = sp - stack_start;

	last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
	if (unlikely(stack_usage > *last_usage))
		*last_usage = stack_usage;

	if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN)))
		return;

	pr_emerg("stackcheck: %s will most likely overflow irq stack "
		 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
		 current->comm, sp, stack_start,
		 stack_start + IRQ_STACK_SIZE);
	goto panic_check;

check_kernel_stack:
#endif

	/* check kernel stack usage */
	last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);

	if (unlikely(stack_usage > *last_usage))
		*last_usage = stack_usage;

	if (likely(stack_usage < (THREAD_SIZE - STACK_MARGIN)))
		return;

	pr_emerg("stackcheck: %s will most likely overflow kernel stack "
		 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
		 current->comm, sp, stack_start,
		 stack_start + THREAD_SIZE);

#ifdef CONFIG_IRQSTACKS
panic_check:
#endif
	if (sysctl_panic_on_stackoverflow) {
		sysctl_panic_on_stackoverflow = -1; /* disable further checks */
		panic("low stack detected by irq handler - check messages\n");
	}
#endif
}

#ifdef CONFIG_IRQSTACKS
/* in entry.S: */
void call_on_stack(unsigned long p1, void *func, unsigned long new_stack);

static void execute_on_irq_stack(void *func, unsigned long param1)
{
	union irq_stack_union *union_ptr;
	unsigned long irq_stack;
	volatile unsigned int *irq_stack_in_use;

	union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
	irq_stack = (unsigned long) &union_ptr->stack;
	irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.slock),
			  64); /* align for stack frame usage */

	/* We may be called recursively. If we are already using the irq
	 * stack, just continue to use it. Use spinlocks to serialize
	 * the irq stack usage.
	 */
	irq_stack_in_use = (volatile unsigned int *)__ldcw_align(union_ptr);
	if (!__ldcw(irq_stack_in_use)) {
		void (*direct_call)(unsigned long p1) = func;

		/* We are using the IRQ stack already.
		 * Do direct call on current stack. */
		direct_call(param1);
		return;
	}

	/* This is where we switch to the IRQ stack. */
	call_on_stack(param1, func, irq_stack);

	/* free up irq stack usage. */
	*irq_stack_in_use = 1;
}

void do_softirq_own_stack(void)
{
	execute_on_irq_stack(__do_softirq, 0);
}
#endif /* CONFIG_IRQSTACKS */

/* ONLY called from entry.S:intr_extint() */
void do_cpu_irq_mask(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	unsigned long eirr_val;
	int irq, cpu = smp_processor_id();
	struct irq_data *irq_data;
#ifdef CONFIG_SMP
	cpumask_t dest;
#endif

	old_regs = set_irq_regs(regs);
	local_irq_disable();
	irq_enter();

	eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
	if (!eirr_val)
		goto set_out;
	irq = eirr_to_irq(eirr_val);

	irq_data = irq_get_irq_data(irq);

	/* Filter out spurious interrupts, mostly from serial port at bootup */
	if (unlikely(!irq_desc_has_action(irq_data_to_desc(irq_data))))
		goto set_out;

#ifdef CONFIG_SMP
	cpumask_copy(&dest, irq_data_get_affinity_mask(irq_data));
	if (irqd_is_per_cpu(irq_data) &&
	    !cpumask_test_cpu(smp_processor_id(), &dest)) {
		int cpu = cpumask_first(&dest);

		printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
		       irq, smp_processor_id(), cpu);
		/* write the EIRR bit number to the target CPU's EIR port,
		 * matching the virq-to-bit mapping in txn_alloc_data() */
		gsc_writel(irq - CPU_IRQ_BASE,
			   per_cpu(cpu_data, cpu).hpa);
		goto set_out;
	}
#endif
	stack_overflow_check(regs);

#ifdef CONFIG_IRQSTACKS
	execute_on_irq_stack(&generic_handle_irq, irq);
#else
	generic_handle_irq(irq);
#endif

 out:
	irq_exit();
	set_irq_regs(old_regs);
	return;

 set_out:
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
	goto out;
}

static struct irqaction timer_action = {
	.handler = timer_interrupt,
	.name = "timer",
	.flags = IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL,
};

#ifdef CONFIG_SMP
static struct irqaction ipi_action = {
	.handler = ipi_interrupt,
	.name = "IPI",
	.flags = IRQF_PERCPU,
};
#endif

static void claim_cpu_irqs(void)
{
	int i;

	for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
		irq_set_chip_and_handler(i, &cpu_interrupt_type,
					 handle_percpu_irq);
	}

	irq_set_handler(TIMER_IRQ, handle_percpu_irq);
	setup_irq(TIMER_IRQ, &timer_action);
#ifdef CONFIG_SMP
	irq_set_handler(IPI_IRQ, handle_percpu_irq);
	setup_irq(IPI_IRQ, &ipi_action);
#endif
}

void __init init_IRQ(void)
{
	local_irq_disable();	/* PARANOID - should already be disabled */
	mtctl(~0UL, 23);	/* EIRR : clear all pending external intr */
#ifdef CONFIG_SMP
	if (!cpu_eiem) {
		claim_cpu_irqs();
		cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
	}
#else
	claim_cpu_irqs();
	cpu_eiem = EIEM_MASK(TIMER_IRQ);
#endif
	set_eiem(cpu_eiem);	/* EIEM : enable all external intr */
}