/*
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994, 1995, 1996, 1997, 1998 Ingo Molnar, Gerhard Stauf
 * Copyright (C) 1999 SuSE GmbH (Philipp Rumpf, prumpf@tux.org)
 * Copyright (C) 1999-2000 Grant Grundler
 * Copyright (c) 2005 Matthew Wilcox
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; either version 2, or (at your option)
 *    any later version.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/types.h>
#include <asm/io.h>

#include <asm/smp.h>
#include <asm/ldcw.h>

#undef PARISC_IRQ_CR16_COUNTS

extern irqreturn_t timer_interrupt(int, void *);
extern irqreturn_t ipi_interrupt(int, void *);

#define EIEM_MASK(irq)       (1UL<<(CPU_IRQ_MAX - irq))
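
/* Worked example, assuming the usual asm/irq.h layout (TIMER_IRQ ==
 * CPU_IRQ_BASE, CPU_IRQ_MAX == CPU_IRQ_BASE + BITS_PER_LONG - 1):
 * EIEM/EIRR bits are numbered big-endian, i.e. bit 0 is the MSB, so
 *	EIEM_MASK(TIMER_IRQ)   == 1UL << (BITS_PER_LONG - 1)	(EIRR bit 0)
 *	EIEM_MASK(CPU_IRQ_MAX) == 1UL << 0			(last EIRR bit)
 */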

/* Bits in EIEM correlate with cpu_irq_action[].
** Numbered *Big Endian*! (ie bit 0 is MSB)
*/
static volatile unsigned long cpu_eiem = 0;

/*
** local ACK bitmap ... habitually set to 1, but reset to zero
** between ->ack() and ->end() of the interrupt to prevent
** re-interruption of a processing interrupt.
*/
static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;

static void cpu_mask_irq(struct irq_data *d)
{
	unsigned long eirr_bit = EIEM_MASK(d->irq);

	cpu_eiem &= ~eirr_bit;
	/* Do nothing on the other CPUs.  If they get this interrupt,
	 * the & cpu_eiem in do_cpu_irq_mask() ensures they won't
	 * handle it, and the set_eiem() at the bottom ensures it
	 * then gets disabled */
}

static void __cpu_unmask_irq(unsigned int irq)
{
	unsigned long eirr_bit = EIEM_MASK(irq);

	cpu_eiem |= eirr_bit;

	/* This is just a simple NOP IPI.  But what it does is cause
	 * all the other CPUs to do a set_eiem(cpu_eiem) at the end
	 * of the interrupt handler */
	smp_send_all_nop();
}

static void cpu_unmask_irq(struct irq_data *d)
{
	__cpu_unmask_irq(d->irq);
}

void cpu_ack_irq(struct irq_data *d)
{
	unsigned long mask = EIEM_MASK(d->irq);
	int cpu = smp_processor_id();

	/* Clear in EIEM so we can't process this IRQ again */
	per_cpu(local_ack_eiem, cpu) &= ~mask;

	/* disable the interrupt */
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));

	/* and now ack it */
	mtctl(mask, 23);
}

void cpu_eoi_irq(struct irq_data *d)
{
	unsigned long mask = EIEM_MASK(d->irq);
	int cpu = smp_processor_id();

	/* set it in the eiems---it's no longer in process */
	per_cpu(local_ack_eiem, cpu) |= mask;

	/* enable the interrupt */
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
}
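
/* Hedged sketch of the life cycle of one external interrupt, as driven by
 * the generic handle_percpu_irq() flow (inferred from the irq core, not
 * spelled out in this file):
 *   1. do_cpu_irq_mask() reads EIRR (cr23), masks it with cpu_eiem and this
 *      CPU's local_ack_eiem, and converts the most significant set bit
 *      (lowest PA bit number) to a Linux irq number.
 *   2. ->irq_ack(): cpu_ack_irq() clears the bit in local_ack_eiem,
 *      narrows EIEM, then writes the bit to cr23 to ack the hardware.
 *   3. The registered irqaction handler runs.
 *   4. ->irq_eoi(): cpu_eoi_irq() restores the bit in local_ack_eiem and
 *      re-widens EIEM, re-enabling the interrupt on this CPU.
 */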

#ifdef CONFIG_SMP
int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
{
	int cpu_dest;

	/* timer and ipi have to always be received on all CPUs */
	if (irqd_is_per_cpu(d))
		return -EINVAL;

	/* whatever mask they set, we just allow one CPU */
	cpu_dest = first_cpu(*dest);

	return cpu_dest;
}

static int cpu_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
				bool force)
{
	int cpu_dest;

	cpu_dest = cpu_check_affinity(d, dest);
	if (cpu_dest < 0)
		return -1;

	cpumask_copy(d->affinity, dest);

	return 0;
}
#endif

static struct irq_chip cpu_interrupt_type = {
	.name			= "CPU",
	.irq_mask		= cpu_mask_irq,
	.irq_unmask		= cpu_unmask_irq,
	.irq_ack		= cpu_ack_irq,
	.irq_eoi		= cpu_eoi_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= cpu_set_affinity_irq,
#endif
	/* XXX: Needs to be written.  We managed without it so far, but
	 * we really ought to write it.
	 */
	.irq_retrigger		= NULL,
};

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
#define irq_stats(x)		(&per_cpu(irq_stat, x))

/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	seq_printf(p, "%*s: ", prec, "STK");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
	seq_puts(p, "  Kernel stack usage\n");
# ifdef CONFIG_IRQSTACKS
	seq_printf(p, "%*s: ", prec, "IST");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
	seq_puts(p, "  Interrupt stack usage\n");
# endif
#endif
#ifdef CONFIG_SMP
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_puts(p, "  Rescheduling interrupts\n");
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
	seq_puts(p, "  Function call interrupts\n");
#endif
	seq_printf(p, "%*s: ", prec, "UAH");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_unaligned_count);
	seq_puts(p, "  Unaligned access handler traps\n");
	seq_printf(p, "%*s: ", prec, "FPA");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_fpassist_count);
	seq_puts(p, "  Floating point assist traps\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_puts(p, "  TLB shootdowns\n");
	return 0;
}

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	unsigned long flags;

	if (i == 0) {
		seq_puts(p, "    ");
		for_each_online_cpu(j)
			seq_printf(p, "       CPU%d", j);

#ifdef PARISC_IRQ_CR16_COUNTS
		seq_printf(p, " [min/avg/max] (CPU cycle counts)");
#endif
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		struct irq_desc *desc = irq_to_desc(i);
		struct irqaction *action;

		raw_spin_lock_irqsave(&desc->lock, flags);
		action = desc->action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#else
		seq_printf(p, "%10u ", kstat_irqs(i));
#endif

		seq_printf(p, " %14s", irq_desc_get_chip(desc)->name);
#ifndef PARISC_IRQ_CR16_COUNTS
		seq_printf(p, "  %s", action->name);

		while ((action = action->next))
			seq_printf(p, ", %s", action->name);
#else
		for ( ; action; action = action->next) {
			unsigned int k, avg, min, max;

			min = max = action->cr16_hist[0];

			for (avg = k = 0; k < PARISC_CR16_HIST_SIZE; k++) {
				int hist = action->cr16_hist[k];

				if (hist) {
					avg += hist;
				} else
					break;

				if (hist > max)
					max = hist;
				if (hist < min)
					min = hist;
			}

			/* don't divide by zero if the first slot was empty */
			if (k)
				avg /= k;
			seq_printf(p, " %s[%d/%d/%d]", action->name,
				   min, avg, max);
		}
#endif

		seq_putc(p, '\n');
 skip:
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	if (i == NR_IRQS)
		arch_show_interrupts(p, 3);

	return 0;
}


/*
** The following form a "set": Virtual IRQ, Transaction Address, Trans Data.
** Respectively, these map to IRQ region+EIRR, Processor HPA, EIRR bit.
**
** To use txn_XXX() interfaces, get a Virtual IRQ first.
** Then use that to get the Transaction address and data.
*/

int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
{
	if (irq_has_action(irq))
		return -EBUSY;
	if (irq_get_chip(irq) != &cpu_interrupt_type)
		return -EBUSY;

	/* for iosapic interrupts */
	if (type) {
		irq_set_chip_and_handler(irq, type, handle_percpu_irq);
		irq_set_chip_data(irq, data);
		__cpu_unmask_irq(irq);
	}
	return 0;
}

int txn_claim_irq(int irq)
{
	return cpu_claim_irq(irq, NULL, NULL) ? -1 : irq;
}

/*
 * The bits_wide parameter accommodates the limitations of the HW/SW which
 * use these bits:
 * Legacy PA I/O (GSC/NIO): 5 bits (architected EIM register)
 * V-class (EPIC):          6 bits
 * N/L/A-class (iosapic):   8 bits
 * PCI 2.2 MSI:            16 bits
 * Some PCI devices:       32 bits (Symbios SCSI/ATM/HyperFabric)
 *
 * On the service provider side:
 * o PA 1.1 (and PA2.0 narrow mode)     5-bits (width of EIR register)
 * o PA 2.0 wide mode                   6-bits (per processor)
 * o IA64                               8-bits (0-256 total)
 *
 * So a Legacy PA I/O device on a PA 2.0 box can't use all the bits supported
 * by the processor...and the N/L-class I/O subsystem supports more bits than
 * PA2.0 has. The first case is the problem.
 */
int txn_alloc_irq(unsigned int bits_wide)
{
	int irq;

	/* never return irq 0 cause that's the interval timer */
	for (irq = CPU_IRQ_BASE + 1; irq <= CPU_IRQ_MAX; irq++) {
		if (cpu_claim_irq(irq, NULL, NULL) < 0)
			continue;
		if ((irq - CPU_IRQ_BASE) >= (1 << bits_wide))
			continue;
		return irq;
	}

	/* unlikely, but be prepared */
	return -1;
}


unsigned long txn_affinity_addr(unsigned int irq, int cpu)
{
#ifdef CONFIG_SMP
	struct irq_data *d = irq_get_irq_data(irq);
	cpumask_copy(d->affinity, cpumask_of(cpu));
#endif

	return per_cpu(cpu_data, cpu).txn_addr;
}


unsigned long txn_alloc_addr(unsigned int virt_irq)
{
	static int next_cpu = -1;

	next_cpu++; /* assign to "next" CPU we want this bugger on */

	/* validate entry */
	while ((next_cpu < nr_cpu_ids) &&
		(!per_cpu(cpu_data, next_cpu).txn_addr ||
		 !cpu_online(next_cpu)))
		next_cpu++;

	if (next_cpu >= nr_cpu_ids)
		next_cpu = 0;	/* nothing else, assign monarch */

	return txn_affinity_addr(virt_irq, next_cpu);
}


unsigned int txn_alloc_data(unsigned int virt_irq)
{
	return virt_irq - CPU_IRQ_BASE;
}
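
/* Hedged usage sketch of the txn_*() interfaces above; the variable names
 * are illustrative only, not taken from an in-tree driver:
 *
 *	int virt_irq = txn_alloc_irq(8);	// e.g. 8-bit iosapic EIM
 *	if (virt_irq < 0)
 *		return -EBUSY;
 *	unsigned long txn_addr = txn_alloc_addr(virt_irq);
 *	unsigned int txn_data = txn_alloc_data(virt_irq);
 *	// program txn_addr/txn_data into the interrupt source, then hand
 *	// the virtual IRQ to cpu_claim_irq() with the real chip and data.
 */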

static inline int eirr_to_irq(unsigned long eirr)
{
	int bit = fls_long(eirr);
	return (BITS_PER_LONG - bit) + TIMER_IRQ;
}
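
/* Worked example: fls_long() returns the 1-based index of the most
 * significant set bit, so if only the MSB of EIRR is pending (PA bit 0,
 * the interval timer) this yields (BITS_PER_LONG - BITS_PER_LONG) +
 * TIMER_IRQ == TIMER_IRQ; the LSB maps to TIMER_IRQ + BITS_PER_LONG - 1.
 */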

#ifdef CONFIG_IRQSTACKS
/*
 * IRQ STACK - used for irq handler
 */
#define IRQ_STACK_SIZE      (4096 << 2) /* 16k irq stack size */

union irq_stack_union {
	unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
	volatile unsigned int slock[4];	/* 16 bytes, so __ldcw_align() can
					 * find a 16-byte aligned lock word */
	volatile unsigned int lock[1];
};

DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
		.slock = { 1,1,1,1 },
	};
#endif


int sysctl_panic_on_stackoverflow = 1;

static inline void stack_overflow_check(struct pt_regs *regs)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
#define STACK_MARGIN	(256*6)

	/* Our stack starts directly behind the thread_info struct. */
	unsigned long stack_start = (unsigned long) current_thread_info();
	unsigned long sp = regs->gr[30];
	unsigned long stack_usage;
	unsigned int *last_usage;
	int cpu = smp_processor_id();

	/* if sr7 != 0, we interrupted a userspace process which we do not want
	 * to check for stack overflow. We will only check the kernel stack. */
	if (regs->sr[7])
		return;

	/* calculate kernel stack usage */
	stack_usage = sp - stack_start;
#ifdef CONFIG_IRQSTACKS
	if (likely(stack_usage <= THREAD_SIZE))
		goto check_kernel_stack; /* found kernel stack */

	/* check irq stack usage */
	stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
	stack_usage = sp - stack_start;

	last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
	if (unlikely(stack_usage > *last_usage))
		*last_usage = stack_usage;

	if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN)))
		return;

	pr_emerg("stackcheck: %s will most likely overflow irq stack "
		 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
		current->comm, sp, stack_start, stack_start + IRQ_STACK_SIZE);
	goto panic_check;

check_kernel_stack:
#endif

	/* check kernel stack usage */
	last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);

	if (unlikely(stack_usage > *last_usage))
		*last_usage = stack_usage;

	if (likely(stack_usage < (THREAD_SIZE - STACK_MARGIN)))
		return;

	pr_emerg("stackcheck: %s will most likely overflow kernel stack "
		 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
		current->comm, sp, stack_start, stack_start + THREAD_SIZE);

#ifdef CONFIG_IRQSTACKS
panic_check:
#endif
	if (sysctl_panic_on_stackoverflow)
		panic("low stack detected by irq handler - check messages\n");
#endif
}
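
/* Margin arithmetic, for reference: STACK_MARGIN is 256*6 = 1536 bytes,
 * so with the 16 KiB IRQ_STACK_SIZE the warning above fires once more
 * than 16384 - 1536 = 14848 bytes of the irq stack are in use, and at
 * THREAD_SIZE - 1536 bytes for the kernel stack.
 */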

#ifdef CONFIG_IRQSTACKS
/* in entry.S: */
void call_on_stack(unsigned long p1, void *func, unsigned long new_stack);

static void execute_on_irq_stack(void *func, unsigned long param1)
{
	union irq_stack_union *union_ptr;
	unsigned long irq_stack;
	volatile unsigned int *irq_stack_in_use;

	union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
	irq_stack = (unsigned long) &union_ptr->stack;
	irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.slock),
			 64); /* align for stack frame usage */

	/* We may be called recursively. If we are already using the irq
	 * stack, just continue to use it. We use the ldcw lock word to
	 * serialize irq stack usage.
	 */
	irq_stack_in_use = (volatile unsigned int *)__ldcw_align(union_ptr);
	if (!__ldcw(irq_stack_in_use)) {
		void (*direct_call)(unsigned long p1) = func;

		/* We are using the IRQ stack already.
		 * Do direct call on current stack. */
		direct_call(param1);
		return;
	}

	/* This is where we switch to the IRQ stack. */
	call_on_stack(param1, func, irq_stack);

	/* free up irq stack usage. */
	*irq_stack_in_use = 1;
}

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		execute_on_irq_stack(__do_softirq, 0);

	local_irq_restore(flags);
}
#endif /* CONFIG_IRQSTACKS */


/* ONLY called from entry.S:intr_extint() */
void do_cpu_irq_mask(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	unsigned long eirr_val;
	int irq, cpu = smp_processor_id();
#ifdef CONFIG_SMP
	struct irq_desc *desc;
	cpumask_t dest;
#endif

	old_regs = set_irq_regs(regs);
	local_irq_disable();
	irq_enter();

	eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
	if (!eirr_val)
		goto set_out;
	irq = eirr_to_irq(eirr_val);

#ifdef CONFIG_SMP
	desc = irq_to_desc(irq);
	cpumask_copy(&dest, desc->irq_data.affinity);
	if (irqd_is_per_cpu(&desc->irq_data) &&
	    !cpu_isset(smp_processor_id(), dest)) {
		int cpu = first_cpu(dest);

		printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
		       irq, smp_processor_id(), cpu);
		gsc_writel(irq + CPU_IRQ_BASE,
			per_cpu(cpu_data, cpu).hpa);
		goto set_out;
	}
#endif
	stack_overflow_check(regs);

#ifdef CONFIG_IRQSTACKS
	execute_on_irq_stack(&generic_handle_irq, irq);
#else
	generic_handle_irq(irq);
#endif

 out:
	irq_exit();
	set_irq_regs(old_regs);
	return;

 set_out:
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
	goto out;
}

static struct irqaction timer_action = {
	.handler = timer_interrupt,
	.name = "timer",
	.flags = IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL,
};

#ifdef CONFIG_SMP
static struct irqaction ipi_action = {
	.handler = ipi_interrupt,
	.name = "IPI",
	.flags = IRQF_PERCPU,
};
#endif

static void claim_cpu_irqs(void)
{
	int i;
	for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
		irq_set_chip_and_handler(i, &cpu_interrupt_type,
					 handle_percpu_irq);
	}

	irq_set_handler(TIMER_IRQ, handle_percpu_irq);
	setup_irq(TIMER_IRQ, &timer_action);
#ifdef CONFIG_SMP
	irq_set_handler(IPI_IRQ, handle_percpu_irq);
	setup_irq(IPI_IRQ, &ipi_action);
#endif
}

void __init init_IRQ(void)
{
	local_irq_disable();	/* PARANOID - should already be disabled */
	mtctl(~0UL, 23);	/* EIRR : clear all pending external intr */
#ifdef CONFIG_SMP
	if (!cpu_eiem) {
		claim_cpu_irqs();
		cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
	}
#else
	claim_cpu_irqs();
	cpu_eiem = EIEM_MASK(TIMER_IRQ);
#endif
	set_eiem(cpu_eiem);	/* EIEM : enable all external intr */
}