1
2
3
4
5#include <asm/irq.h>
6#include <linux/irq.h>
7#include <linux/interrupt.h>
8#include <linux/smp.h>
9#include <linux/kernel.h>
10#include <linux/errno.h>
11#include <linux/init.h>
12#include <linux/profile.h>
13#include <linux/of.h>
14#include <linux/of_irq.h>
15#include <linux/proc_fs.h>
16#include <linux/seq_file.h>
17#include <linux/threads.h>
18#include <linux/spinlock.h>
19#include <linux/kernel_stat.h>
20#include <hwregs/reg_map.h>
21#include <hwregs/reg_rdwr.h>
22#include <hwregs/intr_vect.h>
23#include <hwregs/intr_vect_defs.h>
24
25#define CPU_FIXED -1
26
27
/*
 * TIMER_MASK is the bit for the TIMER0 interrupt inside the 32-bit mask
 * register that holds its vector.  TIMER_VECT1 is defined when the timer
 * vector falls in the second mask register (vector offsets 32..63).
 */
#if TIMER0_INTR_VECT - FIRST_IRQ < 32
#define TIMER_MASK (1 << (TIMER0_INTR_VECT - FIRST_IRQ))
#undef TIMER_VECT1
#else
#define TIMER_MASK (1 << (TIMER0_INTR_VECT - FIRST_IRQ - 32))
#define TIMER_VECT1
#endif
#ifdef CONFIG_ETRAX_KGDB
/*
 * IGNOREMASK: bit of the serial-port interrupt reserved for the kernel
 * debugger; it is excluded from normal multiple-IRQ dispatch (see
 * crisv32_do_multiple).  Note all ports are vectors below 32, so only
 * masked[0] needs the exclusion.
 */
#if defined(CONFIG_ETRAX_KGDB_PORT0)
#define IGNOREMASK (1 << (SER0_INTR_VECT - FIRST_IRQ))
#elif defined(CONFIG_ETRAX_KGDB_PORT1)
#define IGNOREMASK (1 << (SER1_INTR_VECT - FIRST_IRQ))
#elif defined(CONFIG_ETRAX_KGDB_PORT2)
#define IGNOREMASK (1 << (SER2_INTR_VECT - FIRST_IRQ))
#elif defined(CONFIG_ETRAX_KGDB_PORT3)
#define IGNOREMASK (1 << (SER3_INTR_VECT - FIRST_IRQ))
#endif
#endif
46
/* Protects the interrupt-controller mask registers and irq_allocations. */
DEFINE_SPINLOCK(irq_lock);

/* Per-IRQ routing state. */
struct cris_irq_allocation
{
	int cpu;	/* CPU currently servicing the IRQ, or CPU_FIXED. */
	cpumask_t mask;	/* Set of CPUs allowed to service the IRQ. */
};

/* Every IRQ starts out routed to CPU 0 with all CPUs allowed. */
struct cris_irq_allocation irq_allocations[NR_REAL_IRQS] =
  { [0 ... NR_REAL_IRQS - 1] = {0, CPU_MASK_ALL} };

/* Base address of each CPU's interrupt-controller register bank.
 * Only the boot CPU's entry is filled in statically. */
static unsigned long irq_regs[NR_CPUS] =
{
  regi_irq,
};
62
/* Number of 32-bit mask/vector registers needed to cover all real IRQs. */
#if NR_REAL_IRQS > 32
#define NBR_REGS 2
#else
#define NBR_REGS 1
#endif

/* Interrupt statistics: totals per CPU and per IRQ line. */
unsigned long cpu_irq_counters[NR_CPUS];
unsigned long irq_counters[NR_REAL_IRQS];
71
72
73extern void weird_irq(void);
74
75
76extern void system_call(void);
77extern void nmi_interrupt(void);
78extern void multiple_interrupt(void);
79extern void gdb_handle_exception(void);
80extern void i_mmu_refill(void);
81extern void i_mmu_invalid(void);
82extern void i_mmu_access(void);
83extern void i_mmu_execute(void);
84extern void d_mmu_refill(void);
85extern void d_mmu_invalid(void);
86extern void d_mmu_access(void);
87extern void d_mmu_write(void);
88
89
90extern void kgdb_init(void);
91extern void breakpoint(void);
92
93
94extern void breakh_BUG(void);
95
96
97
98
/*
 * Generate the low-level interrupt entry stubs, one per hardware vector:
 * 0x31..0x50 always, plus 0x51..0x70 when the machine has more than 32
 * IRQs.  The machine's timer vector gets a dedicated stub through
 * BUILD_TIMER_IRQ (vector 0x31 on ARTPEC-3, 0x4b on ETRAX FS).
 */
#ifdef CONFIG_CRIS_MACH_ARTPEC3
BUILD_TIMER_IRQ(0x31, 0)
#else
BUILD_IRQ(0x31)
#endif
BUILD_IRQ(0x32)
BUILD_IRQ(0x33)
BUILD_IRQ(0x34)
BUILD_IRQ(0x35)
BUILD_IRQ(0x36)
BUILD_IRQ(0x37)
BUILD_IRQ(0x38)
BUILD_IRQ(0x39)
BUILD_IRQ(0x3a)
BUILD_IRQ(0x3b)
BUILD_IRQ(0x3c)
BUILD_IRQ(0x3d)
BUILD_IRQ(0x3e)
BUILD_IRQ(0x3f)
BUILD_IRQ(0x40)
BUILD_IRQ(0x41)
BUILD_IRQ(0x42)
BUILD_IRQ(0x43)
BUILD_IRQ(0x44)
BUILD_IRQ(0x45)
BUILD_IRQ(0x46)
BUILD_IRQ(0x47)
BUILD_IRQ(0x48)
BUILD_IRQ(0x49)
BUILD_IRQ(0x4a)
#ifdef CONFIG_ETRAXFS
BUILD_TIMER_IRQ(0x4b, 0)
#else
BUILD_IRQ(0x4b)
#endif
BUILD_IRQ(0x4c)
BUILD_IRQ(0x4d)
BUILD_IRQ(0x4e)
BUILD_IRQ(0x4f)
BUILD_IRQ(0x50)
#if MACH_IRQS > 32
BUILD_IRQ(0x51)
BUILD_IRQ(0x52)
BUILD_IRQ(0x53)
BUILD_IRQ(0x54)
BUILD_IRQ(0x55)
BUILD_IRQ(0x56)
BUILD_IRQ(0x57)
BUILD_IRQ(0x58)
BUILD_IRQ(0x59)
BUILD_IRQ(0x5a)
BUILD_IRQ(0x5b)
BUILD_IRQ(0x5c)
BUILD_IRQ(0x5d)
BUILD_IRQ(0x5e)
BUILD_IRQ(0x5f)
BUILD_IRQ(0x60)
BUILD_IRQ(0x61)
BUILD_IRQ(0x62)
BUILD_IRQ(0x63)
BUILD_IRQ(0x64)
BUILD_IRQ(0x65)
BUILD_IRQ(0x66)
BUILD_IRQ(0x67)
BUILD_IRQ(0x68)
BUILD_IRQ(0x69)
BUILD_IRQ(0x6a)
BUILD_IRQ(0x6b)
BUILD_IRQ(0x6c)
BUILD_IRQ(0x6d)
BUILD_IRQ(0x6e)
BUILD_IRQ(0x6f)
BUILD_IRQ(0x70)
#endif
173
174
/*
 * Table of the entry stubs generated above, indexed by (vector - FIRST_IRQ);
 * init_IRQ() installs these into the exception vector table.
 */
static void (*interrupt[MACH_IRQS])(void) = {
	IRQ0x31_interrupt, IRQ0x32_interrupt, IRQ0x33_interrupt,
	IRQ0x34_interrupt, IRQ0x35_interrupt, IRQ0x36_interrupt,
	IRQ0x37_interrupt, IRQ0x38_interrupt, IRQ0x39_interrupt,
	IRQ0x3a_interrupt, IRQ0x3b_interrupt, IRQ0x3c_interrupt,
	IRQ0x3d_interrupt, IRQ0x3e_interrupt, IRQ0x3f_interrupt,
	IRQ0x40_interrupt, IRQ0x41_interrupt, IRQ0x42_interrupt,
	IRQ0x43_interrupt, IRQ0x44_interrupt, IRQ0x45_interrupt,
	IRQ0x46_interrupt, IRQ0x47_interrupt, IRQ0x48_interrupt,
	IRQ0x49_interrupt, IRQ0x4a_interrupt, IRQ0x4b_interrupt,
	IRQ0x4c_interrupt, IRQ0x4d_interrupt, IRQ0x4e_interrupt,
	IRQ0x4f_interrupt, IRQ0x50_interrupt,
#if MACH_IRQS > 32
	IRQ0x51_interrupt, IRQ0x52_interrupt, IRQ0x53_interrupt,
	IRQ0x54_interrupt, IRQ0x55_interrupt, IRQ0x56_interrupt,
	IRQ0x57_interrupt, IRQ0x58_interrupt, IRQ0x59_interrupt,
	IRQ0x5a_interrupt, IRQ0x5b_interrupt, IRQ0x5c_interrupt,
	IRQ0x5d_interrupt, IRQ0x5e_interrupt, IRQ0x5f_interrupt,
	IRQ0x60_interrupt, IRQ0x61_interrupt, IRQ0x62_interrupt,
	IRQ0x63_interrupt, IRQ0x64_interrupt, IRQ0x65_interrupt,
	IRQ0x66_interrupt, IRQ0x67_interrupt, IRQ0x68_interrupt,
	IRQ0x69_interrupt, IRQ0x6a_interrupt, IRQ0x6b_interrupt,
	IRQ0x6c_interrupt, IRQ0x6d_interrupt, IRQ0x6e_interrupt,
	IRQ0x6f_interrupt, IRQ0x70_interrupt,
#endif
};
201
202void
203block_irq(int irq, int cpu)
204{
205 int intr_mask;
206 unsigned long flags;
207
208 spin_lock_irqsave(&irq_lock, flags);
209
210 if (irq - FIRST_IRQ < 32) {
211 intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu],
212 rw_mask, 0);
213 intr_mask &= ~(1 << (irq - FIRST_IRQ));
214 REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask,
215 0, intr_mask);
216 } else {
217 intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu],
218 rw_mask, 1);
219 intr_mask &= ~(1 << (irq - FIRST_IRQ - 32));
220 REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask,
221 1, intr_mask);
222 }
223 spin_unlock_irqrestore(&irq_lock, flags);
224}
225
226void
227unblock_irq(int irq, int cpu)
228{
229 int intr_mask;
230 unsigned long flags;
231
232 spin_lock_irqsave(&irq_lock, flags);
233
234 if (irq - FIRST_IRQ < 32) {
235 intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu],
236 rw_mask, 0);
237 intr_mask |= (1 << (irq - FIRST_IRQ));
238 REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask,
239 0, intr_mask);
240 } else {
241 intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu],
242 rw_mask, 1);
243 intr_mask |= (1 << (irq - FIRST_IRQ - 32));
244 REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask,
245 1, intr_mask);
246 }
247 spin_unlock_irqrestore(&irq_lock, flags);
248}
249
250
251static int irq_cpu(int irq)
252{
253 int cpu;
254 unsigned long flags;
255
256 spin_lock_irqsave(&irq_lock, flags);
257 cpu = irq_allocations[irq - FIRST_IRQ].cpu;
258
259
260 if (cpu == CPU_FIXED)
261 {
262 spin_unlock_irqrestore(&irq_lock, flags);
263 return smp_processor_id();
264 }
265
266
267
268 if (cpumask_test_cpu(cpu, &irq_allocations[irq - FIRST_IRQ].mask))
269 goto out;
270
271
272 cpu = cpumask_first(&irq_allocations[irq - FIRST_IRQ].mask);
273 irq_allocations[irq - FIRST_IRQ].cpu = cpu;
274out:
275 spin_unlock_irqrestore(&irq_lock, flags);
276 return cpu;
277}
278
/* Mask an IRQ in the interrupt controller of every possible CPU. */
void crisv32_mask_irq(int irq)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		block_irq(irq, cpu);
}
286
/* Unmask an IRQ, but only on the CPU it is currently routed to. */
void crisv32_unmask_irq(int irq)
{
	unblock_irq(irq, irq_cpu(irq));
}
291
292
/* irq_chip .irq_enable callback: unmask on the IRQ's current CPU. */
static void enable_crisv32_irq(struct irq_data *data)
{
	crisv32_unmask_irq(data->irq);
}
297
/* irq_chip .irq_disable/.irq_shutdown callback: mask on every CPU. */
static void disable_crisv32_irq(struct irq_data *data)
{
	crisv32_mask_irq(data->irq);
}
302
303static int set_affinity_crisv32_irq(struct irq_data *data,
304 const struct cpumask *dest, bool force)
305{
306 unsigned long flags;
307
308 spin_lock_irqsave(&irq_lock, flags);
309 irq_allocations[data->irq - FIRST_IRQ].mask = *dest;
310 spin_unlock_irqrestore(&irq_lock, flags);
311 return 0;
312}
313
/*
 * irq_chip for the CRISv32 interrupt controller.  Disabling masks the
 * IRQ on every CPU; enabling unmasks it only on the CPU it is routed to.
 */
static struct irq_chip crisv32_irq_type = {
	.name = "CRISv32",
	.irq_shutdown = disable_crisv32_irq,
	.irq_enable = enable_crisv32_irq,
	.irq_disable = disable_crisv32_irq,
	.irq_set_affinity = set_affinity_crisv32_irq,
};
321
322void
323set_exception_vector(int n, irqvectptr addr)
324{
325 etrax_irv->v[n] = (irqvectptr) addr;
326}
327
328extern void do_IRQ(int irq, struct pt_regs * regs);
329
/*
 * Dispatch one interrupt.  When `block` is non-zero the IRQ is kept
 * masked on this CPU while its handler runs, and re-enabled afterwards
 * on whichever CPU it is currently routed to.
 */
void
crisv32_do_IRQ(int irq, int block, struct pt_regs* regs)
{
	if (block) {
		/* Keep the line quiet while the handler runs. */
		block_irq(irq, smp_processor_id());
		do_IRQ(irq, regs);
		unblock_irq(irq, irq_cpu(irq));
	} else {
		do_IRQ(irq, regs);
	}
}
348
349
350
351
352
353
354
355
356
357
358
359void
360crisv32_do_multiple(struct pt_regs* regs)
361{
362 int cpu;
363 int mask;
364 int masked[NBR_REGS];
365 int bit;
366 int i;
367
368 cpu = smp_processor_id();
369
370
371
372
373 irq_enter();
374
375 for (i = 0; i < NBR_REGS; i++) {
376
377 masked[i] = REG_RD_INT_VECT(intr_vect, irq_regs[cpu],
378 r_masked_vect, i);
379
380
381 mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i);
382 mask &= ~masked[i];
383
384
385#ifdef TIMER_VECT1
386 if ((i == 1) && (masked[0] & TIMER_MASK))
387 mask |= TIMER_MASK;
388#else
389 if ((i == 0) && (masked[0] & TIMER_MASK))
390 mask |= TIMER_MASK;
391#endif
392
393 REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i, mask);
394
395
396#ifdef TIMER_VECT1
397 if ((i == 1) && (masked[i] & TIMER_MASK)) {
398 masked[i] &= ~TIMER_MASK;
399 do_IRQ(TIMER0_INTR_VECT, regs);
400 }
401#else
402 if ((i == 0) && (masked[i] & TIMER_MASK)) {
403 masked[i] &= ~TIMER_MASK;
404 do_IRQ(TIMER0_INTR_VECT, regs);
405 }
406#endif
407 }
408
409#ifdef IGNORE_MASK
410
411 masked[0] &= ~IGNORE_MASK;
412#endif
413
414
415 for (i = 0; i < NBR_REGS; i++) {
416 for (bit = 0; bit < 32; bit++) {
417 if (masked[i] & (1 << bit))
418 do_IRQ(bit + FIRST_IRQ + i*32, regs);
419 }
420 }
421
422
423 for (i = 0; i < NBR_REGS; i++) {
424 mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i);
425 mask |= masked[i];
426 REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i, mask);
427 }
428
429
430 irq_exit();
431}
432
/* irq_domain .map callback: attach the CRISv32 chip and a simple flow
 * handler to every virq created in the domain. */
static int crisv32_irq_map(struct irq_domain *h, unsigned int virq,
	irq_hw_number_t hw_irq_num)
{
	irq_set_chip_and_handler(virq, &crisv32_irq_type, handle_simple_irq);

	return 0;
}
440
441static struct irq_domain_ops crisv32_irq_ops = {
442 .map = crisv32_irq_map,
443 .xlate = irq_domain_xlate_onecell,
444};
445
446
447
448
449
/*
 * Boot-time interrupt setup: mask everything, point all 256 exception
 * vectors at a catch-all handler, register the legacy irq_domain, then
 * install the real entry stubs and the fixed exception handlers.
 */
void __init
init_IRQ(void)
{
	int i;
	int j;
	reg_intr_vect_rw_mask vect_mask = {0};
	struct device_node *np;
	struct irq_domain *domain;

	/* Mask all interrupts in every mask register. */
	for (i = 0; i < NBR_REGS; i++)
		REG_WR_VECT(intr_vect, regi_irq, rw_mask, i, vect_mask);

	/* Point every exception vector at the catch-all handler first. */
	for (i = 0; i < 256; i++)
		etrax_irv->v[i] = weird_irq;

	np = of_find_compatible_node(NULL, NULL, "axis,crisv32-intc");
	domain = irq_domain_add_legacy(np, NBR_INTR_VECT - FIRST_IRQ,
				       FIRST_IRQ, FIRST_IRQ,
				       &crisv32_irq_ops, NULL);
	BUG_ON(!domain);
	irq_set_default_host(domain);
	of_node_put(np);

	/* Install the generated entry stubs for the real IRQ vectors. */
	for (i = FIRST_IRQ, j = 0; j < NBR_INTR_VECT && j < MACH_IRQS; i++, j++)
		set_exception_vector(i, interrupt[j]);

	/* Timer and IPI interrupts are per-CPU: pin them to the CPU
	 * that takes them and mark them IRQ_PER_CPU. */
	irq_allocations[TIMER0_INTR_VECT - FIRST_IRQ].cpu = CPU_FIXED;
	irq_set_status_flags(TIMER0_INTR_VECT, IRQ_PER_CPU);
	irq_allocations[IPI_INTR_VECT - FIRST_IRQ].cpu = CPU_FIXED;
	irq_set_status_flags(IPI_INTR_VECT, IRQ_PER_CPU);

	set_exception_vector(0x00, nmi_interrupt);
	set_exception_vector(0x30, multiple_interrupt);

	/* MMU fault handlers. */
	set_exception_vector(0x04, i_mmu_refill);
	set_exception_vector(0x05, i_mmu_invalid);
	set_exception_vector(0x06, i_mmu_access);
	set_exception_vector(0x07, i_mmu_execute);
	set_exception_vector(0x08, d_mmu_refill);
	set_exception_vector(0x09, d_mmu_invalid);
	set_exception_vector(0x0a, d_mmu_access);
	set_exception_vector(0x0b, d_mmu_write);

#ifdef CONFIG_BUG
	/* Break-handler used by the BUG() macro. */
	set_exception_vector(0x1e, breakh_BUG);
#endif

	/* System-call trap vector. */
	set_exception_vector(0x1d, system_call);

	/* Debugger entry points (breakpoint, single-step, hw break). */
	set_exception_vector(0x18, gdb_handle_exception);

	set_exception_vector(0x3, gdb_handle_exception);

	set_exception_vector(0xc, gdb_handle_exception);

#ifdef CONFIG_ETRAX_KGDB
	kgdb_init();
	/* Trap into the debugger immediately at boot. */
	breakpoint();
#endif
}
519
520