/*
 * Marvell Armada 370 and Armada XP SoC IRQ handling (MPIC driver)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/msi.h>
#include <asm/mach/arch.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/mach/irq.h>

#include "irqchip.h"

/* Interrupt Controller Registers Map */
#define ARMADA_370_XP_INT_SET_MASK_OFFS		(0x48)
#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS	(0x4C)

#define ARMADA_370_XP_INT_CONTROL		(0x00)
#define ARMADA_370_XP_INT_SET_ENABLE_OFFS	(0x30)
#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS	(0x34)
#define ARMADA_370_XP_INT_SOURCE_CTL(irq)	(0x100 + irq * 4)
#define ARMADA_370_XP_INT_SOURCE_CPU_MASK	0xF
#define ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid)	((BIT(0) | BIT(8)) << cpuid)

#define ARMADA_370_XP_CPU_INTACK_OFFS		(0x44)
#define ARMADA_375_PPI_CAUSE			(0x10)

#define ARMADA_370_XP_SW_TRIG_INT_OFFS		(0x4)
#define ARMADA_370_XP_IN_DRBEL_MSK_OFFS		(0xc)
#define ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS	(0x8)

#define ARMADA_370_XP_MAX_PER_CPU_IRQS		(28)

#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ	(5)

/*
 * The in-doorbell registers carry both IPIs and PCI MSIs: bits 0-7 are
 * used as IPI doorbells, bits 16-31 as MSI doorbells.
 */
#define IPI_DOORBELL_START			(0)
#define IPI_DOORBELL_END			(8)
#define IPI_DOORBELL_MASK			0xFF
#define PCI_MSI_DOORBELL_START			(16)
#define PCI_MSI_DOORBELL_NR			(16)
#define PCI_MSI_DOORBELL_END			(32)
#define PCI_MSI_DOORBELL_MASK			0xFFFF0000

static void __iomem *per_cpu_int_base;
static void __iomem *main_int_base;
static struct irq_domain *armada_370_xp_mpic_domain;
#ifdef CONFIG_PCI_MSI
static struct irq_domain *armada_370_xp_msi_domain;
static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
static DEFINE_MUTEX(msi_used_lock);
static phys_addr_t msi_doorbell_addr;
#endif

/*
 * For shared global interrupts, mask/unmask the global enable bit; for
 * the per-CPU timer interrupt, mask/unmask the calling CPU's bit.
 */
static void armada_370_xp_irq_mask(struct irq_data *d)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
		writel(hwirq, main_int_base +
				ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
	else
		writel(hwirq, per_cpu_int_base +
				ARMADA_370_XP_INT_SET_MASK_OFFS);
}

static void armada_370_xp_irq_unmask(struct irq_data *d)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
		writel(hwirq, main_int_base +
				ARMADA_370_XP_INT_SET_ENABLE_OFFS);
	else
		writel(hwirq, per_cpu_int_base +
				ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}

#ifdef CONFIG_PCI_MSI

static int armada_370_xp_alloc_msi(void)
{
	int hwirq;

	mutex_lock(&msi_used_lock);
	hwirq = find_first_zero_bit(msi_used, PCI_MSI_DOORBELL_NR);
	if (hwirq >= PCI_MSI_DOORBELL_NR)
		hwirq = -ENOSPC;
	else
		set_bit(hwirq, msi_used);
	mutex_unlock(&msi_used_lock);

	return hwirq;
}

static void armada_370_xp_free_msi(int hwirq)
{
	mutex_lock(&msi_used_lock);
	if (!test_bit(hwirq, msi_used))
		pr_err("trying to free unused MSI#%d\n", hwirq);
	else
		clear_bit(hwirq, msi_used);
	mutex_unlock(&msi_used_lock);
}

static int armada_370_xp_setup_msi_irq(struct msi_chip *chip,
				       struct pci_dev *pdev,
				       struct msi_desc *desc)
{
	struct msi_msg msg;
	int virq, hwirq;

	/* MSI-X is not supported, only plain MSI */
	if (desc->msi_attrib.is_msix)
		return -EINVAL;

	hwirq = armada_370_xp_alloc_msi();
	if (hwirq < 0)
		return hwirq;

	virq = irq_create_mapping(armada_370_xp_msi_domain, hwirq);
	if (!virq) {
		armada_370_xp_free_msi(hwirq);
		return -EINVAL;
	}

	irq_set_msi_desc(virq, desc);

	/* The MSI data selects doorbell bit (hwirq + PCI_MSI_DOORBELL_START) */
	msg.address_lo = msi_doorbell_addr;
	msg.address_hi = 0;
	msg.data = 0xf00 | (hwirq + 16);

	write_msi_msg(virq, &msg);
	return 0;
}

static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip,
					   unsigned int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	unsigned long hwirq = d->hwirq;

	irq_dispose_mapping(irq);
	armada_370_xp_free_msi(hwirq);
}

static struct irq_chip armada_370_xp_msi_irq_chip = {
	.name = "armada_370_xp_msi_irq",
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_unmask = unmask_msi_irq,
};

static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq,
				 irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &armada_370_xp_msi_irq_chip,
				 handle_simple_irq);
	set_irq_flags(virq, IRQF_VALID);

	return 0;
}

static const struct irq_domain_ops armada_370_xp_msi_irq_ops = {
	.map = armada_370_xp_msi_map,
};

static int armada_370_xp_msi_init(struct device_node *node,
				  phys_addr_t main_int_phys_base)
{
	struct msi_chip *msi_chip;
	u32 reg;
	int ret;

	/* MSIs are written to the software-triggered interrupt register */
	msi_doorbell_addr = main_int_phys_base +
		ARMADA_370_XP_SW_TRIG_INT_OFFS;

	msi_chip = kzalloc(sizeof(*msi_chip), GFP_KERNEL);
	if (!msi_chip)
		return -ENOMEM;

	msi_chip->setup_irq = armada_370_xp_setup_msi_irq;
	msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq;
	msi_chip->of_node = node;

	armada_370_xp_msi_domain =
		irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR,
				      &armada_370_xp_msi_irq_ops,
				      NULL);
	if (!armada_370_xp_msi_domain) {
		kfree(msi_chip);
		return -ENOMEM;
	}

	ret = of_pci_msi_chip_add(msi_chip);
	if (ret < 0) {
		irq_domain_remove(armada_370_xp_msi_domain);
		kfree(msi_chip);
		return ret;
	}

	/* Enable the 16 MSI doorbells */
	reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS)
		| PCI_MSI_DOORBELL_MASK;

	writel(reg, per_cpu_int_base +
	       ARMADA_370_XP_IN_DRBEL_MSK_OFFS);

	/* Unmask the doorbell summary interrupt (IRQ 1) used for MSIs */
	writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);

	return 0;
}
#else
static inline int armada_370_xp_msi_init(struct device_node *node,
					 phys_addr_t main_int_phys_base)
{
	return 0;
}
#endif
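
/*
 * Illustrative sketch (not part of this driver): once the msi_chip above
 * is registered, a PCI endpoint driver obtains one of the 16 doorbell
 * backed MSIs through the generic PCI API, roughly:
 *
 *	if (!pci_enable_msi(pdev))
 *		err = request_irq(pdev->irq, my_handler, 0, "my_dev", priv);
 *
 * "my_handler", "my_dev" and "priv" are hypothetical names; the PCI core
 * calls back into armada_370_xp_setup_msi_irq() to pick a free doorbell
 * and compose the MSI message.
 */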

#ifdef CONFIG_SMP
static DEFINE_RAW_SPINLOCK(irq_controller_lock);

static int armada_xp_set_affinity(struct irq_data *d,
				  const struct cpumask *mask_val, bool force)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned long reg, mask;
	int cpu;

	/* Select a single core from the affinity mask which is online */
	cpu = cpumask_any_and(mask_val, cpu_online_mask);
	mask = 1UL << cpu_logical_map(cpu);

	raw_spin_lock(&irq_controller_lock);
	reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
	reg = (reg & ~ARMADA_370_XP_INT_SOURCE_CPU_MASK) | mask;
	writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
	raw_spin_unlock(&irq_controller_lock);

	return 0;
}
#endif

static struct irq_chip armada_370_xp_irq_chip = {
	.name = "armada_370_xp_irq",
	.irq_mask = armada_370_xp_irq_mask,
	.irq_mask_ack = armada_370_xp_irq_mask,
	.irq_unmask = armada_370_xp_irq_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity = armada_xp_set_affinity,
#endif
};

static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
				      unsigned int virq, irq_hw_number_t hw)
{
	armada_370_xp_irq_mask(irq_get_irq_data(virq));
	if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
		writel(hw, per_cpu_int_base +
			ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
	else
		writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
	irq_set_status_flags(virq, IRQ_LEVEL);

	if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) {
		irq_set_percpu_devid(virq);
		irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
					 handle_percpu_devid_irq);
	} else {
		irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
					 handle_level_irq);
	}
	set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);

	return 0;
}

#ifdef CONFIG_SMP
/*
 * IPIs are implemented with the software-triggered interrupt and the
 * in-doorbell registers: the sender writes the target CPU mask and the
 * IPI number to the software trigger register, and each receiving CPU
 * picks the IPI up from its in-doorbell cause register (see
 * armada_370_xp_handle_irq()).
 */
static void armada_mpic_send_doorbell(const struct cpumask *mask,
				      unsigned int irq)
{
	int cpu;
	unsigned long map = 0;

	/* Convert our logical CPU mask into a physical one */
	for_each_cpu(cpu, mask)
		map |= 1 << cpu_logical_map(cpu);

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb();

	/* Trigger the IPI on the selected CPUs */
	writel((map << 8) | irq, main_int_base +
		ARMADA_370_XP_SW_TRIG_INT_OFFS);
}

static void armada_xp_mpic_smp_cpu_init(void)
{
	u32 control;
	int nr_irqs, i;

	control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
	nr_irqs = (control >> 2) & 0x3ff;

	/* Mask all interrupts on this CPU */
	for (i = 0; i < nr_irqs; i++)
		writel(i, per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);

	/* Clear pending IPIs */
	writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);

	/* Enable the first 8 IPI doorbells */
	writel(IPI_DOORBELL_MASK, per_cpu_int_base +
		ARMADA_370_XP_IN_DRBEL_MSK_OFFS);

	/* Unmask the IPI summary interrupt (IRQ 0) */
	writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
}

static int armada_xp_mpic_secondary_init(struct notifier_block *nfb,
					 unsigned long action, void *hcpu)
{
	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		armada_xp_mpic_smp_cpu_init();
	return NOTIFY_OK;
}

static struct notifier_block armada_370_xp_mpic_cpu_notifier = {
	.notifier_call = armada_xp_mpic_secondary_init,
	.priority = 100,
};

#endif

static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
	.map = armada_370_xp_mpic_irq_map,
	.xlate = irq_domain_xlate_onecell,
};

#ifdef CONFIG_PCI_MSI
static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
{
	u32 msimask, msinr;

	msimask = readl_relaxed(per_cpu_int_base +
				ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
		& PCI_MSI_DOORBELL_MASK;

	writel(~msimask, per_cpu_int_base +
	       ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);

	for (msinr = PCI_MSI_DOORBELL_START;
	     msinr < PCI_MSI_DOORBELL_END; msinr++) {
		int irq;

		if (!(msimask & BIT(msinr)))
			continue;

		if (is_chained) {
			irq = irq_find_mapping(armada_370_xp_msi_domain,
					       msinr - 16);
			generic_handle_irq(irq);
		} else {
			irq = msinr - 16;
			handle_domain_irq(armada_370_xp_msi_domain,
					  irq, regs);
		}
	}
}
#else
static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {}
#endif
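
/*
 * The MPIC is either the primary interrupt controller of the SoC (Armada
 * 370/XP, where armada_370_xp_handle_irq() is installed as the top-level
 * handler) or cascaded behind another interrupt controller through a
 * parent interrupt (Armada 375/38x style); in the cascaded case the
 * per-CPU PPI cause register tells us which MPIC inputs fired.
 */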

static void armada_370_xp_mpic_handle_cascade_irq(unsigned int irq,
						  struct irq_desc *desc)
{
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned long irqmap, irqn, irqsrc, cpuid;
	unsigned int cascade_irq;

	chained_irq_enter(chip, desc);

	irqmap = readl_relaxed(per_cpu_int_base + ARMADA_375_PPI_CAUSE);
	cpuid = cpu_logical_map(smp_processor_id());

	for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) {
		irqsrc = readl_relaxed(main_int_base +
				       ARMADA_370_XP_INT_SOURCE_CTL(irqn));

		/*
		 * Check that the interrupt is not masked on the current CPU:
		 * test the IRQ (0-1) and FIQ (8-9) mask bits.
		 */
		if (!(irqsrc & ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid)))
			continue;

		if (irqn == 1) {
			armada_370_xp_handle_msi_irq(NULL, true);
			continue;
		}

		cascade_irq = irq_find_mapping(armada_370_xp_mpic_domain, irqn);
		generic_handle_irq(cascade_irq);
	}

	chained_irq_exit(chip, desc);
}

static void __exception_irq_entry
armada_370_xp_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;

	do {
		irqstat = readl_relaxed(per_cpu_int_base +
					ARMADA_370_XP_CPU_INTACK_OFFS);
		irqnr = irqstat & 0x3FF;

		if (irqnr > 1022)
			break;

		if (irqnr > 1) {
			handle_domain_irq(armada_370_xp_mpic_domain,
					  irqnr, regs);
			continue;
		}

		/* MSI handling */
		if (irqnr == 1)
			armada_370_xp_handle_msi_irq(regs, false);

#ifdef CONFIG_SMP
		/* IPI handling */
		if (irqnr == 0) {
			u32 ipimask, ipinr;

			ipimask = readl_relaxed(per_cpu_int_base +
						ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
				& IPI_DOORBELL_MASK;

			writel(~ipimask, per_cpu_int_base +
			       ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);

			/* Handle all pending doorbells */
			for (ipinr = IPI_DOORBELL_START;
			     ipinr < IPI_DOORBELL_END; ipinr++) {
				if (ipimask & (0x1 << ipinr))
					handle_IPI(ipinr, regs);
			}
			continue;
		}
#endif

	} while (1);
}
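
/*
 * Minimal sketch of the device tree node this init function expects (two
 * register banks: the main controller and the per-CPU registers, plus an
 * optional parent interrupt when the MPIC is cascaded).  The offsets and
 * sizes below are illustrative only; see the marvell,mpic binding
 * documentation for the authoritative description.
 *
 *	mpic: interrupt-controller@20a00 {
 *		compatible = "marvell,mpic";
 *		reg = <0x20a00 0x2d0>, <0x21070 0x58>;
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *		msi-controller;
 *	};
 */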

static int __init armada_370_xp_mpic_of_init(struct device_node *node,
					     struct device_node *parent)
{
	struct resource main_int_res, per_cpu_int_res;
	int parent_irq, nr_irqs, i;
	u32 control;

	BUG_ON(of_address_to_resource(node, 0, &main_int_res));
	BUG_ON(of_address_to_resource(node, 1, &per_cpu_int_res));

	BUG_ON(!request_mem_region(main_int_res.start,
				   resource_size(&main_int_res),
				   node->full_name));
	BUG_ON(!request_mem_region(per_cpu_int_res.start,
				   resource_size(&per_cpu_int_res),
				   node->full_name));

	main_int_base = ioremap(main_int_res.start,
				resource_size(&main_int_res));
	BUG_ON(!main_int_base);

	per_cpu_int_base = ioremap(per_cpu_int_res.start,
				   resource_size(&per_cpu_int_res));
	BUG_ON(!per_cpu_int_base);

	control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
	nr_irqs = (control >> 2) & 0x3ff;

	/* Disable all interrupts before registering the domain */
	for (i = 0; i < nr_irqs; i++)
		writel(i, main_int_base + ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);

	armada_370_xp_mpic_domain =
		irq_domain_add_linear(node, nr_irqs,
				      &armada_370_xp_mpic_irq_ops, NULL);

	BUG_ON(!armada_370_xp_mpic_domain);

#ifdef CONFIG_SMP
	armada_xp_mpic_smp_cpu_init();
#endif

	armada_370_xp_msi_init(node, main_int_res.start);

	parent_irq = irq_of_parse_and_map(node, 0);
	if (parent_irq <= 0) {
		/* No parent interrupt: the MPIC is the primary controller */
		irq_set_default_host(armada_370_xp_mpic_domain);
		set_handle_irq(armada_370_xp_handle_irq);
#ifdef CONFIG_SMP
		set_smp_cross_call(armada_mpic_send_doorbell);
		register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier);
#endif
	} else {
		/* Otherwise the MPIC is cascaded behind the parent interrupt */
		irq_set_chained_handler(parent_irq,
					armada_370_xp_mpic_handle_cascade_irq);
	}

	return 0;
}

IRQCHIP_DECLARE(armada_370_xp_mpic, "marvell,mpic", armada_370_xp_mpic_of_init);