// SPDX-License-Identifier: GPL-2.0+
/*
 * APM X-Gene PCIe MSI Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/of_pci.h>

#define MSI_IR0			0x000000
#define MSI_INT0		0x800000
#define IDX_PER_GROUP		8
#define IRQS_PER_IDX		16
#define NR_HW_IRQS		16
#define NR_MSI_VEC		(IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS)

struct xgene_msi_group {
	struct xgene_msi	*msi;
	int			gic_irq;
	u32			msi_grp;
};

struct xgene_msi {
	struct device_node	*node;
	struct irq_domain	*inner_domain;
	struct irq_domain	*msi_domain;
	u64			msi_addr;
	void __iomem		*msi_regs;
	unsigned long		*bitmap;
	struct mutex		bitmap_lock;
	struct xgene_msi_group	*msi_groups;
	int			num_cpus;
};

static struct xgene_msi xgene_msi_ctrl;

static struct irq_chip xgene_msi_top_irq_chip = {
	.name		= "X-Gene1 MSI",
	.irq_enable	= pci_msi_unmask_irq,
	.irq_disable	= pci_msi_mask_irq,
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
};

static struct msi_domain_info xgene_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX),
	.chip	= &xgene_msi_top_irq_chip,
};
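/*
 * X-Gene v1 terminates 2048 MSI vectors (NR_HW_IRQS * IDX_PER_GROUP *
 * IRQS_PER_IDX) in 16 groups of termination registers MSInIRx, where n
 * is the group number (0..F) and x is the index register within the
 * group (0..7).  Register MSInIRx lives at MSI_IR0 + (n << 19) +
 * (x << 16) and latches up to 16 received vectors; reading it returns
 * and clears the pending vector bits.
 */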
static u32 xgene_msi_ir_read(struct xgene_msi *msi,
			     u32 msi_grp, u32 msir_idx)
{
	return readl_relaxed(msi->msi_regs + MSI_IR0 +
			     (msi_grp << 19) + (msir_idx << 16));
}
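/*
 * MSIINTn (n is 0..F) indicates whether MSI vectors have been received
 * from MSI group n; bit x is set while index register MSInIRx has
 * unserviced vectors.
 */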
static u32 xgene_msi_int_read(struct xgene_msi *msi, u32 msi_grp)
{
	return readl_relaxed(msi->msi_regs + MSI_INT0 + (msi_grp << 16));
}
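/*
 * A hwirq (0..2047) maps onto the termination registers as follows:
 * - hwirq % NR_HW_IRQS selects the group n (and hence the GIC IRQ),
 * - (hwirq / NR_HW_IRQS) % IRQS_PER_IDX is the MSI data, i.e. the bit
 *   that will be set in MSInIRx,
 * - hwirq / (NR_HW_IRQS * IRQS_PER_IDX) selects the index register x.
 * The termination address programmed into the device is then the
 * address of MSInIRx: msi_addr + (((8 * n) + x) << 16).
 */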
static u32 hwirq_to_reg_set(unsigned long hwirq)
{
	return (hwirq / (NR_HW_IRQS * IRQS_PER_IDX));
}

static u32 hwirq_to_group(unsigned long hwirq)
{
	return (hwirq % NR_HW_IRQS);
}

static u32 hwirq_to_msi_data(unsigned long hwirq)
{
	return ((hwirq / NR_HW_IRQS) % IRQS_PER_IDX);
}

static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct xgene_msi *msi = irq_data_get_irq_chip_data(data);
	u32 reg_set = hwirq_to_reg_set(data->hwirq);
	u32 group = hwirq_to_group(data->hwirq);
	u64 target_addr = msi->msi_addr + (((8 * group) + reg_set) << 16);

	msg->address_hi = upper_32_bits(target_addr);
	msg->address_lo = lower_32_bits(target_addr);
	msg->data = hwirq_to_msi_data(data->hwirq);
}
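/*
 * Only 16 GIC IRQs serve the 2048 MSI vectors, so the groups are spread
 * statically across the CPUs (NR_HW_IRQS / num_cpus groups per core) and
 * every MSI is allocated num_cpus consecutive hwirqs, one per CPU.
 * hwirq % num_cpus identifies the CPU currently servicing a vector, and
 * subtracting it yields the canonical hwirq that the allocation bitmap
 * and the IRQ domain mapping are keyed on.
 */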
static int hwirq_to_cpu(unsigned long hwirq)
{
	return (hwirq % xgene_msi_ctrl.num_cpus);
}

static unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq)
{
	return (hwirq - hwirq_to_cpu(hwirq));
}

static int xgene_msi_set_affinity(struct irq_data *irqdata,
				  const struct cpumask *mask, bool force)
{
	int target_cpu = cpumask_first(mask);
	int curr_cpu;

	curr_cpu = hwirq_to_cpu(irqdata->hwirq);
	if (curr_cpu == target_cpu)
		return IRQ_SET_MASK_OK_DONE;
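	/* Update the vector to target the new CPU */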
	irqdata->hwirq = hwirq_to_canonical_hwirq(irqdata->hwirq) + target_cpu;

	return IRQ_SET_MASK_OK;
}

static struct irq_chip xgene_msi_bottom_irq_chip = {
	.name			= "MSI",
	.irq_set_affinity	= xgene_msi_set_affinity,
	.irq_compose_msi_msg	= xgene_compose_msi_msg,
};

static int xgene_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs, void *args)
{
	struct xgene_msi *msi = domain->host_data;
	int msi_irq;

	mutex_lock(&msi->bitmap_lock);
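	/*
	 * Allocate num_cpus contiguous bits: each MSI gets one hwirq per
	 * CPU, and xgene_msi_set_affinity() steers the interrupt simply
	 * by switching to the target CPU's copy of the vector.
	 */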
	msi_irq = bitmap_find_next_zero_area(msi->bitmap, NR_MSI_VEC, 0,
					     msi->num_cpus, 0);
	if (msi_irq < NR_MSI_VEC)
		bitmap_set(msi->bitmap, msi_irq, msi->num_cpus);
	else
		msi_irq = -ENOSPC;

	mutex_unlock(&msi->bitmap_lock);

	if (msi_irq < 0)
		return msi_irq;

	irq_domain_set_info(domain, virq, msi_irq,
			    &xgene_msi_bottom_irq_chip, domain->host_data,
			    handle_simple_irq, NULL, NULL);

	return 0;
}
static void xgene_irq_domain_free(struct irq_domain *domain,
				  unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct xgene_msi *msi = irq_data_get_irq_chip_data(d);
	u32 hwirq;

	mutex_lock(&msi->bitmap_lock);

	hwirq = hwirq_to_canonical_hwirq(d->hwirq);
	bitmap_clear(msi->bitmap, hwirq, msi->num_cpus);

	mutex_unlock(&msi->bitmap_lock);

	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc	= xgene_irq_domain_alloc,
	.free	= xgene_irq_domain_free,
};

static int xgene_allocate_domains(struct xgene_msi *msi)
{
	msi->inner_domain = irq_domain_add_linear(NULL, NR_MSI_VEC,
						  &msi_domain_ops, msi);
	if (!msi->inner_domain)
		return -ENOMEM;

	msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(msi->node),
						    &xgene_msi_domain_info,
						    msi->inner_domain);
	if (!msi->msi_domain) {
		irq_domain_remove(msi->inner_domain);
		return -ENOMEM;
	}

	return 0;
}

static void xgene_free_domains(struct xgene_msi *msi)
{
	if (msi->msi_domain)
		irq_domain_remove(msi->msi_domain);
	if (msi->inner_domain)
		irq_domain_remove(msi->inner_domain);
}

static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi)
{
	int size = BITS_TO_LONGS(NR_MSI_VEC) * sizeof(long);

	xgene_msi->bitmap = kzalloc(size, GFP_KERNEL);
	if (!xgene_msi->bitmap)
		return -ENOMEM;

	mutex_init(&xgene_msi->bitmap_lock);

	xgene_msi->msi_groups = kcalloc(NR_HW_IRQS,
					sizeof(struct xgene_msi_group),
					GFP_KERNEL);
	if (!xgene_msi->msi_groups)
		return -ENOMEM;

	return 0;
}

static void xgene_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct xgene_msi_group *msi_groups;
	struct xgene_msi *xgene_msi;
	unsigned int virq;
	int msir_index, msir_val, hw_irq;
	u32 intr_index, grp_select, msi_grp;

	chained_irq_enter(chip, desc);

	msi_groups = irq_desc_get_handler_data(desc);
	xgene_msi = msi_groups->msi;
	msi_grp = msi_groups->msi_grp;
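	/*
	 * MSIINTn (n is 0..F) indicates whether any MSI vector in group n
	 * is pending; each set bit selects an index register MSInIRx that
	 * has vectors to service.
	 */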
	grp_select = xgene_msi_int_read(xgene_msi, msi_grp);
	while (grp_select) {
		msir_index = ffs(grp_select) - 1;
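		/*
		 * Read MSInIRx to get the pending vectors; this mirrors
		 * the termination address and data assignment described
		 * in xgene_compose_msi_msg().
		 */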
		msir_val = xgene_msi_ir_read(xgene_msi, msi_grp, msir_index);
		while (msir_val) {
			intr_index = ffs(msir_val) - 1;
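			/*
			 * Compute the hwirq: msi_grp is the group (and
			 * GIC IRQ), msir_index the index register, and
			 * intr_index the bit within it.
			 */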
			hw_irq = (((msir_index * IRQS_PER_IDX) + intr_index) *
				  NR_HW_IRQS) + msi_grp;
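			/*
			 * Several hwirqs (one per CPU) map onto a single
			 * MSI; look up the canonical hwirq that the IRQ
			 * domain mapping was created with.
			 */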
			hw_irq = hwirq_to_canonical_hwirq(hw_irq);
			virq = irq_find_mapping(xgene_msi->inner_domain, hw_irq);
			WARN_ON(!virq);
			if (virq != 0)
				generic_handle_irq(virq);
			msir_val &= ~(1 << intr_index);
		}
		grp_select &= ~(1 << msir_index);

		if (!grp_select) {
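			/*
			 * All vectors reported by the last MSIINTn read
			 * have been handled; re-read it so that a vector
			 * arriving in the meantime is not lost.
			 */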
			grp_select = xgene_msi_int_read(xgene_msi, msi_grp);
		}
	}

	chained_irq_exit(chip, desc);
}

static enum cpuhp_state pci_xgene_online;

static int xgene_msi_remove(struct platform_device *pdev)
{
	struct xgene_msi *msi = platform_get_drvdata(pdev);

	if (pci_xgene_online)
		cpuhp_remove_state(pci_xgene_online);
	cpuhp_remove_state(CPUHP_PCI_XGENE_DEAD);

	kfree(msi->msi_groups);

	kfree(msi->bitmap);
	msi->bitmap = NULL;

	xgene_free_domains(msi);

	return 0;
}

static int xgene_msi_hwirq_alloc(unsigned int cpu)
{
	struct xgene_msi *msi = &xgene_msi_ctrl;
	struct xgene_msi_group *msi_group;
	cpumask_var_t mask;
	int i;
	int err;

	for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) {
		msi_group = &msi->msi_groups[i];
		if (!msi_group->gic_irq)
			continue;

		irq_set_chained_handler(msi_group->gic_irq, xgene_msi_isr);
		err = irq_set_handler_data(msi_group->gic_irq, msi_group);
		if (err) {
			pr_err("failed to register GIC IRQ handler\n");
			return -EINVAL;
		}
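		/*
		 * Statically bind this group's GIC IRQ to the CPU that
		 * owns it (e.g. with 8 cores and 16 GIC IRQs, each core
		 * services two MSI groups).
		 */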
		if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
			cpumask_clear(mask);
			cpumask_set_cpu(cpu, mask);
			err = irq_set_affinity(msi_group->gic_irq, mask);
			if (err)
				pr_err("failed to set affinity for GIC IRQ\n");
			free_cpumask_var(mask);
		} else {
			pr_err("failed to alloc CPU mask for affinity\n");
			err = -EINVAL;
		}

		if (err) {
			irq_set_chained_handler_and_data(msi_group->gic_irq,
							 NULL, NULL);
			return err;
		}
	}

	return 0;
}

static int xgene_msi_hwirq_free(unsigned int cpu)
{
	struct xgene_msi *msi = &xgene_msi_ctrl;
	struct xgene_msi_group *msi_group;
	int i;

	for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) {
		msi_group = &msi->msi_groups[i];
		if (!msi_group->gic_irq)
			continue;

		irq_set_chained_handler_and_data(msi_group->gic_irq, NULL,
						 NULL);
	}
	return 0;
}

static const struct of_device_id xgene_msi_match_table[] = {
	{.compatible = "apm,xgene1-msi"},
	{},
};

static int xgene_msi_probe(struct platform_device *pdev)
{
	struct resource *res;
	int rc, irq_index;
	struct xgene_msi *xgene_msi;
	int virt_msir;
	u32 msi_val, msi_idx;

	xgene_msi = &xgene_msi_ctrl;

	platform_set_drvdata(pdev, xgene_msi);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xgene_msi->msi_regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(xgene_msi->msi_regs)) {
		dev_err(&pdev->dev, "no reg space\n");
		rc = PTR_ERR(xgene_msi->msi_regs);
		goto error;
	}
	xgene_msi->msi_addr = res->start;
	xgene_msi->node = pdev->dev.of_node;
	xgene_msi->num_cpus = num_possible_cpus();

	rc = xgene_msi_init_allocator(xgene_msi);
	if (rc) {
		dev_err(&pdev->dev, "Error allocating MSI bitmap\n");
		goto error;
	}

	rc = xgene_allocate_domains(xgene_msi);
	if (rc) {
		dev_err(&pdev->dev, "Failed to allocate MSI domain\n");
		goto error;
	}

	for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
		virt_msir = platform_get_irq(pdev, irq_index);
		if (virt_msir < 0) {
			dev_err(&pdev->dev, "Cannot translate IRQ index %d\n",
				irq_index);
			rc = virt_msir;
			goto error;
		}
		xgene_msi->msi_groups[irq_index].gic_irq = virt_msir;
		xgene_msi->msi_groups[irq_index].msi_grp = irq_index;
		xgene_msi->msi_groups[irq_index].msi = xgene_msi;
	}
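	/*
	 * MSInIRx reads are destructive: read every index register once
	 * so that any interrupt latched before the handlers were
	 * registered is cleared, then check MSIINTn to confirm nothing
	 * is left pending.
	 */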
	for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
		for (msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++)
			msi_val = xgene_msi_ir_read(xgene_msi, irq_index,
						    msi_idx);

		msi_val = xgene_msi_int_read(xgene_msi, irq_index);
		if (msi_val) {
			dev_err(&pdev->dev, "Failed to clear spurious IRQ\n");
			rc = -EINVAL;
			goto error;
		}
	}

	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online",
			       xgene_msi_hwirq_alloc, NULL);
	if (rc < 0)
		goto err_cpuhp;
	pci_xgene_online = rc;

	rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL,
			       xgene_msi_hwirq_free);
	if (rc)
		goto err_cpuhp;

	dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n");

	return 0;

err_cpuhp:
	dev_err(&pdev->dev, "failed to add CPU MSI notifier\n");
error:
	xgene_msi_remove(pdev);
	return rc;
}

static struct platform_driver xgene_msi_driver = {
	.driver = {
		.name = "xgene-msi",
		.of_match_table = xgene_msi_match_table,
	},
	.probe = xgene_msi_probe,
	.remove = xgene_msi_remove,
};

static int __init xgene_pcie_msi_init(void)
{
	return platform_driver_register(&xgene_msi_driver);
}
subsys_initcall(xgene_pcie_msi_init);