// SPDX-License-Identifier: GPL-2.0+
/*
 * APM X-Gene PCIe MSI Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation.
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/of_pci.h>

#define MSI_IR0 0x000000
#define MSI_INT0 0x800000
#define IDX_PER_GROUP 8
#define IRQS_PER_IDX 16
#define NR_HW_IRQS 16
#define NR_MSI_VEC (IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS)

struct xgene_msi_group {
	struct xgene_msi *msi;
	int gic_irq;
	u32 msi_grp;
};

struct xgene_msi {
	struct device_node *node;
	struct irq_domain *inner_domain;
	struct irq_domain *msi_domain;
	u64 msi_addr;
	void __iomem *msi_regs;
	unsigned long *bitmap;
	struct mutex bitmap_lock;
	struct xgene_msi_group *msi_groups;
	int num_cpus;
};

static struct xgene_msi xgene_msi_ctrl;

static struct irq_chip xgene_msi_top_irq_chip = {
	.name = "X-Gene1 MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

static struct msi_domain_info xgene_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX),
	.chip = &xgene_msi_top_irq_chip,
};
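
/*
 * Layout of the MSI termination block, as implied by the register accessors
 * below: there are NR_HW_IRQS (16) groups, each with IDX_PER_GROUP (8)
 * MSInIRx index registers, and each index register terminates IRQS_PER_IDX
 * (16) MSI vectors, giving NR_MSI_VEC (2048) vectors in total.  MSInIRx for
 * group n, index x sits at MSI_IR0 + (n << 19) + (x << 16); the per-group
 * MSIINTn status register sits at MSI_INT0 + (n << 16).  Reading MSInIRx
 * returns (and clears) the pending bits of its 16 vectors, while MSIINTn
 * reports which of the group's 8 index registers have something pending.
 */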
static u32 xgene_msi_ir_read(struct xgene_msi *msi,
			     u32 msi_grp, u32 msir_idx)
{
	return readl_relaxed(msi->msi_regs + MSI_IR0 +
			     (msi_grp << 19) + (msir_idx << 16));
}
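
/* Read the MSIINTn status register for the given MSI group */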
static u32 xgene_msi_int_read(struct xgene_msi *msi, u32 msi_grp)
{
	return readl_relaxed(msi->msi_regs + MSI_INT0 + (msi_grp << 16));
}
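
/*
 * A hardware IRQ number (hwirq) in the range 0..NR_MSI_VEC-1 is decomposed
 * by the helpers below as follows:
 *   - hwirq % NR_HW_IRQS selects the MSI group (and thus the GIC IRQ);
 *   - (hwirq / NR_HW_IRQS) % IRQS_PER_IDX is the MSI data value written by
 *     the endpoint;
 *   - hwirq / (NR_HW_IRQS * IRQS_PER_IDX) selects the index register within
 *     the group.
 */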
static u32 hwirq_to_reg_set(unsigned long hwirq)
{
	return (hwirq / (NR_HW_IRQS * IRQS_PER_IDX));
}

static u32 hwirq_to_group(unsigned long hwirq)
{
	return (hwirq % NR_HW_IRQS);
}

static u32 hwirq_to_msi_data(unsigned long hwirq)
{
	return ((hwirq / NR_HW_IRQS) % IRQS_PER_IDX);
}

static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct xgene_msi *msi = irq_data_get_irq_chip_data(data);
	u32 reg_set = hwirq_to_reg_set(data->hwirq);
	u32 group = hwirq_to_group(data->hwirq);
	u64 target_addr = msi->msi_addr + (((8 * group) + reg_set) << 16);

	msg->address_hi = upper_32_bits(target_addr);
	msg->address_lo = lower_32_bits(target_addr);
	msg->data = hwirq_to_msi_data(data->hwirq);
}
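
/*
 * Only NR_HW_IRQS (16) GIC IRQs terminate the 2048 MSI vectors, and each GIC
 * IRQ is statically pinned to one CPU (see xgene_msi_hwirq_alloc()).  To
 * honour irq_set_affinity() per MSI, every MSI is allocated num_cpus
 * consecutive hwirqs, one per CPU; steering an MSI to another CPU just moves
 * it to the hwirq whose group is handled by that CPU.  The number of usable
 * MSI vectors is therefore reduced to NR_MSI_VEC / num_cpus.
 */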
static int hwirq_to_cpu(unsigned long hwirq)
{
	return (hwirq % xgene_msi_ctrl.num_cpus);
}

static unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq)
{
	return (hwirq - hwirq_to_cpu(hwirq));
}

static int xgene_msi_set_affinity(struct irq_data *irqdata,
				  const struct cpumask *mask, bool force)
{
	int target_cpu = cpumask_first(mask);
	int curr_cpu;

	curr_cpu = hwirq_to_cpu(irqdata->hwirq);
	if (curr_cpu == target_cpu)
		return IRQ_SET_MASK_OK_DONE;
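
	/* Update the hwirq so the MSI targets the new CPU's group */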
	irqdata->hwirq = hwirq_to_canonical_hwirq(irqdata->hwirq) + target_cpu;

	return IRQ_SET_MASK_OK;
}

static struct irq_chip xgene_msi_bottom_irq_chip = {
	.name = "MSI",
	.irq_set_affinity = xgene_msi_set_affinity,
	.irq_compose_msi_msg = xgene_compose_msi_msg,
};

static int xgene_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs, void *args)
{
	struct xgene_msi *msi = domain->host_data;
	int msi_irq;

	mutex_lock(&msi->bitmap_lock);

	msi_irq = bitmap_find_next_zero_area(msi->bitmap, NR_MSI_VEC, 0,
					     msi->num_cpus, 0);
	if (msi_irq < NR_MSI_VEC)
		bitmap_set(msi->bitmap, msi_irq, msi->num_cpus);
	else
		msi_irq = -ENOSPC;

	mutex_unlock(&msi->bitmap_lock);

	if (msi_irq < 0)
		return msi_irq;

	irq_domain_set_info(domain, virq, msi_irq,
			    &xgene_msi_bottom_irq_chip, domain->host_data,
			    handle_simple_irq, NULL, NULL);

	return 0;
}

static void xgene_irq_domain_free(struct irq_domain *domain,
				  unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct xgene_msi *msi = irq_data_get_irq_chip_data(d);
	u32 hwirq;

	mutex_lock(&msi->bitmap_lock);

	hwirq = hwirq_to_canonical_hwirq(d->hwirq);
	bitmap_clear(msi->bitmap, hwirq, msi->num_cpus);

	mutex_unlock(&msi->bitmap_lock);

	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc = xgene_irq_domain_alloc,
	.free = xgene_irq_domain_free,
};

static int xgene_allocate_domains(struct xgene_msi *msi)
{
	msi->inner_domain = irq_domain_add_linear(NULL, NR_MSI_VEC,
						  &msi_domain_ops, msi);
	if (!msi->inner_domain)
		return -ENOMEM;

	msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(msi->node),
						    &xgene_msi_domain_info,
						    msi->inner_domain);

	if (!msi->msi_domain) {
		irq_domain_remove(msi->inner_domain);
		return -ENOMEM;
	}

	return 0;
}

static void xgene_free_domains(struct xgene_msi *msi)
{
	if (msi->msi_domain)
		irq_domain_remove(msi->msi_domain);
	if (msi->inner_domain)
		irq_domain_remove(msi->inner_domain);
}

static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi)
{
	int size = BITS_TO_LONGS(NR_MSI_VEC) * sizeof(long);

	xgene_msi->bitmap = kzalloc(size, GFP_KERNEL);
	if (!xgene_msi->bitmap)
		return -ENOMEM;

	mutex_init(&xgene_msi->bitmap_lock);

	xgene_msi->msi_groups = kcalloc(NR_HW_IRQS,
					sizeof(struct xgene_msi_group),
					GFP_KERNEL);
	if (!xgene_msi->msi_groups)
		return -ENOMEM;

	return 0;
}

static void xgene_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct xgene_msi_group *msi_groups;
	struct xgene_msi *xgene_msi;
	unsigned int virq;
	int msir_index, msir_val, hw_irq;
	u32 intr_index, grp_select, msi_grp;

	chained_irq_enter(chip, desc);

	msi_groups = irq_desc_get_handler_data(desc);
	xgene_msi = msi_groups->msi;
	msi_grp = msi_groups->msi_grp;
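
	/*
	 * MSIINTn reports, per index register, whether this group has any
	 * pending MSI vectors.  Scan it, then drill down into each flagged
	 * MSInIRx register.
	 */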
	grp_select = xgene_msi_int_read(xgene_msi, msi_grp);
	while (grp_select) {
		msir_index = ffs(grp_select) - 1;
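
		/*
		 * Read the MSInIRx register for this index to find which of
		 * its 16 vectors are pending (the read also clears them).
		 */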
		msir_val = xgene_msi_ir_read(xgene_msi, msi_grp, msir_index);
		while (msir_val) {
			intr_index = ffs(msir_val) - 1;
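
			/*
			 * Reconstruct the hwirq from (group, index, vector);
			 * this is the inverse of the mapping used in
			 * xgene_compose_msi_msg().
			 */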
			hw_irq = (((msir_index * IRQS_PER_IDX) + intr_index) *
				  NR_HW_IRQS) + msi_grp;
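
			/*
			 * Each MSI owns num_cpus consecutive hwirqs (one per
			 * CPU), so map back to the canonical hwirq that the
			 * IRQ domain mapping was created for.
			 */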
			hw_irq = hwirq_to_canonical_hwirq(hw_irq);
			virq = irq_find_mapping(xgene_msi->inner_domain, hw_irq);
			WARN_ON(!virq);
			if (virq != 0)
				generic_handle_irq(virq);
			msir_val &= ~(1 << intr_index);
		}
		grp_select &= ~(1 << msir_index);

		if (!grp_select) {
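
			/*
			 * Everything seen so far has been handled; re-read
			 * MSIINTn in case more MSIs were raised while this
			 * group was being serviced.
			 */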
			grp_select = xgene_msi_int_read(xgene_msi, msi_grp);
		}
	}

	chained_irq_exit(chip, desc);
}

static enum cpuhp_state pci_xgene_online;

static int xgene_msi_remove(struct platform_device *pdev)
{
	struct xgene_msi *msi = platform_get_drvdata(pdev);

	if (pci_xgene_online)
		cpuhp_remove_state(pci_xgene_online);
	cpuhp_remove_state(CPUHP_PCI_XGENE_DEAD);

	kfree(msi->msi_groups);

	kfree(msi->bitmap);
	msi->bitmap = NULL;

	xgene_free_domains(msi);

	return 0;
}

static int xgene_msi_hwirq_alloc(unsigned int cpu)
{
	struct xgene_msi *msi = &xgene_msi_ctrl;
	struct xgene_msi_group *msi_group;
	cpumask_var_t mask;
	int i;
	int err;

	for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) {
		msi_group = &msi->msi_groups[i];
		if (!msi_group->gic_irq)
			continue;

		irq_set_chained_handler(msi_group->gic_irq,
					xgene_msi_isr);
		err = irq_set_handler_data(msi_group->gic_irq, msi_group);
		if (err) {
			pr_err("failed to register GIC IRQ handler\n");
			return -EINVAL;
		}
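
		/*
		 * Pin this group's GIC IRQ to the CPU that owns it, so the
		 * static hwirq-to-CPU mapping assumed by
		 * xgene_msi_set_affinity() holds.
		 */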
		if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
			cpumask_clear(mask);
			cpumask_set_cpu(cpu, mask);
			err = irq_set_affinity(msi_group->gic_irq, mask);
			if (err)
				pr_err("failed to set affinity for GIC IRQ");
			free_cpumask_var(mask);
		} else {
			pr_err("failed to alloc CPU mask for affinity\n");
			err = -EINVAL;
		}

		if (err) {
			irq_set_chained_handler_and_data(msi_group->gic_irq,
							 NULL, NULL);
			return err;
		}
	}

	return 0;
}

static int xgene_msi_hwirq_free(unsigned int cpu)
{
	struct xgene_msi *msi = &xgene_msi_ctrl;
	struct xgene_msi_group *msi_group;
	int i;

	for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) {
		msi_group = &msi->msi_groups[i];
		if (!msi_group->gic_irq)
			continue;

		irq_set_chained_handler_and_data(msi_group->gic_irq, NULL,
						 NULL);
	}
	return 0;
}
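
/*
 * For illustration only: a minimal sketch of the kind of devicetree node this
 * driver binds against.  The node name, address and size below are
 * assumptions, not taken from a real board file; the driver itself only
 * requires one MMIO "reg" window for the termination registers and
 * NR_HW_IRQS (16) GIC interrupts, one per MSI group:
 *
 *	msi@79000000 {
 *		compatible = "apm,xgene1-msi";
 *		msi-controller;
 *		reg = <0x0 0x79000000 0x0 0x900000>;
 *		interrupts = <0x0 0x10 0x4>, <0x0 0x11 0x4>, ... ;
 *	};
 */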
static const struct of_device_id xgene_msi_match_table[] = {
	{.compatible = "apm,xgene1-msi"},
	{},
};

static int xgene_msi_probe(struct platform_device *pdev)
{
	struct resource *res;
	int rc, irq_index;
	struct xgene_msi *xgene_msi;
	int virt_msir;
	u32 msi_val, msi_idx;

	xgene_msi = &xgene_msi_ctrl;

	platform_set_drvdata(pdev, xgene_msi);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xgene_msi->msi_regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(xgene_msi->msi_regs)) {
		dev_err(&pdev->dev, "no reg space\n");
		rc = PTR_ERR(xgene_msi->msi_regs);
		goto error;
	}
	xgene_msi->msi_addr = res->start;
	xgene_msi->node = pdev->dev.of_node;
	xgene_msi->num_cpus = num_possible_cpus();

	rc = xgene_msi_init_allocator(xgene_msi);
	if (rc) {
		dev_err(&pdev->dev, "Error allocating MSI bitmap\n");
		goto error;
	}

	rc = xgene_allocate_domains(xgene_msi);
	if (rc) {
		dev_err(&pdev->dev, "Failed to allocate MSI domain\n");
		goto error;
	}

	for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
		virt_msir = platform_get_irq(pdev, irq_index);
		if (virt_msir < 0) {
			dev_err(&pdev->dev, "Cannot translate IRQ index %d\n",
				irq_index);
			rc = virt_msir;
			goto error;
		}
		xgene_msi->msi_groups[irq_index].gic_irq = virt_msir;
		xgene_msi->msi_groups[irq_index].msi_grp = irq_index;
		xgene_msi->msi_groups[irq_index].msi = xgene_msi;
	}
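
	/*
	 * MSInIRx registers are cleared on read: read every index register in
	 * every group once so that any MSI raised before the handlers are
	 * registered does not linger as a spurious pending interrupt.
	 */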
	for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) {
		for (msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++)
			msi_val = xgene_msi_ir_read(xgene_msi, irq_index,
						    msi_idx);

		msi_val = xgene_msi_int_read(xgene_msi, irq_index);
		if (msi_val) {
			dev_err(&pdev->dev, "Failed to clear spurious IRQ\n");
			rc = -EINVAL;
			goto error;
		}
	}

	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online",
			       xgene_msi_hwirq_alloc, NULL);
	if (rc < 0)
		goto err_cpuhp;
	pci_xgene_online = rc;
	rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL,
			       xgene_msi_hwirq_free);
	if (rc)
		goto err_cpuhp;

	dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n");

	return 0;

err_cpuhp:
	dev_err(&pdev->dev, "failed to add CPU MSI notifier\n");
error:
	xgene_msi_remove(pdev);
	return rc;
}

static struct platform_driver xgene_msi_driver = {
	.driver = {
		.name = "xgene-msi",
		.of_match_table = xgene_msi_match_table,
	},
	.probe = xgene_msi_probe,
	.remove = xgene_msi_remove,
};

static int __init xgene_pcie_msi_init(void)
{
	return platform_driver_register(&xgene_msi_driver);
}
subsys_initcall(xgene_pcie_msi_init);