1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16#define pr_fmt(fmt) "GICv2m: " fmt
17
18#include <linux/acpi.h>
19#include <linux/dma-iommu.h>
20#include <linux/irq.h>
21#include <linux/irqdomain.h>
22#include <linux/kernel.h>
23#include <linux/pci.h>
24#include <linux/msi.h>
25#include <linux/of_address.h>
26#include <linux/of_pci.h>
27#include <linux/slab.h>
28#include <linux/spinlock.h>
29#include <linux/irqchip/arm-gic.h>
30
31
32
33
34
35
36
37
/*
 * MSI_TYPER register layout (grounded by the BASE/NUM macros below):
 *     [31:26] Reserved
 *     [25:16] lowest SPI assigned to MSI
 *     [15:10] Reserved
 *     [9:0]   number of SPIs assigned to MSI
 */
#define V2M_MSI_TYPER 0x008
#define V2M_MSI_TYPER_BASE_SHIFT 16
#define V2M_MSI_TYPER_BASE_MASK 0x3FF
#define V2M_MSI_TYPER_NUM_MASK 0x3FF
/* Doorbell register: a write of an SPI number raises that interrupt */
#define V2M_MSI_SETSPI_NS 0x040
/* GIC SPI interrupt ID space: SPIs occupy IDs 32..1019 */
#define V2M_MIN_SPI 32
#define V2M_MAX_SPI 1019
/* Implementation identification register, used below to detect quirky parts */
#define V2M_MSI_IIDR 0xFCC

/* Extract the lowest MSI SPI number from an MSI_TYPER value */
#define V2M_MSI_TYPER_BASE_SPI(x) \
 (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK)

/* Extract the number of MSI SPIs from an MSI_TYPER value */
#define V2M_MSI_TYPER_NUM_SPI(x) ((x) & V2M_MSI_TYPER_NUM_MASK)

/* APM X-Gene GICv2m MSI_IIDR register value (needs SPI-offset quirk) */
#define XGENE_GICV2M_MSI_IIDR 0x06000170

/* Broadcom NS2 GICv2m MSI_IIDR register value (needs SPI-offset quirk) */
#define BCM_NS2_GICV2M_MSI_IIDR 0x0000013f

/* Per-implementation quirk flags stored in v2m_data::flags */
#define GICV2M_NEEDS_SPI_OFFSET 0x00000001
#define GICV2M_GRAVITON_ADDRESS_ONLY 0x00000002
61
/* All v2m frames discovered at init time; bitmap allocations in each
 * frame are serialized by v2m_lock. */
static LIST_HEAD(v2m_nodes);
static DEFINE_SPINLOCK(v2m_lock);

/* Per-frame state for a single GICv2m MSI frame. */
struct v2m_data {
	struct list_head entry;		/* node in v2m_nodes */
	struct fwnode_handle *fwnode;	/* DT node or ACPI-allocated token */
	struct resource res;		/* MMIO window of the frame */
	void __iomem *base;		/* ioremap()ed res */
	u32 spi_start;			/* first SPI served by this frame */
	u32 nr_spis;			/* number of SPIs served */
	u32 spi_offset;			/* subtracted from MSI data (quirks) */
	unsigned long *bm;		/* allocation bitmap, nr_spis bits */
	u32 flags;			/* GICV2M_* quirk flags */
};
76
/*
 * Mask an MSI at the PCI/MSI level, then in the parent GIC.
 * Mirrors gicv2m_unmask_msi_irq() below.
 */
static void gicv2m_mask_msi_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}
82
/*
 * Unmask an MSI at the PCI/MSI level, then in the parent GIC.
 * Mirrors gicv2m_mask_msi_irq() above.
 */
static void gicv2m_unmask_msi_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}
88
/* irqchip exposed to PCI/MSI consumers; EOI is forwarded to the GIC. */
static struct irq_chip gicv2m_msi_irq_chip = {
	.name = "MSI",
	.irq_mask = gicv2m_mask_msi_irq,
	.irq_unmask = gicv2m_unmask_msi_irq,
	.irq_eoi = irq_chip_eoi_parent,
	.irq_write_msi_msg = pci_msi_domain_write_msg,
};
96
/*
 * PCI/MSI domain info. MSI_FLAG_MULTI_PCI_MSI is cleared at runtime for
 * the Graviton quirk (see acpi_parse_madt_msi()).
 */
static struct msi_domain_info gicv2m_msi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip = &gicv2m_msi_irq_chip,
};
102
103static phys_addr_t gicv2m_get_msi_addr(struct v2m_data *v2m, int hwirq)
104{
105 if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)
106 return v2m->res.start | ((hwirq - 32) << 3);
107 else
108 return v2m->res.start + V2M_MSI_SETSPI_NS;
109}
110
111static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
112{
113 struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
114 phys_addr_t addr = gicv2m_get_msi_addr(v2m, data->hwirq);
115
116 msg->address_hi = upper_32_bits(addr);
117 msg->address_lo = lower_32_bits(addr);
118
119 if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)
120 msg->data = 0;
121 else
122 msg->data = data->hwirq;
123 if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
124 msg->data -= v2m->spi_offset;
125
126 iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
127}
128
/* Middle (nexus) irqchip: everything but message composition goes to the GIC. */
static struct irq_chip gicv2m_irq_chip = {
	.name = "GICv2m",
	.irq_mask = irq_chip_mask_parent,
	.irq_unmask = irq_chip_unmask_parent,
	.irq_eoi = irq_chip_eoi_parent,
	.irq_set_affinity = irq_chip_set_affinity_parent,
	.irq_compose_msi_msg = gicv2m_compose_msi_msg,
};
137
138static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain,
139 unsigned int virq,
140 irq_hw_number_t hwirq)
141{
142 struct irq_fwspec fwspec;
143 struct irq_data *d;
144 int err;
145
146 if (is_of_node(domain->parent->fwnode)) {
147 fwspec.fwnode = domain->parent->fwnode;
148 fwspec.param_count = 3;
149 fwspec.param[0] = 0;
150 fwspec.param[1] = hwirq - 32;
151 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
152 } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
153 fwspec.fwnode = domain->parent->fwnode;
154 fwspec.param_count = 2;
155 fwspec.param[0] = hwirq;
156 fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
157 } else {
158 return -EINVAL;
159 }
160
161 err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
162 if (err)
163 return err;
164
165
166 d = irq_domain_get_irq_data(domain->parent, virq);
167 d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
168 return 0;
169}
170
171static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq,
172 int nr_irqs)
173{
174 spin_lock(&v2m_lock);
175 bitmap_release_region(v2m->bm, hwirq - v2m->spi_start,
176 get_count_order(nr_irqs));
177 spin_unlock(&v2m_lock);
178}
179
180static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
181 unsigned int nr_irqs, void *args)
182{
183 msi_alloc_info_t *info = args;
184 struct v2m_data *v2m = NULL, *tmp;
185 int hwirq, offset, i, err = 0;
186
187 spin_lock(&v2m_lock);
188 list_for_each_entry(tmp, &v2m_nodes, entry) {
189 offset = bitmap_find_free_region(tmp->bm, tmp->nr_spis,
190 get_count_order(nr_irqs));
191 if (offset >= 0) {
192 v2m = tmp;
193 break;
194 }
195 }
196 spin_unlock(&v2m_lock);
197
198 if (!v2m)
199 return -ENOSPC;
200
201 hwirq = v2m->spi_start + offset;
202
203 err = iommu_dma_prepare_msi(info->desc,
204 gicv2m_get_msi_addr(v2m, hwirq));
205 if (err)
206 return err;
207
208 for (i = 0; i < nr_irqs; i++) {
209 err = gicv2m_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
210 if (err)
211 goto fail;
212
213 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
214 &gicv2m_irq_chip, v2m);
215 }
216
217 return 0;
218
219fail:
220 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
221 gicv2m_unalloc_msi(v2m, hwirq, nr_irqs);
222 return err;
223}
224
/*
 * Free a previously allocated MSI range: give the SPIs back to the
 * owning frame's bitmap, then release the parent GIC interrupts.
 */
static void gicv2m_irq_domain_free(struct irq_domain *domain,
				   unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct v2m_data *v2m = irq_data_get_irq_chip_data(d);

	gicv2m_unalloc_msi(v2m, d->hwirq, nr_irqs);
	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
234
/* Ops for the nexus domain sitting between the MSI domains and the GIC. */
static const struct irq_domain_ops gicv2m_domain_ops = {
	.alloc = gicv2m_irq_domain_alloc,
	.free = gicv2m_irq_domain_free,
};
239
240static bool is_msi_spi_valid(u32 base, u32 num)
241{
242 if (base < V2M_MIN_SPI) {
243 pr_err("Invalid MSI base SPI (base:%u)\n", base);
244 return false;
245 }
246
247 if ((num == 0) || (base + num > V2M_MAX_SPI)) {
248 pr_err("Number of SPIs (%u) exceed maximum (%u)\n",
249 num, V2M_MAX_SPI - V2M_MIN_SPI + 1);
250 return false;
251 }
252
253 return true;
254}
255
/* Minimal irqchip for platform (non-PCI) MSI users; defaults do the work. */
static struct irq_chip gicv2m_pmsi_irq_chip = {
	.name = "pMSI",
};

/* Empty: rely entirely on the default MSI domain ops. */
static struct msi_domain_ops gicv2m_pmsi_ops = {
};

static struct msi_domain_info gicv2m_pmsi_domain_info = {
	.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
	.ops = &gicv2m_pmsi_ops,
	.chip = &gicv2m_pmsi_irq_chip,
};
268
/*
 * Undo gicv2m_init_one() for every discovered frame (init-failure path).
 * The of_node_put() is a no-op for ACPI frames (to_of_node() yields
 * NULL); conversely irq_domain_free_fwnode() only runs for the
 * ACPI-allocated fwnode tokens, not DT nodes.
 */
static void gicv2m_teardown(void)
{
	struct v2m_data *v2m, *tmp;

	list_for_each_entry_safe(v2m, tmp, &v2m_nodes, entry) {
		list_del(&v2m->entry);
		kfree(v2m->bm);
		iounmap(v2m->base);
		of_node_put(to_of_node(v2m->fwnode));
		if (is_fwnode_irqchip(v2m->fwnode))
			irq_domain_free_fwnode(v2m->fwnode);
		kfree(v2m);
	}
}
283
/*
 * Build the domain stack on top of @parent (the GIC): one nexus domain
 * plus a PCI/MSI and a platform-MSI domain above it. All frames share
 * these domains; the first frame's fwnode names them.
 *
 * Returns 0 (also when no frame was discovered) or -ENOMEM, with any
 * partially created domains removed.
 */
static int gicv2m_allocate_domains(struct irq_domain *parent)
{
	struct irq_domain *inner_domain, *pci_domain, *plat_domain;
	struct v2m_data *v2m;

	v2m = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
	if (!v2m)
		return 0;

	inner_domain = irq_domain_create_tree(v2m->fwnode,
					      &gicv2m_domain_ops, v2m);
	if (!inner_domain) {
		pr_err("Failed to create GICv2m domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
	inner_domain->parent = parent;
	pci_domain = pci_msi_create_irq_domain(v2m->fwnode,
					       &gicv2m_msi_domain_info,
					       inner_domain);
	plat_domain = platform_msi_create_irq_domain(v2m->fwnode,
						     &gicv2m_pmsi_domain_info,
						     inner_domain);
	if (!pci_domain || !plat_domain) {
		pr_err("Failed to create MSI domains\n");
		if (plat_domain)
			irq_domain_remove(plat_domain);
		if (pci_domain)
			irq_domain_remove(pci_domain);
		irq_domain_remove(inner_domain);
		return -ENOMEM;
	}

	return 0;
}
320
/*
 * Probe and register a single v2m frame.
 *
 * @fwnode:    identity of the frame (DT node or ACPI fwnode token)
 * @spi_start: firmware-provided first SPI, or 0 to read MSI_TYPER
 * @nr_spis:   firmware-provided SPI count, or 0 to read MSI_TYPER
 * @res:       MMIO window of the frame (copied)
 * @flags:     GICV2M_* quirk flags
 *
 * On success the frame is appended to v2m_nodes and 0 is returned;
 * on failure a negative errno is returned and nothing is registered.
 */
static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
				  u32 spi_start, u32 nr_spis,
				  struct resource *res, u32 flags)
{
	int ret;
	struct v2m_data *v2m;

	v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL);
	if (!v2m) {
		pr_err("Failed to allocate struct v2m_data.\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&v2m->entry);
	v2m->fwnode = fwnode;
	v2m->flags = flags;

	memcpy(&v2m->res, res, sizeof(struct resource));

	v2m->base = ioremap(v2m->res.start, resource_size(&v2m->res));
	if (!v2m->base) {
		pr_err("Failed to map GICv2m resource\n");
		ret = -ENOMEM;
		goto err_free_v2m;
	}

	if (spi_start && nr_spis) {
		/* Firmware override of the SPI range */
		v2m->spi_start = spi_start;
		v2m->nr_spis = nr_spis;
	} else {
		u32 typer;

		/* Graviton (address-only) frames have no readable
		 * MSI_TYPER; they must supply the range via firmware. */
		if (v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY) {
			ret = -EINVAL;
			goto err_iounmap;
		}
		typer = readl_relaxed(v2m->base + V2M_MSI_TYPER);

		v2m->spi_start = V2M_MSI_TYPER_BASE_SPI(typer);
		v2m->nr_spis = V2M_MSI_TYPER_NUM_SPI(typer);
	}

	if (!is_msi_spi_valid(v2m->spi_start, v2m->nr_spis)) {
		ret = -EINVAL;
		goto err_iounmap;
	}

	/*
	 * Quirk detection via MSI_IIDR (skipped on Graviton, whose frame
	 * has no readable registers):
	 *
	 * - APM X-Gene expects the MSI data to be the offset from
	 *   spi_start rather than the absolute SPI number, so the whole
	 *   spi_start is used as the offset.
	 *
	 * - Broadcom NS2 expects the MSI data to be 'spi_number - 32',
	 *   hence a fixed offset of 32.
	 *
	 * (Offsets are applied in gicv2m_compose_msi_msg() via
	 * GICV2M_NEEDS_SPI_OFFSET.)
	 */
	if (!(v2m->flags & GICV2M_GRAVITON_ADDRESS_ONLY)) {
		switch (readl_relaxed(v2m->base + V2M_MSI_IIDR)) {
		case XGENE_GICV2M_MSI_IIDR:
			v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
			v2m->spi_offset = v2m->spi_start;
			break;
		case BCM_NS2_GICV2M_MSI_IIDR:
			v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
			v2m->spi_offset = 32;
			break;
		}
	}
	v2m->bm = kcalloc(BITS_TO_LONGS(v2m->nr_spis), sizeof(long),
			  GFP_KERNEL);
	if (!v2m->bm) {
		ret = -ENOMEM;
		goto err_iounmap;
	}

	list_add_tail(&v2m->entry, &v2m_nodes);

	pr_info("range%pR, SPI[%d:%d]\n", res,
		v2m->spi_start, (v2m->spi_start + v2m->nr_spis - 1));
	return 0;

err_iounmap:
	iounmap(v2m->base);
err_free_v2m:
	kfree(v2m);
	return ret;
}
413
414static struct of_device_id gicv2m_device_id[] = {
415 { .compatible = "arm,gic-v2m-frame", },
416 {},
417};
418
/*
 * DT probe path: walk all "arm,gic-v2m-frame" nodes under the GIC,
 * init each msi-controller frame, then build the MSI domains. On any
 * failure, everything registered so far is torn down.
 *
 * Refcounting: of_find_matching_node() drops the reference on its
 * 'from' argument each iteration, so the loop keeps exactly one child
 * reference live; on early break the current child is put explicitly.
 * NOTE(review): the first call also drops a reference on the parent
 * 'node' — confirm the caller holds its own reference.
 */
static int __init gicv2m_of_init(struct fwnode_handle *parent_handle,
				 struct irq_domain *parent)
{
	int ret = 0;
	struct device_node *node = to_of_node(parent_handle);
	struct device_node *child;

	for (child = of_find_matching_node(node, gicv2m_device_id); child;
	     child = of_find_matching_node(child, gicv2m_device_id)) {
		u32 spi_start = 0, nr_spis = 0;
		struct resource res;

		/* Only frames marked as MSI controllers are used */
		if (!of_find_property(child, "msi-controller", NULL))
			continue;

		ret = of_address_to_resource(child, 0, &res);
		if (ret) {
			pr_err("Failed to allocate v2m resource.\n");
			break;
		}

		/* Both properties must be present to override MSI_TYPER */
		if (!of_property_read_u32(child, "arm,msi-base-spi",
					  &spi_start) &&
		    !of_property_read_u32(child, "arm,msi-num-spis", &nr_spis))
			pr_info("DT overriding V2M MSI_TYPER (base:%u, num:%u)\n",
				spi_start, nr_spis);

		ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis,
				      &res, 0);
		if (ret) {
			of_node_put(child);
			break;
		}
	}

	if (!ret)
		ret = gicv2m_allocate_domains(parent);
	if (ret)
		gicv2m_teardown();
	return ret;
}
460
461#ifdef CONFIG_ACPI
static int acpi_num_msi;	/* count of MADT MSI frames parsed (>0 once probed) */
463
464static struct fwnode_handle *gicv2m_get_fwnode(struct device *dev)
465{
466 struct v2m_data *data;
467
468 if (WARN_ON(acpi_num_msi <= 0))
469 return NULL;
470
471
472 data = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
473 if (!data)
474 return NULL;
475
476 return data->fwnode;
477}
478
479static bool acpi_check_amazon_graviton_quirks(void)
480{
481 static struct acpi_table_madt *madt;
482 acpi_status status;
483 bool rc = false;
484
485#define ACPI_AMZN_OEM_ID "AMAZON"
486
487 status = acpi_get_table(ACPI_SIG_MADT, 0,
488 (struct acpi_table_header **)&madt);
489
490 if (ACPI_FAILURE(status) || !madt)
491 return rc;
492 rc = !memcmp(madt->header.oem_id, ACPI_AMZN_OEM_ID, ACPI_OEM_ID_SIZE);
493 acpi_put_table((struct acpi_table_header *)madt);
494
495 return rc;
496}
497
/*
 * MADT callback: probe one ACPI GENERIC_MSI_FRAME entry.
 *
 * Builds the MMIO resource (4K, or 8K address-only on Graviton),
 * honours the MADT SPI override flag, allocates a fwnode token for the
 * frame and registers it via gicv2m_init_one(). Note the Graviton path
 * also clears MSI_FLAG_MULTI_PCI_MSI in the shared PCI domain info.
 */
static int __init
acpi_parse_madt_msi(union acpi_subtable_headers *header,
		    const unsigned long end)
{
	int ret;
	struct resource res;
	u32 spi_start = 0, nr_spis = 0;
	struct acpi_madt_generic_msi_frame *m;
	struct fwnode_handle *fwnode;
	u32 flags = 0;

	m = (struct acpi_madt_generic_msi_frame *)header;
	if (BAD_MADT_ENTRY(m, end))
		return -EINVAL;

	res.start = m->base_address;
	res.end = m->base_address + SZ_4K - 1;
	res.flags = IORESOURCE_MEM;

	if (acpi_check_amazon_graviton_quirks()) {
		pr_info("applying Amazon Graviton quirk\n");
		res.end = res.start + SZ_8K - 1;
		flags |= GICV2M_GRAVITON_ADDRESS_ONLY;
		gicv2m_msi_domain_info.flags &= ~MSI_FLAG_MULTI_PCI_MSI;
	}

	if (m->flags & ACPI_MADT_OVERRIDE_SPI_VALUES) {
		spi_start = m->spi_base;
		nr_spis = m->spi_count;

		pr_info("ACPI overriding V2M MSI_TYPER (base:%u, num:%u)\n",
			spi_start, nr_spis);
	}

	fwnode = irq_domain_alloc_fwnode(&res.start);
	if (!fwnode) {
		pr_err("Unable to allocate GICv2m domain token\n");
		return -EINVAL;
	}

	ret = gicv2m_init_one(fwnode, spi_start, nr_spis, &res, flags);
	if (ret)
		irq_domain_free_fwnode(fwnode);

	return ret;
}
544
/*
 * ACPI probe entry point: parse all MADT MSI frames, build the MSI
 * domains and register the fwnode provider for PCI. Idempotent — a
 * second call is a no-op. All errors are collapsed to -EINVAL after
 * tearing down any frames that were registered.
 */
static int __init gicv2m_acpi_init(struct irq_domain *parent)
{
	int ret;

	if (acpi_num_msi > 0)
		return 0;

	acpi_num_msi = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_MSI_FRAME,
					     acpi_parse_madt_msi, 0);

	if (acpi_num_msi <= 0)
		goto err_out;

	ret = gicv2m_allocate_domains(parent);
	if (ret)
		goto err_out;

	pci_msi_register_fwnode_provider(&gicv2m_get_fwnode);

	return 0;

err_out:
	gicv2m_teardown();
	return -EINVAL;
}
570#else
/* !CONFIG_ACPI stub: ACPI probing unavailable */
static int __init gicv2m_acpi_init(struct irq_domain *parent)
{
	return -EINVAL;
}
575#endif
576
577int __init gicv2m_init(struct fwnode_handle *parent_handle,
578 struct irq_domain *parent)
579{
580 if (is_of_node(parent_handle))
581 return gicv2m_of_init(parent_handle, parent);
582
583 return gicv2m_acpi_init(parent);
584}
585