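/*
 * ARM GICv2m MSI(-X) support
 *
 * Support for Message Signaled Interrupts on systems that implement the
 * ARM Generic Interrupt Controller v2m (GICv2m) MSI frame extension.
 */
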
#define pr_fmt(fmt) "GICv2m: " fmt

#include <linux/acpi.h>
#include <linux/dma-iommu.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irqchip/arm-gic.h>
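/*
 * MSI_TYPER:
 *     [31:26] Reserved
 *     [25:16] Lowest SPI assigned to MSI
 *     [15:10] Reserved
 *     [9:0]   Number of SPIs assigned to MSI
 */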
#define V2M_MSI_TYPER			0x008
#define V2M_MSI_TYPER_BASE_SHIFT	16
#define V2M_MSI_TYPER_BASE_MASK		0x3FF
#define V2M_MSI_TYPER_NUM_MASK		0x3FF
#define V2M_MSI_SETSPI_NS		0x040
#define V2M_MIN_SPI			32
#define V2M_MAX_SPI			1019
#define V2M_MSI_IIDR			0xFCC

#define V2M_MSI_TYPER_BASE_SPI(x)	\
	(((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK)

#define V2M_MSI_TYPER_NUM_SPI(x)	((x) & V2M_MSI_TYPER_NUM_MASK)

/* APM X-Gene with GICv2m MSI_IIDR register value */
#define XGENE_GICV2M_MSI_IIDR		0x06000170

/* Broadcom NS2 GICv2m MSI_IIDR register value */
#define BCM_NS2_GICV2M_MSI_IIDR		0x0000013f

/* List of flags for specific v2m implementation */
#define GICV2M_NEEDS_SPI_OFFSET		0x00000001

static LIST_HEAD(v2m_nodes);
static DEFINE_SPINLOCK(v2m_lock);

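/*
 * struct v2m_data - per-frame state for one GICv2m MSI frame.
 * @entry:	node in the global v2m_nodes list
 * @fwnode:	fwnode identifying this frame (DT node or ACPI-allocated token)
 * @res:	physical address range of the frame's register region
 * @base:	kernel mapping of the register region
 * @spi_start:	first SPI handled by this frame
 * @nr_spis:	number of SPIs handled by this frame
 * @spi_offset:	subtracted from the SPI number to form the MSI data
 *		(only when GICV2M_NEEDS_SPI_OFFSET is set)
 * @bm:		allocation bitmap, one bit per SPI of the frame
 * @flags:	implementation-specific quirks (GICV2M_NEEDS_SPI_OFFSET)
 */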
struct v2m_data {
	struct list_head entry;
	struct fwnode_handle *fwnode;
	struct resource res;
	void __iomem *base;
	u32 spi_start;
	u32 nr_spis;
	u32 spi_offset;
	unsigned long *bm;
	u32 flags;
};

static void gicv2m_mask_msi_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void gicv2m_unmask_msi_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip gicv2m_msi_irq_chip = {
	.name			= "MSI",
	.irq_mask		= gicv2m_mask_msi_irq,
	.irq_unmask		= gicv2m_unmask_msi_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_write_msi_msg	= pci_msi_domain_write_msg,
};

static struct msi_domain_info gicv2m_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &gicv2m_msi_irq_chip,
};

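/*
 * The doorbell address is the frame's MSI_SETSPI_NS register; the payload is
 * the SPI number, minus the implementation-specific offset when the
 * GICV2M_NEEDS_SPI_OFFSET quirk is set.
 */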
static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
	phys_addr_t addr = v2m->res.start + V2M_MSI_SETSPI_NS;

	msg->address_hi = upper_32_bits(addr);
	msg->address_lo = lower_32_bits(addr);
	msg->data = data->hwirq;

	if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
		msg->data -= v2m->spi_offset;

	iommu_dma_map_msi_msg(data->irq, msg);
}

static struct irq_chip gicv2m_irq_chip = {
	.name			= "GICv2m",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_compose_msi_msg	= gicv2m_compose_msi_msg,
};

static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain,
				       unsigned int virq,
				       irq_hw_number_t hwirq)
{
	struct irq_fwspec fwspec;
	struct irq_data *d;
	int err;

	if (is_of_node(domain->parent->fwnode)) {
		fwspec.fwnode = domain->parent->fwnode;
		fwspec.param_count = 3;
		fwspec.param[0] = 0;
		fwspec.param[1] = hwirq - 32;
		fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
	} else if (is_fwnode_irqchip(domain->parent->fwnode)) {
		fwspec.fwnode = domain->parent->fwnode;
		fwspec.param_count = 2;
		fwspec.param[0] = hwirq;
		fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
	} else {
		return -EINVAL;
	}

	err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
	if (err)
		return err;

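	/* Configure the interrupt line to be edge */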
	d = irq_domain_get_irq_data(domain->parent, virq);
	d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
	return 0;
}

static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq,
			       int nr_irqs)
{
	spin_lock(&v2m_lock);
	bitmap_release_region(v2m->bm, hwirq - v2m->spi_start,
			      get_count_order(nr_irqs));
	spin_unlock(&v2m_lock);
}

static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				   unsigned int nr_irqs, void *args)
{
	struct v2m_data *v2m = NULL, *tmp;
	int hwirq, offset, i, err = 0;

	spin_lock(&v2m_lock);
	list_for_each_entry(tmp, &v2m_nodes, entry) {
		offset = bitmap_find_free_region(tmp->bm, tmp->nr_spis,
						 get_count_order(nr_irqs));
		if (offset >= 0) {
			v2m = tmp;
			break;
		}
	}
	spin_unlock(&v2m_lock);

	if (!v2m)
		return -ENOSPC;

	hwirq = v2m->spi_start + offset;

	for (i = 0; i < nr_irqs; i++) {
		err = gicv2m_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
		if (err)
			goto fail;

		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &gicv2m_irq_chip, v2m);
	}

	return 0;

fail:
	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
	gicv2m_unalloc_msi(v2m, hwirq, nr_irqs);
	return err;
}

static void gicv2m_irq_domain_free(struct irq_domain *domain,
				   unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct v2m_data *v2m = irq_data_get_irq_chip_data(d);

	gicv2m_unalloc_msi(v2m, d->hwirq, nr_irqs);
	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops gicv2m_domain_ops = {
	.alloc			= gicv2m_irq_domain_alloc,
	.free			= gicv2m_irq_domain_free,
};

static bool is_msi_spi_valid(u32 base, u32 num)
{
	if (base < V2M_MIN_SPI) {
		pr_err("Invalid MSI base SPI (base:%u)\n", base);
		return false;
	}

	if ((num == 0) || (base + num > V2M_MAX_SPI)) {
		pr_err("Number of SPIs (%u) exceeds maximum (%u)\n",
		       num, V2M_MAX_SPI - V2M_MIN_SPI + 1);
		return false;
	}

	return true;
}

static struct irq_chip gicv2m_pmsi_irq_chip = {
	.name			= "pMSI",
};

static struct msi_domain_ops gicv2m_pmsi_ops = {
};

static struct msi_domain_info gicv2m_pmsi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
	.ops	= &gicv2m_pmsi_ops,
	.chip	= &gicv2m_pmsi_irq_chip,
};

static void gicv2m_teardown(void)
{
	struct v2m_data *v2m, *tmp;

	list_for_each_entry_safe(v2m, tmp, &v2m_nodes, entry) {
		list_del(&v2m->entry);
		kfree(v2m->bm);
		iounmap(v2m->base);
		of_node_put(to_of_node(v2m->fwnode));
		if (is_fwnode_irqchip(v2m->fwnode))
			irq_domain_free_fwnode(v2m->fwnode);
		kfree(v2m);
	}
}

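/*
 * Create the MSI irq domains on top of the parent GIC domain: a GICv2m nexus
 * domain plus PCI-MSI and platform-MSI domains stacked above it. A single set
 * of domains serves all registered frames.
 */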
static int gicv2m_allocate_domains(struct irq_domain *parent)
{
	struct irq_domain *inner_domain, *pci_domain, *plat_domain;
	struct v2m_data *v2m;

	v2m = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
	if (!v2m)
		return 0;

	inner_domain = irq_domain_create_tree(v2m->fwnode,
					      &gicv2m_domain_ops, v2m);
	if (!inner_domain) {
		pr_err("Failed to create GICv2m domain\n");
		return -ENOMEM;
	}

	irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
	inner_domain->parent = parent;
	pci_domain = pci_msi_create_irq_domain(v2m->fwnode,
					       &gicv2m_msi_domain_info,
					       inner_domain);
	plat_domain = platform_msi_create_irq_domain(v2m->fwnode,
						     &gicv2m_pmsi_domain_info,
						     inner_domain);
	if (!pci_domain || !plat_domain) {
		pr_err("Failed to create MSI domains\n");
		if (plat_domain)
			irq_domain_remove(plat_domain);
		if (pci_domain)
			irq_domain_remove(pci_domain);
		irq_domain_remove(inner_domain);
		return -ENOMEM;
	}

	return 0;
}

static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
				  u32 spi_start, u32 nr_spis,
				  struct resource *res)
{
	int ret;
	struct v2m_data *v2m;

	v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL);
	if (!v2m) {
		pr_err("Failed to allocate struct v2m_data.\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&v2m->entry);
	v2m->fwnode = fwnode;

	memcpy(&v2m->res, res, sizeof(struct resource));

	v2m->base = ioremap(v2m->res.start, resource_size(&v2m->res));
	if (!v2m->base) {
		pr_err("Failed to map GICv2m resource\n");
		ret = -ENOMEM;
		goto err_free_v2m;
	}

	if (spi_start && nr_spis) {
		v2m->spi_start = spi_start;
		v2m->nr_spis = nr_spis;
	} else {
		u32 typer = readl_relaxed(v2m->base + V2M_MSI_TYPER);

		v2m->spi_start = V2M_MSI_TYPER_BASE_SPI(typer);
		v2m->nr_spis = V2M_MSI_TYPER_NUM_SPI(typer);
	}

	if (!is_msi_spi_valid(v2m->spi_start, v2m->nr_spis)) {
		ret = -EINVAL;
		goto err_iounmap;
	}

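	/*
	 * The APM X-Gene GICv2m implementation has an erratum where the MSI
	 * data must be the offset from spi_start in order to trigger the
	 * correct MSI interrupt, rather than the absolute SPI number used by
	 * standard GICv2m implementations.
	 *
	 * The Broadcom NS2 GICv2m implementation has an erratum where the MSI
	 * data is 'spi_number - 32'.
	 */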
	switch (readl_relaxed(v2m->base + V2M_MSI_IIDR)) {
	case XGENE_GICV2M_MSI_IIDR:
		v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
		v2m->spi_offset = v2m->spi_start;
		break;
	case BCM_NS2_GICV2M_MSI_IIDR:
		v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
		v2m->spi_offset = 32;
		break;
	}

	v2m->bm = kcalloc(BITS_TO_LONGS(v2m->nr_spis), sizeof(long),
			  GFP_KERNEL);
	if (!v2m->bm) {
		ret = -ENOMEM;
		goto err_iounmap;
	}

	list_add_tail(&v2m->entry, &v2m_nodes);

	pr_info("range%pR, SPI[%d:%d]\n", res,
		v2m->spi_start, (v2m->spi_start + v2m->nr_spis - 1));
	return 0;

err_iounmap:
	iounmap(v2m->base);
err_free_v2m:
	kfree(v2m);
	return ret;
}

static struct of_device_id gicv2m_device_id[] = {
	{	.compatible	= "arm,gic-v2m-frame", },
	{},
};

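/*
 * Probe every "arm,gic-v2m-frame" child of the GIC node that is marked as an
 * msi-controller, honoring the optional arm,msi-base-spi/arm,msi-num-spis
 * overrides, then build the MSI domains on top of the parent GIC domain.
 */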
static int __init gicv2m_of_init(struct fwnode_handle *parent_handle,
				 struct irq_domain *parent)
{
	int ret = 0;
	struct device_node *node = to_of_node(parent_handle);
	struct device_node *child;

	for (child = of_find_matching_node(node, gicv2m_device_id); child;
	     child = of_find_matching_node(child, gicv2m_device_id)) {
		u32 spi_start = 0, nr_spis = 0;
		struct resource res;

		if (!of_find_property(child, "msi-controller", NULL))
			continue;

		ret = of_address_to_resource(child, 0, &res);
		if (ret) {
			pr_err("Failed to allocate v2m resource.\n");
			break;
		}

		if (!of_property_read_u32(child, "arm,msi-base-spi",
					  &spi_start) &&
		    !of_property_read_u32(child, "arm,msi-num-spis", &nr_spis))
			pr_info("DT overriding V2M MSI_TYPER (base:%u, num:%u)\n",
				spi_start, nr_spis);

		ret = gicv2m_init_one(&child->fwnode, spi_start, nr_spis, &res);
		if (ret) {
			of_node_put(child);
			break;
		}
	}

	if (!ret)
		ret = gicv2m_allocate_domains(parent);
	if (ret)
		gicv2m_teardown();
	return ret;
}

#ifdef CONFIG_ACPI
static int acpi_num_msi;

static struct fwnode_handle *gicv2m_get_fwnode(struct device *dev)
{
	struct v2m_data *data;

	if (WARN_ON(acpi_num_msi <= 0))
		return NULL;

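	/* We only return the fwnode of the first MSI frame. */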
	data = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
	if (!data)
		return NULL;

	return data->fwnode;
}

static int __init
acpi_parse_madt_msi(struct acpi_subtable_header *header,
		    const unsigned long end)
{
	int ret;
	struct resource res;
	u32 spi_start = 0, nr_spis = 0;
	struct acpi_madt_generic_msi_frame *m;
	struct fwnode_handle *fwnode;

	m = (struct acpi_madt_generic_msi_frame *)header;
	if (BAD_MADT_ENTRY(m, end))
		return -EINVAL;

	res.start = m->base_address;
	res.end = m->base_address + SZ_4K - 1;
	res.flags = IORESOURCE_MEM;

	if (m->flags & ACPI_MADT_OVERRIDE_SPI_VALUES) {
		spi_start = m->spi_base;
		nr_spis = m->spi_count;

		pr_info("ACPI overriding V2M MSI_TYPER (base:%u, num:%u)\n",
			spi_start, nr_spis);
	}

	fwnode = irq_domain_alloc_fwnode((void *)m->base_address);
	if (!fwnode) {
		pr_err("Unable to allocate GICv2m domain token\n");
		return -EINVAL;
	}

	ret = gicv2m_init_one(fwnode, spi_start, nr_spis, &res);
	if (ret)
		irq_domain_free_fwnode(fwnode);

	return ret;
}

static int __init gicv2m_acpi_init(struct irq_domain *parent)
{
	int ret;

	if (acpi_num_msi > 0)
		return 0;

	acpi_num_msi = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_MSI_FRAME,
					     acpi_parse_madt_msi, 0);

	if (acpi_num_msi <= 0)
		goto err_out;

	ret = gicv2m_allocate_domains(parent);
	if (ret)
		goto err_out;

	pci_msi_register_fwnode_provider(&gicv2m_get_fwnode);

	return 0;

err_out:
	gicv2m_teardown();
	return -EINVAL;
}
#else
static int __init gicv2m_acpi_init(struct irq_domain *parent)
{
	return -EINVAL;
}
#endif

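/*
 * Common entry point, called from the GIC driver: probe v2m frames from the
 * device tree when the parent fwnode is an OF node, otherwise from the ACPI
 * MADT.
 */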
int __init gicv2m_init(struct fwnode_handle *parent_handle,
		       struct irq_domain *parent)
{
	if (is_of_node(parent_handle))
		return gicv2m_of_init(parent_handle, parent);

	return gicv2m_acpi_init(parent);
}