// SPDX-License-Identifier: GPL-2.0
/*
 * PCI <-> OF (device tree) mapping helpers.
 */
#define pr_fmt(fmt)	"PCI: OF: " fmt

#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include "pci.h"

#ifdef CONFIG_PCI
void pci_set_of_node(struct pci_dev *dev)
{
	if (!dev->bus->dev.of_node)
		return;
	dev->dev.of_node = of_pci_find_child_device(dev->bus->dev.of_node,
						    dev->devfn);
	if (dev->dev.of_node)
		dev->dev.fwnode = &dev->dev.of_node->fwnode;
}

void pci_release_of_node(struct pci_dev *dev)
{
	of_node_put(dev->dev.of_node);
	dev->dev.of_node = NULL;
	dev->dev.fwnode = NULL;
}

void pci_set_bus_of_node(struct pci_bus *bus)
{
	struct device_node *node;

	if (bus->self == NULL) {
		node = pcibios_get_phb_of_node(bus);
	} else {
		node = of_node_get(bus->self->dev.of_node);
		/*
		 * A bridge described as "external-facing" in DT may have
		 * untrusted devices plugged in behind it, so mark it.
		 */
		if (node && of_property_read_bool(node, "external-facing"))
			bus->self->untrusted = true;
	}

	bus->dev.of_node = node;

	if (bus->dev.of_node)
		bus->dev.fwnode = &bus->dev.of_node->fwnode;
}

void pci_release_bus_of_node(struct pci_bus *bus)
{
	of_node_put(bus->dev.of_node);
	bus->dev.of_node = NULL;
	bus->dev.fwnode = NULL;
}

struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus)
{
	/* This should only be called for PHBs (root buses) */
	if (WARN_ON(bus->self || bus->parent))
		return NULL;

	/*
	 * Look for a node pointer on either the host bridge device itself
	 * or, failing that, on its parent. Normally only the parent device
	 * carries the DT node.
	 */
	if (bus->bridge->of_node)
		return of_node_get(bus->bridge->of_node);
	if (bus->bridge->parent && bus->bridge->parent->of_node)
		return of_node_get(bus->bridge->parent->of_node);
	return NULL;
}

struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus)
{
#ifdef CONFIG_IRQ_DOMAIN
	struct irq_domain *d;

	if (!bus->dev.of_node)
		return NULL;

	/* Start by looking for a phandle to an MSI controller. */
	d = of_msi_get_domain(&bus->dev, bus->dev.of_node, DOMAIN_BUS_PCI_MSI);
	if (d)
		return d;

	/*
	 * If there is no msi-parent property, look for a domain attached
	 * directly to the host bridge node.
	 */
	d = irq_find_matching_host(bus->dev.of_node, DOMAIN_BUS_PCI_MSI);
	if (d)
		return d;

	return irq_find_host(bus->dev.of_node);
#else
	return NULL;
#endif
}

static inline int __of_pci_pci_compare(struct device_node *node,
				       unsigned int data)
{
	int devfn;

	devfn = of_pci_get_devfn(node);
	if (devfn < 0)
		return 0;

	return devfn == data;
}

struct device_node *of_pci_find_child_device(struct device_node *parent,
					     unsigned int devfn)
{
	struct device_node *node, *node2;

	for_each_child_of_node(parent, node) {
		if (__of_pci_pci_compare(node, devfn))
			return node;
		/*
		 * Some OFs create a parent node "multifunc-device" as a fake
		 * root for all functions of a multi-function device; descend
		 * into those as well.
		 */
		if (of_node_name_eq(node, "multifunc-device")) {
			for_each_child_of_node(node, node2) {
				if (__of_pci_pci_compare(node2, devfn)) {
					of_node_put(node);
					return node2;
				}
			}
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(of_pci_find_child_device);

/**
 * of_pci_get_devfn() - Get device and function numbers for a device node
 * @np: device node
 *
 * Parses a standard 5-cell PCI resource and returns an 8-bit value that can
 * be passed to the PCI_SLOT() and PCI_FUNC() macros to extract the device
 * and function numbers respectively. On error a negative error code is
 * returned.
 */
int of_pci_get_devfn(struct device_node *np)
{
	u32 reg[5];
	int error;

	error = of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg));
	if (error)
		return error;

	return (reg[0] >> 8) & 0xff;
}
EXPORT_SYMBOL_GPL(of_pci_get_devfn);
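
/*
 * Illustrative example (an assumption, not taken from this file): for a child
 * node whose "reg" property starts with the standard phys.hi cell, e.g.
 *
 *	reg = <0x0000c800 0 0 0 0>;	// bus 0, device 0x19, function 0
 *
 * bits [15:8] of that first cell hold the devfn, so of_pci_get_devfn()
 * returns 0xc8, and PCI_SLOT()/PCI_FUNC() recover 0x19 and 0 from it.
 */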

/**
 * of_pci_parse_bus_range() - parse the bus-range property of a PCI device
 * @node: device node
 * @res: address of a struct resource to return the bus-range in
 *
 * Returns 0 on success or a negative error code on failure.
 */
int of_pci_parse_bus_range(struct device_node *node, struct resource *res)
{
	u32 bus_range[2];
	int error;

	error = of_property_read_u32_array(node, "bus-range", bus_range,
					   ARRAY_SIZE(bus_range));
	if (error)
		return error;

	res->name = node->name;
	res->start = bus_range[0];
	res->end = bus_range[1];
	res->flags = IORESOURCE_BUS;

	return 0;
}
EXPORT_SYMBOL_GPL(of_pci_parse_bus_range);
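
/*
 * Illustrative DT snippet (an assumption, not taken from this file): a host
 * bridge node limited to buses 0x00-0x3f would carry
 *
 *	bus-range = <0x00 0x3f>;
 *
 * which the helper above turns into an IORESOURCE_BUS resource [bus 00-3f].
 */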

/**
 * of_get_pci_domain_nr - Find the host bridge domain number
 *			  of the given device node.
 * @node: Device tree node with the domain information.
 *
 * Reads the "linux,pci-domain" property of the given node.
 *
 * Returns the domain number on success, or a negative error code if the
 * property is missing or malformed.
 */
int of_get_pci_domain_nr(struct device_node *node)
{
	u32 domain;
	int error;

	error = of_property_read_u32(node, "linux,pci-domain", &domain);
	if (error)
		return error;

	return (u16)domain;
}
EXPORT_SYMBOL_GPL(of_get_pci_domain_nr);

/**
 * of_pci_check_probe_only - Set up probe-only mode if linux,pci-probe-only
 *                           is present and valid
 */
void of_pci_check_probe_only(void)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(of_chosen, "linux,pci-probe-only", &val);
	if (ret) {
		if (ret == -ENODATA || ret == -EOVERFLOW)
			pr_warn("linux,pci-probe-only without valid value, ignoring\n");
		return;
	}

	if (val)
		pci_add_flags(PCI_PROBE_ONLY);
	else
		pci_clear_flags(PCI_PROBE_ONLY);

	pr_info("PROBE_ONLY %sabled\n", val ? "en" : "dis");
}
EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
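
/*
 * Illustrative /chosen snippet (an assumption, not taken from this file):
 *
 *	chosen {
 *		linux,pci-probe-only = <1>;
 *	};
 *
 * makes the helper above set PCI_PROBE_ONLY, so Linux enumerates devices but
 * leaves firmware-assigned resources untouched; a value of <0> clears the
 * flag instead.
 */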

/**
 * devm_of_pci_get_host_bridge_resources() - Resource-managed parsing of PCI
 *                                           host bridge resources from DT
 * @dev: host bridge device
 * @busno: bus number associated with the bridge root bus
 * @bus_max: maximum number of buses for this bridge
 * @resources: list where the range of resources will be added after DT parsing
 * @ib_resources: list where the range of inbound resources (with addresses
 *                from 'dma-ranges') will be added after DT parsing
 * @io_base: pointer to a variable that will contain on return the physical
 *           address for the start of the I/O range. Can be NULL if the caller
 *           doesn't expect I/O ranges to be present in the device tree.
 *
 * Parses the "ranges" property of a PCI host bridge device node and sets up
 * the resource mapping based on its content.
 *
 * Returns zero if the range parsing has been successful, or a standard error
 * value if it failed.
 */
static int devm_of_pci_get_host_bridge_resources(struct device *dev,
			unsigned char busno, unsigned char bus_max,
			struct list_head *resources,
			struct list_head *ib_resources,
			resource_size_t *io_base)
{
	struct device_node *dev_node = dev->of_node;
	struct resource *res, tmp_res;
	struct resource *bus_range;
	struct of_pci_range range;
	struct of_pci_range_parser parser;
	const char *range_type;
	int err;

	if (io_base)
		*io_base = (resource_size_t)OF_BAD_ADDR;

	bus_range = devm_kzalloc(dev, sizeof(*bus_range), GFP_KERNEL);
	if (!bus_range)
		return -ENOMEM;

	dev_info(dev, "host bridge %pOF ranges:\n", dev_node);

	err = of_pci_parse_bus_range(dev_node, bus_range);
	if (err) {
		bus_range->start = busno;
		bus_range->end = bus_max;
		bus_range->flags = IORESOURCE_BUS;
		dev_info(dev, " No bus range found for %pOF, using %pR\n",
			 dev_node, bus_range);
	} else {
		if (bus_range->end > bus_range->start + bus_max)
			bus_range->end = bus_range->start + bus_max;
	}
	pci_add_resource(resources, bus_range);

	/* Check for the ranges property */
	err = of_pci_range_parser_init(&parser, dev_node);
	if (err)
		goto failed;

	dev_dbg(dev, "Parsing ranges property...\n");
	for_each_of_pci_range(&parser, &range) {
		/* Read the next ranges element */
		if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
			range_type = "IO";
		else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM)
			range_type = "MEM";
		else
			range_type = "err";
		dev_info(dev, " %6s %#012llx..%#012llx -> %#012llx\n",
			 range_type, range.cpu_addr,
			 range.cpu_addr + range.size - 1, range.pci_addr);

		/*
		 * If we failed translation or got a zero-sized region,
		 * skip this range.
		 */
		if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
			continue;

		err = of_pci_range_to_resource(&range, dev_node, &tmp_res);
		if (err)
			continue;

		res = devm_kmemdup(dev, &tmp_res, sizeof(tmp_res), GFP_KERNEL);
		if (!res) {
			err = -ENOMEM;
			goto failed;
		}

		if (resource_type(res) == IORESOURCE_IO) {
			if (!io_base) {
				dev_err(dev, "I/O range found for %pOF. Please provide an io_base pointer to save CPU base address\n",
					dev_node);
				err = -EINVAL;
				goto failed;
			}
			if (*io_base != (resource_size_t)OF_BAD_ADDR)
				dev_warn(dev, "More than one I/O resource converted for %pOF. CPU base address for old range lost!\n",
					 dev_node);
			*io_base = range.cpu_addr;
		}

		pci_add_resource_offset(resources, res, res->start - range.pci_addr);
	}

	/* Check for the dma-ranges property */
	if (!ib_resources)
		return 0;
	err = of_pci_dma_range_parser_init(&parser, dev_node);
	if (err)
		return 0;

	dev_dbg(dev, "Parsing dma-ranges property...\n");
	for_each_of_pci_range(&parser, &range) {
		struct resource_entry *entry;

		/*
		 * If this is not a memory range, or we failed translation,
		 * or got a zero-sized region, skip it.
		 */
		if (((range.flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM) ||
		    range.cpu_addr == OF_BAD_ADDR || range.size == 0)
			continue;

		dev_info(dev, " %6s %#012llx..%#012llx -> %#012llx\n",
			 "IB MEM", range.cpu_addr,
			 range.cpu_addr + range.size - 1, range.pci_addr);

		err = of_pci_range_to_resource(&range, dev_node, &tmp_res);
		if (err)
			continue;

		res = devm_kmemdup(dev, &tmp_res, sizeof(tmp_res), GFP_KERNEL);
		if (!res) {
			err = -ENOMEM;
			goto failed;
		}

		/* Keep the inbound resource list sorted by start address */
		resource_list_for_each_entry(entry, ib_resources)
			if (entry->res->start > res->start)
				break;

		pci_add_resource_offset(&entry->node, res,
					res->start - range.pci_addr);
	}

	return 0;

failed:
	pci_free_resource_list(resources);
	return err;
}

#if IS_ENABLED(CONFIG_OF_IRQ)
/**
 * of_irq_parse_pci - Resolve the interrupt for a PCI device
 * @pdev: the device whose interrupt is to be resolved
 * @out_irq: structure of_phandle_args filled by this function
 *
 * This function resolves the PCI interrupt for a given PCI device. If a
 * device node exists for the pci_dev, it uses normal OF tree walking.
 * If not, it applies standard swizzling and walks up the PCI tree until a
 * device node is found, at which point it finishes resolving using the OF
 * tree walking.
 */
static int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
{
	struct device_node *dn, *ppnode;
	struct pci_dev *ppdev;
	__be32 laddr[3];
	u8 pin;
	int rc;

	/*
	 * Check if we have a device node. If yes, fall back to standard
	 * device tree parsing.
	 */
	dn = pci_device_to_OF_node(pdev);
	if (dn) {
		rc = of_irq_parse_one(dn, 0, out_irq);
		if (!rc)
			return rc;
	}

	/*
	 * We don't have a node, so build up an interrupt spec by hand.
	 * We assume #interrupt-cells is 1, which is standard for PCI;
	 * don't use this routine if your binding differs.
	 */
	rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
	if (rc != 0)
		goto err;
	/* No pin, exit with no error message. */
	if (pin == 0)
		return -ENODEV;

	/* Now walk up the PCI tree */
	for (;;) {
		/* Get the pci_dev of our parent */
		ppdev = pdev->bus->self;

		/* No parent device: it's a host bridge */
		if (ppdev == NULL) {
			ppnode = pci_bus_to_OF_node(pdev->bus);

			/* No node for the host bridge? Give up. */
			if (ppnode == NULL) {
				rc = -EINVAL;
				goto err;
			}
		} else {
			/* We found a P2P bridge, check if it has a node */
			ppnode = pci_device_to_OF_node(ppdev);
		}

		/*
		 * We have found a parent with a device node: hand over to
		 * the OF parsing code. The unit address is built from the
		 * Linux bus number and devfn; note that the Linux bus number
		 * may not match the firmware's numbering. Fortunately, in
		 * most cases interrupt-map-mask does not include the bus
		 * number in the matching, but be careful about that if the
		 * firmware does not create nodes for all PCI devices.
		 */
		if (ppnode)
			break;

		/*
		 * We can only get here if we hit a P2P bridge with no node;
		 * do the standard swizzling and try again.
		 */
		pin = pci_swizzle_interrupt_pin(pdev, pin);
		pdev = ppdev;
	}

	out_irq->np = ppnode;
	out_irq->args_count = 1;
	out_irq->args[0] = pin;
	laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8));
	laddr[1] = laddr[2] = cpu_to_be32(0);
	rc = of_irq_parse_raw(laddr, out_irq);
	if (rc)
		goto err;
	return 0;
err:
	if (rc == -ENOENT) {
		dev_warn(&pdev->dev,
			"%s: no interrupt-map found, INTx interrupts not available\n",
			__func__);
		pr_warn_once("%s: possibly some PCI slots don't have level triggered interrupts capability\n",
			__func__);
	} else {
		dev_err(&pdev->dev, "%s: failed with rc=%d\n", __func__, rc);
	}
	return rc;
}

/**
 * of_irq_parse_and_map_pci() - Decode a PCI IRQ from the device tree and map it
 * @dev: The PCI device needing an IRQ
 * @slot: PCI slot number; passed when used as a map_irq callback. Unused
 * @pin: PCI IRQ pin number; passed when used as a map_irq callback. Unused
 *
 * @slot and @pin are unused, but included in the signature so that this
 * function can be used directly as the map_irq callback for pci_assign_irq()
 * and the struct pci_host_bridge.map_irq pointer.
 */
int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
{
	struct of_phandle_args oirq;
	int ret;

	ret = of_irq_parse_pci(dev, &oirq);
	if (ret)
		return 0; /* Proper return code 0 == NO_IRQ */

	return irq_create_of_mapping(&oirq);
}
EXPORT_SYMBOL_GPL(of_irq_parse_and_map_pci);
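
/*
 * A minimal usage sketch (an assumption, not code from this file): DT-based
 * host bridge drivers typically plug this helper straight into their
 * struct pci_host_bridge before probing, e.g.
 *
 *	bridge->map_irq = of_irq_parse_and_map_pci;
 *	bridge->swizzle_irq = pci_common_swizzle;
 *
 * so that pci_assign_irq() resolves legacy INTx interrupts through the
 * device tree's interrupt-map.
 */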
#endif /* CONFIG_OF_IRQ */

int pci_parse_request_of_pci_ranges(struct device *dev,
				    struct list_head *resources,
				    struct list_head *ib_resources,
				    struct resource **bus_range)
{
	int err, res_valid = 0;
	resource_size_t iobase;
	struct resource_entry *win, *tmp;

	INIT_LIST_HEAD(resources);
	if (ib_resources)
		INIT_LIST_HEAD(ib_resources);
	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, resources,
						    ib_resources, &iobase);
	if (err)
		return err;

	err = devm_request_pci_bus_resources(dev, resources);
	if (err)
		goto out_release_res;

	resource_list_for_each_entry_safe(win, tmp, resources) {
		struct resource *res = win->res;

		switch (resource_type(res)) {
		case IORESOURCE_IO:
			err = devm_pci_remap_iospace(dev, res, iobase);
			if (err) {
				dev_warn(dev, "error %d: failed to map resource %pR\n",
					 err, res);
				resource_list_destroy_entry(win);
			}
			break;
		case IORESOURCE_MEM:
			res_valid |= !(res->flags & IORESOURCE_PREFETCH);
			break;
		case IORESOURCE_BUS:
			if (bus_range)
				*bus_range = res;
			break;
		}
	}

	if (res_valid)
		return 0;

	dev_err(dev, "non-prefetchable memory resource required\n");
	err = -EINVAL;

 out_release_res:
	pci_free_resource_list(resources);
	return err;
}
EXPORT_SYMBOL_GPL(pci_parse_request_of_pci_ranges);
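
/*
 * A minimal probe() sketch (an assumption, not part of this file) showing how
 * a DT-based host controller driver might consume this helper: gather the
 * bridge windows and inbound ranges, then hand off to the generic host probe
 * path. The driver name is hypothetical.
 *
 *	static int foo_pcie_probe(struct platform_device *pdev)
 *	{
 *		struct device *dev = &pdev->dev;
 *		struct pci_host_bridge *bridge;
 *		struct resource *bus_range = NULL;
 *		int ret;
 *
 *		bridge = devm_pci_alloc_host_bridge(dev, 0);
 *		if (!bridge)
 *			return -ENOMEM;
 *
 *		ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows,
 *						      &bridge->dma_ranges,
 *						      &bus_range);
 *		if (ret)
 *			return ret;
 *
 *		bridge->map_irq = of_irq_parse_and_map_pci;
 *		return pci_host_probe(bridge);
 *	}
 */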

#endif /* CONFIG_PCI */

/**
 * of_pci_get_max_link_speed - Find the maximum link speed of the given
 *			       device node.
 * @node: Device tree node with the maximum link speed information.
 *
 * Reads the "max-link-speed" property of the given node.
 *
 * Returns the property value if it is at most 4, or -EINVAL if the property
 * is missing or holds a larger value.
 */
int of_pci_get_max_link_speed(struct device_node *node)
{
	u32 max_link_speed;

	if (of_property_read_u32(node, "max-link-speed", &max_link_speed) ||
	    max_link_speed > 4)
		return -EINVAL;

	return max_link_speed;
}
EXPORT_SYMBOL_GPL(of_pci_get_max_link_speed);
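
/*
 * Illustrative binding snippet (an assumption, not from this file): a root
 * complex capped at PCIe Gen2 signalling rates would carry
 *
 *	max-link-speed = <2>;
 *
 * and of_pci_get_max_link_speed() would return 2; values above 4 or a
 * missing property yield -EINVAL, so callers fall back to the hardware
 * default.
 */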