1
2
3
4
5
6
7
8
9
10
11
12
13
14#include <linux/bitops.h>
15#include <linux/clk.h>
16#include <linux/clk-provider.h>
17#include <linux/delay.h>
18#include <linux/interrupt.h>
19#include <linux/irq.h>
20#include <linux/irqdomain.h>
21#include <linux/kernel.h>
22#include <linux/init.h>
23#include <linux/iopoll.h>
24#include <linux/msi.h>
25#include <linux/of_address.h>
26#include <linux/of_irq.h>
27#include <linux/of_pci.h>
28#include <linux/of_platform.h>
29#include <linux/pci.h>
30#include <linux/phy/phy.h>
31#include <linux/platform_device.h>
32#include <linux/pm_runtime.h>
33#include <linux/slab.h>
34
35#include "pcie-rcar.h"
36
/* Per-controller MSI bookkeeping. */
struct rcar_msi {
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);	/* allocated MSI vectors */
	struct irq_domain *domain;		/* top-level MSI IRQ domain */
	struct mutex map_lock;			/* protects the 'used' bitmap */
	spinlock_t mask_lock;			/* serializes PCIEMSIIER updates */
	int irq1;				/* first MSI interrupt line */
	int irq2;				/* second MSI interrupt line */
};
45
#ifdef CONFIG_ARM
/*
 * Static copies of the register base and bus clock for the ARM-only
 * asynchronous abort handler at the bottom of this file, which runs
 * in fault context with no device pointer available.
 */
static void __iomem *pcie_base;
/*
 * Checked by the abort handler so it never touches controller
 * registers while the bus clock is disabled.
 */
static struct clk *pcie_bus_clk;
#endif
60
61
/* Host (root complex) instance wrapping the common rcar_pcie core. */
struct rcar_pcie_host {
	struct rcar_pcie pcie;		/* shared controller state (pcie-rcar.h) */
	struct phy *phy;		/* optional generic PHY (Gen3 SoCs) */
	struct clk *bus_clk;		/* "pcie_bus" clock */
	struct rcar_msi msi;		/* MSI state */
	/* SoC-specific PHY init, selected via of_device_id match data */
	int (*phy_init_fn)(struct rcar_pcie_host *host);
};
69
70static struct rcar_pcie_host *msi_to_host(struct rcar_msi *msi)
71{
72 return container_of(msi, struct rcar_pcie_host, msi);
73}
74
75static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
76{
77 unsigned int shift = BITS_PER_BYTE * (where & 3);
78 u32 val = rcar_pci_read_reg(pcie, where & ~3);
79
80 return val >> shift;
81}
82
83
/*
 * Access configuration space of either the root port itself or a device
 * behind it, using the controller's single config read/write window.
 * Returns a PCIBIOS_* status code.
 */
static int rcar_pcie_config_access(struct rcar_pcie_host *host,
		unsigned char access_type, struct pci_bus *bus,
		unsigned int devfn, int where, u32 *data)
{
	struct rcar_pcie *pcie = &host->pcie;
	unsigned int dev, func, reg, index;

	dev = PCI_SLOT(devfn);
	func = PCI_FUNC(devfn);
	reg = where & ~3;
	index = reg / 4;

	/*
	 * The root port's own config registers are not reachable with
	 * type 0/type 1 transactions; they are accessed directly through
	 * the PCICONF() window instead.  Only device 0 exists on the
	 * root bus.
	 */
	if (pci_is_root_bus(bus)) {
		if (dev != 0)
			return PCIBIOS_DEVICE_NOT_FOUND;

		if (access_type == RCAR_PCI_ACCESS_READ)
			*data = rcar_pci_read_reg(pcie, PCICONF(index));
		else
			rcar_pci_write_reg(pcie, *data, PCICONF(index));

		return PCIBIOS_SUCCESSFUL;
	}

	/* Clear any stale errors before starting the transaction */
	rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);

	/* Set the target bus/device/function/register address */
	rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) |
		PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR);

	/*
	 * Issue the access: type 0 for the bus immediately below the
	 * root port, type 1 for anything further downstream.
	 */
	if (pci_is_root_bus(bus->parent))
		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
	else
		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);

	/* Check for an Unsupported Request response */
	if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Check for master and target aborts */
	if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) &
		(PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (access_type == RCAR_PCI_ACCESS_READ)
		*data = rcar_pci_read_reg(pcie, PCIECDR);
	else
		rcar_pci_write_reg(pcie, *data, PCIECDR);

	/* Disable the configuration access window again */
	rcar_pci_write_reg(pcie, 0, PCIECCTLR);

	return PCIBIOS_SUCCESSFUL;
}
155
156static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
157 int where, int size, u32 *val)
158{
159 struct rcar_pcie_host *host = bus->sysdata;
160 int ret;
161
162 ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
163 bus, devfn, where, val);
164 if (ret != PCIBIOS_SUCCESSFUL) {
165 *val = 0xffffffff;
166 return ret;
167 }
168
169 if (size == 1)
170 *val = (*val >> (BITS_PER_BYTE * (where & 3))) & 0xff;
171 else if (size == 2)
172 *val = (*val >> (BITS_PER_BYTE * (where & 2))) & 0xffff;
173
174 dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
175 bus->number, devfn, where, size, *val);
176
177 return ret;
178}
179
180
181static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
182 int where, int size, u32 val)
183{
184 struct rcar_pcie_host *host = bus->sysdata;
185 unsigned int shift;
186 u32 data;
187 int ret;
188
189 ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
190 bus, devfn, where, &data);
191 if (ret != PCIBIOS_SUCCESSFUL)
192 return ret;
193
194 dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n",
195 bus->number, devfn, where, size, val);
196
197 if (size == 1) {
198 shift = BITS_PER_BYTE * (where & 3);
199 data &= ~(0xff << shift);
200 data |= ((val & 0xff) << shift);
201 } else if (size == 2) {
202 shift = BITS_PER_BYTE * (where & 2);
203 data &= ~(0xffff << shift);
204 data |= ((val & 0xffff) << shift);
205 } else
206 data = val;
207
208 ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_WRITE,
209 bus, devfn, where, &data);
210
211 return ret;
212}
213
/* Config-space accessors handed to the PCI core via the host bridge. */
static struct pci_ops rcar_pcie_ops = {
	.read	= rcar_pcie_read_conf,
	.write	= rcar_pcie_write_conf,
};
218
/*
 * Retrain the link to 5.0 GT/s when the MAC reports that speed as
 * supported but the link currently runs slower.  Best-effort: failures
 * and timeouts are logged, not propagated.
 */
static void rcar_pcie_force_speedup(struct rcar_pcie *pcie)
{
	struct device *dev = pcie->dev;
	unsigned int timeout = 1000;
	u32 macsr;

	/* Nothing to do unless the hardware supports 5.0 GT/s */
	if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS)
		return;

	if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) {
		dev_err(dev, "Speed change already in progress\n");
		return;
	}

	macsr = rcar_pci_read_reg(pcie, MACSR);
	if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS)
		goto done;

	/* Set target link speed to 5.0 GT/s */
	rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS,
		   PCI_EXP_LNKSTA_CLS_5_0GB);

	/* Set speed change reason as intentional factor */
	rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0);

	/* Clear any stale SPCHGFIN, SPCHGSUC, SPCHGFAIL status bits */
	if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL))
		rcar_pci_write_reg(pcie, macsr, MACSR);

	/* Start link speed change */
	rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE);

	while (timeout--) {
		macsr = rcar_pci_read_reg(pcie, MACSR);
		if (macsr & SPCHGFIN) {
			/* Clear the interrupt bits */
			rcar_pci_write_reg(pcie, macsr, MACSR);

			if (macsr & SPCHGFAIL)
				dev_err(dev, "Speed change failed\n");

			goto done;
		}

		msleep(1);
	}

	dev_err(dev, "Speed change timed out\n");

done:
	dev_info(dev, "Current link speed is %s GT/s\n",
		 (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5");
}
272
273static void rcar_pcie_hw_enable(struct rcar_pcie_host *host)
274{
275 struct rcar_pcie *pcie = &host->pcie;
276 struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
277 struct resource_entry *win;
278 LIST_HEAD(res);
279 int i = 0;
280
281
282 rcar_pcie_force_speedup(pcie);
283
284
285 resource_list_for_each_entry(win, &bridge->windows) {
286 struct resource *res = win->res;
287
288 if (!res->flags)
289 continue;
290
291 switch (resource_type(res)) {
292 case IORESOURCE_IO:
293 case IORESOURCE_MEM:
294 rcar_pcie_set_outbound(pcie, i, win);
295 i++;
296 break;
297 }
298 }
299}
300
301static int rcar_pcie_enable(struct rcar_pcie_host *host)
302{
303 struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
304
305 rcar_pcie_hw_enable(host);
306
307 pci_add_flags(PCI_REASSIGN_ALL_BUS);
308
309 bridge->sysdata = host;
310 bridge->ops = &rcar_pcie_ops;
311
312 return pci_host_probe(bridge);
313}
314
315static int phy_wait_for_ack(struct rcar_pcie *pcie)
316{
317 struct device *dev = pcie->dev;
318 unsigned int timeout = 100;
319
320 while (timeout--) {
321 if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
322 return 0;
323
324 udelay(100);
325 }
326
327 dev_err(dev, "Access to PCIe phy timed out\n");
328
329 return -ETIMEDOUT;
330}
331
/*
 * Write @data to H1 PHY register @addr for the given @rate and @lane
 * through the indirect access registers.  The write is followed by a
 * clearing write of both registers, each side waiting for the PHY ack.
 */
static void phy_write_reg(struct rcar_pcie *pcie,
			  unsigned int rate, u32 addr,
			  unsigned int lane, u32 data)
{
	u32 phyaddr;

	phyaddr = WRITE_CMD |
		((rate & 1) << RATE_POS) |
		((lane & 0xf) << LANE_POS) |
		((addr & 0xff) << ADR_POS);

	/* Set write data and issue the command */
	rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
	rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);

	/* Ignore errors; the call itself logs a timeout */
	phy_wait_for_ack(pcie);

	/* Clear the data and command registers */
	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR);

	/* Wait for the clearing write to be acknowledged as well */
	phy_wait_for_ack(pcie);
}
357
/*
 * One-time hardware initialization of the root port: reset, root
 * complex mode, root-port config-space identity, MSI transmit setup,
 * and link training.  Returns 0 on success or a negative errno if the
 * PHY never becomes ready or the data link never comes up.
 */
static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
{
	int err;

	/* Begin initialization: put the controller in reset */
	rcar_pci_write_reg(pcie, 0, PCIETCTLR);

	/* Select root complex mode */
	rcar_pci_write_reg(pcie, 1, PCIEMSR);

	err = rcar_pcie_wait_for_phyrdy(pcie);
	if (err)
		return err;

	/* Set the device class of the root port to PCI-PCI bridge */
	rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);

	/*
	 * Set Secondary/Subordinate Bus Numbers to non-zero values so
	 * the bridge is not detected as broken, even though they are
	 * reassigned during enumeration.
	 */
	rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1);
	rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);

	/* Initialize default capabilities: PCIe cap, root port, bridge */
	rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
		PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
	rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
		PCI_HEADER_TYPE_BRIDGE);

	/* Enable data link layer active state reporting */
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
		PCI_EXP_LNKCAP_DLLLARC);

	/* Write out the physical slot number = 0 */
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);

	/* Set the completion timer timeout (value 50 in TLCTLR+1[5:0]) */
	rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);

	/* Terminate list of capabilities (Next Capability Offset = 0) */
	rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);

	/* Enable MSI transmission */
	if (IS_ENABLED(CONFIG_PCI_MSI))
		rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);

	rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);

	/* Finish initialization - establish a PCI Express link */
	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);

	/* This will time out if we don't have a link */
	err = rcar_pcie_wait_for_dl(pcie);
	if (err)
		return err;

	/* Enable INTx interrupts */
	rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);

	/* Ensure all register writes are posted before proceeding */
	wmb();

	return 0;
}
427
/*
 * PHY initialization for R-Car H1 (r8a7779) via the indirect register
 * interface.  The register/value pairs are opaque vendor settings —
 * presumably from the hardware manual; do not change without it.
 * Rate 0/1 selects the per-speed register bank, lane mask is 0x1.
 */
static int rcar_pcie_phy_init_h1(struct rcar_pcie_host *host)
{
	struct rcar_pcie *pcie = &host->pcie;

	/* Per-rate analog/link tuning values */
	phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
	phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
	phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188);
	phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188);
	phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014);
	phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014);
	phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0);
	phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB);
	phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062);
	phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000);
	phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000);
	phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806);

	phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5);
	phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
	phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);

	return 0;
}
452
/*
 * PHY initialization for R-Car Gen2 SoCs.  Each PHY register write is
 * an addr/data pair latched by toggling GEN2_PCIEPHYCTRL (0x1 then
 * 0x6).  The values are opaque vendor settings — presumably from the
 * hardware manual; do not change without it.
 */
static int rcar_pcie_phy_init_gen2(struct rcar_pcie_host *host)
{
	struct rcar_pcie *pcie = &host->pcie;

	/* Write PHY register 0x30 (address 0x000f0030) */
	rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR);
	rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA);
	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);

	/* Write PHY register 0x54 (address 0x000f0054) */
	rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR);
	rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA);
	rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL);
	rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL);

	return 0;
}
474
475static int rcar_pcie_phy_init_gen3(struct rcar_pcie_host *host)
476{
477 int err;
478
479 err = phy_init(host->phy);
480 if (err)
481 return err;
482
483 err = phy_power_on(host->phy);
484 if (err)
485 phy_exit(host->phy);
486
487 return err;
488}
489
/*
 * MSI interrupt handler.  Reads the pending-MSI flag register and
 * dispatches each pending vector into the MSI IRQ domain.  PCIEMSIFR
 * is re-read after every dispatch because new MSIs may arrive while
 * earlier ones are being handled.
 */
static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
{
	struct rcar_pcie_host *host = data;
	struct rcar_pcie *pcie = &host->pcie;
	struct rcar_msi *msi = &host->msi;
	struct device *dev = pcie->dev;
	unsigned long reg;

	reg = rcar_pci_read_reg(pcie, PCIEMSIFR);

	/* MSI & INTx share an interrupt - we only handle MSI here */
	if (!reg)
		return IRQ_NONE;

	while (reg) {
		unsigned int index = find_first_bit(&reg, 32);
		int ret;

		ret = generic_handle_domain_irq(msi->domain->parent, index);
		if (ret) {
			/* Unknown MSI, just clear it so it doesn't re-fire */
			dev_dbg(dev, "unexpected MSI\n");
			rcar_pci_write_reg(pcie, BIT(index), PCIEMSIFR);
		}

		/* see if there's any more pending in this vector */
		reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
	}

	return IRQ_HANDLED;
}
521
/* Top-level MSI chip: forward the ack to the parent (bottom) chip. */
static void rcar_msi_top_irq_ack(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}
526
/* Mask at both the PCI device (MSI capability) and the controller. */
static void rcar_msi_top_irq_mask(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}
532
/* Unmask at both the PCI device (MSI capability) and the controller. */
static void rcar_msi_top_irq_unmask(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}
538
/* irq_chip for the PCI-MSI (top) domain layered over the nexus domain. */
static struct irq_chip rcar_msi_top_chip = {
	.name		= "PCIe MSI",
	.irq_ack	= rcar_msi_top_irq_ack,
	.irq_mask	= rcar_msi_top_irq_mask,
	.irq_unmask	= rcar_msi_top_irq_unmask,
};
545
/* Acknowledge a vector by clearing its bit in the MSI flag register. */
static void rcar_msi_irq_ack(struct irq_data *d)
{
	struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;

	/* clear the interrupt (write-one-to-clear) */
	rcar_pci_write_reg(pcie, BIT(d->hwirq), PCIEMSIFR);
}
554
555static void rcar_msi_irq_mask(struct irq_data *d)
556{
557 struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
558 struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
559 unsigned long flags;
560 u32 value;
561
562 spin_lock_irqsave(&msi->mask_lock, flags);
563 value = rcar_pci_read_reg(pcie, PCIEMSIIER);
564 value &= ~BIT(d->hwirq);
565 rcar_pci_write_reg(pcie, value, PCIEMSIIER);
566 spin_unlock_irqrestore(&msi->mask_lock, flags);
567}
568
569static void rcar_msi_irq_unmask(struct irq_data *d)
570{
571 struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
572 struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
573 unsigned long flags;
574 u32 value;
575
576 spin_lock_irqsave(&msi->mask_lock, flags);
577 value = rcar_pci_read_reg(pcie, PCIEMSIIER);
578 value |= BIT(d->hwirq);
579 rcar_pci_write_reg(pcie, value, PCIEMSIIER);
580 spin_unlock_irqrestore(&msi->mask_lock, flags);
581}
582
/* MSI affinity cannot be steered on this controller; always refuse. */
static int rcar_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
{
	return -EINVAL;
}
587
/*
 * Compose the MSI message: the doorbell address comes from the
 * PCIEMSIALR/PCIEMSIAUR registers (with the MSIFE enable bit masked
 * off the low word) and the data is the hardware vector number.
 */
static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct rcar_msi *msi = irq_data_get_irq_chip_data(data);
	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;

	msg->address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
	msg->address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
	msg->data = data->hwirq;
}
597
/* irq_chip for the controller-level (bottom/nexus) MSI domain. */
static struct irq_chip rcar_msi_bottom_chip = {
	.name			= "Rcar MSI",
	.irq_ack		= rcar_msi_irq_ack,
	.irq_mask		= rcar_msi_irq_mask,
	.irq_unmask		= rcar_msi_irq_unmask,
	.irq_set_affinity	= rcar_msi_set_affinity,
	.irq_compose_msi_msg	= rcar_compose_msi_msg,
};
606
607static int rcar_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
608 unsigned int nr_irqs, void *args)
609{
610 struct rcar_msi *msi = domain->host_data;
611 unsigned int i;
612 int hwirq;
613
614 mutex_lock(&msi->map_lock);
615
616 hwirq = bitmap_find_free_region(msi->used, INT_PCI_MSI_NR, order_base_2(nr_irqs));
617
618 mutex_unlock(&msi->map_lock);
619
620 if (hwirq < 0)
621 return -ENOSPC;
622
623 for (i = 0; i < nr_irqs; i++)
624 irq_domain_set_info(domain, virq + i, hwirq + i,
625 &rcar_msi_bottom_chip, domain->host_data,
626 handle_edge_irq, NULL, NULL);
627
628 return 0;
629}
630
631static void rcar_msi_domain_free(struct irq_domain *domain, unsigned int virq,
632 unsigned int nr_irqs)
633{
634 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
635 struct rcar_msi *msi = domain->host_data;
636
637 mutex_lock(&msi->map_lock);
638
639 bitmap_release_region(msi->used, d->hwirq, order_base_2(nr_irqs));
640
641 mutex_unlock(&msi->map_lock);
642}
643
/* Ops for the bottom-level (nexus) MSI IRQ domain. */
static const struct irq_domain_ops rcar_msi_domain_ops = {
	.alloc	= rcar_msi_domain_alloc,
	.free	= rcar_msi_domain_free,
};
648
/* Top-level PCI-MSI domain info; supports multi-vector MSI. */
static struct msi_domain_info rcar_msi_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &rcar_msi_top_chip,
};
654
/*
 * Create the two-level MSI IRQ domain hierarchy: a linear nexus domain
 * for the controller's vectors, with a PCI-MSI domain stacked on top.
 * On failure the partially created parent domain is torn down.
 */
static int rcar_allocate_domains(struct rcar_msi *msi)
{
	struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
	struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
	struct irq_domain *parent;

	parent = irq_domain_create_linear(fwnode, INT_PCI_MSI_NR,
					  &rcar_msi_domain_ops, msi);
	if (!parent) {
		dev_err(pcie->dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}
	irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);

	msi->domain = pci_msi_create_irq_domain(fwnode, &rcar_msi_info, parent);
	if (!msi->domain) {
		dev_err(pcie->dev, "failed to create MSI domain\n");
		irq_domain_remove(parent);
		return -ENOMEM;
	}

	return 0;
}
678
679static void rcar_free_domains(struct rcar_msi *msi)
680{
681 struct irq_domain *parent = msi->domain->parent;
682
683 irq_domain_remove(msi->domain);
684 irq_domain_remove(parent);
685}
686
/*
 * Set up MSI support: create the IRQ domains, hook both controller MSI
 * interrupt lines, and program the MSI doorbell address.  Returns 0 on
 * success; on failure the domains are freed again.
 */
static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
{
	struct rcar_pcie *pcie = &host->pcie;
	struct device *dev = pcie->dev;
	struct rcar_msi *msi = &host->msi;
	struct resource res;
	int err;

	mutex_init(&msi->map_lock);
	spin_lock_init(&msi->mask_lock);

	/* The controller's own register window doubles as the doorbell
	 * target address programmed into PCIEMSIALR/AUR below. */
	err = of_address_to_resource(dev->of_node, 0, &res);
	if (err)
		return err;

	err = rcar_allocate_domains(msi);
	if (err)
		return err;

	/* Two interrupt lines feed the same MSI handler */
	err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq,
			       IRQF_SHARED | IRQF_NO_THREAD,
			       rcar_msi_bottom_chip.name, host);
	if (err < 0) {
		dev_err(dev, "failed to request IRQ: %d\n", err);
		goto err;
	}

	err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq,
			       IRQF_SHARED | IRQF_NO_THREAD,
			       rcar_msi_bottom_chip.name, host);
	if (err < 0) {
		dev_err(dev, "failed to request IRQ: %d\n", err);
		goto err;
	}

	/* disable all MSIs until they are explicitly unmasked */
	rcar_pci_write_reg(pcie, 0, PCIEMSIIER);

	/*
	 * Program the MSI capture address (the register window base)
	 * and set the MSIFE enable bit in the low word.
	 */
	rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR);
	rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR);

	return 0;

err:
	rcar_free_domains(msi);
	return err;
}
739
/* Undo rcar_pcie_enable_msi(): quiesce the hardware, free the domains. */
static void rcar_pcie_teardown_msi(struct rcar_pcie_host *host)
{
	struct rcar_pcie *pcie = &host->pcie;

	/* Disable all MSI interrupts */
	rcar_pci_write_reg(pcie, 0, PCIEMSIIER);

	/* Disable address decoding of the MSI interrupt, MSIFE */
	rcar_pci_write_reg(pcie, 0, PCIEMSIALR);

	rcar_free_domains(&host->msi);
}
752
/*
 * Acquire all platform resources: optional PHY, register window, bus
 * clock, and the two MSI interrupt lines.  On CONFIG_ARM the register
 * base and clock are also stashed in file-scope statics for the abort
 * handler.  IRQ mappings are disposed on the error path; earlier devm
 * resources are released by the driver core.
 */
static int rcar_pcie_get_resources(struct rcar_pcie_host *host)
{
	struct rcar_pcie *pcie = &host->pcie;
	struct device *dev = pcie->dev;
	struct resource res;
	int err, i;

	/* Optional PHY: returns a NULL-like stub when absent */
	host->phy = devm_phy_optional_get(dev, "pcie");
	if (IS_ERR(host->phy))
		return PTR_ERR(host->phy);

	err = of_address_to_resource(dev->of_node, 0, &res);
	if (err)
		return err;

	pcie->base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	host->bus_clk = devm_clk_get(dev, "pcie_bus");
	if (IS_ERR(host->bus_clk)) {
		dev_err(dev, "cannot get pcie bus clock\n");
		return PTR_ERR(host->bus_clk);
	}

	i = irq_of_parse_and_map(dev->of_node, 0);
	if (!i) {
		dev_err(dev, "cannot get platform resources for msi interrupt\n");
		err = -ENOENT;
		goto err_irq1;
	}
	host->msi.irq1 = i;

	i = irq_of_parse_and_map(dev->of_node, 1);
	if (!i) {
		dev_err(dev, "cannot get platform resources for msi interrupt\n");
		err = -ENOENT;
		goto err_irq2;
	}
	host->msi.irq2 = i;

#ifdef CONFIG_ARM
	/* Cache static copies for the ARM abort handler */
	pcie_base = pcie->base;
	pcie_bus_clk = host->bus_clk;
#endif

	return 0;

err_irq2:
	irq_dispose_mapping(host->msi.irq1);
err_irq1:
	return err;
}
807
/*
 * Program inbound (PCI -> CPU) address translation windows for one
 * dma-range.  A range larger than the alignment of its start address
 * must be split into multiple windows, each sized by the largest
 * power-of-two chunk the current start address permits, and each
 * window capped at 4 GiB.  @index is advanced by two per window
 * (windows occupy register pairs).
 */
static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
				    struct resource_entry *entry,
				    int *index)
{
	u64 restype = entry->res->flags;
	u64 cpu_addr = entry->res->start;
	u64 cpu_end = entry->res->end;
	u64 pci_addr = entry->res->start - entry->offset;
	u32 flags = LAM_64BIT | LAR_ENABLE;
	u64 mask;
	u64 size = resource_size(entry->res);
	int idx = *index;

	if (restype & IORESOURCE_PREFETCH)
		flags |= LAM_PREFETCH;

	while (cpu_addr < cpu_end) {
		if (idx >= MAX_NR_INBOUND_MAPS - 1) {
			dev_err(pcie->dev, "Failed to map inbound regions!\n");
			return -EINVAL;
		}

		/*
		 * If the size of the range is larger than the alignment
		 * of the start address, we have to use multiple entries
		 * to perform the mapping.
		 */
		if (cpu_addr > 0) {
			unsigned long nr_zeros = __ffs64(cpu_addr);
			u64 alignment = 1ULL << nr_zeros;

			size = min(size, alignment);
		}

		/* Hardware window size is capped at 4 GiB */
		size = min(size, 1ULL << 32);

		/* Window mask: power-of-two size minus one, low nibble clear */
		mask = roundup_pow_of_two(size) - 1;
		mask &= ~0xf;

		rcar_pcie_set_inbound(pcie, cpu_addr, pci_addr,
				      lower_32_bits(mask) | flags, idx, true);

		pci_addr += size;
		cpu_addr += size;
		idx += 2;
	}
	*index = idx;

	return 0;
}
857
858static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie_host *host)
859{
860 struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host);
861 struct resource_entry *entry;
862 int index = 0, err = 0;
863
864 resource_list_for_each_entry(entry, &bridge->dma_ranges) {
865 err = rcar_pcie_inbound_ranges(&host->pcie, entry, &index);
866 if (err)
867 break;
868 }
869
870 return err;
871}
872
/* Match table: per-SoC PHY init routine is carried as match data. */
static const struct of_device_id rcar_pcie_of_match[] = {
	{ .compatible = "renesas,pcie-r8a7779",
	  .data = rcar_pcie_phy_init_h1 },
	{ .compatible = "renesas,pcie-r8a7790",
	  .data = rcar_pcie_phy_init_gen2 },
	{ .compatible = "renesas,pcie-r8a7791",
	  .data = rcar_pcie_phy_init_gen2 },
	{ .compatible = "renesas,pcie-rcar-gen2",
	  .data = rcar_pcie_phy_init_gen2 },
	{ .compatible = "renesas,pcie-r8a7795",
	  .data = rcar_pcie_phy_init_gen3 },
	{ .compatible = "renesas,pcie-rcar-gen3",
	  .data = rcar_pcie_phy_init_gen3 },
	{},
};
888
/*
 * Probe: allocate the host bridge, power up the device, acquire
 * resources, initialize PHY and controller hardware, optionally enable
 * MSI, and register the bridge.  The error labels unwind in strict
 * reverse order of acquisition.
 */
static int rcar_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rcar_pcie_host *host;
	struct rcar_pcie *pcie;
	u32 data;
	int err;
	struct pci_host_bridge *bridge;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*host));
	if (!bridge)
		return -ENOMEM;

	host = pci_host_bridge_priv(bridge);
	pcie = &host->pcie;
	pcie->dev = dev;
	platform_set_drvdata(pdev, host);

	pm_runtime_enable(pcie->dev);
	err = pm_runtime_get_sync(pcie->dev);
	if (err < 0) {
		dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
		/* get_sync bumps the usage count even on failure */
		goto err_pm_put;
	}

	err = rcar_pcie_get_resources(host);
	if (err < 0) {
		dev_err(dev, "failed to request resources: %d\n", err);
		goto err_pm_put;
	}

	err = clk_prepare_enable(host->bus_clk);
	if (err) {
		dev_err(dev, "failed to enable bus clock: %d\n", err);
		goto err_unmap_msi_irqs;
	}

	err = rcar_pcie_parse_map_dma_ranges(host);
	if (err)
		goto err_clk_disable;

	/* SoC-specific PHY init selected via the of_match data */
	host->phy_init_fn = of_device_get_match_data(dev);
	err = host->phy_init_fn(host);
	if (err) {
		dev_err(dev, "failed to init PCIe PHY\n");
		goto err_clk_disable;
	}

	/* Failure to get a link might just be that no cards are inserted */
	if (rcar_pcie_hw_init(pcie)) {
		dev_info(dev, "PCIe link down\n");
		err = -ENODEV;
		goto err_phy_shutdown;
	}

	data = rcar_pci_read_reg(pcie, MACSR);
	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = rcar_pcie_enable_msi(host);
		if (err < 0) {
			dev_err(dev,
				"failed to enable MSI support: %d\n",
				err);
			goto err_phy_shutdown;
		}
	}

	err = rcar_pcie_enable(host);
	if (err)
		goto err_msi_teardown;

	return 0;

err_msi_teardown:
	if (IS_ENABLED(CONFIG_PCI_MSI))
		rcar_pcie_teardown_msi(host);

err_phy_shutdown:
	/* host->phy may be a no-op stub when the PHY is optional/absent */
	if (host->phy) {
		phy_power_off(host->phy);
		phy_exit(host->phy);
	}

err_clk_disable:
	clk_disable_unprepare(host->bus_clk);

err_unmap_msi_irqs:
	irq_dispose_mapping(host->msi.irq2);
	irq_dispose_mapping(host->msi.irq1);

err_pm_put:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return err;
}
986
/*
 * System resume: re-program inbound windows, re-run PHY/controller
 * init, restore the MSI doorbell address and enable mask, and re-enable
 * the outbound windows.  Always returns 0 — resume failures are treated
 * as "link down", not as a suspend/resume error.
 */
static int __maybe_unused rcar_pcie_resume(struct device *dev)
{
	struct rcar_pcie_host *host = dev_get_drvdata(dev);
	struct rcar_pcie *pcie = &host->pcie;
	unsigned int data;
	int err;

	err = rcar_pcie_parse_map_dma_ranges(host);
	if (err)
		return 0;

	/* Failure to get a link might just be that no cards are inserted */
	err = host->phy_init_fn(host);
	if (err) {
		dev_info(dev, "PCIe link down\n");
		return 0;
	}

	data = rcar_pci_read_reg(pcie, MACSR);
	dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);

	/* Enable MSI */
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		struct resource res;
		u32 val;

		/*
		 * NOTE(review): the of_address_to_resource() return value
		 * is ignored here; probe already validated this lookup,
		 * but confirm 'res' cannot be used uninitialized.
		 */
		of_address_to_resource(dev->of_node, 0, &res);
		rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR);
		rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR);

		/* Re-enable exactly the vectors that were in use */
		bitmap_to_arr32(&val, host->msi.used, INT_PCI_MSI_NR);
		rcar_pci_write_reg(pcie, val, PCIEMSIIER);
	}

	rcar_pcie_hw_enable(host);

	return 0;
}
1025
/*
 * Early (noirq) resume: if the controller lost power (PMSR reads zero
 * or the data link is down), re-run minimal link bring-up so config
 * accesses during later resume phases don't fault.
 */
static int rcar_pcie_resume_noirq(struct device *dev)
{
	struct rcar_pcie_host *host = dev_get_drvdata(dev);
	struct rcar_pcie *pcie = &host->pcie;

	/* Nothing to do if state survived and the link is still up */
	if (rcar_pci_read_reg(pcie, PMSR) &&
	    !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN))
		return 0;

	/* Re-establish the PCIe link */
	rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
	return rcar_pcie_wait_for_dl(pcie);
}
1040
/* PM callbacks: no suspend handler is needed, only the resume paths. */
static const struct dev_pm_ops rcar_pcie_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(NULL, rcar_pcie_resume)
	.resume_noirq = rcar_pcie_resume_noirq,
};
1045
/* Platform driver; unbinding is suppressed (no remove path exists). */
static struct platform_driver rcar_pcie_driver = {
	.driver = {
		.name = "rcar-pcie",
		.of_match_table = rcar_pcie_of_match,
		.pm = &rcar_pcie_pm_ops,
		.suppress_bind_attrs = true,
	},
	.probe = rcar_pcie_probe,
};
1055
1056#ifdef CONFIG_ARM
/* Serializes PMSR/PMCTLR access from the fault handler below. */
static DEFINE_SPINLOCK(pmsr_lock);
/*
 * ARM asynchronous external abort handler.  Returns 0 when the abort
 * was handled here (fault recovered), nonzero to let the kernel's
 * default abort handling proceed.
 */
static int rcar_pcie_aarch32_abort_handler(unsigned long addr,
		unsigned int fsr, struct pt_regs *regs)
{
	unsigned long flags;
	u32 pmsr, val;
	int ret = 0;

	spin_lock_irqsave(&pmsr_lock, flags);

	/* Not our fault if the controller isn't mapped or clocked */
	if (!pcie_base || !__clk_is_enabled(pcie_bus_clk)) {
		ret = 1;
		goto unlock_exit;
	}

	pmsr = readl(pcie_base + PMSR);

	/*
	 * An L1 entry request was received while not in the L1 power
	 * state: issue L1IATN to transition into L1 and wait for the
	 * completion flag (L1FAEG), then clear both status bits.
	 * (Hedge: exact power-state semantics per the hardware manual.)
	 */
	if ((pmsr & PMEL1RX) && ((pmsr & PMSTATE) != PMSTATE_L1)) {
		writel(L1IATN, pcie_base + PMCTLR);
		ret = readl_poll_timeout_atomic(pcie_base + PMSR, val,
						val & L1FAEG, 10, 1000);
		WARN(ret, "Timeout waiting for L1 link state, ret=%d\n", ret);
		writel(L1FAEG | PMEL1RX, pcie_base + PMSR);
	}

unlock_exit:
	spin_unlock_irqrestore(&pmsr_lock, flags);
	return ret;
}
1092
/* SoCs whose presence requires installing the abort hook at init. */
static const struct of_device_id rcar_pcie_abort_handler_of_match[] __initconst = {
	{ .compatible = "renesas,pcie-r8a7779" },
	{ .compatible = "renesas,pcie-r8a7790" },
	{ .compatible = "renesas,pcie-r8a7791" },
	{ .compatible = "renesas,pcie-rcar-gen2" },
	{},
};
1100
/*
 * ARM-only init: hook the external-abort fault code (which differs
 * between LPAE and non-LPAE builds) before registering the driver, so
 * aborts triggered by controller accesses can be recovered.
 */
static int __init rcar_pcie_init(void)
{
	if (of_find_matching_node(NULL, rcar_pcie_abort_handler_of_match)) {
#ifdef CONFIG_ARM_LPAE
		hook_fault_code(17, rcar_pcie_aarch32_abort_handler, SIGBUS, 0,
				"asynchronous external abort");
#else
		hook_fault_code(22, rcar_pcie_aarch32_abort_handler, SIGBUS, 0,
				"imprecise external abort");
#endif
	}

	return platform_driver_register(&rcar_pcie_driver);
}
device_initcall(rcar_pcie_init);
1116#else
1117builtin_platform_driver(rcar_pcie_driver);
1118#endif
1119