1
2
3
4
5
6
7
8
9
10
11
12
13#include <linux/delay.h>
14#include <linux/err.h>
15#include <linux/interrupt.h>
16#include <linux/irq.h>
17#include <linux/irqdomain.h>
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/of_device.h>
21#include <linux/of_gpio.h>
22#include <linux/of_pci.h>
23#include <linux/pci.h>
24#include <linux/phy/phy.h>
25#include <linux/platform_device.h>
26#include <linux/pm_runtime.h>
27#include <linux/resource.h>
28#include <linux/types.h>
29#include <linux/mfd/syscon.h>
30#include <linux/regmap.h>
31
32#include "pcie-designware.h"
33
34
35
/*
 * PCIe controller wrapper (ti_conf) register offsets and bit definitions.
 * The MAIN status/enable pair covers error and power-management events;
 * the MSI pair covers MSI and legacy INTx events.
 */
#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN 0x0024
#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN 0x0028
#define ERR_SYS BIT(0)
#define ERR_FATAL BIT(1)
#define ERR_NONFATAL BIT(2)
#define ERR_COR BIT(3)
#define ERR_AXI BIT(4)
#define ERR_ECRC BIT(5)
#define PME_TURN_OFF BIT(8)
#define PME_TO_ACK BIT(9)
#define PM_PME BIT(10)
#define LINK_REQ_RST BIT(11)
#define LINK_UP_EVT BIT(12)
#define CFG_BME_EVT BIT(13)
#define CFG_MSE_EVT BIT(14)
/* All MAIN events this driver acknowledges and enables */
#define INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
 ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
 LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)

#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI 0x0034
#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI 0x0038
#define INTA BIT(0)
#define INTB BIT(1)
#define INTC BIT(2)
#define INTD BIT(3)
#define MSI BIT(4)
#define LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)

/* Selects RC vs. EP operation of the controller */
#define PCIECTRL_TI_CONF_DEVICE_TYPE 0x0100
#define DEVICE_TYPE_EP 0x0
#define DEVICE_TYPE_LEG_EP 0x1
#define DEVICE_TYPE_RC 0x4

#define PCIECTRL_DRA7XX_CONF_DEVICE_CMD 0x0104
#define LTSSM_EN 0x1 /* setting this starts link training */

#define PCIECTRL_DRA7XX_CONF_PHY_CS 0x010C
#define LINK_UP BIT(16) /* PHY control/status: link is up */
/* Mask applied to CPU addresses to form PCIe bus addresses */
#define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFF

/* Fixed offset of the PCI Express Capability in config space */
#define EXP_CAP_ID_OFFSET 0x70

/* EP mode: assert/deassert a legacy INTx toward the RC */
#define PCIECTRL_TI_CONF_INTX_ASSERT 0x0124
#define PCIECTRL_TI_CONF_INTX_DEASSERT 0x0128

/* EP mode: transmit an MSI; vector in bits above MSI_VECTOR_SHIFT */
#define PCIECTRL_TI_CONF_MSI_XMT 0x012c
#define MSI_REQ_GRANT BIT(0)
#define MSI_VECTOR_SHIFT 7
84
/* Per-controller state for the DRA7xx PCIe wrapper */
struct dra7xx_pcie {
	struct dw_pcie *pci;		/* DesignWare core handle */
	void __iomem *base;		/* ti_conf wrapper registers */
	int phy_count;			/* number of PHYs (lanes) */
	struct phy **phy;		/* PHYs obtained in probe */
	int link_gen;			/* max link speed from DT, clamped to 1..2 */
	struct irq_domain *irq_domain;	/* legacy INTx domain (4 hwirqs) */
	enum dw_pcie_device_mode mode;	/* RC or EP, from compatible match */
};
94
/* Match data: which device mode the compatible string selects */
struct dra7xx_pcie_of_data {
	enum dw_pcie_device_mode mode;
};

/* drvdata set in probe is the struct dra7xx_pcie itself */
#define to_dra7xx_pcie(x) dev_get_drvdata((x)->dev)
100
101static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
102{
103 return readl(pcie->base + offset);
104}
105
106static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
107 u32 value)
108{
109 writel(value, pcie->base + offset);
110}
111
112static u64 dra7xx_pcie_cpu_addr_fixup(u64 pci_addr)
113{
114 return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
115}
116
117static int dra7xx_pcie_link_up(struct dw_pcie *pci)
118{
119 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
120 u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);
121
122 return !!(reg & LINK_UP);
123}
124
125static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
126{
127 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
128 u32 reg;
129
130 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
131 reg &= ~LTSSM_EN;
132 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
133}
134
135static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
136{
137 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
138 struct device *dev = pci->dev;
139 u32 reg;
140 u32 exp_cap_off = EXP_CAP_ID_OFFSET;
141
142 if (dw_pcie_link_up(pci)) {
143 dev_err(dev, "link is already up\n");
144 return 0;
145 }
146
147 if (dra7xx->link_gen == 1) {
148 dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
149 4, ®);
150 if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
151 reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
152 reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
153 dw_pcie_write(pci->dbi_base + exp_cap_off +
154 PCI_EXP_LNKCAP, 4, reg);
155 }
156
157 dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
158 2, ®);
159 if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
160 reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
161 reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
162 dw_pcie_write(pci->dbi_base + exp_cap_off +
163 PCI_EXP_LNKCTL2, 2, reg);
164 }
165 }
166
167 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
168 reg |= LTSSM_EN;
169 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
170
171 return 0;
172}
173
174static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
175{
176 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
177 LEG_EP_INTERRUPTS | MSI);
178
179 dra7xx_pcie_writel(dra7xx,
180 PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
181 MSI | LEG_EP_INTERRUPTS);
182}
183
184static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
185{
186 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
187 INTERRUPTS);
188 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
189 INTERRUPTS);
190}
191
/* Enable both interrupt groups (wrapper MAIN events first, then MSI/INTx) */
static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
	dra7xx_pcie_enable_msi_interrupts(dra7xx);
}
197
/*
 * RC-mode host init callback for the DWC core.  The sequence is
 * order-dependent: configure the RC, start link training, wait for the
 * link, set up MSI, and only then enable the wrapper interrupts.
 */
static void dra7xx_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);

	dw_pcie_setup_rc(pp);

	dra7xx_pcie_establish_link(pci);
	dw_pcie_wait_for_link(pci);
	dw_pcie_msi_init(pp);
	dra7xx_pcie_enable_interrupts(dra7xx);
}

/* Host callbacks handed to the DWC core via pp->ops */
static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
	.host_init = dra7xx_pcie_host_init,
};
214
215static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
216 irq_hw_number_t hwirq)
217{
218 irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
219 irq_set_chip_data(irq, domain->host_data);
220
221 return 0;
222}
223
/* Domain ops for the 4-entry legacy INTx irq_domain */
static const struct irq_domain_ops intx_domain_ops = {
	.map = dra7xx_pcie_intx_map,
};
227
228static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
229{
230 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
231 struct device *dev = pci->dev;
232 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
233 struct device_node *node = dev->of_node;
234 struct device_node *pcie_intc_node = of_get_next_child(node, NULL);
235
236 if (!pcie_intc_node) {
237 dev_err(dev, "No PCIe Intc node found\n");
238 return -ENODEV;
239 }
240
241 dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, 4,
242 &intx_domain_ops, pp);
243 if (!dra7xx->irq_domain) {
244 dev_err(dev, "Failed to get a INTx IRQ domain\n");
245 return -ENODEV;
246 }
247
248 return 0;
249}
250
251static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
252{
253 struct dra7xx_pcie *dra7xx = arg;
254 struct dw_pcie *pci = dra7xx->pci;
255 struct pcie_port *pp = &pci->pp;
256 u32 reg;
257
258 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
259
260 switch (reg) {
261 case MSI:
262 dw_handle_msi_irq(pp);
263 break;
264 case INTA:
265 case INTB:
266 case INTC:
267 case INTD:
268 generic_handle_irq(irq_find_mapping(dra7xx->irq_domain,
269 ffs(reg)));
270 break;
271 }
272
273 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);
274
275 return IRQ_HANDLED;
276}
277
278
/*
 * IRQ handler for the wrapper MAIN status register.  Logs each pending
 * event at debug level, notifies the EP core on link-up when running in
 * EP mode, and acknowledges everything that was pending.
 */
static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
{
	struct dra7xx_pcie *dra7xx = arg;
	struct dw_pcie *pci = dra7xx->pci;
	struct device *dev = pci->dev;
	struct dw_pcie_ep *ep = &pci->ep;
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);

	if (reg & ERR_SYS)
		dev_dbg(dev, "System Error\n");

	if (reg & ERR_FATAL)
		dev_dbg(dev, "Fatal Error\n");

	if (reg & ERR_NONFATAL)
		dev_dbg(dev, "Non Fatal Error\n");

	if (reg & ERR_COR)
		dev_dbg(dev, "Correctable Error\n");

	if (reg & ERR_AXI)
		dev_dbg(dev, "AXI tag lookup fatal Error\n");

	if (reg & ERR_ECRC)
		dev_dbg(dev, "ECRC Error\n");

	if (reg & PME_TURN_OFF)
		dev_dbg(dev,
			"Power Management Event Turn-Off message received\n");

	if (reg & PME_TO_ACK)
		dev_dbg(dev,
			"Power Management Turn-Off Ack message received\n");

	if (reg & PM_PME)
		dev_dbg(dev, "PM Power Management Event message received\n");

	if (reg & LINK_REQ_RST)
		dev_dbg(dev, "Link Request Reset\n");

	if (reg & LINK_UP_EVT) {
		/* In EP mode, tell the EP core the link came up */
		if (dra7xx->mode == DW_PCIE_EP_TYPE)
			dw_pcie_ep_linkup(ep);
		dev_dbg(dev, "Link-up state change\n");
	}

	if (reg & CFG_BME_EVT)
		dev_dbg(dev, "CFG 'Bus Master Enable' change\n");

	if (reg & CFG_MSE_EVT)
		dev_dbg(dev, "CFG 'Memory Space Enable' change\n");

	/* Write-1-to-clear all events observed above */
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);

	return IRQ_HANDLED;
}
337
/* EP-mode init callback: only the wrapper (MAIN) events are needed */
static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(to_dw_pcie_from_ep(ep));

	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
}
345
/*
 * Pulse a legacy INTx toward the RC: assert, hold for 1 ms, deassert.
 * The assert/delay/deassert ordering is required by the wrapper.
 */
static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
	mdelay(1);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1);
}
352
353static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
354 u8 interrupt_num)
355{
356 u32 reg;
357
358 reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT;
359 reg |= MSI_REQ_GRANT;
360 dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
361}
362
363static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep,
364 enum pci_epc_irq_type type, u8 interrupt_num)
365{
366 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
367 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
368
369 switch (type) {
370 case PCI_EPC_IRQ_LEGACY:
371 dra7xx_pcie_raise_legacy_irq(dra7xx);
372 break;
373 case PCI_EPC_IRQ_MSI:
374 dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
375 break;
376 default:
377 dev_err(pci->dev, "UNKNOWN IRQ type\n");
378 }
379
380 return 0;
381}
382
/* Endpoint callbacks handed to the DWC EP core via ep->ops */
static struct dw_pcie_ep_ops pcie_ep_ops = {
	.ep_init = dra7xx_pcie_ep_init,
	.raise_irq = dra7xx_pcie_raise_irq,
};
387
388static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
389 struct platform_device *pdev)
390{
391 int ret;
392 struct dw_pcie_ep *ep;
393 struct resource *res;
394 struct device *dev = &pdev->dev;
395 struct dw_pcie *pci = dra7xx->pci;
396
397 ep = &pci->ep;
398 ep->ops = &pcie_ep_ops;
399
400 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics");
401 pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
402 if (!pci->dbi_base)
403 return -ENOMEM;
404
405 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2");
406 pci->dbi_base2 = devm_ioremap(dev, res->start, resource_size(res));
407 if (!pci->dbi_base2)
408 return -ENOMEM;
409
410 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
411 if (!res)
412 return -EINVAL;
413
414 ep->phys_base = res->start;
415 ep->addr_size = resource_size(res);
416
417 ret = dw_pcie_ep_init(ep);
418 if (ret) {
419 dev_err(dev, "failed to initialize endpoint\n");
420 return ret;
421 }
422
423 return 0;
424}
425
426static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
427 struct platform_device *pdev)
428{
429 int ret;
430 struct dw_pcie *pci = dra7xx->pci;
431 struct pcie_port *pp = &pci->pp;
432 struct device *dev = pci->dev;
433 struct resource *res;
434
435 pp->irq = platform_get_irq(pdev, 1);
436 if (pp->irq < 0) {
437 dev_err(dev, "missing IRQ resource\n");
438 return -EINVAL;
439 }
440
441 ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler,
442 IRQF_SHARED | IRQF_NO_THREAD,
443 "dra7-pcie-msi", dra7xx);
444 if (ret) {
445 dev_err(dev, "failed to request irq\n");
446 return ret;
447 }
448
449 ret = dra7xx_pcie_init_irq_domain(pp);
450 if (ret < 0)
451 return ret;
452
453 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics");
454 pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
455 if (!pci->dbi_base)
456 return -ENOMEM;
457
458 ret = dw_pcie_host_init(pp);
459 if (ret) {
460 dev_err(dev, "failed to initialize host\n");
461 return ret;
462 }
463
464 return 0;
465}
466
/* Core callbacks handed to the DWC core via pci->ops */
static const struct dw_pcie_ops dw_pcie_ops = {
	.cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup,
	.start_link = dra7xx_pcie_establish_link,
	.stop_link = dra7xx_pcie_stop_link,
	.link_up = dra7xx_pcie_link_up,
};
473
474static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx)
475{
476 int phy_count = dra7xx->phy_count;
477
478 while (phy_count--) {
479 phy_power_off(dra7xx->phy[phy_count]);
480 phy_exit(dra7xx->phy[phy_count]);
481 }
482}
483
484static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx)
485{
486 int phy_count = dra7xx->phy_count;
487 int ret;
488 int i;
489
490 for (i = 0; i < phy_count; i++) {
491 ret = phy_init(dra7xx->phy[i]);
492 if (ret < 0)
493 goto err_phy;
494
495 ret = phy_power_on(dra7xx->phy[i]);
496 if (ret < 0) {
497 phy_exit(dra7xx->phy[i]);
498 goto err_phy;
499 }
500 }
501
502 return 0;
503
504err_phy:
505 while (--i >= 0) {
506 phy_power_off(dra7xx->phy[i]);
507 phy_exit(dra7xx->phy[i]);
508 }
509
510 return ret;
511}
512
/* Match data for RC mode ("ti,dra7-pcie") */
static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = {
	.mode = DW_PCIE_RC_TYPE,
};

/* Match data for EP mode ("ti,dra7-pcie-ep") */
static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = {
	.mode = DW_PCIE_EP_TYPE,
};

/* DT match table: the compatible string selects the device mode */
static const struct of_device_id of_dra7xx_pcie_match[] = {
	{
		.compatible = "ti,dra7-pcie",
		.data = &dra7xx_pcie_rc_of_data,
	},
	{
		.compatible = "ti,dra7-pcie-ep",
		.data = &dra7xx_pcie_ep_of_data,
	},
	{},
};
532
533
534
535
536
537
538
539
540
541
542
543
544static int dra7xx_pcie_ep_unaligned_memaccess(struct device *dev)
545{
546 int ret;
547 struct device_node *np = dev->of_node;
548 struct of_phandle_args args;
549 struct regmap *regmap;
550
551 regmap = syscon_regmap_lookup_by_phandle(np,
552 "ti,syscon-unaligned-access");
553 if (IS_ERR(regmap)) {
554 dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
555 return -EINVAL;
556 }
557
558 ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
559 2, 0, &args);
560 if (ret) {
561 dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
562 return ret;
563 }
564
565 ret = regmap_update_bits(regmap, args.args[0], args.args[1],
566 args.args[1]);
567 if (ret)
568 dev_err(dev, "failed to enable unaligned access\n");
569
570 of_node_put(args.np);
571
572 return ret;
573}
574
575static int __init dra7xx_pcie_probe(struct platform_device *pdev)
576{
577 u32 reg;
578 int ret;
579 int irq;
580 int i;
581 int phy_count;
582 struct phy **phy;
583 void __iomem *base;
584 struct resource *res;
585 struct dw_pcie *pci;
586 struct pcie_port *pp;
587 struct dra7xx_pcie *dra7xx;
588 struct device *dev = &pdev->dev;
589 struct device_node *np = dev->of_node;
590 char name[10];
591 struct gpio_desc *reset;
592 const struct of_device_id *match;
593 const struct dra7xx_pcie_of_data *data;
594 enum dw_pcie_device_mode mode;
595
596 match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev);
597 if (!match)
598 return -EINVAL;
599
600 data = (struct dra7xx_pcie_of_data *)match->data;
601 mode = (enum dw_pcie_device_mode)data->mode;
602
603 dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
604 if (!dra7xx)
605 return -ENOMEM;
606
607 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
608 if (!pci)
609 return -ENOMEM;
610
611 pci->dev = dev;
612 pci->ops = &dw_pcie_ops;
613
614 pp = &pci->pp;
615 pp->ops = &dra7xx_pcie_host_ops;
616
617 irq = platform_get_irq(pdev, 0);
618 if (irq < 0) {
619 dev_err(dev, "missing IRQ resource\n");
620 return -EINVAL;
621 }
622
623 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
624 base = devm_ioremap_nocache(dev, res->start, resource_size(res));
625 if (!base)
626 return -ENOMEM;
627
628 phy_count = of_property_count_strings(np, "phy-names");
629 if (phy_count < 0) {
630 dev_err(dev, "unable to find the strings\n");
631 return phy_count;
632 }
633
634 phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL);
635 if (!phy)
636 return -ENOMEM;
637
638 for (i = 0; i < phy_count; i++) {
639 snprintf(name, sizeof(name), "pcie-phy%d", i);
640 phy[i] = devm_phy_get(dev, name);
641 if (IS_ERR(phy[i]))
642 return PTR_ERR(phy[i]);
643 }
644
645 dra7xx->base = base;
646 dra7xx->phy = phy;
647 dra7xx->pci = pci;
648 dra7xx->phy_count = phy_count;
649
650 ret = dra7xx_pcie_enable_phy(dra7xx);
651 if (ret) {
652 dev_err(dev, "failed to enable phy\n");
653 return ret;
654 }
655
656 platform_set_drvdata(pdev, dra7xx);
657
658 pm_runtime_enable(dev);
659 ret = pm_runtime_get_sync(dev);
660 if (ret < 0) {
661 dev_err(dev, "pm_runtime_get_sync failed\n");
662 goto err_get_sync;
663 }
664
665 reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
666 if (IS_ERR(reset)) {
667 ret = PTR_ERR(reset);
668 dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
669 goto err_gpio;
670 }
671
672 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
673 reg &= ~LTSSM_EN;
674 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
675
676 dra7xx->link_gen = of_pci_get_max_link_speed(np);
677 if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2)
678 dra7xx->link_gen = 2;
679
680 switch (mode) {
681 case DW_PCIE_RC_TYPE:
682 dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
683 DEVICE_TYPE_RC);
684 ret = dra7xx_add_pcie_port(dra7xx, pdev);
685 if (ret < 0)
686 goto err_gpio;
687 break;
688 case DW_PCIE_EP_TYPE:
689 dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
690 DEVICE_TYPE_EP);
691
692 ret = dra7xx_pcie_ep_unaligned_memaccess(dev);
693 if (ret)
694 goto err_gpio;
695
696 ret = dra7xx_add_pcie_ep(dra7xx, pdev);
697 if (ret < 0)
698 goto err_gpio;
699 break;
700 default:
701 dev_err(dev, "INVALID device type %d\n", mode);
702 }
703 dra7xx->mode = mode;
704
705 ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
706 IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
707 if (ret) {
708 dev_err(dev, "failed to request irq\n");
709 goto err_gpio;
710 }
711
712 return 0;
713
714err_gpio:
715 pm_runtime_put(dev);
716
717err_get_sync:
718 pm_runtime_disable(dev);
719 dra7xx_pcie_disable_phy(dra7xx);
720
721 return ret;
722}
723
724#ifdef CONFIG_PM_SLEEP
725static int dra7xx_pcie_suspend(struct device *dev)
726{
727 struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
728 struct dw_pcie *pci = dra7xx->pci;
729 u32 val;
730
731 if (dra7xx->mode != DW_PCIE_RC_TYPE)
732 return 0;
733
734
735 val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
736 val &= ~PCI_COMMAND_MEMORY;
737 dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
738
739 return 0;
740}
741
742static int dra7xx_pcie_resume(struct device *dev)
743{
744 struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
745 struct dw_pcie *pci = dra7xx->pci;
746 u32 val;
747
748 if (dra7xx->mode != DW_PCIE_RC_TYPE)
749 return 0;
750
751
752 val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
753 val |= PCI_COMMAND_MEMORY;
754 dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
755
756 return 0;
757}
758
/* Late (noirq) suspend: power down and exit all PHYs */
static int dra7xx_pcie_suspend_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);

	dra7xx_pcie_disable_phy(dra7xx);

	return 0;
}
767
/* Early (noirq) resume: re-init and power on all PHYs */
static int dra7xx_pcie_resume_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int err = dra7xx_pcie_enable_phy(dra7xx);

	if (err)
		dev_err(dev, "failed to enable phy\n");

	return err;
}
781#endif
782
/* Suspend/resume plus noirq-phase PHY handling (CONFIG_PM_SLEEP only) */
static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
				      dra7xx_pcie_resume_noirq)
};

/* No .remove: bind attrs suppressed, probe registered as __init below */
static struct platform_driver dra7xx_pcie_driver = {
	.driver = {
		.name	= "dra7-pcie",
		.of_match_table = of_dra7xx_pcie_match,
		.suppress_bind_attrs = true,
		.pm	= &dra7xx_pcie_pm_ops,
	},
};
builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);
798