1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include <linux/clk.h>
20#include <linux/delay.h>
21#include <linux/gpio.h>
22#include <linux/interrupt.h>
23#include <linux/io.h>
24#include <linux/iopoll.h>
25#include <linux/kernel.h>
26#include <linux/init.h>
27#include <linux/of_device.h>
28#include <linux/of_gpio.h>
29#include <linux/pci.h>
30#include <linux/platform_device.h>
31#include <linux/phy/phy.h>
32#include <linux/regulator/consumer.h>
33#include <linux/reset.h>
34#include <linux/slab.h>
35#include <linux/types.h>
36
37#include "pcie-designware.h"
38
/*
 * PARF -- Qualcomm's wrapper ("PCIe PARF") register block around the
 * DesignWare core; offsets below are relative to the "parf" resource.
 */
#define PCIE20_PARF_SYS_CTRL 0x00
#define PCIE20_PARF_PHY_CTRL 0x40
#define PCIE20_PARF_PHY_REFCLK 0x4C
#define PCIE20_PARF_DBI_BASE_ADDR 0x168
#define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C
#define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178
#define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8
#define PCIE20_PARF_LTSSM 0x1B0
#define PCIE20_PARF_SID_OFFSET 0x234
#define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C

/* ELBI (external local bus interface) register block ("elbi" resource). */
#define PCIE20_ELBI_SYS_CTRL 0x04
#define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0)	/* start link training */

/* AXI master response/completion bridge registers in the DBI space. */
#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4
#define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5
#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c
#define CFG_BRIDGE_SB_INIT BIT(0)

/* Offset of the PCIe capability structure in config space. */
#define PCIE20_CAP 0x70

/* Delay (us) applied around PERST# assert/deassert. */
#define PERST_DELAY_US 1000
63
/*
 * Clocks, resets and regulators for the v0 core
 * (qcom,pcie-ipq8064 / qcom,pcie-apq8064 -- see qcom_pcie_match[]).
 */
struct qcom_pcie_resources_v0 {
	struct clk *iface_clk;
	struct clk *core_clk;
	struct clk *phy_clk;
	struct reset_control *pci_reset;
	struct reset_control *axi_reset;
	struct reset_control *ahb_reset;
	struct reset_control *por_reset;
	struct reset_control *phy_reset;
	struct regulator *vdda;
	struct regulator *vdda_phy;
	struct regulator *vdda_refclk;
};
77
/* Resources for the v1 core (qcom,pcie-apq8084). */
struct qcom_pcie_resources_v1 {
	struct clk *iface;
	struct clk *aux;
	struct clk *master_bus;
	struct clk *slave_bus;
	struct reset_control *core;
	struct regulator *vdda;
};
86
/* Resources for the v2 core (qcom,pcie-msm8996); clocks only. */
struct qcom_pcie_resources_v2 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct clk *cfg_clk;
	struct clk *pipe_clk;	/* enabled late, in post_init (needs PHY up) */
};
94
/* Resources for the v3 core (qcom,pcie-ipq4019); many discrete resets. */
struct qcom_pcie_resources_v3 {
	struct clk *aux_clk;
	struct clk *master_clk;
	struct clk *slave_clk;
	struct reset_control *axi_m_reset;
	struct reset_control *axi_s_reset;
	struct reset_control *pipe_reset;
	struct reset_control *axi_m_vmid_reset;
	struct reset_control *axi_s_xpu_reset;
	struct reset_control *parf_reset;
	struct reset_control *phy_reset;
	struct reset_control *axi_m_sticky_reset;
	struct reset_control *pipe_sticky_reset;
	struct reset_control *pwr_reset;
	struct reset_control *ahb_reset;
	struct reset_control *phy_ahb_reset;
};
112
/* Only one variant is active per probed device, hence a union. */
union qcom_pcie_resources {
	struct qcom_pcie_resources_v0 v0;
	struct qcom_pcie_resources_v1 v1;
	struct qcom_pcie_resources_v2 v2;
	struct qcom_pcie_resources_v3 v3;
};
119
struct qcom_pcie;

/*
 * Per-SoC-generation operations, selected via the OF match data.
 * post_init and ltssm_enable are optional (checked before calling);
 * get_resources, init and deinit are called unconditionally.
 */
struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
};
129
/* Driver-private state, stored as the platform device's drvdata. */
struct qcom_pcie {
	struct dw_pcie *pci;		/* DesignWare core handle */
	void __iomem *parf;		/* mapped PARF wrapper registers */
	void __iomem *elbi;		/* mapped ELBI registers */
	union qcom_pcie_resources res;	/* variant-specific clks/resets/regs */
	struct phy *phy;		/* optional "pciephy" PHY */
	struct gpio_desc *reset;	/* optional PERST# GPIO */
	struct qcom_pcie_ops *ops;	/* variant ops from match data */
};

/* Recover the qcom_pcie from a dw_pcie (drvdata set in probe). */
#define to_qcom_pcie(x) dev_get_drvdata((x)->dev)
141
/* Drive PERST# active to hold the endpoint in reset, then let it settle. */
static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
{
	gpiod_set_value(pcie->reset, 1);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
147
/* Release PERST# and give the endpoint time to come out of reset. */
static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
{
	gpiod_set_value(pcie->reset, 0);
	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
153
/* Thin shim forwarding the "msi" interrupt to the DesignWare MSI core. */
static irqreturn_t qcom_pcie_msi_irq_handler(int irq, void *arg)
{
	struct pcie_port *pp = arg;

	return dw_handle_msi_irq(pp);
}
160
/*
 * Bring the link up: kick the LTSSM (if the variant needs it) and wait
 * for link-up.  Returns 0 if the link is already up or comes up in time,
 * otherwise the negative errno from dw_pcie_wait_for_link().
 */
static int qcom_pcie_establish_link(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;

	if (dw_pcie_link_up(pci))
		return 0;

	/* Start LTSSM / link training on variants that gate it. */
	if (pcie->ops->ltssm_enable)
		pcie->ops->ltssm_enable(pcie);

	return dw_pcie_wait_for_link(pci);
}
174
/* v0/v1: start link training via the LT_ENABLE bit in ELBI_SYS_CTRL. */
static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL);
	val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE;
	writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL);
}
184
/*
 * Look up (devm-managed) all v0 regulators, clocks and resets.  Nothing
 * is enabled here -- that happens in qcom_pcie_init_v0().  Returns 0 or
 * the first lookup error (may be -EPROBE_DEFER).
 */
static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->vdda_phy = devm_regulator_get(dev, "vdda_phy");
	if (IS_ERR(res->vdda_phy))
		return PTR_ERR(res->vdda_phy);

	res->vdda_refclk = devm_regulator_get(dev, "vdda_refclk");
	if (IS_ERR(res->vdda_refclk))
		return PTR_ERR(res->vdda_refclk);

	res->iface_clk = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface_clk))
		return PTR_ERR(res->iface_clk);

	res->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(res->core_clk))
		return PTR_ERR(res->core_clk);

	res->phy_clk = devm_clk_get(dev, "phy");
	if (IS_ERR(res->phy_clk))
		return PTR_ERR(res->phy_clk);

	res->pci_reset = devm_reset_control_get(dev, "pci");
	if (IS_ERR(res->pci_reset))
		return PTR_ERR(res->pci_reset);

	res->axi_reset = devm_reset_control_get(dev, "axi");
	if (IS_ERR(res->axi_reset))
		return PTR_ERR(res->axi_reset);

	res->ahb_reset = devm_reset_control_get(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->por_reset = devm_reset_control_get(dev, "por");
	if (IS_ERR(res->por_reset))
		return PTR_ERR(res->por_reset);

	res->phy_reset = devm_reset_control_get(dev, "phy");
	return PTR_ERR_OR_ZERO(res->phy_reset);
}
234
235static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie)
236{
237 struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
238
239 reset_control_assert(res->pci_reset);
240 reset_control_assert(res->axi_reset);
241 reset_control_assert(res->ahb_reset);
242 reset_control_assert(res->por_reset);
243 reset_control_assert(res->pci_reset);
244 clk_disable_unprepare(res->iface_clk);
245 clk_disable_unprepare(res->core_clk);
246 clk_disable_unprepare(res->phy_clk);
247 regulator_disable(res->vdda);
248 regulator_disable(res->vdda_phy);
249 regulator_disable(res->vdda_refclk);
250}
251
252static int qcom_pcie_init_v0(struct qcom_pcie *pcie)
253{
254 struct qcom_pcie_resources_v0 *res = &pcie->res.v0;
255 struct dw_pcie *pci = pcie->pci;
256 struct device *dev = pci->dev;
257 u32 val;
258 int ret;
259
260 ret = regulator_enable(res->vdda);
261 if (ret) {
262 dev_err(dev, "cannot enable vdda regulator\n");
263 return ret;
264 }
265
266 ret = regulator_enable(res->vdda_refclk);
267 if (ret) {
268 dev_err(dev, "cannot enable vdda_refclk regulator\n");
269 goto err_refclk;
270 }
271
272 ret = regulator_enable(res->vdda_phy);
273 if (ret) {
274 dev_err(dev, "cannot enable vdda_phy regulator\n");
275 goto err_vdda_phy;
276 }
277
278 ret = reset_control_assert(res->ahb_reset);
279 if (ret) {
280 dev_err(dev, "cannot assert ahb reset\n");
281 goto err_assert_ahb;
282 }
283
284 ret = clk_prepare_enable(res->iface_clk);
285 if (ret) {
286 dev_err(dev, "cannot prepare/enable iface clock\n");
287 goto err_assert_ahb;
288 }
289
290 ret = clk_prepare_enable(res->phy_clk);
291 if (ret) {
292 dev_err(dev, "cannot prepare/enable phy clock\n");
293 goto err_clk_phy;
294 }
295
296 ret = clk_prepare_enable(res->core_clk);
297 if (ret) {
298 dev_err(dev, "cannot prepare/enable core clock\n");
299 goto err_clk_core;
300 }
301
302 ret = reset_control_deassert(res->ahb_reset);
303 if (ret) {
304 dev_err(dev, "cannot deassert ahb reset\n");
305 goto err_deassert_ahb;
306 }
307
308
309 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
310 val &= ~BIT(0);
311 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
312
313
314 val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
315 val |= BIT(16);
316 writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
317
318 ret = reset_control_deassert(res->phy_reset);
319 if (ret) {
320 dev_err(dev, "cannot deassert phy reset\n");
321 return ret;
322 }
323
324 ret = reset_control_deassert(res->pci_reset);
325 if (ret) {
326 dev_err(dev, "cannot deassert pci reset\n");
327 return ret;
328 }
329
330 ret = reset_control_deassert(res->por_reset);
331 if (ret) {
332 dev_err(dev, "cannot deassert por reset\n");
333 return ret;
334 }
335
336 ret = reset_control_deassert(res->axi_reset);
337 if (ret) {
338 dev_err(dev, "cannot deassert axi reset\n");
339 return ret;
340 }
341
342
343 usleep_range(1000, 1500);
344
345
346
347 writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
348 pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
349 writel(CFG_BRIDGE_SB_INIT,
350 pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);
351
352 return 0;
353
354err_deassert_ahb:
355 clk_disable_unprepare(res->core_clk);
356err_clk_core:
357 clk_disable_unprepare(res->phy_clk);
358err_clk_phy:
359 clk_disable_unprepare(res->iface_clk);
360err_assert_ahb:
361 regulator_disable(res->vdda_phy);
362err_vdda_phy:
363 regulator_disable(res->vdda_refclk);
364err_refclk:
365 regulator_disable(res->vdda);
366
367 return ret;
368}
369
/*
 * Look up (devm-managed) the v1 regulator, clocks and core reset.
 * Returns 0 or the first lookup error (may be -EPROBE_DEFER).
 */
static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->iface = devm_clk_get(dev, "iface");
	if (IS_ERR(res->iface))
		return PTR_ERR(res->iface);

	res->aux = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux))
		return PTR_ERR(res->aux);

	res->master_bus = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_bus))
		return PTR_ERR(res->master_bus);

	res->slave_bus = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_bus))
		return PTR_ERR(res->slave_bus);

	res->core = devm_reset_control_get(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}
399
/* Power down the v1 controller (reverse of qcom_pcie_init_v1()). */
static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;

	reset_control_assert(res->core);
	clk_disable_unprepare(res->slave_bus);
	clk_disable_unprepare(res->master_bus);
	clk_disable_unprepare(res->iface);
	clk_disable_unprepare(res->aux);
	regulator_disable(res->vdda);
}
411
/*
 * Power up and initialize the v1 (APQ8084) controller: release the core
 * reset, enable the clocks and regulator, then program the PARF block.
 * Rolls back everything on failure and returns a negative errno.
 */
static int qcom_pcie_init_v1(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v1 *res = &pcie->res.v1;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_prepare_enable(res->aux);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		goto err_res;
	}

	ret = clk_prepare_enable(res->iface);
	if (ret) {
		dev_err(dev, "cannot prepare/enable iface clock\n");
		goto err_aux;
	}

	ret = clk_prepare_enable(res->master_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master_bus clock\n");
		goto err_iface;
	}

	ret = clk_prepare_enable(res->slave_bus);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave_bus clock\n");
		goto err_master;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_slave;
	}

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		/* Bit 31 presumably (un)halts AXI master writes for MSI --
		 * TODO confirm against the PARF hardware documentation. */
		u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= BIT(31);
		writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	return 0;
err_slave:
	clk_disable_unprepare(res->slave_bus);
err_master:
	clk_disable_unprepare(res->master_bus);
err_iface:
	clk_disable_unprepare(res->iface);
err_aux:
	clk_disable_unprepare(res->aux);
err_res:
	reset_control_assert(res->core);

	return ret;
}
479
/* v2/v3: start link training via the LTSSM enable bit in PARF_LTSSM. */
static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable link training */
	val = readl(pcie->parf + PCIE20_PARF_LTSSM);
	val |= BIT(8);
	writel(val, pcie->parf + PCIE20_PARF_LTSSM);
}
489
/*
 * Look up (devm-managed) the five v2 clocks.  Returns 0 or the first
 * lookup error (may be -EPROBE_DEFER).
 */
static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->cfg_clk = devm_clk_get(dev, "cfg");
	if (IS_ERR(res->cfg_clk))
		return PTR_ERR(res->cfg_clk);

	res->master_clk = devm_clk_get(dev, "bus_master");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "bus_slave");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->pipe_clk = devm_clk_get(dev, "pipe");
	return PTR_ERR_OR_ZERO(res->pipe_clk);
}
515
/* Gate all v2 clocks (reverse of init_v2 + post_init_v2). */
static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v2 *res = &pcie->res.v2;

	clk_disable_unprepare(res->pipe_clk);
	clk_disable_unprepare(res->slave_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->cfg_clk);
	clk_disable_unprepare(res->aux_clk);
}
526
/*
 * Power up and initialize the v2 (MSM8996) controller: enable the bus
 * clocks (pipe_clk is deferred to post_init, after the PHY is powered)
 * and program the PARF block.  Rolls back on failure.
 */
static int qcom_pcie_init_v2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = clk_prepare_enable(res->aux_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable aux clock\n");
		return ret;
	}

	ret = clk_prepare_enable(res->cfg_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable cfg clock\n");
		goto err_cfg_clk;
	}

	ret = clk_prepare_enable(res->master_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable master clock\n");
		goto err_master_clk;
	}

	ret = clk_prepare_enable(res->slave_clk);
	if (ret) {
		dev_err(dev, "cannot prepare/enable slave clock\n");
		goto err_slave_clk;
	}

	/* Take the PHY out of software power-down (bit 0 of PHY_CTRL). */
	val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
	val &= ~BIT(0);
	writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);

	/* change DBI base address */
	writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);

	/* Bit meanings below are presumed from the hardware programming
	 * sequence -- TODO confirm against the PARF documentation. */
	val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
	val &= ~BIT(29);
	writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
	val |= BIT(4);
	writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= BIT(31);
	writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;

err_slave_clk:
	clk_disable_unprepare(res->master_clk);
err_master_clk:
	clk_disable_unprepare(res->cfg_clk);
err_cfg_clk:
	clk_disable_unprepare(res->aux_clk);

	return ret;
}
591
592static int qcom_pcie_post_init_v2(struct qcom_pcie *pcie)
593{
594 struct qcom_pcie_resources_v2 *res = &pcie->res.v2;
595 struct dw_pcie *pci = pcie->pci;
596 struct device *dev = pci->dev;
597 int ret;
598
599 ret = clk_prepare_enable(res->pipe_clk);
600 if (ret) {
601 dev_err(dev, "cannot prepare/enable pipe clock\n");
602 return ret;
603 }
604
605 return 0;
606}
607
/*
 * Look up (devm-managed) the v3 clocks and the many discrete reset
 * lines.  Returns 0 or the first lookup error (may be -EPROBE_DEFER).
 */
static int qcom_pcie_get_resources_v3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v3 *res = &pcie->res.v3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->master_clk = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->axi_m_reset = devm_reset_control_get(dev, "axi_m");
	if (IS_ERR(res->axi_m_reset))
		return PTR_ERR(res->axi_m_reset);

	res->axi_s_reset = devm_reset_control_get(dev, "axi_s");
	if (IS_ERR(res->axi_s_reset))
		return PTR_ERR(res->axi_s_reset);

	res->pipe_reset = devm_reset_control_get(dev, "pipe");
	if (IS_ERR(res->pipe_reset))
		return PTR_ERR(res->pipe_reset);

	/* NOTE: axi_m_vmid/axi_s_xpu/parf resets are looked up but never
	 * asserted/deasserted in this file -- possibly reserved for later
	 * use; verify before removing. */
	res->axi_m_vmid_reset = devm_reset_control_get(dev, "axi_m_vmid");
	if (IS_ERR(res->axi_m_vmid_reset))
		return PTR_ERR(res->axi_m_vmid_reset);

	res->axi_s_xpu_reset = devm_reset_control_get(dev, "axi_s_xpu");
	if (IS_ERR(res->axi_s_xpu_reset))
		return PTR_ERR(res->axi_s_xpu_reset);

	res->parf_reset = devm_reset_control_get(dev, "parf");
	if (IS_ERR(res->parf_reset))
		return PTR_ERR(res->parf_reset);

	res->phy_reset = devm_reset_control_get(dev, "phy");
	if (IS_ERR(res->phy_reset))
		return PTR_ERR(res->phy_reset);

	res->axi_m_sticky_reset = devm_reset_control_get(dev, "axi_m_sticky");
	if (IS_ERR(res->axi_m_sticky_reset))
		return PTR_ERR(res->axi_m_sticky_reset);

	res->pipe_sticky_reset = devm_reset_control_get(dev, "pipe_sticky");
	if (IS_ERR(res->pipe_sticky_reset))
		return PTR_ERR(res->pipe_sticky_reset);

	res->pwr_reset = devm_reset_control_get(dev, "pwr");
	if (IS_ERR(res->pwr_reset))
		return PTR_ERR(res->pwr_reset);

	res->ahb_reset = devm_reset_control_get(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->phy_ahb_reset = devm_reset_control_get(dev, "phy_ahb");
	if (IS_ERR(res->phy_ahb_reset))
		return PTR_ERR(res->phy_ahb_reset);

	return 0;
}
676
/* Power down the v3 controller: assert all resets, gate the clocks. */
static void qcom_pcie_deinit_v3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v3 *res = &pcie->res.v3;

	reset_control_assert(res->axi_m_reset);
	reset_control_assert(res->axi_s_reset);
	reset_control_assert(res->pipe_reset);
	reset_control_assert(res->pipe_sticky_reset);
	reset_control_assert(res->phy_reset);
	reset_control_assert(res->phy_ahb_reset);
	reset_control_assert(res->axi_m_sticky_reset);
	reset_control_assert(res->pwr_reset);
	reset_control_assert(res->ahb_reset);
	clk_disable_unprepare(res->aux_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->slave_clk);
}
694
695static int qcom_pcie_init_v3(struct qcom_pcie *pcie)
696{
697 struct qcom_pcie_resources_v3 *res = &pcie->res.v3;
698 struct dw_pcie *pci = pcie->pci;
699 struct device *dev = pci->dev;
700 u32 val;
701 int ret;
702
703 ret = reset_control_assert(res->axi_m_reset);
704 if (ret) {
705 dev_err(dev, "cannot assert axi master reset\n");
706 return ret;
707 }
708
709 ret = reset_control_assert(res->axi_s_reset);
710 if (ret) {
711 dev_err(dev, "cannot assert axi slave reset\n");
712 return ret;
713 }
714
715 usleep_range(10000, 12000);
716
717 ret = reset_control_assert(res->pipe_reset);
718 if (ret) {
719 dev_err(dev, "cannot assert pipe reset\n");
720 return ret;
721 }
722
723 ret = reset_control_assert(res->pipe_sticky_reset);
724 if (ret) {
725 dev_err(dev, "cannot assert pipe sticky reset\n");
726 return ret;
727 }
728
729 ret = reset_control_assert(res->phy_reset);
730 if (ret) {
731 dev_err(dev, "cannot assert phy reset\n");
732 return ret;
733 }
734
735 ret = reset_control_assert(res->phy_ahb_reset);
736 if (ret) {
737 dev_err(dev, "cannot assert phy ahb reset\n");
738 return ret;
739 }
740
741 usleep_range(10000, 12000);
742
743 ret = reset_control_assert(res->axi_m_sticky_reset);
744 if (ret) {
745 dev_err(dev, "cannot assert axi master sticky reset\n");
746 return ret;
747 }
748
749 ret = reset_control_assert(res->pwr_reset);
750 if (ret) {
751 dev_err(dev, "cannot assert power reset\n");
752 return ret;
753 }
754
755 ret = reset_control_assert(res->ahb_reset);
756 if (ret) {
757 dev_err(dev, "cannot assert ahb reset\n");
758 return ret;
759 }
760
761 usleep_range(10000, 12000);
762
763 ret = reset_control_deassert(res->phy_ahb_reset);
764 if (ret) {
765 dev_err(dev, "cannot deassert phy ahb reset\n");
766 return ret;
767 }
768
769 ret = reset_control_deassert(res->phy_reset);
770 if (ret) {
771 dev_err(dev, "cannot deassert phy reset\n");
772 goto err_rst_phy;
773 }
774
775 ret = reset_control_deassert(res->pipe_reset);
776 if (ret) {
777 dev_err(dev, "cannot deassert pipe reset\n");
778 goto err_rst_pipe;
779 }
780
781 ret = reset_control_deassert(res->pipe_sticky_reset);
782 if (ret) {
783 dev_err(dev, "cannot deassert pipe sticky reset\n");
784 goto err_rst_pipe_sticky;
785 }
786
787 usleep_range(10000, 12000);
788
789 ret = reset_control_deassert(res->axi_m_reset);
790 if (ret) {
791 dev_err(dev, "cannot deassert axi master reset\n");
792 goto err_rst_axi_m;
793 }
794
795 ret = reset_control_deassert(res->axi_m_sticky_reset);
796 if (ret) {
797 dev_err(dev, "cannot deassert axi master sticky reset\n");
798 goto err_rst_axi_m_sticky;
799 }
800
801 ret = reset_control_deassert(res->axi_s_reset);
802 if (ret) {
803 dev_err(dev, "cannot deassert axi slave reset\n");
804 goto err_rst_axi_s;
805 }
806
807 ret = reset_control_deassert(res->pwr_reset);
808 if (ret) {
809 dev_err(dev, "cannot deassert power reset\n");
810 goto err_rst_pwr;
811 }
812
813 ret = reset_control_deassert(res->ahb_reset);
814 if (ret) {
815 dev_err(dev, "cannot deassert ahb reset\n");
816 goto err_rst_ahb;
817 }
818
819 usleep_range(10000, 12000);
820
821 ret = clk_prepare_enable(res->aux_clk);
822 if (ret) {
823 dev_err(dev, "cannot prepare/enable iface clock\n");
824 goto err_clk_aux;
825 }
826
827 ret = clk_prepare_enable(res->master_clk);
828 if (ret) {
829 dev_err(dev, "cannot prepare/enable core clock\n");
830 goto err_clk_axi_m;
831 }
832
833 ret = clk_prepare_enable(res->slave_clk);
834 if (ret) {
835 dev_err(dev, "cannot prepare/enable phy clock\n");
836 goto err_clk_axi_s;
837 }
838
839
840 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
841 val &= !BIT(0);
842 writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
843
844
845 writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR);
846
847
848 val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL);
849 val &= ~BIT(29);
850 writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL);
851
852 val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
853 val |= BIT(4);
854 writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL);
855
856 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
857 val |= BIT(31);
858 writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2);
859
860 return 0;
861
862err_clk_axi_s:
863 clk_disable_unprepare(res->master_clk);
864err_clk_axi_m:
865 clk_disable_unprepare(res->aux_clk);
866err_clk_aux:
867 reset_control_assert(res->ahb_reset);
868err_rst_ahb:
869 reset_control_assert(res->pwr_reset);
870err_rst_pwr:
871 reset_control_assert(res->axi_s_reset);
872err_rst_axi_s:
873 reset_control_assert(res->axi_m_sticky_reset);
874err_rst_axi_m_sticky:
875 reset_control_assert(res->axi_m_reset);
876err_rst_axi_m:
877 reset_control_assert(res->pipe_sticky_reset);
878err_rst_pipe_sticky:
879 reset_control_assert(res->pipe_reset);
880err_rst_pipe:
881 reset_control_assert(res->phy_reset);
882err_rst_phy:
883 reset_control_assert(res->phy_ahb_reset);
884 return ret;
885}
886
/*
 * Report link state by reading the Link Status register inside the PCIe
 * capability (at PCIE20_CAP) and testing the Data Link Layer Link
 * Active bit.
 */
static int qcom_pcie_link_up(struct dw_pcie *pci)
{
	u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA);

	return !!(val & PCI_EXP_LNKSTA_DLLLA);
}
893
894static void qcom_pcie_host_init(struct pcie_port *pp)
895{
896 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
897 struct qcom_pcie *pcie = to_qcom_pcie(pci);
898 int ret;
899
900 qcom_ep_reset_assert(pcie);
901
902 ret = pcie->ops->init(pcie);
903 if (ret)
904 goto err_deinit;
905
906 ret = phy_power_on(pcie->phy);
907 if (ret)
908 goto err_deinit;
909
910 if (pcie->ops->post_init)
911 pcie->ops->post_init(pcie);
912
913 dw_pcie_setup_rc(pp);
914
915 if (IS_ENABLED(CONFIG_PCI_MSI))
916 dw_pcie_msi_init(pp);
917
918 qcom_ep_reset_deassert(pcie);
919
920 ret = qcom_pcie_establish_link(pcie);
921 if (ret)
922 goto err;
923
924 return;
925err:
926 qcom_ep_reset_assert(pcie);
927 phy_power_off(pcie->phy);
928err_deinit:
929 pcie->ops->deinit(pcie);
930}
931
/*
 * Config-space read for the root port's own config space.  The core
 * reports an unsuitable class code, so full reads of PCI_CLASS_REVISION
 * are fixed up to advertise a PCI-to-PCI bridge while preserving the
 * revision byte; everything else is passed through to the DBI space.
 */
static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
				 u32 *val)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/* the device class is not reported correctly from the register */
	if (where == PCI_CLASS_REVISION && size == 4) {
		*val = readl(pci->dbi_base + PCI_CLASS_REVISION);
		*val &= 0xff;	/* keep revision id */
		*val |= PCI_CLASS_BRIDGE_PCI << 16;
		return PCIBIOS_SUCCESSFUL;
	}

	return dw_pcie_read(pci->dbi_base + where, size, val);
}
947
/* Host callbacks handed to the DesignWare core. */
static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.host_init = qcom_pcie_host_init,
	.rd_own_conf = qcom_pcie_rd_own_conf,
};

/* ops for IPQ8064 / APQ8064 */
static const struct qcom_pcie_ops ops_v0 = {
	.get_resources = qcom_pcie_get_resources_v0,
	.init = qcom_pcie_init_v0,
	.deinit = qcom_pcie_deinit_v0,
	.ltssm_enable = qcom_pcie_v0_v1_ltssm_enable,
};

/* ops for APQ8084 */
static const struct qcom_pcie_ops ops_v1 = {
	.get_resources = qcom_pcie_get_resources_v1,
	.init = qcom_pcie_init_v1,
	.deinit = qcom_pcie_deinit_v1,
	.ltssm_enable = qcom_pcie_v0_v1_ltssm_enable,
};

/* ops for MSM8996; the only variant with a post_init stage (pipe clk) */
static const struct qcom_pcie_ops ops_v2 = {
	.get_resources = qcom_pcie_get_resources_v2,
	.init = qcom_pcie_init_v2,
	.post_init = qcom_pcie_post_init_v2,
	.deinit = qcom_pcie_deinit_v2,
	.ltssm_enable = qcom_pcie_v2_ltssm_enable,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
};

/* ops for IPQ4019 */
static const struct qcom_pcie_ops ops_v3 = {
	.get_resources = qcom_pcie_get_resources_v3,
	.init = qcom_pcie_init_v3,
	.deinit = qcom_pcie_deinit_v3,
	.ltssm_enable = qcom_pcie_v2_ltssm_enable,
};
985
986static int qcom_pcie_probe(struct platform_device *pdev)
987{
988 struct device *dev = &pdev->dev;
989 struct resource *res;
990 struct pcie_port *pp;
991 struct dw_pcie *pci;
992 struct qcom_pcie *pcie;
993 int ret;
994
995 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
996 if (!pcie)
997 return -ENOMEM;
998
999 pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
1000 if (!pci)
1001 return -ENOMEM;
1002
1003 pci->dev = dev;
1004 pci->ops = &dw_pcie_ops;
1005 pp = &pci->pp;
1006
1007 pcie->pci = pci;
1008
1009 pcie->ops = (struct qcom_pcie_ops *)of_device_get_match_data(dev);
1010
1011 pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
1012 if (IS_ERR(pcie->reset))
1013 return PTR_ERR(pcie->reset);
1014
1015 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
1016 pcie->parf = devm_ioremap_resource(dev, res);
1017 if (IS_ERR(pcie->parf))
1018 return PTR_ERR(pcie->parf);
1019
1020 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
1021 pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
1022 if (IS_ERR(pci->dbi_base))
1023 return PTR_ERR(pci->dbi_base);
1024
1025 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
1026 pcie->elbi = devm_ioremap_resource(dev, res);
1027 if (IS_ERR(pcie->elbi))
1028 return PTR_ERR(pcie->elbi);
1029
1030 pcie->phy = devm_phy_optional_get(dev, "pciephy");
1031 if (IS_ERR(pcie->phy))
1032 return PTR_ERR(pcie->phy);
1033
1034 ret = pcie->ops->get_resources(pcie);
1035 if (ret)
1036 return ret;
1037
1038 pp->root_bus_nr = -1;
1039 pp->ops = &qcom_pcie_dw_ops;
1040
1041 if (IS_ENABLED(CONFIG_PCI_MSI)) {
1042 pp->msi_irq = platform_get_irq_byname(pdev, "msi");
1043 if (pp->msi_irq < 0)
1044 return pp->msi_irq;
1045
1046 ret = devm_request_irq(dev, pp->msi_irq,
1047 qcom_pcie_msi_irq_handler,
1048 IRQF_SHARED | IRQF_NO_THREAD,
1049 "qcom-pcie-msi", pp);
1050 if (ret) {
1051 dev_err(dev, "cannot request msi irq\n");
1052 return ret;
1053 }
1054 }
1055
1056 ret = phy_init(pcie->phy);
1057 if (ret)
1058 return ret;
1059
1060 platform_set_drvdata(pdev, pcie);
1061
1062 ret = dw_pcie_host_init(pp);
1063 if (ret) {
1064 dev_err(dev, "cannot initialize host\n");
1065 return ret;
1066 }
1067
1068 return 0;
1069}
1070
/* Compatible strings map each SoC to its variant ops. */
static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-ipq8064", .data = &ops_v0 },
	{ .compatible = "qcom,pcie-apq8064", .data = &ops_v0 },
	{ .compatible = "qcom,pcie-apq8084", .data = &ops_v1 },
	{ .compatible = "qcom,pcie-msm8996", .data = &ops_v2 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &ops_v3 },
	{ }
};

/* No .remove: the driver is built-in and cannot be unbound. */
static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
	},
};
builtin_platform_driver(qcom_pcie_driver);
1089