1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#include <linux/clk.h>
30#include <linux/debugfs.h>
31#include <linux/delay.h>
32#include <linux/export.h>
33#include <linux/interrupt.h>
34#include <linux/irq.h>
35#include <linux/irqdomain.h>
36#include <linux/kernel.h>
37#include <linux/init.h>
38#include <linux/msi.h>
39#include <linux/of_address.h>
40#include <linux/of_pci.h>
41#include <linux/of_platform.h>
42#include <linux/pci.h>
43#include <linux/phy/phy.h>
44#include <linux/platform_device.h>
45#include <linux/reset.h>
46#include <linux/sizes.h>
47#include <linux/slab.h>
48#include <linux/vmalloc.h>
49#include <linux/regulator/consumer.h>
50
51#include <soc/tegra/cpuidle.h>
52#include <soc/tegra/pmc.h>
53
54#include <asm/mach/irq.h>
55#include <asm/mach/map.h>
56#include <asm/mach/pci.h>
57
58#define INT_PCI_MSI_NR (8 * 32)
59
60
61
62#define AFI_AXI_BAR0_SZ 0x00
63#define AFI_AXI_BAR1_SZ 0x04
64#define AFI_AXI_BAR2_SZ 0x08
65#define AFI_AXI_BAR3_SZ 0x0c
66#define AFI_AXI_BAR4_SZ 0x10
67#define AFI_AXI_BAR5_SZ 0x14
68
69#define AFI_AXI_BAR0_START 0x18
70#define AFI_AXI_BAR1_START 0x1c
71#define AFI_AXI_BAR2_START 0x20
72#define AFI_AXI_BAR3_START 0x24
73#define AFI_AXI_BAR4_START 0x28
74#define AFI_AXI_BAR5_START 0x2c
75
76#define AFI_FPCI_BAR0 0x30
77#define AFI_FPCI_BAR1 0x34
78#define AFI_FPCI_BAR2 0x38
79#define AFI_FPCI_BAR3 0x3c
80#define AFI_FPCI_BAR4 0x40
81#define AFI_FPCI_BAR5 0x44
82
83#define AFI_CACHE_BAR0_SZ 0x48
84#define AFI_CACHE_BAR0_ST 0x4c
85#define AFI_CACHE_BAR1_SZ 0x50
86#define AFI_CACHE_BAR1_ST 0x54
87
88#define AFI_MSI_BAR_SZ 0x60
89#define AFI_MSI_FPCI_BAR_ST 0x64
90#define AFI_MSI_AXI_BAR_ST 0x68
91
92#define AFI_MSI_VEC0 0x6c
93#define AFI_MSI_VEC1 0x70
94#define AFI_MSI_VEC2 0x74
95#define AFI_MSI_VEC3 0x78
96#define AFI_MSI_VEC4 0x7c
97#define AFI_MSI_VEC5 0x80
98#define AFI_MSI_VEC6 0x84
99#define AFI_MSI_VEC7 0x88
100
101#define AFI_MSI_EN_VEC0 0x8c
102#define AFI_MSI_EN_VEC1 0x90
103#define AFI_MSI_EN_VEC2 0x94
104#define AFI_MSI_EN_VEC3 0x98
105#define AFI_MSI_EN_VEC4 0x9c
106#define AFI_MSI_EN_VEC5 0xa0
107#define AFI_MSI_EN_VEC6 0xa4
108#define AFI_MSI_EN_VEC7 0xa8
109
110#define AFI_CONFIGURATION 0xac
111#define AFI_CONFIGURATION_EN_FPCI (1 << 0)
112
113#define AFI_FPCI_ERROR_MASKS 0xb0
114
115#define AFI_INTR_MASK 0xb4
116#define AFI_INTR_MASK_INT_MASK (1 << 0)
117#define AFI_INTR_MASK_MSI_MASK (1 << 8)
118
119#define AFI_INTR_CODE 0xb8
120#define AFI_INTR_CODE_MASK 0xf
121#define AFI_INTR_INI_SLAVE_ERROR 1
122#define AFI_INTR_INI_DECODE_ERROR 2
123#define AFI_INTR_TARGET_ABORT 3
124#define AFI_INTR_MASTER_ABORT 4
125#define AFI_INTR_INVALID_WRITE 5
126#define AFI_INTR_LEGACY 6
127#define AFI_INTR_FPCI_DECODE_ERROR 7
128#define AFI_INTR_AXI_DECODE_ERROR 8
129#define AFI_INTR_FPCI_TIMEOUT 9
130#define AFI_INTR_PE_PRSNT_SENSE 10
131#define AFI_INTR_PE_CLKREQ_SENSE 11
132#define AFI_INTR_CLKCLAMP_SENSE 12
133#define AFI_INTR_RDY4PD_SENSE 13
134#define AFI_INTR_P2P_ERROR 14
135
136#define AFI_INTR_SIGNATURE 0xbc
137#define AFI_UPPER_FPCI_ADDRESS 0xc0
138#define AFI_SM_INTR_ENABLE 0xc4
139#define AFI_SM_INTR_INTA_ASSERT (1 << 0)
140#define AFI_SM_INTR_INTB_ASSERT (1 << 1)
141#define AFI_SM_INTR_INTC_ASSERT (1 << 2)
142#define AFI_SM_INTR_INTD_ASSERT (1 << 3)
143#define AFI_SM_INTR_INTA_DEASSERT (1 << 4)
144#define AFI_SM_INTR_INTB_DEASSERT (1 << 5)
145#define AFI_SM_INTR_INTC_DEASSERT (1 << 6)
146#define AFI_SM_INTR_INTD_DEASSERT (1 << 7)
147
148#define AFI_AFI_INTR_ENABLE 0xc8
149#define AFI_INTR_EN_INI_SLVERR (1 << 0)
150#define AFI_INTR_EN_INI_DECERR (1 << 1)
151#define AFI_INTR_EN_TGT_SLVERR (1 << 2)
152#define AFI_INTR_EN_TGT_DECERR (1 << 3)
153#define AFI_INTR_EN_TGT_WRERR (1 << 4)
154#define AFI_INTR_EN_DFPCI_DECERR (1 << 5)
155#define AFI_INTR_EN_AXI_DECERR (1 << 6)
156#define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7)
157#define AFI_INTR_EN_PRSNT_SENSE (1 << 8)
158
159#define AFI_PCIE_CONFIG 0x0f8
160#define AFI_PCIE_CONFIG_PCIE_DISABLE(x) (1 << ((x) + 1))
161#define AFI_PCIE_CONFIG_PCIE_DISABLE_ALL 0xe
162#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20)
163#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20)
164#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420 (0x0 << 20)
165#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1 (0x0 << 20)
166#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20)
167#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222 (0x1 << 20)
168#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1 (0x1 << 20)
169#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20)
170
171#define AFI_FUSE 0x104
172#define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)
173
174#define AFI_PEX0_CTRL 0x110
175#define AFI_PEX1_CTRL 0x118
176#define AFI_PEX2_CTRL 0x128
177#define AFI_PEX_CTRL_RST (1 << 0)
178#define AFI_PEX_CTRL_CLKREQ_EN (1 << 1)
179#define AFI_PEX_CTRL_REFCLK_EN (1 << 3)
180#define AFI_PEX_CTRL_OVERRIDE_EN (1 << 4)
181
182#define AFI_PLLE_CONTROL 0x160
183#define AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
184#define AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)
185
186#define AFI_PEXBIAS_CTRL_0 0x168
187
188#define RP_VEND_XP 0x00000f00
189#define RP_VEND_XP_DL_UP (1 << 30)
190
191#define RP_PRIV_MISC 0x00000fe0
192#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0)
193#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0)
194
195#define RP_LINK_CONTROL_STATUS 0x00000090
196#define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000
197#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000
198
199#define PADS_CTL_SEL 0x0000009c
200
201#define PADS_CTL 0x000000a0
202#define PADS_CTL_IDDQ_1L (1 << 0)
203#define PADS_CTL_TX_DATA_EN_1L (1 << 6)
204#define PADS_CTL_RX_DATA_EN_1L (1 << 10)
205
206#define PADS_PLL_CTL_TEGRA20 0x000000b8
207#define PADS_PLL_CTL_TEGRA30 0x000000b4
208#define PADS_PLL_CTL_RST_B4SM (1 << 1)
209#define PADS_PLL_CTL_LOCKDET (1 << 8)
210#define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16)
211#define PADS_PLL_CTL_REFCLK_INTERNAL_CML (0 << 16)
212#define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (1 << 16)
213#define PADS_PLL_CTL_REFCLK_EXTERNAL (2 << 16)
214#define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20)
215#define PADS_PLL_CTL_TXCLKREF_DIV10 (0 << 20)
216#define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20)
217#define PADS_PLL_CTL_TXCLKREF_BUF_EN (1 << 22)
218
219#define PADS_REFCLK_CFG0 0x000000c8
220#define PADS_REFCLK_CFG1 0x000000cc
221#define PADS_REFCLK_BIAS 0x000000d0
222
223
224
225
226
227
228#define PADS_REFCLK_CFG_TERM_SHIFT 2
229#define PADS_REFCLK_CFG_E_TERM_SHIFT 7
230#define PADS_REFCLK_CFG_PREDI_SHIFT 8
231#define PADS_REFCLK_CFG_DRVI_SHIFT 12
232
/*
 * MSI controller state: tracks allocation of the INT_PCI_MSI_NR (8 banks
 * of 32) MSI vectors that the AFI can decode.
 */
struct tegra_msi {
	struct msi_controller chip;
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);	/* vectors currently allocated */
	struct irq_domain *domain;
	unsigned long pages;	/* backing page(s) for the MSI target address — allocation not visible in this chunk */
	struct mutex lock;	/* serializes access to the 'used' bitmap */
	int irq;		/* parent interrupt that fans out to all MSI vectors */
};
241
242
/*
 * Per-SoC-generation configuration: register offsets, default pad values
 * and feature flags that differ between Tegra20/30/124/210-class parts.
 */
struct tegra_pcie_soc {
	unsigned int num_ports;		/* number of root ports on this SoC */
	unsigned int msi_base_shift;
	u32 pads_pll_ctl;		/* offset of the PADS PLL control register (varies per SoC) */
	u32 tx_ref_sel;			/* TXCLKREF divider selection for the pad PLL */
	u32 pads_refclk_cfg0;		/* default value for PADS_REFCLK_CFG0 */
	u32 pads_refclk_cfg1;		/* default for PADS_REFCLK_CFG1 (only used when num_ports > 2) */
	bool has_pex_clkreq_en;		/* AFI_PEX_CTRL_CLKREQ_EN supported */
	bool has_pex_bias_ctrl;		/* AFI_PEXBIAS_CTRL_0 register present */
	bool has_intr_prsnt_sense;	/* presence-detect interrupt available */
	bool has_cml_clk;		/* separate CML clock must be enabled */
	bool has_gen2;			/* controller supports PCIe Gen2 */
};
256
/* Recover the tegra_msi wrapper from its embedded msi_controller. */
static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
{
	return container_of(chip, struct tegra_msi, chip);
}
261
/*
 * Main driver state for one Tegra PCIe host controller instance.
 */
struct tegra_pcie {
	struct device *dev;

	void __iomem *pads;	/* mapped PADS (PHY pad control) registers */
	void __iomem *afi;	/* mapped AFI (AXI-to-FPCI bridge) registers */
	int irq;		/* "intr" interrupt, handled by tegra_pcie_isr() */

	struct list_head buses;	/* tegra_pcie_bus mappings for downstream buses */
	struct resource *cs;	/* configuration-space aperture ("cs" region) */

	struct resource io;	/* physical I/O aperture */
	struct resource pio;	/* remapped I/O space exposed to the PCI core */
	struct resource mem;	/* non-prefetchable memory aperture */
	struct resource prefetch;	/* prefetchable memory aperture */
	struct resource busn;	/* bus number range */

	/* CPU-to-bus address offsets handed to the PCI core */
	struct {
		resource_size_t mem;
		resource_size_t io;
	} offset;

	struct clk *pex_clk;	/* PCIe controller clock */
	struct clk *afi_clk;	/* AFI bridge clock */
	struct clk *pll_e;	/* PLLE clock */
	struct clk *cml_clk;	/* CML clock (only on SoCs with has_cml_clk) */

	struct reset_control *pex_rst;
	struct reset_control *afi_rst;
	struct reset_control *pcie_xrst;

	bool legacy_phy;	/* true when the single legacy "pcie" PHY is used */
	struct phy *phy;	/* legacy PHY handle (NULL when per-lane PHYs are used) */

	struct tegra_msi msi;

	struct list_head ports;	/* enabled tegra_pcie_port instances */
	u32 xbar_config;	/* lane crossbar configuration for AFI_PCIE_CONFIG */

	struct regulator_bulk_data *supplies;
	unsigned int num_supplies;

	const struct tegra_pcie_soc *soc;	/* per-SoC parameters */
	struct dentry *debugfs;
};
306
/* State for a single root port of the controller. */
struct tegra_pcie_port {
	struct tegra_pcie *pcie;	/* owning controller */
	struct device_node *np;		/* port's device-tree node */
	struct list_head list;		/* entry in tegra_pcie.ports */
	struct resource regs;		/* port register window */
	void __iomem *base;		/* mapped port registers */
	unsigned int index;		/* port number (selects AFI_PEXn_CTRL) */
	unsigned int lanes;		/* number of lanes assigned to this port */

	struct phy **phys;		/* per-lane PHYs ('lanes' entries, non-legacy mode) */
};
318
/*
 * Per-bus virtual mapping of the configuration space aperture, created in
 * tegra_pcie_bus_alloc() and looked up by bus number in tegra_pcie_map_bus().
 */
struct tegra_pcie_bus {
	struct vm_struct *area;		/* 1 MiB virtual area covering this bus' config space */
	struct list_head list;		/* entry in tegra_pcie.buses */
	unsigned int nr;		/* bus number this mapping belongs to */
};
324
/* Retrieve the driver state stashed in the ARM pci_sys_data. */
static inline struct tegra_pcie *sys_to_pcie(struct pci_sys_data *sys)
{
	return sys->private_data;
}
329
/* Write a 32-bit value to an AFI register. */
static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
			      unsigned long offset)
{
	writel(value, pcie->afi + offset);
}
335
/* Read a 32-bit value from an AFI register. */
static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->afi + offset);
}
340
/* Write a 32-bit value to a PADS register. */
static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
			       unsigned long offset)
{
	writel(value, pcie->pads + offset);
}
346
/* Read a 32-bit value from a PADS register. */
static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->pads + offset);
}
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
/*
 * Compute the offset into the configuration space aperture for a given
 * device/function and register number. The layout packs the extended
 * register bits, device, function and dword-aligned register into one
 * linear offset.
 */
static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
{
	unsigned long offset;

	offset = (where & 0xf00) << 8;		/* extended register number */
	offset |= PCI_SLOT(devfn) << 11;	/* device */
	offset |= PCI_FUNC(devfn) << 8;		/* function */
	offset |= where & 0xfc;			/* dword-aligned register */

	return offset;
}
383
/*
 * Create a virtual mapping for the configuration space of one downstream
 * bus. The hardware's "cs" aperture interleaves per-bus config space in
 * 16 MiB strides, so the mapping stitches 16 x 64 KiB physical chunks
 * into one contiguous 1 MiB virtual area.
 *
 * Returns the new tegra_pcie_bus on success or an ERR_PTR() on failure.
 */
static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
						   unsigned int busnr)
{
	struct device *dev = pcie->dev;
	/* strongly-ordered device mapping, non-executable (ARM-specific) */
	pgprot_t prot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				 L_PTE_XN | L_PTE_MT_DEV_SHARED | L_PTE_SHARED);
	phys_addr_t cs = pcie->cs->start;
	struct tegra_pcie_bus *bus;
	unsigned int i;
	int err;

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&bus->list);
	bus->nr = busnr;

	/* reserve a contiguous 1 MiB virtual area to map into */
	bus->area = get_vm_area(SZ_1M, VM_IOREMAP);
	if (!bus->area) {
		err = -ENOMEM;
		goto free;
	}

	/* map each of the 16 scattered 64 KiB chunks for this bus number */
	for (i = 0; i < 16; i++) {
		unsigned long virt = (unsigned long)bus->area->addr +
				     i * SZ_64K;
		phys_addr_t phys = cs + i * SZ_16M + busnr * SZ_64K;

		err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
		if (err < 0) {
			dev_err(dev, "ioremap_page_range() failed: %d\n", err);
			goto unmap;
		}
	}

	return bus;

unmap:
	/* tears down any chunks already mapped and releases the vm area */
	vunmap(bus->area->addr);
free:
	kfree(bus);
	return ERR_PTR(err);
}
430
431static int tegra_pcie_add_bus(struct pci_bus *bus)
432{
433 struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
434 struct tegra_pcie_bus *b;
435
436 b = tegra_pcie_bus_alloc(pcie, bus->number);
437 if (IS_ERR(b))
438 return PTR_ERR(b);
439
440 list_add_tail(&b->list, &pcie->buses);
441
442 return 0;
443}
444
445static void tegra_pcie_remove_bus(struct pci_bus *child)
446{
447 struct tegra_pcie *pcie = sys_to_pcie(child->sysdata);
448 struct tegra_pcie_bus *bus, *tmp;
449
450 list_for_each_entry_safe(bus, tmp, &pcie->buses, list) {
451 if (bus->nr == child->number) {
452 vunmap(bus->area->addr);
453 list_del(&bus->list);
454 kfree(bus);
455 break;
456 }
457 }
458}
459
/*
 * pci_ops .map_bus callback: translate (bus, devfn, where) into a virtual
 * address for config accesses.
 *
 * Accesses to the root bus (bus 0) target the root ports' own register
 * windows; slot N corresponds to port index N-1. Accesses to downstream
 * buses go through the per-bus mapping built in tegra_pcie_bus_alloc().
 *
 * Returns NULL when no matching port or bus mapping exists.
 */
static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
					unsigned int devfn,
					int where)
{
	struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
	struct device *dev = pcie->dev;
	void __iomem *addr = NULL;

	if (bus->number == 0) {
		unsigned int slot = PCI_SLOT(devfn);
		struct tegra_pcie_port *port;

		/* root port registers are accessed directly, dword aligned */
		list_for_each_entry(port, &pcie->ports, list) {
			if (port->index + 1 == slot) {
				addr = port->base + (where & ~3);
				break;
			}
		}
	} else {
		struct tegra_pcie_bus *b;

		/* find the pre-built mapping for this bus number */
		list_for_each_entry(b, &pcie->buses, list)
			if (b->nr == bus->number)
				addr = (void __iomem *)b->area->addr;

		if (!addr) {
			dev_err(dev, "failed to map cfg. space for bus %u\n",
				bus->number);
			return NULL;
		}

		addr += tegra_pcie_conf_offset(devfn, where);
	}

	return addr;
}
496
/*
 * Config space accessors. The hardware only supports 32-bit config
 * accesses, hence the pci_generic_config_{read,write}32 helpers.
 */
static struct pci_ops tegra_pcie_ops = {
	.add_bus = tegra_pcie_add_bus,
	.remove_bus = tegra_pcie_remove_bus,
	.map_bus = tegra_pcie_map_bus,
	.read = pci_generic_config_read32,
	.write = pci_generic_config_write32,
};
504
505static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
506{
507 unsigned long ret = 0;
508
509 switch (port->index) {
510 case 0:
511 ret = AFI_PEX0_CTRL;
512 break;
513
514 case 1:
515 ret = AFI_PEX1_CTRL;
516 break;
517
518 case 2:
519 ret = AFI_PEX2_CTRL;
520 break;
521 }
522
523 return ret;
524}
525
/*
 * Pulse the port's PEX reset: assert (bit cleared — the RST bit is
 * active-low in hardware usage here), hold for 1-2 ms, then deassert.
 */
static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* assert reset */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	usleep_range(1000, 2000);

	/* deassert reset */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);
}
542
/*
 * Enable a root port: turn on its reference clock (and CLKREQ handling
 * where the SoC supports it), then take it through a reset cycle.
 */
static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* enable the reference clock */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_REFCLK_EN;

	if (soc->has_pex_clkreq_en)
		value |= AFI_PEX_CTRL_CLKREQ_EN;

	value |= AFI_PEX_CTRL_OVERRIDE_EN;

	afi_writel(port->pcie, value, ctrl);

	tegra_pcie_port_reset(port);
}
562
/*
 * Disable a root port: hold it in reset, then gate its reference clock
 * (and CLKREQ where applicable). Mirrors tegra_pcie_port_enable().
 */
static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* assert port reset */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* disable reference clock */
	value = afi_readl(port->pcie, ctrl);

	if (soc->has_pex_clkreq_en)
		value &= ~AFI_PEX_CTRL_CLKREQ_EN;

	value &= ~AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);
}
583
/*
 * Release a root port's resources. The devm_* resources are released
 * explicitly because ports can be freed while the device is still bound.
 */
static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;

	devm_iounmap(dev, port->base);
	devm_release_mem_region(dev, port->regs.start,
				resource_size(&port->regs));
	list_del(&port->list);
	devm_kfree(dev, port);
}
595
596
/*
 * Force the class code of the listed NVIDIA root-port devices to
 * PCI-to-PCI bridge; the hardware reports a different class, which would
 * prevent the PCI core from treating them as bridges.
 */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
605
606
/*
 * Enable relaxed ordering on every device behind this host bridge.
 * NOTE(review): applied via PCI_ANY_ID, presumably because the Tegra
 * host requires it for correct/performant operation — confirm against
 * the upstream driver's rationale.
 */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
612
/*
 * ARM pci_sys_data setup hook: publish the controller's apertures to the
 * PCI core. Returns 1 on success (the ARM PCI code treats a positive
 * return as "bus present"), or a negative errno on failure.
 */
static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
{
	struct tegra_pcie *pcie = sys_to_pcie(sys);
	struct device *dev = pcie->dev;
	int err;

	sys->mem_offset = pcie->offset.mem;
	sys->io_offset = pcie->offset.io;

	err = devm_request_resource(dev, &iomem_resource, &pcie->io);
	if (err < 0)
		return err;

	/* I/O space is only advertised if the remap succeeds */
	err = pci_remap_iospace(&pcie->pio, pcie->io.start);
	if (!err)
		pci_add_resource_offset(&sys->resources, &pcie->pio,
					sys->io_offset);

	pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
	pci_add_resource_offset(&sys->resources, &pcie->prefetch,
				sys->mem_offset);
	pci_add_resource(&sys->resources, &pcie->busn);

	err = devm_request_pci_bus_resources(dev, &sys->resources);
	if (err < 0)
		return err;

	return 1;
}
642
643static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
644{
645 struct tegra_pcie *pcie = sys_to_pcie(pdev->bus->sysdata);
646 int irq;
647
648 tegra_cpuidle_pcie_irqs_in_use();
649
650 irq = of_irq_parse_and_map_pci(pdev, slot, pin);
651 if (!irq)
652 irq = pcie->irq;
653
654 return irq;
655}
656
657static irqreturn_t tegra_pcie_isr(int irq, void *arg)
658{
659 const char *err_msg[] = {
660 "Unknown",
661 "AXI slave error",
662 "AXI decode error",
663 "Target abort",
664 "Master abort",
665 "Invalid write",
666 "Legacy interrupt",
667 "Response decoding error",
668 "AXI response decoding error",
669 "Transaction timeout",
670 "Slot present pin change",
671 "Slot clock request change",
672 "TMS clock ramp change",
673 "TMS ready for power down",
674 "Peer2Peer error",
675 };
676 struct tegra_pcie *pcie = arg;
677 struct device *dev = pcie->dev;
678 u32 code, signature;
679
680 code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
681 signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
682 afi_writel(pcie, 0, AFI_INTR_CODE);
683
684 if (code == AFI_INTR_LEGACY)
685 return IRQ_NONE;
686
687 if (code >= ARRAY_SIZE(err_msg))
688 code = 0;
689
690
691
692
693
694 if (code == AFI_INTR_MASTER_ABORT)
695 dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
696 else
697 dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);
698
699 if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
700 code == AFI_INTR_FPCI_DECODE_ERROR) {
701 u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
702 u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
703
704 if (code == AFI_INTR_MASTER_ABORT)
705 dev_dbg(dev, " FPCI address: %10llx\n", address);
706 else
707 dev_err(dev, " FPCI address: %10llx\n", address);
708 }
709
710 return IRQ_HANDLED;
711}
712
713
714
715
716
717
718
719
720
/*
 * Program the AFI's AXI-to-FPCI address translation BARs:
 *   BAR0 - configuration space, BAR1 - downstream I/O,
 *   BAR2 - prefetchable memory, BAR3 - non-prefetchable memory,
 *   BAR4/5 and the cache/MSI translations are cleared here (MSI is set
 *   up elsewhere). Sizes are programmed in 4 KiB (>> 12) units.
 */
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
	u32 fpci_bar, size, axi_address;

	/* BAR0: type-1 configuration space at fixed FPCI address */
	fpci_bar = 0xfe100000;
	size = resource_size(pcie->cs);
	axi_address = pcie->cs->start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR0_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR0);

	/* BAR1: downstream I/O at fixed FPCI address */
	fpci_bar = 0xfdfc0000;
	size = resource_size(&pcie->io);
	axi_address = pcie->io.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);

	/* BAR2: prefetchable memory, identity-mapped (low bit enables) */
	fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->prefetch);
	axi_address = pcie->prefetch.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

	/* BAR3: non-prefetchable memory, identity-mapped */
	fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->mem);
	axi_address = pcie->mem.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);

	/* BAR4/5 are unused */
	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR4);

	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR5);

	/* no cache translation */
	afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);

	/* clear MSI translation; configured later by the MSI setup path */
	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}
778
779static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
780{
781 const struct tegra_pcie_soc *soc = pcie->soc;
782 u32 value;
783
784 timeout = jiffies + msecs_to_jiffies(timeout);
785
786 while (time_before(jiffies, timeout)) {
787 value = pads_readl(pcie, soc->pads_pll_ctl);
788 if (value & PADS_PLL_CTL_LOCKDET)
789 return 0;
790 }
791
792 return -ETIMEDOUT;
793}
794
/*
 * Bring up the built-in (non-generic-PHY) pad logic: configure the pad
 * PLL for the internal CML reference clock, pulse its reset, wait for
 * lock, then enable the TX/RX data paths. The ordering of these register
 * writes follows the hardware bring-up sequence and must not be changed.
 *
 * Returns 0 on success or -ETIMEDOUT if the PLL never locks.
 */
static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;
	int err;

	/* select internal pad control */
	pads_writel(pcie, 0x0, PADS_CTL_SEL);

	/* put the pads in IDDQ (low-power) while reconfiguring the PLL */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/*
	 * Select the internal CML reference clock and the SoC-specific
	 * TXCLKREF divider.
	 */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* assert PLL reset */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	/* release PLL reset */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value |= PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* wait up to 500 ms for lock */
	err = tegra_pcie_pll_wait(pcie, 500);
	if (err < 0) {
		dev_err(dev, "PLL failed to lock: %d\n", err);
		return err;
	}

	/* take the pads out of IDDQ */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* enable TX/RX data paths */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
	pads_writel(pcie, value, PADS_CTL);

	return 0;
}
850
/*
 * Power down the built-in pad logic: disable the TX/RX data paths, put
 * the pads in IDDQ and hold the pad PLL in reset. Reverse of
 * tegra_pcie_phy_enable(). Always returns 0.
 */
static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	/* disable TX/RX data paths */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
	pads_writel(pcie, value, PADS_CTL);

	/* put pads in IDDQ (low-power) */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* hold the pad PLL in reset */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	return 0;
}
875
876static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
877{
878 struct device *dev = port->pcie->dev;
879 unsigned int i;
880 int err;
881
882 for (i = 0; i < port->lanes; i++) {
883 err = phy_power_on(port->phys[i]);
884 if (err < 0) {
885 dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
886 return err;
887 }
888 }
889
890 return 0;
891}
892
893static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
894{
895 struct device *dev = port->pcie->dev;
896 unsigned int i;
897 int err;
898
899 for (i = 0; i < port->lanes; i++) {
900 err = phy_power_off(port->phys[i]);
901 if (err < 0) {
902 dev_err(dev, "failed to power off PHY#%u: %d\n", i,
903 err);
904 return err;
905 }
906 }
907
908 return 0;
909}
910
/*
 * Power on all PHYs. Uses the single legacy PHY (generic PHY if present,
 * otherwise the built-in pad logic) or, on newer SoCs, the per-lane PHYs
 * of each port. For per-lane PHYs the reference-clock pad configuration
 * is also programmed. Returns 0 on success or a negative errno.
 */
static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_on(pcie->phy);
		else
			err = tegra_pcie_phy_enable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power on PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_on(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power on PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	/* configure the reference clock driver/termination pads */
	pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);

	/* CFG1 only exists on SoCs with more than two ports */
	if (soc->num_ports > 2)
		pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);

	return 0;
}
948
/*
 * Power off all PHYs; mirror of tegra_pcie_phy_power_on().
 * Returns 0 on success or a negative errno from the first failure.
 */
static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_off(pcie->phy);
		else
			err = tegra_pcie_phy_disable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power off PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_off(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power off PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	return 0;
}
979
/*
 * Bring up the controller core: configure the lane crossbar, enable only
 * the ports in use, set the Gen2 fuse override, power on the PHYs, take
 * the controller out of reset and enable the AFI's FPCI bridge and error
 * interrupts. Returns 0 on success or a negative errno.
 */
static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	unsigned long value;
	int err;

	/* when a generic PHY is used, let the pads control PLLE */
	if (pcie->phy) {
		value = afi_readl(pcie, AFI_PLLE_CONTROL);
		value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
		value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
		afi_writel(pcie, value, AFI_PLLE_CONTROL);
	}

	if (soc->has_pex_bias_ctrl)
		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

	/* select lane crossbar configuration, disable all ports ... */
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;

	/* ... then re-enable only the ports that are actually in use */
	list_for_each_entry(port, &pcie->ports, list)
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);

	afi_writel(pcie, value, AFI_PCIE_CONFIG);

	/* allow or forbid Gen2 operation depending on the SoC */
	if (soc->has_gen2) {
		value = afi_readl(pcie, AFI_FUSE);
		value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	} else {
		value = afi_readl(pcie, AFI_FUSE);
		value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	}

	err = tegra_pcie_phy_power_on(pcie);
	if (err < 0) {
		dev_err(dev, "failed to power on PHY(s): %d\n", err);
		return err;
	}

	/* release the controller reset */
	reset_control_deassert(pcie->pcie_xrst);

	/* enable the FPCI bridge */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
	afi_writel(pcie, value, AFI_CONFIGURATION);

	/* enable AFI error interrupt sources */
	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

	if (soc->has_intr_prsnt_sense)
		value |= AFI_INTR_EN_PRSNT_SENSE;

	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

	/* unmask the main interrupt (MSI stays masked here) */
	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* don't mask any FPCI errors */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);

	return 0;
}
1052
/*
 * Power the controller down: PHYs off, all resets asserted, power domain
 * off, regulators disabled. Errors are logged but not propagated since
 * this is a teardown path.
 */
static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;

	err = tegra_pcie_phy_power_off(pcie);
	if (err < 0)
		dev_err(dev, "failed to power off PHY(s): %d\n", err);

	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_warn(dev, "failed to disable regulators: %d\n", err);
}
1074
/*
 * Power the controller up: start from a known-off state (resets asserted,
 * power domain off), enable the supplies, run the PMC power-up sequence
 * and enable the clocks. Returns 0 on success or a negative errno.
 *
 * NOTE(review): a regulator_bulk_enable() failure is only logged and the
 * sequence continues; later error paths also do not unwind the clocks
 * already enabled — confirm whether that matches the upstream driver.
 */
static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	/* start from a known-off state */
	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_err(dev, "failed to enable regulators: %d\n", err);

	/* ungate the power domain; also handles pex_clk/pex_rst ordering */
	err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
						pcie->pex_clk,
						pcie->pex_rst);
	if (err) {
		dev_err(dev, "powerup sequence failed: %d\n", err);
		return err;
	}

	reset_control_deassert(pcie->afi_rst);

	err = clk_prepare_enable(pcie->afi_clk);
	if (err < 0) {
		dev_err(dev, "failed to enable AFI clock: %d\n", err);
		return err;
	}

	if (soc->has_cml_clk) {
		err = clk_prepare_enable(pcie->cml_clk);
		if (err < 0) {
			dev_err(dev, "failed to enable CML clock: %d\n", err);
			return err;
		}
	}

	err = clk_prepare_enable(pcie->pll_e);
	if (err < 0) {
		dev_err(dev, "failed to enable PLLE clock: %d\n", err);
		return err;
	}

	return 0;
}
1124
1125static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
1126{
1127 struct device *dev = pcie->dev;
1128 const struct tegra_pcie_soc *soc = pcie->soc;
1129
1130 pcie->pex_clk = devm_clk_get(dev, "pex");
1131 if (IS_ERR(pcie->pex_clk))
1132 return PTR_ERR(pcie->pex_clk);
1133
1134 pcie->afi_clk = devm_clk_get(dev, "afi");
1135 if (IS_ERR(pcie->afi_clk))
1136 return PTR_ERR(pcie->afi_clk);
1137
1138 pcie->pll_e = devm_clk_get(dev, "pll_e");
1139 if (IS_ERR(pcie->pll_e))
1140 return PTR_ERR(pcie->pll_e);
1141
1142 if (soc->has_cml_clk) {
1143 pcie->cml_clk = devm_clk_get(dev, "cml");
1144 if (IS_ERR(pcie->cml_clk))
1145 return PTR_ERR(pcie->cml_clk);
1146 }
1147
1148 return 0;
1149}
1150
1151static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
1152{
1153 struct device *dev = pcie->dev;
1154
1155 pcie->pex_rst = devm_reset_control_get(dev, "pex");
1156 if (IS_ERR(pcie->pex_rst))
1157 return PTR_ERR(pcie->pex_rst);
1158
1159 pcie->afi_rst = devm_reset_control_get(dev, "afi");
1160 if (IS_ERR(pcie->afi_rst))
1161 return PTR_ERR(pcie->afi_rst);
1162
1163 pcie->pcie_xrst = devm_reset_control_get(dev, "pcie_x");
1164 if (IS_ERR(pcie->pcie_xrst))
1165 return PTR_ERR(pcie->pcie_xrst);
1166
1167 return 0;
1168}
1169
/*
 * Acquire and initialize the single legacy "pcie" PHY. The PHY is
 * optional: when absent, pcie->phy is NULL and the built-in pad logic
 * (tegra_pcie_phy_enable()) is used instead. Sets pcie->legacy_phy so
 * the power-on/off paths pick the legacy code path.
 */
static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;

	pcie->phy = devm_phy_optional_get(dev, "pcie");
	if (IS_ERR(pcie->phy)) {
		err = PTR_ERR(pcie->phy);
		dev_err(dev, "failed to get PHY: %d\n", err);
		return err;
	}

	err = phy_init(pcie->phy);
	if (err < 0) {
		dev_err(dev, "failed to initialize PHY: %d\n", err);
		return err;
	}

	pcie->legacy_phy = true;

	return 0;
}
1192
1193static struct phy *devm_of_phy_optional_get_index(struct device *dev,
1194 struct device_node *np,
1195 const char *consumer,
1196 unsigned int index)
1197{
1198 struct phy *phy;
1199 char *name;
1200
1201 name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index);
1202 if (!name)
1203 return ERR_PTR(-ENOMEM);
1204
1205 phy = devm_of_phy_get(dev, np, name);
1206 kfree(name);
1207
1208 if (IS_ERR(phy) && PTR_ERR(phy) == -ENODEV)
1209 phy = NULL;
1210
1211 return phy;
1212}
1213
1214static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
1215{
1216 struct device *dev = port->pcie->dev;
1217 struct phy *phy;
1218 unsigned int i;
1219 int err;
1220
1221 port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL);
1222 if (!port->phys)
1223 return -ENOMEM;
1224
1225 for (i = 0; i < port->lanes; i++) {
1226 phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i);
1227 if (IS_ERR(phy)) {
1228 dev_err(dev, "failed to get PHY#%u: %ld\n", i,
1229 PTR_ERR(phy));
1230 return PTR_ERR(phy);
1231 }
1232
1233 err = phy_init(phy);
1234 if (err < 0) {
1235 dev_err(dev, "failed to initialize PHY#%u: %d\n", i,
1236 err);
1237 return err;
1238 }
1239
1240 port->phys[i] = phy;
1241 }
1242
1243 return 0;
1244}
1245
1246static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
1247{
1248 const struct tegra_pcie_soc *soc = pcie->soc;
1249 struct device_node *np = pcie->dev->of_node;
1250 struct tegra_pcie_port *port;
1251 int err;
1252
1253 if (!soc->has_gen2 || of_find_property(np, "phys", NULL) != NULL)
1254 return tegra_pcie_phys_get_legacy(pcie);
1255
1256 list_for_each_entry(port, &pcie->ports, list) {
1257 err = tegra_pcie_port_get_phys(port);
1258 if (err < 0)
1259 return err;
1260 }
1261
1262 return 0;
1263}
1264
/*
 * Acquire everything the driver needs: clocks, resets, PHYs, power, the
 * "pads"/"afi" register windows, the "cs" config-space aperture and the
 * "intr" interrupt. On any failure after power-on, the controller is
 * powered back off via the goto-cleanup path. Returns 0 or a negative
 * errno.
 */
static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *pads, *afi, *res;
	int err;

	err = tegra_pcie_clocks_get(pcie);
	if (err) {
		dev_err(dev, "failed to get clocks: %d\n", err);
		return err;
	}

	err = tegra_pcie_resets_get(pcie);
	if (err) {
		dev_err(dev, "failed to get resets: %d\n", err);
		return err;
	}

	err = tegra_pcie_phys_get(pcie);
	if (err < 0) {
		dev_err(dev, "failed to get PHYs: %d\n", err);
		return err;
	}

	err = tegra_pcie_power_on(pcie);
	if (err) {
		dev_err(dev, "failed to power up: %d\n", err);
		return err;
	}

	pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
	pcie->pads = devm_ioremap_resource(dev, pads);
	if (IS_ERR(pcie->pads)) {
		err = PTR_ERR(pcie->pads);
		goto poweroff;
	}

	afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
	pcie->afi = devm_ioremap_resource(dev, afi);
	if (IS_ERR(pcie->afi)) {
		err = PTR_ERR(pcie->afi);
		goto poweroff;
	}

	/*
	 * The config-space aperture is only reserved here, not mapped:
	 * per-bus mappings are created on demand in tegra_pcie_bus_alloc().
	 */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
	if (!res) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	pcie->cs = devm_request_mem_region(dev, res->start,
					   resource_size(res), res->name);
	if (!pcie->cs) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	/* request the main (error) interrupt */
	err = platform_get_irq_byname(pdev, "intr");
	if (err < 0) {
		dev_err(dev, "failed to get IRQ: %d\n", err);
		goto poweroff;
	}

	pcie->irq = err;

	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
	if (err) {
		dev_err(dev, "failed to register IRQ: %d\n", err);
		goto poweroff;
	}

	return 0;

poweroff:
	tegra_pcie_power_off(pcie);
	return err;
}
1345
/*
 * Release the resources acquired by tegra_pcie_get_resources(): the
 * controller IRQ, the power/clock state and the legacy PHY.  devm-managed
 * resources (mappings, per-port PHYs) are released by the driver core.
 *
 * Always returns 0; a PHY teardown failure is only logged.
 */
static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;

	if (pcie->irq > 0)
		free_irq(pcie->irq, pcie);

	tegra_pcie_power_off(pcie);

	/* pcie->phy may be NULL (non-legacy binding); phy_exit(NULL) is a no-op */
	err = phy_exit(pcie->phy);
	if (err < 0)
		dev_err(dev, "failed to teardown PHY: %d\n", err);

	return 0;
}
1362
1363static int tegra_msi_alloc(struct tegra_msi *chip)
1364{
1365 int msi;
1366
1367 mutex_lock(&chip->lock);
1368
1369 msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
1370 if (msi < INT_PCI_MSI_NR)
1371 set_bit(msi, chip->used);
1372 else
1373 msi = -ENOSPC;
1374
1375 mutex_unlock(&chip->lock);
1376
1377 return msi;
1378}
1379
1380static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
1381{
1382 struct device *dev = chip->chip.dev;
1383
1384 mutex_lock(&chip->lock);
1385
1386 if (!test_bit(irq, chip->used))
1387 dev_err(dev, "trying to free unused MSI#%lu\n", irq);
1388 else
1389 clear_bit(irq, chip->used);
1390
1391 mutex_unlock(&chip->lock);
1392}
1393
1394static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
1395{
1396 struct tegra_pcie *pcie = data;
1397 struct device *dev = pcie->dev;
1398 struct tegra_msi *msi = &pcie->msi;
1399 unsigned int i, processed = 0;
1400
1401 for (i = 0; i < 8; i++) {
1402 unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
1403
1404 while (reg) {
1405 unsigned int offset = find_first_bit(®, 32);
1406 unsigned int index = i * 32 + offset;
1407 unsigned int irq;
1408
1409
1410 afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);
1411
1412 irq = irq_find_mapping(msi->domain, index);
1413 if (irq) {
1414 if (test_bit(index, msi->used))
1415 generic_handle_irq(irq);
1416 else
1417 dev_info(dev, "unhandled MSI\n");
1418 } else {
1419
1420
1421
1422
1423 dev_info(dev, "unexpected MSI\n");
1424 }
1425
1426
1427 reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
1428
1429 processed++;
1430 }
1431 }
1432
1433 return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
1434}
1435
/*
 * msi_controller .setup_irq callback: allocate a hardware MSI vector,
 * map it into the MSI IRQ domain, and program the message (target
 * address = the MSI landing page, data = the vector number) into the
 * endpoint.
 *
 * Returns 0 on success or a negative error code.
 */
static int tegra_msi_setup_irq(struct msi_controller *chip,
			       struct pci_dev *pdev, struct msi_desc *desc)
{
	struct tegra_msi *msi = to_tegra_msi(chip);
	struct msi_msg msg;
	unsigned int irq;
	int hwirq;

	hwirq = tegra_msi_alloc(msi);
	if (hwirq < 0)
		return hwirq;

	irq = irq_create_mapping(msi->domain, hwirq);
	if (!irq) {
		tegra_msi_free(msi, hwirq);
		return -EINVAL;
	}

	irq_set_msi_desc(irq, desc);

	/* endpoints write their MSI payload to the landing page's address */
	msg.address_lo = virt_to_phys((void *)msi->pages);
	/* 32 bit address */
	msg.address_hi = 0;
	msg.data = hwirq;

	pci_write_msi_msg(irq, &msg);

	return 0;
}
1465
/*
 * msi_controller .teardown_irq callback: undo tegra_msi_setup_irq().
 * The hwirq must be read from the irq_data before the mapping is
 * disposed of.
 */
static void tegra_msi_teardown_irq(struct msi_controller *chip,
				   unsigned int irq)
{
	struct tegra_msi *msi = to_tegra_msi(chip);
	struct irq_data *d = irq_get_irq_data(irq);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	irq_dispose_mapping(irq);
	tegra_msi_free(msi, hwirq);
}
1476
/* irq_chip for the per-vector MSI interrupts; masking is delegated to
 * the generic PCI MSI mask helpers.
 */
static struct irq_chip tegra_msi_irq_chip = {
	.name = "Tegra PCIe MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};
1484
/*
 * IRQ domain .map callback: associate a newly created MSI mapping with
 * the MSI irq_chip and the simple-IRQ flow handler.  The cpuidle
 * notification keeps the PCIe wake path alive while MSIs are in use
 * (Tegra-specific coupling).
 */
static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
			 irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	tegra_cpuidle_pcie_irqs_in_use();

	return 0;
}
1495
/* Linear IRQ domain over the INT_PCI_MSI_NR MSI vectors. */
static const struct irq_domain_ops msi_domain_ops = {
	.map = tegra_msi_map,
};
1499
1500static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1501{
1502 struct device *dev = pcie->dev;
1503 struct platform_device *pdev = to_platform_device(dev);
1504 const struct tegra_pcie_soc *soc = pcie->soc;
1505 struct tegra_msi *msi = &pcie->msi;
1506 unsigned long base;
1507 int err;
1508 u32 reg;
1509
1510 mutex_init(&msi->lock);
1511
1512 msi->chip.dev = dev;
1513 msi->chip.setup_irq = tegra_msi_setup_irq;
1514 msi->chip.teardown_irq = tegra_msi_teardown_irq;
1515
1516 msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
1517 &msi_domain_ops, &msi->chip);
1518 if (!msi->domain) {
1519 dev_err(dev, "failed to create IRQ domain\n");
1520 return -ENOMEM;
1521 }
1522
1523 err = platform_get_irq_byname(pdev, "msi");
1524 if (err < 0) {
1525 dev_err(dev, "failed to get IRQ: %d\n", err);
1526 goto err;
1527 }
1528
1529 msi->irq = err;
1530
1531 err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
1532 tegra_msi_irq_chip.name, pcie);
1533 if (err < 0) {
1534 dev_err(dev, "failed to request IRQ: %d\n", err);
1535 goto err;
1536 }
1537
1538
1539 msi->pages = __get_free_pages(GFP_KERNEL, 0);
1540 base = virt_to_phys((void *)msi->pages);
1541
1542 afi_writel(pcie, base >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
1543 afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST);
1544
1545 afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
1546
1547
1548 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
1549 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
1550 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
1551 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
1552 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
1553 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
1554 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
1555 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);
1556
1557
1558 reg = afi_readl(pcie, AFI_INTR_MASK);
1559 reg |= AFI_INTR_MASK_MSI_MASK;
1560 afi_writel(pcie, reg, AFI_INTR_MASK);
1561
1562 return 0;
1563
1564err:
1565 irq_domain_remove(msi->domain);
1566 return err;
1567}
1568
/*
 * Tear down MSI support: mask the MSI summary interrupt, disable all
 * vectors, release the landing page and the MSI interrupt, then dispose
 * of every live mapping before removing the IRQ domain.
 *
 * Always returns 0.
 */
static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
{
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, irq;
	u32 value;

	/* mask the MSI interrupt */
	value = afi_readl(pcie, AFI_INTR_MASK);
	value &= ~AFI_INTR_MASK_MSI_MASK;
	afi_writel(pcie, value, AFI_INTR_MASK);

	/* disable all MSI vectors */
	afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC7);

	free_pages(msi->pages, 0);

	if (msi->irq > 0)
		free_irq(msi->irq, pcie);

	/* dispose of any mappings created by tegra_msi_setup_irq() */
	for (i = 0; i < INT_PCI_MSI_NR; i++) {
		irq = irq_find_mapping(msi->domain, i);
		if (irq > 0)
			irq_dispose_mapping(irq);
	}

	irq_domain_remove(msi->domain);

	return 0;
}
1605
1606static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
1607 u32 *xbar)
1608{
1609 struct device *dev = pcie->dev;
1610 struct device_node *np = dev->of_node;
1611
1612 if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
1613 switch (lanes) {
1614 case 0x0000104:
1615 dev_info(dev, "4x1, 1x1 configuration\n");
1616 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
1617 return 0;
1618
1619 case 0x0000102:
1620 dev_info(dev, "2x1, 1x1 configuration\n");
1621 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
1622 return 0;
1623 }
1624 } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1625 switch (lanes) {
1626 case 0x00000204:
1627 dev_info(dev, "4x1, 2x1 configuration\n");
1628 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
1629 return 0;
1630
1631 case 0x00020202:
1632 dev_info(dev, "2x3 configuration\n");
1633 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
1634 return 0;
1635
1636 case 0x00010104:
1637 dev_info(dev, "4x1, 1x2 configuration\n");
1638 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
1639 return 0;
1640 }
1641 } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1642 switch (lanes) {
1643 case 0x00000004:
1644 dev_info(dev, "single-mode configuration\n");
1645 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
1646 return 0;
1647
1648 case 0x00000202:
1649 dev_info(dev, "dual-mode configuration\n");
1650 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
1651 return 0;
1652 }
1653 }
1654
1655 return -EINVAL;
1656}
1657
1658
1659
1660
1661
1662
1663static bool of_regulator_bulk_available(struct device_node *np,
1664 struct regulator_bulk_data *supplies,
1665 unsigned int num_supplies)
1666{
1667 char property[32];
1668 unsigned int i;
1669
1670 for (i = 0; i < num_supplies; i++) {
1671 snprintf(property, 32, "%s-supply", supplies[i].supply);
1672
1673 if (of_find_property(np, property, NULL) == NULL)
1674 return false;
1675 }
1676
1677 return true;
1678}
1679
1680
1681
1682
1683
1684
1685
1686
1687static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
1688{
1689 struct device *dev = pcie->dev;
1690 struct device_node *np = dev->of_node;
1691
1692 if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
1693 pcie->num_supplies = 3;
1694 else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
1695 pcie->num_supplies = 2;
1696
1697 if (pcie->num_supplies == 0) {
1698 dev_err(dev, "device %s not supported in legacy mode\n",
1699 np->full_name);
1700 return -ENODEV;
1701 }
1702
1703 pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
1704 sizeof(*pcie->supplies),
1705 GFP_KERNEL);
1706 if (!pcie->supplies)
1707 return -ENOMEM;
1708
1709 pcie->supplies[0].supply = "pex-clk";
1710 pcie->supplies[1].supply = "vdd";
1711
1712 if (pcie->num_supplies > 2)
1713 pcie->supplies[2].supply = "avdd";
1714
1715 return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
1716}
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
/*
 * Acquire the SoC-specific set of power supplies.  @lane_mask (built by
 * tegra_pcie_parse_dt() from the enabled ports) selects which of the
 * Tegra30 PEXA/PEXB supply groups are needed.
 *
 * If the new-style supplies are not all present in the device tree, fall
 * back to the legacy binding via tegra_pcie_get_legacy_regulators().
 */
static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
{
	struct device *dev = pcie->dev;
	struct device_node *np = dev->of_node;
	unsigned int i = 0;

	if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
		pcie->num_supplies = 7;

		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
					      sizeof(*pcie->supplies),
					      GFP_KERNEL);
		if (!pcie->supplies)
			return -ENOMEM;

		pcie->supplies[i++].supply = "avddio-pex";
		pcie->supplies[i++].supply = "dvddio-pex";
		pcie->supplies[i++].supply = "avdd-pex-pll";
		pcie->supplies[i++].supply = "hvdd-pex";
		pcie->supplies[i++].supply = "hvdd-pex-pll-e";
		pcie->supplies[i++].supply = "vddio-pex-ctl";
		pcie->supplies[i++].supply = "avdd-pll-erefe";
	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
		bool need_pexa = false, need_pexb = false;

		/* lanes 0 to 3 are used by PEXA on Tegra30 */
		if (lane_mask & 0x0f)
			need_pexa = true;

		/* lanes 4 and 5 are used by PEXB */
		if (lane_mask & 0x30)
			need_pexb = true;

		pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
					 (need_pexb ? 2 : 0);

		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
					      sizeof(*pcie->supplies),
					      GFP_KERNEL);
		if (!pcie->supplies)
			return -ENOMEM;

		pcie->supplies[i++].supply = "avdd-pex-pll";
		pcie->supplies[i++].supply = "hvdd-pex";
		pcie->supplies[i++].supply = "vddio-pex-ctl";
		pcie->supplies[i++].supply = "avdd-plle";

		if (need_pexa) {
			pcie->supplies[i++].supply = "avdd-pexa";
			pcie->supplies[i++].supply = "vdd-pexa";
		}

		if (need_pexb) {
			pcie->supplies[i++].supply = "avdd-pexb";
			pcie->supplies[i++].supply = "vdd-pexb";
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
		pcie->num_supplies = 5;

		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
					      sizeof(*pcie->supplies),
					      GFP_KERNEL);
		if (!pcie->supplies)
			return -ENOMEM;

		pcie->supplies[0].supply = "avdd-pex";
		pcie->supplies[1].supply = "vdd-pex";
		pcie->supplies[2].supply = "avdd-pex-pll";
		pcie->supplies[3].supply = "avdd-plle";
		pcie->supplies[4].supply = "vddio-pex-clk";
	}

	if (of_regulator_bulk_available(dev->of_node, pcie->supplies,
					pcie->num_supplies))
		return devm_regulator_bulk_get(dev, pcie->num_supplies,
					       pcie->supplies);

	/*
	 * If not all regulators are available for this new scheme, assume
	 * that the device tree complies with an older version of the
	 * device tree binding.
	 */
	dev_info(dev, "using legacy DT binding for power supplies\n");

	devm_kfree(dev, pcie->supplies);
	pcie->num_supplies = 0;

	return tegra_pcie_get_legacy_regulators(pcie);
}
1816
1817static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
1818{
1819 struct device *dev = pcie->dev;
1820 struct device_node *np = dev->of_node, *port;
1821 const struct tegra_pcie_soc *soc = pcie->soc;
1822 struct of_pci_range_parser parser;
1823 struct of_pci_range range;
1824 u32 lanes = 0, mask = 0;
1825 unsigned int lane = 0;
1826 struct resource res;
1827 int err;
1828
1829 if (of_pci_range_parser_init(&parser, np)) {
1830 dev_err(dev, "missing \"ranges\" property\n");
1831 return -EINVAL;
1832 }
1833
1834 for_each_of_pci_range(&parser, &range) {
1835 err = of_pci_range_to_resource(&range, np, &res);
1836 if (err < 0)
1837 return err;
1838
1839 switch (res.flags & IORESOURCE_TYPE_BITS) {
1840 case IORESOURCE_IO:
1841
1842 pcie->offset.io = res.start - range.pci_addr;
1843
1844 memcpy(&pcie->pio, &res, sizeof(res));
1845 pcie->pio.name = np->full_name;
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855 pcie->io.start = range.cpu_addr;
1856 pcie->io.end = range.cpu_addr + range.size - 1;
1857 pcie->io.flags = IORESOURCE_MEM;
1858 pcie->io.name = "I/O";
1859
1860 memcpy(&res, &pcie->io, sizeof(res));
1861 break;
1862
1863 case IORESOURCE_MEM:
1864
1865
1866
1867
1868
1869
1870 pcie->offset.mem = res.start - range.pci_addr;
1871
1872 if (res.flags & IORESOURCE_PREFETCH) {
1873 memcpy(&pcie->prefetch, &res, sizeof(res));
1874 pcie->prefetch.name = "prefetchable";
1875 } else {
1876 memcpy(&pcie->mem, &res, sizeof(res));
1877 pcie->mem.name = "non-prefetchable";
1878 }
1879 break;
1880 }
1881 }
1882
1883 err = of_pci_parse_bus_range(np, &pcie->busn);
1884 if (err < 0) {
1885 dev_err(dev, "failed to parse ranges property: %d\n", err);
1886 pcie->busn.name = np->name;
1887 pcie->busn.start = 0;
1888 pcie->busn.end = 0xff;
1889 pcie->busn.flags = IORESOURCE_BUS;
1890 }
1891
1892
1893 for_each_child_of_node(np, port) {
1894 struct tegra_pcie_port *rp;
1895 unsigned int index;
1896 u32 value;
1897
1898 err = of_pci_get_devfn(port);
1899 if (err < 0) {
1900 dev_err(dev, "failed to parse address: %d\n", err);
1901 return err;
1902 }
1903
1904 index = PCI_SLOT(err);
1905
1906 if (index < 1 || index > soc->num_ports) {
1907 dev_err(dev, "invalid port number: %d\n", index);
1908 return -EINVAL;
1909 }
1910
1911 index--;
1912
1913 err = of_property_read_u32(port, "nvidia,num-lanes", &value);
1914 if (err < 0) {
1915 dev_err(dev, "failed to parse # of lanes: %d\n",
1916 err);
1917 return err;
1918 }
1919
1920 if (value > 16) {
1921 dev_err(dev, "invalid # of lanes: %u\n", value);
1922 return -EINVAL;
1923 }
1924
1925 lanes |= value << (index << 3);
1926
1927 if (!of_device_is_available(port)) {
1928 lane += value;
1929 continue;
1930 }
1931
1932 mask |= ((1 << value) - 1) << lane;
1933 lane += value;
1934
1935 rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
1936 if (!rp)
1937 return -ENOMEM;
1938
1939 err = of_address_to_resource(port, 0, &rp->regs);
1940 if (err < 0) {
1941 dev_err(dev, "failed to parse address: %d\n", err);
1942 return err;
1943 }
1944
1945 INIT_LIST_HEAD(&rp->list);
1946 rp->index = index;
1947 rp->lanes = value;
1948 rp->pcie = pcie;
1949 rp->np = port;
1950
1951 rp->base = devm_ioremap_resource(dev, &rp->regs);
1952 if (IS_ERR(rp->base))
1953 return PTR_ERR(rp->base);
1954
1955 list_add_tail(&rp->list, &pcie->ports);
1956 }
1957
1958 err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
1959 if (err < 0) {
1960 dev_err(dev, "invalid lane configuration\n");
1961 return err;
1962 }
1963
1964 err = tegra_pcie_get_regulators(pcie, mask);
1965 if (err < 0)
1966 return err;
1967
1968 return 0;
1969}
1970
1971
1972
1973
1974
1975
/* per-attempt poll budget, in ~1 ms steps (see usleep_range below) */
#define TEGRA_PCIE_LINKUP_TIMEOUT 200
/*
 * Poll a root port until its link trains: first wait for the data link
 * layer to come up (RP_VEND_XP_DL_UP), then for DL_LINK_ACTIVE.  Up to
 * three attempts are made, resetting the port between attempts.
 *
 * Returns true if the link came up, false otherwise.
 */
static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int retries = 3;
	unsigned long value;

	/* override presence detection: report an endpoint as present */
	value = readl(port->base + RP_PRIV_MISC);
	value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
	value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
	writel(value, port->base + RP_PRIV_MISC);

	do {
		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

		do {
			value = readl(port->base + RP_VEND_XP);

			if (value & RP_VEND_XP_DL_UP)
				break;

			usleep_range(1000, 2000);
		} while (--timeout);

		if (!timeout) {
			dev_err(dev, "link %u down, retrying\n", port->index);
			goto retry;
		}

		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

		do {
			value = readl(port->base + RP_LINK_CONTROL_STATUS);

			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
				return true;

			usleep_range(1000, 2000);
		} while (--timeout);

retry:
		tegra_pcie_port_reset(port);
	} while (--retries);

	return false;
}
2023
/*
 * Bring up all root ports, dropping (disable + free) any port whose link
 * does not train, then register the host bridge with the ARM PCI core.
 *
 * The _safe list iterator is required because tegra_pcie_port_free()
 * removes ports from the list during iteration.
 *
 * Always returns 0.
 */
static int tegra_pcie_enable(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port, *tmp;
	struct hw_pci hw;

	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		dev_info(dev, "probing port %u, using %u lanes\n",
			 port->index, port->lanes);

		tegra_pcie_port_enable(port);

		if (tegra_pcie_port_check_link(port))
			continue;

		dev_info(dev, "link %u down, ignoring\n", port->index);

		tegra_pcie_port_disable(port);
		tegra_pcie_port_free(port);
	}

	memset(&hw, 0, sizeof(hw));

#ifdef CONFIG_PCI_MSI
	hw.msi_ctrl = &pcie->msi.chip;
#endif

	hw.nr_controllers = 1;
	hw.private_data = (void **)&pcie;
	hw.setup = tegra_pcie_setup;
	hw.map_irq = tegra_pcie_map_irq;
	hw.ops = &tegra_pcie_ops;

	pci_common_init_dev(dev, &hw);
	return 0;
}
2060
/* Per-SoC configuration data, selected via the of_match table below. */
static const struct tegra_pcie_soc tegra20_pcie = {
	.num_ports = 2,
	.msi_base_shift = 0,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
	.pads_refclk_cfg0 = 0xfa5cfa5c,
	.has_pex_clkreq_en = false,
	.has_pex_bias_ctrl = false,
	.has_intr_prsnt_sense = false,
	.has_cml_clk = false,
	.has_gen2 = false,
};

static const struct tegra_pcie_soc tegra30_pcie = {
	.num_ports = 3,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.pads_refclk_cfg0 = 0xfa5cfa5c,
	/* Tegra30 is the only SoC here with a second refclk config word */
	.pads_refclk_cfg1 = 0xfa5cfa5c,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = true,
	.has_gen2 = false,
};

static const struct tegra_pcie_soc tegra124_pcie = {
	.num_ports = 2,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.pads_refclk_cfg0 = 0x44ac44ac,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = true,
	.has_gen2 = true,
};

/* most specific (newest) compatible first */
static const struct of_device_id tegra_pcie_of_match[] = {
	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
	{ },
};
2107
2108static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
2109{
2110 struct tegra_pcie *pcie = s->private;
2111
2112 if (list_empty(&pcie->ports))
2113 return NULL;
2114
2115 seq_printf(s, "Index Status\n");
2116
2117 return seq_list_start(&pcie->ports, *pos);
2118}
2119
2120static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
2121{
2122 struct tegra_pcie *pcie = s->private;
2123
2124 return seq_list_next(v, &pcie->ports, pos);
2125}
2126
/* seq_file .stop: nothing to release, but the op must be provided. */
static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
{
}
2130
2131static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
2132{
2133 bool up = false, active = false;
2134 struct tegra_pcie_port *port;
2135 unsigned int value;
2136
2137 port = list_entry(v, struct tegra_pcie_port, list);
2138
2139 value = readl(port->base + RP_VEND_XP);
2140
2141 if (value & RP_VEND_XP_DL_UP)
2142 up = true;
2143
2144 value = readl(port->base + RP_LINK_CONTROL_STATUS);
2145
2146 if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2147 active = true;
2148
2149 seq_printf(s, "%2u ", port->index);
2150
2151 if (up)
2152 seq_printf(s, "up");
2153
2154 if (active) {
2155 if (up)
2156 seq_printf(s, ", ");
2157
2158 seq_printf(s, "active");
2159 }
2160
2161 seq_printf(s, "\n");
2162 return 0;
2163}
2164
/* Iterator over pcie->ports for the debugfs "ports" file. */
static const struct seq_operations tegra_pcie_ports_seq_ops = {
	.start = tegra_pcie_ports_seq_start,
	.next = tegra_pcie_ports_seq_next,
	.stop = tegra_pcie_ports_seq_stop,
	.show = tegra_pcie_ports_seq_show,
};
2171
2172static int tegra_pcie_ports_open(struct inode *inode, struct file *file)
2173{
2174 struct tegra_pcie *pcie = inode->i_private;
2175 struct seq_file *s;
2176 int err;
2177
2178 err = seq_open(file, &tegra_pcie_ports_seq_ops);
2179 if (err)
2180 return err;
2181
2182 s = file->private_data;
2183 s->private = pcie;
2184
2185 return 0;
2186}
2187
/* file_operations for the debugfs "ports" file; read side is seq_file. */
static const struct file_operations tegra_pcie_ports_ops = {
	.owner = THIS_MODULE,
	.open = tegra_pcie_ports_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
2195
2196static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
2197{
2198 struct dentry *file;
2199
2200 pcie->debugfs = debugfs_create_dir("pcie", NULL);
2201 if (!pcie->debugfs)
2202 return -ENOMEM;
2203
2204 file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs,
2205 pcie, &tegra_pcie_ports_ops);
2206 if (!file)
2207 goto remove;
2208
2209 return 0;
2210
2211remove:
2212 debugfs_remove_recursive(pcie->debugfs);
2213 pcie->debugfs = NULL;
2214 return -ENOMEM;
2215}
2216
/*
 * Driver probe: parse the device tree, acquire and power the hardware,
 * initialize the controller, optionally enable MSI, bring up the ports
 * and (best-effort) create the debugfs entries.
 *
 * A debugfs failure is logged but not fatal; all earlier steps unwind
 * via the labels at the bottom.
 */
static int tegra_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct tegra_pcie *pcie;
	int err;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	/* SoC data from the matched tegra_pcie_of_match entry */
	pcie->soc = of_device_get_match_data(dev);
	INIT_LIST_HEAD(&pcie->buses);
	INIT_LIST_HEAD(&pcie->ports);
	pcie->dev = dev;

	err = tegra_pcie_parse_dt(pcie);
	if (err < 0)
		return err;

	err = tegra_pcie_get_resources(pcie);
	if (err < 0) {
		dev_err(dev, "failed to request resources: %d\n", err);
		return err;
	}

	err = tegra_pcie_enable_controller(pcie);
	if (err)
		goto put_resources;

	/* program the AFI address translation windows */
	tegra_pcie_setup_translations(pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = tegra_pcie_enable_msi(pcie);
		if (err < 0) {
			dev_err(dev, "failed to enable MSI support: %d\n", err);
			goto put_resources;
		}
	}

	err = tegra_pcie_enable(pcie);
	if (err < 0) {
		dev_err(dev, "failed to enable PCIe ports: %d\n", err);
		goto disable_msi;
	}

	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
		err = tegra_pcie_debugfs_init(pcie);
		if (err < 0)
			dev_err(dev, "failed to setup debugfs: %d\n", err);
	}

	return 0;

disable_msi:
	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_disable_msi(pcie);
put_resources:
	tegra_pcie_put_resources(pcie);
	return err;
}
2278
/*
 * Platform driver registration.  No .remove: the driver is built-in
 * only (builtin_platform_driver) and bind attributes are suppressed, so
 * the device can never be unbound.
 */
static struct platform_driver tegra_pcie_driver = {
	.driver = {
		.name = "tegra-pcie",
		.of_match_table = tegra_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = tegra_pcie_probe,
};
builtin_platform_driver(tegra_pcie_driver);
2288