1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#include <linux/clk.h>
30#include <linux/debugfs.h>
31#include <linux/delay.h>
32#include <linux/export.h>
33#include <linux/interrupt.h>
34#include <linux/irq.h>
35#include <linux/irqdomain.h>
36#include <linux/kernel.h>
37#include <linux/init.h>
38#include <linux/msi.h>
39#include <linux/of_address.h>
40#include <linux/of_pci.h>
41#include <linux/of_platform.h>
42#include <linux/pci.h>
43#include <linux/phy/phy.h>
44#include <linux/platform_device.h>
45#include <linux/reset.h>
46#include <linux/sizes.h>
47#include <linux/slab.h>
48#include <linux/vmalloc.h>
49#include <linux/regulator/consumer.h>
50
51#include <soc/tegra/cpuidle.h>
52#include <soc/tegra/pmc.h>
53
/* 8 AFI MSI vector registers x 32 bits each */
#define INT_PCI_MSI_NR (8 * 32)

/*
 * AFI (AXI-to-FPCI bridge) register offsets, relative to the "afi"
 * aperture mapped in tegra_pcie_get_resources().
 */

/* AXI BAR sizes, programmed in units of 4 KiB pages (size >> 12) */
#define AFI_AXI_BAR0_SZ 0x00
#define AFI_AXI_BAR1_SZ 0x04
#define AFI_AXI_BAR2_SZ 0x08
#define AFI_AXI_BAR3_SZ 0x0c
#define AFI_AXI_BAR4_SZ 0x10
#define AFI_AXI_BAR5_SZ 0x14

/* AXI-side start addresses of the translation windows */
#define AFI_AXI_BAR0_START 0x18
#define AFI_AXI_BAR1_START 0x1c
#define AFI_AXI_BAR2_START 0x20
#define AFI_AXI_BAR3_START 0x24
#define AFI_AXI_BAR4_START 0x28
#define AFI_AXI_BAR5_START 0x2c

/* FPCI-side targets of the translation windows */
#define AFI_FPCI_BAR0 0x30
#define AFI_FPCI_BAR1 0x34
#define AFI_FPCI_BAR2 0x38
#define AFI_FPCI_BAR3 0x3c
#define AFI_FPCI_BAR4 0x40
#define AFI_FPCI_BAR5 0x44

/* cacheable upstream window configuration */
#define AFI_CACHE_BAR0_SZ 0x48
#define AFI_CACHE_BAR0_ST 0x4c
#define AFI_CACHE_BAR1_SZ 0x50
#define AFI_CACHE_BAR1_ST 0x54

/* MSI target window (FPCI and AXI side) */
#define AFI_MSI_BAR_SZ 0x60
#define AFI_MSI_FPCI_BAR_ST 0x64
#define AFI_MSI_AXI_BAR_ST 0x68

/* per-vector MSI status registers, 32 vectors each */
#define AFI_MSI_VEC0 0x6c
#define AFI_MSI_VEC1 0x70
#define AFI_MSI_VEC2 0x74
#define AFI_MSI_VEC3 0x78
#define AFI_MSI_VEC4 0x7c
#define AFI_MSI_VEC5 0x80
#define AFI_MSI_VEC6 0x84
#define AFI_MSI_VEC7 0x88

/* per-vector MSI enable registers */
#define AFI_MSI_EN_VEC0 0x8c
#define AFI_MSI_EN_VEC1 0x90
#define AFI_MSI_EN_VEC2 0x94
#define AFI_MSI_EN_VEC3 0x98
#define AFI_MSI_EN_VEC4 0x9c
#define AFI_MSI_EN_VEC5 0xa0
#define AFI_MSI_EN_VEC6 0xa4
#define AFI_MSI_EN_VEC7 0xa8

#define AFI_CONFIGURATION 0xac
#define AFI_CONFIGURATION_EN_FPCI (1 << 0)

#define AFI_FPCI_ERROR_MASKS 0xb0

#define AFI_INTR_MASK 0xb4
#define AFI_INTR_MASK_INT_MASK (1 << 0)
#define AFI_INTR_MASK_MSI_MASK (1 << 8)

/* interrupt cause codes reported in AFI_INTR_CODE */
#define AFI_INTR_CODE 0xb8
#define AFI_INTR_CODE_MASK 0xf
#define AFI_INTR_INI_SLAVE_ERROR 1
#define AFI_INTR_INI_DECODE_ERROR 2
#define AFI_INTR_TARGET_ABORT 3
#define AFI_INTR_MASTER_ABORT 4
#define AFI_INTR_INVALID_WRITE 5
#define AFI_INTR_LEGACY 6
#define AFI_INTR_FPCI_DECODE_ERROR 7
#define AFI_INTR_AXI_DECODE_ERROR 8
#define AFI_INTR_FPCI_TIMEOUT 9
#define AFI_INTR_PE_PRSNT_SENSE 10
#define AFI_INTR_PE_CLKREQ_SENSE 11
#define AFI_INTR_CLKCLAMP_SENSE 12
#define AFI_INTR_RDY4PD_SENSE 13
#define AFI_INTR_P2P_ERROR 14

#define AFI_INTR_SIGNATURE 0xbc
#define AFI_UPPER_FPCI_ADDRESS 0xc0
#define AFI_SM_INTR_ENABLE 0xc4
#define AFI_SM_INTR_INTA_ASSERT (1 << 0)
#define AFI_SM_INTR_INTB_ASSERT (1 << 1)
#define AFI_SM_INTR_INTC_ASSERT (1 << 2)
#define AFI_SM_INTR_INTD_ASSERT (1 << 3)
#define AFI_SM_INTR_INTA_DEASSERT (1 << 4)
#define AFI_SM_INTR_INTB_DEASSERT (1 << 5)
#define AFI_SM_INTR_INTC_DEASSERT (1 << 6)
#define AFI_SM_INTR_INTD_DEASSERT (1 << 7)

#define AFI_AFI_INTR_ENABLE 0xc8
#define AFI_INTR_EN_INI_SLVERR (1 << 0)
#define AFI_INTR_EN_INI_DECERR (1 << 1)
#define AFI_INTR_EN_TGT_SLVERR (1 << 2)
#define AFI_INTR_EN_TGT_DECERR (1 << 3)
#define AFI_INTR_EN_TGT_WRERR (1 << 4)
#define AFI_INTR_EN_DFPCI_DECERR (1 << 5)
#define AFI_INTR_EN_AXI_DECERR (1 << 6)
#define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7)
#define AFI_INTR_EN_PRSNT_SENSE (1 << 8)

/* per-SoC lane crossbar configuration (x2_x1, x4_x1, 222, 411, ...) */
#define AFI_PCIE_CONFIG 0x0f8
#define AFI_PCIE_CONFIG_PCIE_DISABLE(x) (1 << ((x) + 1))
#define AFI_PCIE_CONFIG_PCIE_DISABLE_ALL 0xe
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420 (0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1 (0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222 (0x1 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1 (0x1 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20)

#define AFI_FUSE 0x104
#define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)

/* per-port control registers (note the non-uniform stride) */
#define AFI_PEX0_CTRL 0x110
#define AFI_PEX1_CTRL 0x118
#define AFI_PEX2_CTRL 0x128
#define AFI_PEX_CTRL_RST (1 << 0)
#define AFI_PEX_CTRL_CLKREQ_EN (1 << 1)
#define AFI_PEX_CTRL_REFCLK_EN (1 << 3)
#define AFI_PEX_CTRL_OVERRIDE_EN (1 << 4)

#define AFI_PLLE_CONTROL 0x160
#define AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
#define AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)

#define AFI_PEXBIAS_CTRL_0 0x168

/* root port registers, relative to each port's "base" mapping */
#define RP_VEND_XP 0x00000f00
#define RP_VEND_XP_DL_UP (1 << 30)

#define RP_VEND_CTL2 0x00000fa8
#define RP_VEND_CTL2_PCA_ENABLE (1 << 7)

#define RP_PRIV_MISC 0x00000fe0
#define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0)
#define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0)

#define RP_LINK_CONTROL_STATUS 0x00000090
#define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000
#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000

/* PADS (PHY pad controller) registers, in the "pads" aperture */
#define PADS_CTL_SEL 0x0000009c

#define PADS_CTL 0x000000a0
#define PADS_CTL_IDDQ_1L (1 << 0)
#define PADS_CTL_TX_DATA_EN_1L (1 << 6)
#define PADS_CTL_RX_DATA_EN_1L (1 << 10)

/* PLL control register moved between Tegra20 and Tegra30 */
#define PADS_PLL_CTL_TEGRA20 0x000000b8
#define PADS_PLL_CTL_TEGRA30 0x000000b4
#define PADS_PLL_CTL_RST_B4SM (1 << 1)
#define PADS_PLL_CTL_LOCKDET (1 << 8)
#define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16)
#define PADS_PLL_CTL_REFCLK_INTERNAL_CML (0 << 16)
#define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (1 << 16)
#define PADS_PLL_CTL_REFCLK_EXTERNAL (2 << 16)
#define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20)
#define PADS_PLL_CTL_TXCLKREF_DIV10 (0 << 20)
#define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20)
#define PADS_PLL_CTL_TXCLKREF_BUF_EN (1 << 22)

#define PADS_REFCLK_CFG0 0x000000c8
#define PADS_REFCLK_CFG1 0x000000cc
#define PADS_REFCLK_BIAS 0x000000d0

/*
 * Shifts of the per-port fields packed into the 16-bit halves of
 * PADS_REFCLK_CFG0/1 (values come from the SoC data).
 */
#define PADS_REFCLK_CFG_TERM_SHIFT 2
#define PADS_REFCLK_CFG_E_TERM_SHIFT 7
#define PADS_REFCLK_CFG_PREDI_SHIFT 8
#define PADS_REFCLK_CFG_DRVI_SHIFT 12
231
/* MSI controller state: a single parent IRQ fanned out to INT_PCI_MSI_NR vectors */
struct tegra_msi {
	struct msi_controller chip;
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);	/* allocated-vector bitmap */
	struct irq_domain *domain;
	struct mutex lock;	/* presumably serializes vector allocation — confirm against MSI alloc path */
	u64 phys;		/* bus address endpoints write to raise an MSI */
	int irq;		/* parent interrupt line */
};
240
241
/* per-SoC-generation configuration data */
struct tegra_pcie_soc {
	unsigned int num_ports;		/* number of root ports on this SoC */
	unsigned int msi_base_shift;
	u32 pads_pll_ctl;		/* PADS_PLL_CTL_TEGRA20 or _TEGRA30 */
	u32 tx_ref_sel;			/* TXCLKREF divider selection */
	u32 pads_refclk_cfg0;		/* refclk drive/termination, ports 0-1 */
	u32 pads_refclk_cfg1;		/* refclk drive/termination, ports 2+ */
	bool has_pex_clkreq_en;
	bool has_pex_bias_ctrl;
	bool has_intr_prsnt_sense;
	bool has_cml_clk;
	bool has_gen2;
	bool force_pca_enable;		/* force PCA in RP_VEND_CTL2 on port enable */
};
256
/* Convert a generic msi_controller pointer back to its tegra_msi container. */
static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
{
	return container_of(chip, struct tegra_msi, chip);
}
261
/* driver-private state for one PCIe host controller instance */
struct tegra_pcie {
	struct device *dev;

	void __iomem *pads;	/* PHY pad controller registers */
	void __iomem *afi;	/* AXI-to-FPCI bridge registers */
	int irq;		/* "intr" interrupt, see tegra_pcie_isr() */

	struct list_head buses;	/* tegra_pcie_bus config-space mappings */
	struct resource *cs;	/* extended configuration space aperture */

	struct resource io;	/* physical I/O window */
	struct resource pio;	/* logical I/O window presented to the core */
	struct resource mem;	/* non-prefetchable memory window */
	struct resource prefetch;	/* prefetchable memory window */
	struct resource busn;	/* bus number range */

	/* CPU -> PCI address offsets for the windows above */
	struct {
		resource_size_t mem;
		resource_size_t io;
	} offset;

	struct clk *pex_clk;
	struct clk *afi_clk;
	struct clk *pll_e;
	struct clk *cml_clk;	/* only on SoCs with has_cml_clk */

	struct reset_control *pex_rst;
	struct reset_control *afi_rst;
	struct reset_control *pcie_xrst;

	bool legacy_phy;	/* single shared PHY vs. per-lane PHYs */
	struct phy *phy;	/* legacy PHY (may be NULL) */

	struct tegra_msi msi;

	struct list_head ports;	/* enabled tegra_pcie_port instances */
	u32 xbar_config;	/* lane crossbar value for AFI_PCIE_CONFIG */

	struct regulator_bulk_data *supplies;
	unsigned int num_supplies;

	const struct tegra_pcie_soc *soc;
	struct dentry *debugfs;
};
306
/* state for one root port */
struct tegra_pcie_port {
	struct tegra_pcie *pcie;	/* owning controller */
	struct device_node *np;		/* DT node for this port */
	struct list_head list;		/* entry in pcie->ports */
	struct resource regs;		/* root port register aperture */
	void __iomem *base;		/* mapping of "regs" */
	unsigned int index;		/* hardware port number (0..2) */
	unsigned int lanes;		/* number of lanes assigned to this port */

	struct phy **phys;		/* one PHY per lane (non-legacy mode) */
};
318
/* per-bus virtual mapping of the extended configuration space */
struct tegra_pcie_bus {
	struct vm_struct *area;	/* 1 MiB virtual area, see tegra_pcie_bus_alloc() */
	struct list_head list;	/* entry in tegra_pcie.buses */
	unsigned int nr;	/* bus number this mapping serves */
};
324
/* Write a 32-bit value to an AFI register. */
static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
			      unsigned long offset)
{
	writel(value, pcie->afi + offset);
}
330
/* Read a 32-bit AFI register. */
static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->afi + offset);
}
335
/* Write a 32-bit value to a PADS (PHY pad controller) register. */
static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
			       unsigned long offset)
{
	writel(value, pcie->pads + offset);
}
341
/* Read a 32-bit PADS register. */
static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->pads + offset);
}
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
/*
 * Compute the offset into the extended configuration space aperture for a
 * given device/function and register number. The layout packs the extended
 * register bits above the slot/function fields, with the low register
 * offset dword-aligned.
 */
static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
{
	unsigned long ext = (where & 0xf00) << 8;
	unsigned long slot = PCI_SLOT(devfn) << 11;
	unsigned long func = PCI_FUNC(devfn) << 8;
	unsigned long reg = where & 0xfc;

	return ext | slot | func | reg;
}
378
/*
 * Allocate and map the configuration space window for one downstream bus.
 *
 * The hardware scatters a bus's config space across the "cs" aperture in
 * 16 chunks of 64 KiB spaced 16 MiB apart, so a contiguous 1 MiB virtual
 * area is reserved and each chunk is mapped into it individually.
 *
 * Returns the new mapping or an ERR_PTR() on failure.
 */
static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
						   unsigned int busnr)
{
	struct device *dev = pcie->dev;
	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
	phys_addr_t cs = pcie->cs->start;
	struct tegra_pcie_bus *bus;
	unsigned int i;
	int err;

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&bus->list);
	bus->nr = busnr;

	/* reserve 1 MiB of contiguous virtual addresses */
	bus->area = get_vm_area(SZ_1M, VM_IOREMAP);
	if (!bus->area) {
		err = -ENOMEM;
		goto free;
	}

	/* map each of the 16 chunks of 64 KiB for this bus */
	for (i = 0; i < 16; i++) {
		unsigned long virt = (unsigned long)bus->area->addr +
				     i * SZ_64K;
		phys_addr_t phys = cs + i * SZ_16M + busnr * SZ_64K;

		err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
		if (err < 0) {
			dev_err(dev, "ioremap_page_range() failed: %d\n", err);
			goto unmap;
		}
	}

	return bus;

unmap:
	/* vunmap() tears down any partial mappings and frees the area */
	vunmap(bus->area->addr);
free:
	kfree(bus);
	return ERR_PTR(err);
}
424
425static int tegra_pcie_add_bus(struct pci_bus *bus)
426{
427 struct pci_host_bridge *host = pci_find_host_bridge(bus);
428 struct tegra_pcie *pcie = pci_host_bridge_priv(host);
429 struct tegra_pcie_bus *b;
430
431 b = tegra_pcie_bus_alloc(pcie, bus->number);
432 if (IS_ERR(b))
433 return PTR_ERR(b);
434
435 list_add_tail(&b->list, &pcie->buses);
436
437 return 0;
438}
439
440static void tegra_pcie_remove_bus(struct pci_bus *child)
441{
442 struct pci_host_bridge *host = pci_find_host_bridge(child);
443 struct tegra_pcie *pcie = pci_host_bridge_priv(host);
444 struct tegra_pcie_bus *bus, *tmp;
445
446 list_for_each_entry_safe(bus, tmp, &pcie->buses, list) {
447 if (bus->nr == child->number) {
448 vunmap(bus->area->addr);
449 list_del(&bus->list);
450 kfree(bus);
451 break;
452 }
453 }
454}
455
/*
 * pci_ops .map_bus callback: translate (bus, devfn, where) into a virtual
 * address for config accesses.
 *
 * Accesses to bus 0 target the root ports themselves: slot N+1 maps to
 * port index N and the port's own register aperture. Downstream buses go
 * through the per-bus mapping built by tegra_pcie_bus_alloc().
 *
 * Returns NULL if no port/bus matches (the generic accessors then fail
 * the access).
 */
static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
					unsigned int devfn,
					int where)
{
	struct pci_host_bridge *host = pci_find_host_bridge(bus);
	struct tegra_pcie *pcie = pci_host_bridge_priv(host);
	struct device *dev = pcie->dev;
	void __iomem *addr = NULL;

	if (bus->number == 0) {
		unsigned int slot = PCI_SLOT(devfn);
		struct tegra_pcie_port *port;

		/* slot numbers are the port index offset by one */
		list_for_each_entry(port, &pcie->ports, list) {
			if (port->index + 1 == slot) {
				addr = port->base + (where & ~3);
				break;
			}
		}
	} else {
		struct tegra_pcie_bus *b;

		list_for_each_entry(b, &pcie->buses, list)
			if (b->nr == bus->number)
				addr = (void __iomem *)b->area->addr;

		if (!addr) {
			dev_err(dev, "failed to map cfg. space for bus %u\n",
				bus->number);
			return NULL;
		}

		addr += tegra_pcie_conf_offset(devfn, where);
	}

	return addr;
}
493
/*
 * Config accesses use the generic 32-bit-only accessors because the
 * hardware cannot do sub-word configuration writes.
 */
static struct pci_ops tegra_pcie_ops = {
	.add_bus = tegra_pcie_add_bus,
	.remove_bus = tegra_pcie_remove_bus,
	.map_bus = tegra_pcie_map_bus,
	.read = pci_generic_config_read32,
	.write = pci_generic_config_write32,
};
501
502static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
503{
504 unsigned long ret = 0;
505
506 switch (port->index) {
507 case 0:
508 ret = AFI_PEX0_CTRL;
509 break;
510
511 case 1:
512 ret = AFI_PEX1_CTRL;
513 break;
514
515 case 2:
516 ret = AFI_PEX2_CTRL;
517 break;
518 }
519
520 return ret;
521}
522
/* Pulse the port's fundamental reset (PERST#): assert, hold, deassert. */
static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* assert reset (bit is active-low in hardware: 0 = in reset) */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	usleep_range(1000, 2000);

	/* deassert reset */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);
}
539
/*
 * Enable a root port: turn on its reference clock (and CLKREQ handling
 * where supported), bring it out of reset and, on SoCs that need it,
 * force the PCA (performance counter) enable workaround.
 */
static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* enable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_REFCLK_EN;

	if (soc->has_pex_clkreq_en)
		value |= AFI_PEX_CTRL_CLKREQ_EN;

	value |= AFI_PEX_CTRL_OVERRIDE_EN;

	afi_writel(port->pcie, value, ctrl);

	tegra_pcie_port_reset(port);

	if (soc->force_pca_enable) {
		value = readl(port->base + RP_VEND_CTL2);
		value |= RP_VEND_CTL2_PCA_ENABLE;
		writel(value, port->base + RP_VEND_CTL2);
	}
}
565
/* Disable a root port: put it back into reset and gate its reference clock. */
static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* assert port reset (active-low bit) */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* disable reference clock */
	value = afi_readl(port->pcie, ctrl);

	if (soc->has_pex_clkreq_en)
		value &= ~AFI_PEX_CTRL_CLKREQ_EN;

	value &= ~AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);
}
586
/*
 * Release a root port's resources early (before devres teardown) and
 * unlink it from the controller's port list.
 */
static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;

	devm_iounmap(dev, port->base);
	devm_release_mem_region(dev, port->regs.start,
				resource_size(&port->regs));
	list_del(&port->list);
	devm_kfree(dev, port);
}
598
599
/*
 * Tegra root ports advertise the wrong device class; force them to report
 * as PCI-to-PCI bridges so the core enumerates behind them.
 */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
608
609
/* Tegra PCIe requires relaxed ordering to be enabled on all devices. */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
615
616static int tegra_pcie_request_resources(struct tegra_pcie *pcie)
617{
618 struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
619 struct list_head *windows = &host->windows;
620 struct device *dev = pcie->dev;
621 int err;
622
623 pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
624 pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
625 pci_add_resource_offset(windows, &pcie->prefetch, pcie->offset.mem);
626 pci_add_resource(windows, &pcie->busn);
627
628 err = devm_request_pci_bus_resources(dev, windows);
629 if (err < 0)
630 return err;
631
632 pci_remap_iospace(&pcie->pio, pcie->io.start);
633
634 return 0;
635}
636
637static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
638{
639 struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);
640 struct tegra_pcie *pcie = pci_host_bridge_priv(host);
641 int irq;
642
643 tegra_cpuidle_pcie_irqs_in_use();
644
645 irq = of_irq_parse_and_map_pci(pdev, slot, pin);
646 if (!irq)
647 irq = pcie->irq;
648
649 return irq;
650}
651
652static irqreturn_t tegra_pcie_isr(int irq, void *arg)
653{
654 const char *err_msg[] = {
655 "Unknown",
656 "AXI slave error",
657 "AXI decode error",
658 "Target abort",
659 "Master abort",
660 "Invalid write",
661 "Legacy interrupt",
662 "Response decoding error",
663 "AXI response decoding error",
664 "Transaction timeout",
665 "Slot present pin change",
666 "Slot clock request change",
667 "TMS clock ramp change",
668 "TMS ready for power down",
669 "Peer2Peer error",
670 };
671 struct tegra_pcie *pcie = arg;
672 struct device *dev = pcie->dev;
673 u32 code, signature;
674
675 code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
676 signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
677 afi_writel(pcie, 0, AFI_INTR_CODE);
678
679 if (code == AFI_INTR_LEGACY)
680 return IRQ_NONE;
681
682 if (code >= ARRAY_SIZE(err_msg))
683 code = 0;
684
685
686
687
688
689 if (code == AFI_INTR_MASTER_ABORT)
690 dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
691 else
692 dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);
693
694 if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
695 code == AFI_INTR_FPCI_DECODE_ERROR) {
696 u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
697 u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
698
699 if (code == AFI_INTR_MASTER_ABORT)
700 dev_dbg(dev, " FPCI address: %10llx\n", address);
701 else
702 dev_err(dev, " FPCI address: %10llx\n", address);
703 }
704
705 return IRQ_HANDLED;
706}
707
708
709
710
711
712
713
714
715
/*
 * Program the AFI address translation windows between the AXI (CPU) and
 * FPCI (PCIe) address spaces. Sizes are programmed in 4 KiB pages
 * (size >> 12).
 */
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
	u32 fpci_bar, size, axi_address;

	/* BAR 0: type 1 extended configuration space */
	fpci_bar = 0xfe100000;
	size = resource_size(pcie->cs);
	axi_address = pcie->cs->start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR0_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR0);

	/* BAR 1: downstream I/O */
	fpci_bar = 0xfdfc0000;
	size = resource_size(&pcie->io);
	axi_address = pcie->io.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);

	/* BAR 2: prefetchable memory (bit 0 marks the window enabled) */
	fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->prefetch);
	axi_address = pcie->prefetch.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

	/* BAR 3: non-prefetchable memory */
	fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->mem);
	axi_address = pcie->mem.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);

	/* NULL out the remaining BARs as they are not used */
	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR4);

	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR5);

	/* map all upstream transactions as uncached */
	afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);

	/* MSI translations are set up only when needed, so disable them */
	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}
773
/*
 * Busy-wait (no sleeping) for the PADS PLL to report lock.
 *
 * @timeout: maximum wait in milliseconds.
 * Returns 0 once LOCKDET is set, -ETIMEDOUT otherwise.
 */
static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	timeout = jiffies + msecs_to_jiffies(timeout);

	while (time_before(jiffies, timeout)) {
		value = pads_readl(pcie, soc->pads_pll_ctl);
		if (value & PADS_PLL_CTL_LOCKDET)
			return 0;
	}

	return -ETIMEDOUT;
}
789
/*
 * Bring up the integrated (legacy, non-PHY-framework) PCIe PHY. The
 * register sequence is order-sensitive: PLL configured while in IDDQ,
 * reset pulsed, lock awaited, then the pads are taken out of IDDQ and
 * TX/RX data paths enabled.
 *
 * Returns 0 on success, -ETIMEDOUT if the PLL never locks.
 */
static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;
	int err;

	/* initialize internal PHY, enable up to 16 PCIe lanes */
	pads_writel(pcie, 0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/*
	 * Set up PHY PLL inputs: select PLLE output as refclock, set TX
	 * ref sel per SoC (div10 vs. div5).
	 */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	/* take PLL out of reset */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value |= PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* wait for the PLL to lock */
	err = tegra_pcie_pll_wait(pcie, 500);
	if (err < 0) {
		dev_err(dev, "PLL failed to lock: %d\n", err);
		return err;
	}

	/* turn off IDDQ override */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* enable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
	pads_writel(pcie, value, PADS_CTL);

	return 0;
}
845
/*
 * Power down the integrated (legacy) PHY: reverse of
 * tegra_pcie_phy_enable(). Always returns 0.
 */
static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	/* disable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
	pads_writel(pcie, value, PADS_CTL);

	/* override IDDQ */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* put the PLL into reset */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	return 0;
}
870
871static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
872{
873 struct device *dev = port->pcie->dev;
874 unsigned int i;
875 int err;
876
877 for (i = 0; i < port->lanes; i++) {
878 err = phy_power_on(port->phys[i]);
879 if (err < 0) {
880 dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
881 return err;
882 }
883 }
884
885 return 0;
886}
887
888static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
889{
890 struct device *dev = port->pcie->dev;
891 unsigned int i;
892 int err;
893
894 for (i = 0; i < port->lanes; i++) {
895 err = phy_power_off(port->phys[i]);
896 if (err < 0) {
897 dev_err(dev, "failed to power off PHY#%u: %d\n", i,
898 err);
899 return err;
900 }
901 }
902
903 return 0;
904}
905
/*
 * Power on the PHY(s): either the single legacy PHY (via the PHY
 * framework when available, otherwise the direct PADS sequence) or the
 * per-port, per-lane PHYs, followed by the refclk pad configuration.
 */
static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_on(pcie->phy);
		else
			err = tegra_pcie_phy_enable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power on PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_on(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power on PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	/* configure the reference clock driver */
	pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);

	/* CFG1 only exists on SoCs with more than two ports */
	if (soc->num_ports > 2)
		pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);

	return 0;
}
943
/*
 * Power off the PHY(s); mirror of tegra_pcie_phy_power_on() minus the
 * refclk pad programming.
 */
static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_off(pcie->phy);
		else
			err = tegra_pcie_phy_disable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power off PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_off(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power off PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	return 0;
}
974
/*
 * Bring up the PCIe controller: PLLE handoff, crossbar/port enables,
 * Gen2 fuse, PHY power, then take the interface out of reset and enable
 * the FPCI bridge and error interrupts.
 *
 * Returns 0 on success or a negative errno from the PHY bring-up.
 */
static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	unsigned long value;
	int err;

	/* let PADS control PLLE when a PHY-framework PHY is in use */
	if (pcie->phy) {
		value = afi_readl(pcie, AFI_PLLE_CONTROL);
		value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
		value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
		afi_writel(pcie, value, AFI_PLLE_CONTROL);
	}

	/* power down PCIe slot clock bias pad */
	if (soc->has_pex_bias_ctrl)
		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

	/* configure lane crossbar mode and enable only the present ports */
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;

	list_for_each_entry(port, &pcie->ports, list)
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);

	afi_writel(pcie, value, AFI_PCIE_CONFIG);

	if (soc->has_gen2) {
		value = afi_readl(pcie, AFI_FUSE);
		value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	} else {
		value = afi_readl(pcie, AFI_FUSE);
		value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	}

	err = tegra_pcie_phy_power_on(pcie);
	if (err < 0) {
		dev_err(dev, "failed to power on PHY(s): %d\n", err);
		return err;
	}

	/* take the PCIe interface module out of reset */
	reset_control_deassert(pcie->pcie_xrst);

	/* finally enable PCIe */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
	afi_writel(pcie, value, AFI_CONFIGURATION);

	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

	if (soc->has_intr_prsnt_sense)
		value |= AFI_INTR_EN_PRSNT_SENSE;

	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

	/* don't enable MSI for now, only when needed */
	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* disable all exceptions */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);

	return 0;
}
1047
/*
 * Power the controller down: PHYs off, all resets asserted, power
 * domain gated, regulators released.
 *
 * NOTE(review): the clocks enabled in tegra_pcie_power_on() are not
 * disabled here — confirm whether that is intentional.
 */
static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;

	err = tegra_pcie_phy_power_off(pcie);
	if (err < 0)
		dev_err(dev, "failed to power off PHY(s): %d\n", err);

	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_warn(dev, "failed to disable regulators: %d\n", err);
}
1069
/*
 * Power the controller up from a known-off state: assert all resets,
 * gate the power domain, then enable regulators, run the powergate
 * sequence (which also enables pex_clk and releases pex_rst) and turn
 * on the remaining clocks.
 *
 * NOTE(review): error paths return without unwinding earlier steps
 * (regulators/clocks stay on) — confirm callers rely on
 * tegra_pcie_power_off() for cleanup.
 */
static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	/* enable regulators */
	err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_err(dev, "failed to enable regulators: %d\n", err);

	err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
						pcie->pex_clk,
						pcie->pex_rst);
	if (err) {
		dev_err(dev, "powerup sequence failed: %d\n", err);
		return err;
	}

	reset_control_deassert(pcie->afi_rst);

	err = clk_prepare_enable(pcie->afi_clk);
	if (err < 0) {
		dev_err(dev, "failed to enable AFI clock: %d\n", err);
		return err;
	}

	if (soc->has_cml_clk) {
		err = clk_prepare_enable(pcie->cml_clk);
		if (err < 0) {
			dev_err(dev, "failed to enable CML clock: %d\n", err);
			return err;
		}
	}

	err = clk_prepare_enable(pcie->pll_e);
	if (err < 0) {
		dev_err(dev, "failed to enable PLLE clock: %d\n", err);
		return err;
	}

	return 0;
}
1119
1120static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
1121{
1122 struct device *dev = pcie->dev;
1123 const struct tegra_pcie_soc *soc = pcie->soc;
1124
1125 pcie->pex_clk = devm_clk_get(dev, "pex");
1126 if (IS_ERR(pcie->pex_clk))
1127 return PTR_ERR(pcie->pex_clk);
1128
1129 pcie->afi_clk = devm_clk_get(dev, "afi");
1130 if (IS_ERR(pcie->afi_clk))
1131 return PTR_ERR(pcie->afi_clk);
1132
1133 pcie->pll_e = devm_clk_get(dev, "pll_e");
1134 if (IS_ERR(pcie->pll_e))
1135 return PTR_ERR(pcie->pll_e);
1136
1137 if (soc->has_cml_clk) {
1138 pcie->cml_clk = devm_clk_get(dev, "cml");
1139 if (IS_ERR(pcie->cml_clk))
1140 return PTR_ERR(pcie->cml_clk);
1141 }
1142
1143 return 0;
1144}
1145
1146static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
1147{
1148 struct device *dev = pcie->dev;
1149
1150 pcie->pex_rst = devm_reset_control_get(dev, "pex");
1151 if (IS_ERR(pcie->pex_rst))
1152 return PTR_ERR(pcie->pex_rst);
1153
1154 pcie->afi_rst = devm_reset_control_get(dev, "afi");
1155 if (IS_ERR(pcie->afi_rst))
1156 return PTR_ERR(pcie->afi_rst);
1157
1158 pcie->pcie_xrst = devm_reset_control_get(dev, "pcie_x");
1159 if (IS_ERR(pcie->pcie_xrst))
1160 return PTR_ERR(pcie->pcie_xrst);
1161
1162 return 0;
1163}
1164
/*
 * Acquire and initialize the single legacy "pcie" PHY. The PHY is
 * optional: pcie->phy stays NULL when absent and the direct PADS
 * sequence is used instead (see tegra_pcie_phy_power_on()).
 */
static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;

	pcie->phy = devm_phy_optional_get(dev, "pcie");
	if (IS_ERR(pcie->phy)) {
		err = PTR_ERR(pcie->phy);
		dev_err(dev, "failed to get PHY: %d\n", err);
		return err;
	}

	err = phy_init(pcie->phy);
	if (err < 0) {
		dev_err(dev, "failed to initialize PHY: %d\n", err);
		return err;
	}

	pcie->legacy_phy = true;

	return 0;
}
1187
/*
 * Look up the PHY named "<consumer>-<index>" in @np. A missing PHY
 * (-ENODEV) is not an error and yields NULL, so callers can treat the
 * PHY as optional; other errors are propagated as ERR_PTR().
 */
static struct phy *devm_of_phy_optional_get_index(struct device *dev,
						  struct device_node *np,
						  const char *consumer,
						  unsigned int index)
{
	struct phy *phy;
	char *name;

	name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index);
	if (!name)
		return ERR_PTR(-ENOMEM);

	phy = devm_of_phy_get(dev, np, name);
	kfree(name);

	if (IS_ERR(phy) && PTR_ERR(phy) == -ENODEV)
		phy = NULL;

	return phy;
}
1208
1209static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
1210{
1211 struct device *dev = port->pcie->dev;
1212 struct phy *phy;
1213 unsigned int i;
1214 int err;
1215
1216 port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL);
1217 if (!port->phys)
1218 return -ENOMEM;
1219
1220 for (i = 0; i < port->lanes; i++) {
1221 phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i);
1222 if (IS_ERR(phy)) {
1223 dev_err(dev, "failed to get PHY#%u: %ld\n", i,
1224 PTR_ERR(phy));
1225 return PTR_ERR(phy);
1226 }
1227
1228 err = phy_init(phy);
1229 if (err < 0) {
1230 dev_err(dev, "failed to initialize PHY#%u: %d\n", i,
1231 err);
1232 return err;
1233 }
1234
1235 port->phys[i] = phy;
1236 }
1237
1238 return 0;
1239}
1240
/*
 * Acquire the PHY(s): use the single legacy PHY when the SoC predates
 * Gen2 support or when the device tree still carries a top-level "phys"
 * property; otherwise acquire per-lane PHYs for each port.
 */
static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct device_node *np = pcie->dev->of_node;
	struct tegra_pcie_port *port;
	int err;

	if (!soc->has_gen2 || of_find_property(np, "phys", NULL) != NULL)
		return tegra_pcie_phys_get_legacy(pcie);

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_get_phys(port);
		if (err < 0)
			return err;
	}

	return 0;
}
1259
/*
 * tegra_pcie_get_resources() - acquire clocks, resets, PHYs, power and I/O
 *
 * Ordering matters: the controller is powered up before the register
 * apertures are mapped and the interrupt is requested. Any failure after
 * power-up takes the poweroff label to power the controller back down;
 * failures before that simply return (nothing to undo yet).
 */
static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *pads, *afi, *res;
	int err;

	err = tegra_pcie_clocks_get(pcie);
	if (err) {
		dev_err(dev, "failed to get clocks: %d\n", err);
		return err;
	}

	err = tegra_pcie_resets_get(pcie);
	if (err) {
		dev_err(dev, "failed to get resets: %d\n", err);
		return err;
	}

	err = tegra_pcie_phys_get(pcie);
	if (err < 0) {
		dev_err(dev, "failed to get PHYs: %d\n", err);
		return err;
	}

	err = tegra_pcie_power_on(pcie);
	if (err) {
		dev_err(dev, "failed to power up: %d\n", err);
		return err;
	}

	/* map the PADS and AFI register apertures */
	pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
	pcie->pads = devm_ioremap_resource(dev, pads);
	if (IS_ERR(pcie->pads)) {
		err = PTR_ERR(pcie->pads);
		goto poweroff;
	}

	afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
	pcie->afi = devm_ioremap_resource(dev, afi);
	if (IS_ERR(pcie->afi)) {
		err = PTR_ERR(pcie->afi);
		goto poweroff;
	}

	/*
	 * The configuration-space ("cs") aperture is only reserved here,
	 * not ioremapped — presumably mapped per-window elsewhere in the
	 * driver; TODO confirm.
	 */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
	if (!res) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	pcie->cs = devm_request_mem_region(dev, res->start,
					   resource_size(res), res->name);
	if (!pcie->cs) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	/* request the controller interrupt */
	err = platform_get_irq_byname(pdev, "intr");
	if (err < 0) {
		dev_err(dev, "failed to get IRQ: %d\n", err);
		goto poweroff;
	}

	pcie->irq = err;

	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
	if (err) {
		dev_err(dev, "failed to register IRQ: %d\n", err);
		goto poweroff;
	}

	return 0;

poweroff:
	tegra_pcie_power_off(pcie);
	return err;
}
1340
/*
 * tegra_pcie_put_resources() - undo tegra_pcie_get_resources()
 *
 * Frees the controller interrupt, powers the hardware down and exits the
 * legacy PHY. PHY teardown failure is only logged; always returns 0.
 *
 * NOTE(review): only pcie->phy (legacy mode) is exited here; the per-lane
 * PHYs initialized by tegra_pcie_port_get_phys() do not appear to be
 * exited on this path — verify against the rest of the driver.
 */
static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int err;

	if (pcie->irq > 0)
		free_irq(pcie->irq, pcie);

	tegra_pcie_power_off(pcie);

	err = phy_exit(pcie->phy);
	if (err < 0)
		dev_err(dev, "failed to teardown PHY: %d\n", err);

	return 0;
}
1357
1358static int tegra_msi_alloc(struct tegra_msi *chip)
1359{
1360 int msi;
1361
1362 mutex_lock(&chip->lock);
1363
1364 msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
1365 if (msi < INT_PCI_MSI_NR)
1366 set_bit(msi, chip->used);
1367 else
1368 msi = -ENOSPC;
1369
1370 mutex_unlock(&chip->lock);
1371
1372 return msi;
1373}
1374
1375static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
1376{
1377 struct device *dev = chip->chip.dev;
1378
1379 mutex_lock(&chip->lock);
1380
1381 if (!test_bit(irq, chip->used))
1382 dev_err(dev, "trying to free unused MSI#%lu\n", irq);
1383 else
1384 clear_bit(irq, chip->used);
1385
1386 mutex_unlock(&chip->lock);
1387}
1388
1389static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
1390{
1391 struct tegra_pcie *pcie = data;
1392 struct device *dev = pcie->dev;
1393 struct tegra_msi *msi = &pcie->msi;
1394 unsigned int i, processed = 0;
1395
1396 for (i = 0; i < 8; i++) {
1397 unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
1398
1399 while (reg) {
1400 unsigned int offset = find_first_bit(®, 32);
1401 unsigned int index = i * 32 + offset;
1402 unsigned int irq;
1403
1404
1405 afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);
1406
1407 irq = irq_find_mapping(msi->domain, index);
1408 if (irq) {
1409 if (test_bit(index, msi->used))
1410 generic_handle_irq(irq);
1411 else
1412 dev_info(dev, "unhandled MSI\n");
1413 } else {
1414
1415
1416
1417
1418 dev_info(dev, "unexpected MSI\n");
1419 }
1420
1421
1422 reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
1423
1424 processed++;
1425 }
1426 }
1427
1428 return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
1429}
1430
1431static int tegra_msi_setup_irq(struct msi_controller *chip,
1432 struct pci_dev *pdev, struct msi_desc *desc)
1433{
1434 struct tegra_msi *msi = to_tegra_msi(chip);
1435 struct msi_msg msg;
1436 unsigned int irq;
1437 int hwirq;
1438
1439 hwirq = tegra_msi_alloc(msi);
1440 if (hwirq < 0)
1441 return hwirq;
1442
1443 irq = irq_create_mapping(msi->domain, hwirq);
1444 if (!irq) {
1445 tegra_msi_free(msi, hwirq);
1446 return -EINVAL;
1447 }
1448
1449 irq_set_msi_desc(irq, desc);
1450
1451 msg.address_lo = lower_32_bits(msi->phys);
1452 msg.address_hi = upper_32_bits(msi->phys);
1453 msg.data = hwirq;
1454
1455 pci_write_msi_msg(irq, &msg);
1456
1457 return 0;
1458}
1459
1460static void tegra_msi_teardown_irq(struct msi_controller *chip,
1461 unsigned int irq)
1462{
1463 struct tegra_msi *msi = to_tegra_msi(chip);
1464 struct irq_data *d = irq_get_irq_data(irq);
1465 irq_hw_number_t hwirq = irqd_to_hwirq(d);
1466
1467 irq_dispose_mapping(irq);
1468 tegra_msi_free(msi, hwirq);
1469}
1470
/* Per-vector irq_chip; all operations delegate to the generic PCI MSI
 * mask/unmask helpers. */
static struct irq_chip tegra_msi_irq_chip = {
	.name = "Tegra PCIe MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};
1478
1479static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
1480 irq_hw_number_t hwirq)
1481{
1482 irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
1483 irq_set_chip_data(irq, domain->host_data);
1484
1485 tegra_cpuidle_pcie_irqs_in_use();
1486
1487 return 0;
1488}
1489
/* Only ->map is needed; mappings are disposed explicitly via
 * irq_dispose_mapping() in the teardown paths. */
static const struct irq_domain_ops msi_domain_ops = {
	.map = tegra_msi_map,
};
1493
/*
 * tegra_pcie_enable_msi() - set up the AFI MSI machinery
 *
 * Registers a linear IRQ domain for the INT_PCI_MSI_NR (8 * 32) vectors,
 * requests the MSI dispatch interrupt, programs the MSI target address
 * into the AFI BAR registers and enables all eight vector-enable
 * registers. On failure after domain creation, the domain is removed.
 */
static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
{
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct platform_device *pdev = to_platform_device(pcie->dev);
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_msi *msi = &pcie->msi;
	struct device *dev = pcie->dev;
	int err;
	u32 reg;

	mutex_init(&msi->lock);

	msi->chip.dev = dev;
	msi->chip.setup_irq = tegra_msi_setup_irq;
	msi->chip.teardown_irq = tegra_msi_teardown_irq;

	msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
					    &msi_domain_ops, &msi->chip);
	if (!msi->domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	err = platform_get_irq_byname(pdev, "msi");
	if (err < 0) {
		dev_err(dev, "failed to get IRQ: %d\n", err);
		goto err;
	}

	msi->irq = err;

	err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
			  tegra_msi_irq_chip.name, pcie);
	if (err < 0) {
		dev_err(dev, "failed to request IRQ: %d\n", err);
		goto err;
	}

	/*
	 * The MSI target address is hard-coded; endpoints are told to write
	 * to it via the message programmed in tegra_msi_setup_irq(), and
	 * the writes are intercepted by the AFI (see the BAR programming
	 * below). NOTE(review): presumably this address lies outside any
	 * range used for real DMA on these SoCs — confirm against the SoC
	 * memory map.
	 */
	msi->phys = 0xfcfffff000;

	afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
	/* BAR size register; 1 presumably selects a single page — TODO confirm */
	afi_writel(pcie, 1, AFI_MSI_BAR_SZ);

	/* enable all 256 MSI vectors */
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);

	/* unmask the MSI summary interrupt at the AFI level */
	reg = afi_readl(pcie, AFI_INTR_MASK);
	reg |= AFI_INTR_MASK_MSI_MASK;
	afi_writel(pcie, reg, AFI_INTR_MASK);

	host->msi = &msi->chip;

	return 0;

err:
	irq_domain_remove(msi->domain);
	return err;
}
1577
/*
 * tegra_pcie_disable_msi() - undo tegra_pcie_enable_msi()
 *
 * Masks the MSI summary interrupt, disables all eight vector-enable
 * registers, frees the dispatch IRQ, disposes of any remaining per-vector
 * mappings and removes the IRQ domain. Always returns 0.
 */
static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
{
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, irq;
	u32 value;

	/* mask the MSI summary interrupt at the AFI level */
	value = afi_readl(pcie, AFI_INTR_MASK);
	value &= ~AFI_INTR_MASK_MSI_MASK;
	afi_writel(pcie, value, AFI_INTR_MASK);

	/* disable all MSI vectors */
	afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC7);

	if (msi->irq > 0)
		free_irq(msi->irq, pcie);

	/* dispose of every live mapping before removing the domain */
	for (i = 0; i < INT_PCI_MSI_NR; i++) {
		irq = irq_find_mapping(msi->domain, i);
		if (irq > 0)
			irq_dispose_mapping(irq);
	}

	irq_domain_remove(msi->domain);

	return 0;
}
1612
/*
 * tegra_pcie_get_xbar_config() - translate a lane layout into an XBAR value
 * @lanes: packed lane configuration; byte N holds the lane count of root
 *         port N (built in tegra_pcie_parse_dt())
 * @xbar: on success, receives the crossbar configuration value
 *
 * Only the per-SoC combinations listed below are supported; any other
 * layout yields -EINVAL.
 */
static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
				      u32 *xbar)
{
	struct device *dev = pcie->dev;
	struct device_node *np = dev->of_node;

	if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
	    of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
		switch (lanes) {
		case 0x0000104:
			/* port 0: x4, port 1: x1 */
			dev_info(dev, "4x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
			return 0;

		case 0x0000102:
			/* port 0: x2, port 1: x1 */
			dev_info(dev, "2x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
			return 0;
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
		switch (lanes) {
		case 0x00000204:
			/* port 0: x4, port 1: x2 */
			dev_info(dev, "4x1, 2x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
			return 0;

		case 0x00020202:
			/* three x2 ports */
			dev_info(dev, "2x3 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
			return 0;

		case 0x00010104:
			/* port 0: x4, ports 1 and 2: x1 */
			dev_info(dev, "4x1, 1x2 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
			return 0;
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
		switch (lanes) {
		case 0x00000004:
			/* one x4 port */
			dev_info(dev, "single-mode configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
			return 0;

		case 0x00000202:
			/* two x2 ports */
			dev_info(dev, "dual-mode configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
			return 0;
		}
	}

	return -EINVAL;
}
1665
1666
1667
1668
1669
1670
1671static bool of_regulator_bulk_available(struct device_node *np,
1672 struct regulator_bulk_data *supplies,
1673 unsigned int num_supplies)
1674{
1675 char property[32];
1676 unsigned int i;
1677
1678 for (i = 0; i < num_supplies; i++) {
1679 snprintf(property, 32, "%s-supply", supplies[i].supply);
1680
1681 if (of_find_property(np, property, NULL) == NULL)
1682 return false;
1683 }
1684
1685 return true;
1686}
1687
1688
1689
1690
1691
1692
1693
1694
1695static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
1696{
1697 struct device *dev = pcie->dev;
1698 struct device_node *np = dev->of_node;
1699
1700 if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
1701 pcie->num_supplies = 3;
1702 else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
1703 pcie->num_supplies = 2;
1704
1705 if (pcie->num_supplies == 0) {
1706 dev_err(dev, "device %s not supported in legacy mode\n",
1707 np->full_name);
1708 return -ENODEV;
1709 }
1710
1711 pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
1712 sizeof(*pcie->supplies),
1713 GFP_KERNEL);
1714 if (!pcie->supplies)
1715 return -ENOMEM;
1716
1717 pcie->supplies[0].supply = "pex-clk";
1718 pcie->supplies[1].supply = "vdd";
1719
1720 if (pcie->num_supplies > 2)
1721 pcie->supplies[2].supply = "avdd";
1722
1723 return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
1724}
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
/*
 * tegra_pcie_get_regulators() - look up the power supplies for the SoC
 * @lane_mask: bitmask with one bit per lane in use (built in
 *             tegra_pcie_parse_dt())
 *
 * Allocates and names the regulator_bulk_data array according to the
 * compatible string, then fetches the supplies. If the device tree does
 * not carry the full set of "<name>-supply" properties, falls back to
 * the legacy binding via tegra_pcie_get_legacy_regulators().
 */
static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
{
	struct device *dev = pcie->dev;
	struct device_node *np = dev->of_node;
	unsigned int i = 0;

	if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
		pcie->num_supplies = 6;

		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
					      sizeof(*pcie->supplies),
					      GFP_KERNEL);
		if (!pcie->supplies)
			return -ENOMEM;

		pcie->supplies[i++].supply = "avdd-pll-uerefe";
		pcie->supplies[i++].supply = "hvddio-pex";
		pcie->supplies[i++].supply = "dvddio-pex";
		pcie->supplies[i++].supply = "dvdd-pex-pll";
		pcie->supplies[i++].supply = "hvdd-pex-pll-e";
		pcie->supplies[i++].supply = "vddio-pex-ctl";
	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
		pcie->num_supplies = 7;

		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
					      sizeof(*pcie->supplies),
					      GFP_KERNEL);
		if (!pcie->supplies)
			return -ENOMEM;

		pcie->supplies[i++].supply = "avddio-pex";
		pcie->supplies[i++].supply = "dvddio-pex";
		pcie->supplies[i++].supply = "avdd-pex-pll";
		pcie->supplies[i++].supply = "hvdd-pex";
		pcie->supplies[i++].supply = "hvdd-pex-pll-e";
		pcie->supplies[i++].supply = "vddio-pex-ctl";
		pcie->supplies[i++].supply = "avdd-pll-erefe";
	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
		bool need_pexa = false, need_pexb = false;

		/* lanes 0 to 3 belong to the PEXA supply group */
		if (lane_mask & 0x0f)
			need_pexa = true;

		/* lanes 4 and 5 belong to the PEXB supply group */
		if (lane_mask & 0x30)
			need_pexb = true;

		pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
					 (need_pexb ? 2 : 0);

		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
					      sizeof(*pcie->supplies),
					      GFP_KERNEL);
		if (!pcie->supplies)
			return -ENOMEM;

		pcie->supplies[i++].supply = "avdd-pex-pll";
		pcie->supplies[i++].supply = "hvdd-pex";
		pcie->supplies[i++].supply = "vddio-pex-ctl";
		pcie->supplies[i++].supply = "avdd-plle";

		if (need_pexa) {
			pcie->supplies[i++].supply = "avdd-pexa";
			pcie->supplies[i++].supply = "vdd-pexa";
		}

		if (need_pexb) {
			pcie->supplies[i++].supply = "avdd-pexb";
			pcie->supplies[i++].supply = "vdd-pexb";
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
		pcie->num_supplies = 5;

		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
					      sizeof(*pcie->supplies),
					      GFP_KERNEL);
		if (!pcie->supplies)
			return -ENOMEM;

		pcie->supplies[0].supply = "avdd-pex";
		pcie->supplies[1].supply = "vdd-pex";
		pcie->supplies[2].supply = "avdd-pex-pll";
		pcie->supplies[3].supply = "avdd-plle";
		pcie->supplies[4].supply = "vddio-pex-clk";
	}

	if (of_regulator_bulk_available(dev->of_node, pcie->supplies,
					pcie->num_supplies))
		return devm_regulator_bulk_get(dev, pcie->num_supplies,
					       pcie->supplies);

	/*
	 * If not all supplies are present in the device tree, assume the
	 * platform still uses the legacy binding and retry with that set.
	 */
	dev_info(dev, "using legacy DT binding for power supplies\n");

	devm_kfree(dev, pcie->supplies);
	pcie->num_supplies = 0;

	return tegra_pcie_get_legacy_regulators(pcie);
}
1839
/*
 * tegra_pcie_parse_dt() - parse the controller's device tree description
 *
 * Extracts the I/O, prefetchable and non-prefetchable apertures from the
 * "ranges" property, the bus range, and one tegra_pcie_port per enabled
 * root port child node. Also derives the lane configuration (one byte per
 * port in "lanes", one bit per lane in "mask") used to program the
 * crossbar and to select the regulator set.
 */
static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct device_node *np = dev->of_node, *port;
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct of_pci_range_parser parser;
	struct of_pci_range range;
	u32 lanes = 0, mask = 0;
	unsigned int lane = 0;
	struct resource res;
	int err;

	if (of_pci_range_parser_init(&parser, np)) {
		dev_err(dev, "missing \"ranges\" property\n");
		return -EINVAL;
	}

	for_each_of_pci_range(&parser, &range) {
		err = of_pci_range_to_resource(&range, np, &res);
		if (err < 0)
			return err;

		switch (res.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			/* record the bus -> CPU offset for I/O space */
			pcie->offset.io = res.start - range.pci_addr;

			memcpy(&pcie->pio, &res, sizeof(res));
			pcie->pio.name = np->full_name;

			/*
			 * pcie->pio above carries the PCI-side I/O
			 * resource; pcie->io below describes the CPU-side
			 * memory window backing it (note IORESOURCE_MEM).
			 */
			pcie->io.start = range.cpu_addr;
			pcie->io.end = range.cpu_addr + range.size - 1;
			pcie->io.flags = IORESOURCE_MEM;
			pcie->io.name = "I/O";

			memcpy(&res, &pcie->io, sizeof(res));
			break;

		case IORESOURCE_MEM:
			/*
			 * Memory windows: keep the prefetchable and
			 * non-prefetchable apertures separate.
			 */
			pcie->offset.mem = res.start - range.pci_addr;

			if (res.flags & IORESOURCE_PREFETCH) {
				memcpy(&pcie->prefetch, &res, sizeof(res));
				pcie->prefetch.name = "prefetchable";
			} else {
				memcpy(&pcie->mem, &res, sizeof(res));
				pcie->mem.name = "non-prefetchable";
			}
			break;
		}
	}

	err = of_pci_parse_bus_range(np, &pcie->busn);
	if (err < 0) {
		/* no bus-range property: fall back to the full 0-255 range */
		dev_err(dev, "failed to parse ranges property: %d\n", err);
		pcie->busn.name = np->name;
		pcie->busn.start = 0;
		pcie->busn.end = 0xff;
		pcie->busn.flags = IORESOURCE_BUS;
	}

	/*
	 * Parse the root port child nodes.
	 * NOTE(review): the early returns inside this loop leave the child
	 * node reference taken by for_each_child_of_node() held — an
	 * of_node_put(port) appears to be missing on those paths; verify
	 * against current upstream.
	 */
	for_each_child_of_node(np, port) {
		struct tegra_pcie_port *rp;
		unsigned int index;
		u32 value;

		err = of_pci_get_devfn(port);
		if (err < 0) {
			dev_err(dev, "failed to parse address: %d\n", err);
			return err;
		}

		/* root ports are addressed by 1-based device number */
		index = PCI_SLOT(err);

		if (index < 1 || index > soc->num_ports) {
			dev_err(dev, "invalid port number: %d\n", index);
			return -EINVAL;
		}

		index--;

		err = of_property_read_u32(port, "nvidia,num-lanes", &value);
		if (err < 0) {
			dev_err(dev, "failed to parse # of lanes: %d\n",
				err);
			return err;
		}

		if (value > 16) {
			dev_err(dev, "invalid # of lanes: %u\n", value);
			return -EINVAL;
		}

		/* byte N of "lanes" holds the lane count of port N */
		lanes |= value << (index << 3);

		/* disabled ports still advance the lane offset */
		if (!of_device_is_available(port)) {
			lane += value;
			continue;
		}

		mask |= ((1 << value) - 1) << lane;
		lane += value;

		rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
		if (!rp)
			return -ENOMEM;

		err = of_address_to_resource(port, 0, &rp->regs);
		if (err < 0) {
			dev_err(dev, "failed to parse address: %d\n", err);
			return err;
		}

		INIT_LIST_HEAD(&rp->list);
		rp->index = index;
		rp->lanes = value;
		rp->pcie = pcie;
		rp->np = port;

		rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
		if (IS_ERR(rp->base))
			return PTR_ERR(rp->base);

		list_add_tail(&rp->list, &pcie->ports);
	}

	err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
	if (err < 0) {
		dev_err(dev, "invalid lane configuration\n");
		return err;
	}

	err = tegra_pcie_get_regulators(pcie, mask);
	if (err < 0)
		return err;

	return 0;
}
1993
1994
1995
1996
1997
1998
/* number of 1-2 ms polls before a link-up attempt is declared failed */
#define TEGRA_PCIE_LINKUP_TIMEOUT 200
/*
 * tegra_pcie_port_check_link() - wait for the port's data link to come up
 *
 * First overrides presence detection to report an endpoint as present,
 * then polls RP_VEND_XP for DL_UP and RP_LINK_CONTROL_STATUS for
 * DL_LINK_ACTIVE. Each failed attempt resets the port; up to three
 * attempts are made. Returns true once the link is active.
 */
static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int retries = 3;
	unsigned long value;

	/* force the presence map to report an endpoint as present */
	value = readl(port->base + RP_PRIV_MISC);
	value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
	value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
	writel(value, port->base + RP_PRIV_MISC);

	do {
		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

		/* first wait for the data link layer to come up ... */
		do {
			value = readl(port->base + RP_VEND_XP);

			if (value & RP_VEND_XP_DL_UP)
				break;

			usleep_range(1000, 2000);
		} while (--timeout);

		if (!timeout) {
			dev_err(dev, "link %u down, retrying\n", port->index);
			goto retry;
		}

		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

		/* ... then for the link to report active */
		do {
			value = readl(port->base + RP_LINK_CONTROL_STATUS);

			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
				return true;

			usleep_range(1000, 2000);
		} while (--timeout);

retry:
		tegra_pcie_port_reset(port);
	} while (--retries);

	return false;
}
2046
2047static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
2048{
2049 struct device *dev = pcie->dev;
2050 struct tegra_pcie_port *port, *tmp;
2051
2052 list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2053 dev_info(dev, "probing port %u, using %u lanes\n",
2054 port->index, port->lanes);
2055
2056 tegra_pcie_port_enable(port);
2057
2058 if (tegra_pcie_port_check_link(port))
2059 continue;
2060
2061 dev_info(dev, "link %u down, ignoring\n", port->index);
2062
2063 tegra_pcie_port_disable(port);
2064 tegra_pcie_port_free(port);
2065 }
2066}
2067
/* Tegra20: two root ports; no CLKREQ, bias control, presence sense, CML
 * clock or Gen2 support. */
static const struct tegra_pcie_soc tegra20_pcie = {
	.num_ports = 2,
	.msi_base_shift = 0,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
	.pads_refclk_cfg0 = 0xfa5cfa5c,
	.has_pex_clkreq_en = false,
	.has_pex_bias_ctrl = false,
	.has_intr_prsnt_sense = false,
	.has_cml_clk = false,
	.has_gen2 = false,
	.force_pca_enable = false,
};
2081
/* Tegra30: three root ports; adds CLKREQ, bias control, presence sense
 * and CML clock, plus a second refclk config register. Still Gen1 only. */
static const struct tegra_pcie_soc tegra30_pcie = {
	.num_ports = 3,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.pads_refclk_cfg0 = 0xfa5cfa5c,
	.pads_refclk_cfg1 = 0xfa5cfa5c,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = true,
	.has_gen2 = false,
	.force_pca_enable = false,
};
2096
/* Tegra124: two root ports; first entry with Gen2 support. */
static const struct tegra_pcie_soc tegra124_pcie = {
	.num_ports = 2,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.pads_refclk_cfg0 = 0x44ac44ac,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = true,
	.has_gen2 = true,
	.force_pca_enable = true,
};
2110
/* Tegra210: two root ports, Gen2 capable; the only entry forcing PCA
 * enable. */
static const struct tegra_pcie_soc tegra210_pcie = {
	.num_ports = 2,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.pads_refclk_cfg0 = 0x90b890b8,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = true,
	.has_gen2 = true,
	.force_pca_enable = true,
};
2124
/* Supported compatibles, listed newest SoC first. */
static const struct of_device_id tegra_pcie_of_match[] = {
	{ .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
	{ },
};
2132
/*
 * seq_file ->start: emit the column header and position the list cursor.
 * NOTE(review): the header is printed every time ->start runs, so it may
 * be repeated if the seq_file buffer is refilled mid-read — confirm
 * whether that matters for this debugfs consumer.
 */
static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
{
	struct tegra_pcie *pcie = s->private;

	if (list_empty(&pcie->ports))
		return NULL;

	seq_printf(s, "Index  Status\n");

	return seq_list_start(&pcie->ports, *pos);
}
2144
/* seq_file ->next: advance the cursor through the port list. */
static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct tegra_pcie *pcie = s->private;

	return seq_list_next(v, &pcie->ports, pos);
}
2151
/* seq_file ->stop: nothing was acquired in ->start, so nothing to release. */
static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
{
}
2155
2156static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
2157{
2158 bool up = false, active = false;
2159 struct tegra_pcie_port *port;
2160 unsigned int value;
2161
2162 port = list_entry(v, struct tegra_pcie_port, list);
2163
2164 value = readl(port->base + RP_VEND_XP);
2165
2166 if (value & RP_VEND_XP_DL_UP)
2167 up = true;
2168
2169 value = readl(port->base + RP_LINK_CONTROL_STATUS);
2170
2171 if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2172 active = true;
2173
2174 seq_printf(s, "%2u ", port->index);
2175
2176 if (up)
2177 seq_printf(s, "up");
2178
2179 if (active) {
2180 if (up)
2181 seq_printf(s, ", ");
2182
2183 seq_printf(s, "active");
2184 }
2185
2186 seq_printf(s, "\n");
2187 return 0;
2188}
2189
/* seq_file iterator over the controller's port list. */
static const struct seq_operations tegra_pcie_ports_seq_ops = {
	.start = tegra_pcie_ports_seq_start,
	.next = tegra_pcie_ports_seq_next,
	.stop = tegra_pcie_ports_seq_stop,
	.show = tegra_pcie_ports_seq_show,
};
2196
2197static int tegra_pcie_ports_open(struct inode *inode, struct file *file)
2198{
2199 struct tegra_pcie *pcie = inode->i_private;
2200 struct seq_file *s;
2201 int err;
2202
2203 err = seq_open(file, &tegra_pcie_ports_seq_ops);
2204 if (err)
2205 return err;
2206
2207 s = file->private_data;
2208 s->private = pcie;
2209
2210 return 0;
2211}
2212
/* File operations for the debugfs "ports" file (read-only seq_file). */
static const struct file_operations tegra_pcie_ports_ops = {
	.owner = THIS_MODULE,
	.open = tegra_pcie_ports_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
2220
2221static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
2222{
2223 struct dentry *file;
2224
2225 pcie->debugfs = debugfs_create_dir("pcie", NULL);
2226 if (!pcie->debugfs)
2227 return -ENOMEM;
2228
2229 file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs,
2230 pcie, &tegra_pcie_ports_ops);
2231 if (!file)
2232 goto remove;
2233
2234 return 0;
2235
2236remove:
2237 debugfs_remove_recursive(pcie->debugfs);
2238 pcie->debugfs = NULL;
2239 return -ENOMEM;
2240}
2241
/*
 * tegra_pcie_probe() - bind the Tegra PCIe host controller
 *
 * Sequence: parse DT, acquire resources (clocks/resets/PHYs/power/regs),
 * enable the controller, claim bridge windows, program address
 * translation, optionally enable MSI, bring up the root ports, then scan
 * and populate the PCI bus. Errors unwind through the labels at the end.
 */
static int tegra_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pci_host_bridge *host;
	struct tegra_pcie *pcie;
	struct pci_bus *child;
	int err;

	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
	if (!host)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(host);

	pcie->soc = of_device_get_match_data(dev);
	INIT_LIST_HEAD(&pcie->buses);
	INIT_LIST_HEAD(&pcie->ports);
	pcie->dev = dev;

	err = tegra_pcie_parse_dt(pcie);
	if (err < 0)
		return err;

	err = tegra_pcie_get_resources(pcie);
	if (err < 0) {
		dev_err(dev, "failed to request resources: %d\n", err);
		return err;
	}

	err = tegra_pcie_enable_controller(pcie);
	if (err)
		goto put_resources;

	err = tegra_pcie_request_resources(pcie);
	if (err)
		goto put_resources;

	/* program the AFI address translation windows */
	tegra_pcie_setup_translations(pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = tegra_pcie_enable_msi(pcie);
		if (err < 0) {
			dev_err(dev, "failed to enable MSI support: %d\n", err);
			goto put_resources;
		}
	}

	tegra_pcie_enable_ports(pcie);

	pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
	host->busnr = pcie->busn.start;
	host->dev.parent = &pdev->dev;
	host->ops = &tegra_pcie_ops;
	host->map_irq = tegra_pcie_map_irq;
	host->swizzle_irq = pci_common_swizzle;

	err = pci_scan_root_bus_bridge(host);
	if (err < 0) {
		dev_err(dev, "failed to register host: %d\n", err);
		goto disable_msi;
	}

	pci_bus_size_bridges(host->bus);
	pci_bus_assign_resources(host->bus);

	list_for_each_entry(child, &host->bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(host->bus);

	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
		/* debugfs is best-effort; a failure does not fail the probe */
		err = tegra_pcie_debugfs_init(pcie);
		if (err < 0)
			dev_err(dev, "failed to setup debugfs: %d\n", err);
	}

	return 0;

disable_msi:
	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_disable_msi(pcie);
put_resources:
	tegra_pcie_put_resources(pcie);
	return err;
}
2328
/* Built-in platform driver; unbinding is suppressed because the PCI bus
 * cannot be torn down safely at runtime. */
static struct platform_driver tegra_pcie_driver = {
	.driver = {
		.name = "tegra-pcie",
		.of_match_table = tegra_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = tegra_pcie_probe,
};
builtin_platform_driver(tegra_pcie_driver);
2338