1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include <linux/bitops.h>
19#include <linux/delay.h>
20#include <linux/gpio/consumer.h>
21#include <linux/iopoll.h>
22#include <linux/module.h>
23#include <linux/of.h>
24#include <linux/of_address.h>
25#include <linux/of_irq.h>
26#include <linux/of_pci.h>
27#include <linux/of_platform.h>
28#include <linux/pci.h>
29#include <linux/phy/phy.h>
30#include <linux/platform_device.h>
31#include <linux/reset.h>
32#include <linux/sys_soc.h>
33#include <mt7621.h>
34#include <ralink_regs.h>
35
36#include "../../pci/pci.h"
37
38
/* MT7621 system controller: GPIO pinmux mode register */
#define MT7621_GPIO_MODE 0x60

/* Per-device PCIe config space: Fast Training Sequence count register */
#define PCIE_FTS_NUM 0x70c
#define PCIE_FTS_NUM_MASK GENMASK(15, 8)
#define PCIE_FTS_NUM_L0(x) (((x) & 0xff) << 8)

/* MT7621 system controller: clock gating register */
#define RALINK_CLKCFG1 0x30

/* Host bridge registers (offsets from pcie->base) */
#define RALINK_PCI_PCICFG_ADDR 0x0000
#define RALINK_PCI_PCIMSK_ADDR 0x000C
#define RALINK_PCI_CONFIG_ADDR 0x0020
#define RALINK_PCI_CONFIG_DATA 0x0024
#define RALINK_PCI_MEMBASE 0x0028
#define RALINK_PCI_IOBASE 0x002C

/*
 * Virtual P2P bridge device numbers: one 4-bit field per port in
 * RALINK_PCI_PCICFG_ADDR, starting at bit 16.
 */
#define PCIE_P2P_CNT 3
#define PCIE_P2P_BR_DEVNUM_SHIFT(p) (16 + (p) * 4)
#define PCIE_P2P_BR_DEVNUM0_SHIFT PCIE_P2P_BR_DEVNUM_SHIFT(0)
#define PCIE_P2P_BR_DEVNUM1_SHIFT PCIE_P2P_BR_DEVNUM_SHIFT(1)
#define PCIE_P2P_BR_DEVNUM2_SHIFT PCIE_P2P_BR_DEVNUM_SHIFT(2)
#define PCIE_P2P_BR_DEVNUM_MASK 0xf
#define PCIE_P2P_BR_DEVNUM_MASK_FULL (0xfff << PCIE_P2P_BR_DEVNUM0_SHIFT)

/* Per-port RC register blocks: 0x1000 apart, first one at 0x2000 */
#define MT7621_PCIE_OFFSET 0x2000
#define MT7621_NEXT_PORT 0x1000

/* Per-port registers (offsets within a port register block) */
#define RALINK_PCI_BAR0SETUP_ADDR 0x0010
#define RALINK_PCI_ID 0x0030
#define RALINK_PCI_CLASS 0x0034
#define RALINK_PCI_SUBID 0x0038
#define RALINK_PCI_STATUS 0x0050

/* Bit fields used in the registers above */
#define PCIE_REVISION_ID BIT(0)
#define PCIE_CLASS_CODE (0x60400 << 8)
#define PCIE_BAR_MAP_MAX GENMASK(30, 16)
#define PCIE_BAR_ENABLE BIT(0)
#define PCIE_PORT_INT_EN(x) BIT(20 + (x))
#define PCIE_PORT_CLK_EN(x) BIT(24 + (x))
#define PCIE_PORT_LINKUP BIT(0)

/* PERST# routed through GPIO, plus the reset settle delay */
#define PERST_MODE_MASK GENMASK(11, 10)
#define PERST_MODE_GPIO BIT(10)
#define PERST_DELAY_MS 100
88
89
90
91
92
93
94
95
96
97
98
99
100
/**
 * struct mt7621_pcie_port - PCIe port information
 * @base: I/O mapped register base of the port
 * @list: port list node of the host bridge
 * @pcie: pointer to the host bridge private data
 * @phy: pointer to the PHY control block
 * @pcie_rst: pointer to the port reset control
 * @gpio_rst: optional PERST# GPIO used to reset the endpoint
 * @slot: port slot number (0..2)
 * @irq: GIC IRQ assigned to the port
 * @enabled: true once the port has been brought up
 */
struct mt7621_pcie_port {
	void __iomem *base;
	struct list_head list;
	struct mt7621_pcie *pcie;
	struct phy *phy;
	struct reset_control *pcie_rst;
	struct gpio_desc *gpio_rst;
	u32 slot;
	int irq;
	bool enabled;
};
112
113
114
115
116
117
118
119
120
121
122
123
124
/**
 * struct mt7621_pcie - PCIe host information
 * @base: I/O mapped register base of the host bridge
 * @dev: pointer to the PCIe device
 * @io: PCI I/O window resource
 * @mem: PCI memory window resource (owned by the host bridge)
 * @io_map_base: virtual address used as the MIPS I/O port base
 * @ports: list of detected PCIe ports
 * @irq_map: virtual-bridge device number to port IRQ mapping
 * @resets_inverted: resets have inverted polarity (MT7621 E2 silicon)
 */
struct mt7621_pcie {
	void __iomem *base;
	struct device *dev;
	struct resource io;
	struct resource *mem;
	unsigned long io_map_base;
	struct list_head ports;
	int irq_map[PCIE_P2P_CNT];
	bool resets_inverted;
};
135
136static inline u32 pcie_read(struct mt7621_pcie *pcie, u32 reg)
137{
138 return readl(pcie->base + reg);
139}
140
141static inline void pcie_write(struct mt7621_pcie *pcie, u32 val, u32 reg)
142{
143 writel(val, pcie->base + reg);
144}
145
146static inline void pcie_rmw(struct mt7621_pcie *pcie, u32 reg, u32 clr, u32 set)
147{
148 u32 val = readl(pcie->base + reg);
149
150 val &= ~clr;
151 val |= set;
152 writel(val, pcie->base + reg);
153}
154
155static inline u32 pcie_port_read(struct mt7621_pcie_port *port, u32 reg)
156{
157 return readl(port->base + reg);
158}
159
160static inline void pcie_port_write(struct mt7621_pcie_port *port,
161 u32 val, u32 reg)
162{
163 writel(val, port->base + reg);
164}
165
/*
 * Build the value for the CONFIG_ADDR register selecting one config
 * space dword: enable bit, extended register bits [11:8] in [27:24],
 * then bus/slot/function and the dword-aligned register offset.
 */
static inline u32 mt7621_pci_get_cfgaddr(unsigned int bus, unsigned int slot,
					 unsigned int func, unsigned int where)
{
	u32 addr = 0x80000000;			/* config cycle enable */

	addr |= ((where & 0xf00) >> 8) << 24;	/* extended register number */
	addr |= bus << 16;
	addr |= slot << 11;
	addr |= func << 8;
	addr |= where & 0xfc;			/* dword-aligned offset */

	return addr;
}
172
173static void __iomem *mt7621_pcie_map_bus(struct pci_bus *bus,
174 unsigned int devfn, int where)
175{
176 struct mt7621_pcie *pcie = bus->sysdata;
177 u32 address = mt7621_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn),
178 PCI_FUNC(devfn), where);
179
180 writel(address, pcie->base + RALINK_PCI_CONFIG_ADDR);
181
182 return pcie->base + RALINK_PCI_CONFIG_DATA + (where & 3);
183}
184
/* Config accessors: map_bus programs CONFIG_ADDR, generics do the I/O */
struct pci_ops mt7621_pci_ops = {
	.map_bus = mt7621_pcie_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};
190
191static u32 read_config(struct mt7621_pcie *pcie, unsigned int dev, u32 reg)
192{
193 u32 address = mt7621_pci_get_cfgaddr(0, dev, 0, reg);
194
195 pcie_write(pcie, address, RALINK_PCI_CONFIG_ADDR);
196 return pcie_read(pcie, RALINK_PCI_CONFIG_DATA);
197}
198
199static void write_config(struct mt7621_pcie *pcie, unsigned int dev,
200 u32 reg, u32 val)
201{
202 u32 address = mt7621_pci_get_cfgaddr(0, dev, 0, reg);
203
204 pcie_write(pcie, address, RALINK_PCI_CONFIG_ADDR);
205 pcie_write(pcie, val, RALINK_PCI_CONFIG_DATA);
206}
207
208static inline void mt7621_rst_gpio_pcie_assert(struct mt7621_pcie_port *port)
209{
210 if (port->gpio_rst)
211 gpiod_set_value(port->gpio_rst, 1);
212}
213
214static inline void mt7621_rst_gpio_pcie_deassert(struct mt7621_pcie_port *port)
215{
216 if (port->gpio_rst)
217 gpiod_set_value(port->gpio_rst, 0);
218}
219
220static inline bool mt7621_pcie_port_is_linkup(struct mt7621_pcie_port *port)
221{
222 return (pcie_port_read(port, RALINK_PCI_STATUS) & PCIE_PORT_LINKUP) != 0;
223}
224
/* Ungate the port's PCIe clock in the system controller. */
static inline void mt7621_pcie_port_clk_enable(struct mt7621_pcie_port *port)
{
	rt_sysc_m32(0, PCIE_PORT_CLK_EN(port->slot), RALINK_CLKCFG1);
}
229
/* Gate off the port's PCIe clock in the system controller. */
static inline void mt7621_pcie_port_clk_disable(struct mt7621_pcie_port *port)
{
	rt_sysc_m32(PCIE_PORT_CLK_EN(port->slot), 0, RALINK_CLKCFG1);
}
234
235static inline void mt7621_control_assert(struct mt7621_pcie_port *port)
236{
237 struct mt7621_pcie *pcie = port->pcie;
238
239 if (pcie->resets_inverted)
240 reset_control_assert(port->pcie_rst);
241 else
242 reset_control_deassert(port->pcie_rst);
243}
244
245static inline void mt7621_control_deassert(struct mt7621_pcie_port *port)
246{
247 struct mt7621_pcie *pcie = port->pcie;
248
249 if (pcie->resets_inverted)
250 reset_control_deassert(port->pcie_rst);
251 else
252 reset_control_assert(port->pcie_rst);
253}
254
/*
 * Program the MIPS Coherence Manager so IOCU0 claims accesses to the
 * PCI memory window, making DMA to/from that region cache-coherent.
 */
static void setup_cm_memory_region(struct mt7621_pcie *pcie)
{
	struct resource *mem_resource = pcie->mem;
	struct device *dev = pcie->dev;
	resource_size_t mask;

	if (mips_cps_numiocu(0)) {
		/*
		 * Mask derived from the window size; assumes the window
		 * is power-of-two sized and naturally aligned so the
		 * complement forms a contiguous mask — NOTE(review):
		 * confirm the DT "ranges" guarantees this.
		 */
		mask = ~(mem_resource->end - mem_resource->start);

		write_gcr_reg1_base(mem_resource->start);
		write_gcr_reg1_mask(mask | CM_GCR_REGn_MASK_CMTGT_IOCU0);
		dev_info(dev, "PCI coherence region base: 0x%08llx, mask/settings: 0x%08llx\n",
			 (unsigned long long)read_gcr_reg1_base(),
			 (unsigned long long)read_gcr_reg1_mask());
	}
}
276
277static int mt7621_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
278{
279 struct mt7621_pcie *pcie = pdev->bus->sysdata;
280 struct device *dev = pcie->dev;
281 int irq = pcie->irq_map[slot];
282
283 dev_info(dev, "bus=%d slot=%d irq=%d\n", pdev->bus->number, slot, irq);
284 return irq;
285}
286
/*
 * Parse the "ranges" property: pick up the I/O window, rebase the MIPS
 * I/O port base onto it, and register both the I/O and memory windows
 * with the host bridge.
 *
 * Returns 0 on success or -EINVAL when "ranges" or the memory window
 * is missing.
 */
static int mt7621_pci_parse_request_of_pci_ranges(struct pci_host_bridge *host)
{
	struct mt7621_pcie *pcie = pci_host_bridge_priv(host);
	struct device *dev = pcie->dev;
	struct device_node *node = dev->of_node;
	struct of_pci_range_parser parser;
	struct resource_entry *entry;
	struct of_pci_range range;
	LIST_HEAD(res);

	if (of_pci_range_parser_init(&parser, node)) {
		dev_err(dev, "missing \"ranges\" property\n");
		return -EINVAL;
	}

	/*
	 * MIPS I/O accessors dereference a global virtual base; map the
	 * controller's I/O window and point the port base at it so
	 * inb()/outb() reach this bridge.
	 */
	for_each_of_pci_range(&parser, &range) {
		switch (range.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			pcie->io_map_base =
				(unsigned long)ioremap(range.cpu_addr,
						       range.size);
			of_pci_range_to_resource(&range, node, &pcie->io);
			/* report the CPU-side addresses for this window */
			pcie->io.start = range.cpu_addr;
			pcie->io.end = range.cpu_addr + range.size - 1;
			set_io_port_base(pcie->io_map_base);
			break;
		}
	}

	entry = resource_list_first_type(&host->windows, IORESOURCE_MEM);
	if (!entry) {
		dev_err(dev, "Cannot get memory resource");
		return -EINVAL;
	}

	pcie->mem = entry->res;
	/* hand both windows to the PCI core via host->windows */
	pci_add_resource(&res, &pcie->io);
	pci_add_resource(&res, entry->res);
	list_splice_init(&res, &host->windows);

	return 0;
}
336
/*
 * mt7621_pcie_parse_port - set up one PCIe port for slot @slot
 *
 * Maps the port register block (platform resource @slot + 1; resource 0
 * is the host bridge itself), grabs its reset control, PHY, optional
 * PERST# GPIO and IRQ, then appends the port to pcie->ports.
 *
 * Returns 0 on success or a negative errno.
 */
static int mt7621_pcie_parse_port(struct mt7621_pcie *pcie,
				  int slot)
{
	struct mt7621_pcie_port *port;
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	char name[10];

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->base = devm_platform_ioremap_resource(pdev, slot + 1);
	if (IS_ERR(port->base))
		return PTR_ERR(port->base);

	snprintf(name, sizeof(name), "pcie%d", slot);
	port->pcie_rst = devm_reset_control_get_exclusive(dev, name);
	/*
	 * Only -EPROBE_DEFER is fatal here; other reset errors are
	 * tolerated — NOTE(review): presumably because some boards do
	 * not provide every reset line; confirm against the DTs.
	 */
	if (PTR_ERR(port->pcie_rst) == -EPROBE_DEFER) {
		dev_err(dev, "failed to get pcie%d reset control\n", slot);
		return PTR_ERR(port->pcie_rst);
	}

	snprintf(name, sizeof(name), "pcie-phy%d", slot);
	port->phy = devm_phy_get(dev, name);
	/*
	 * A missing PHY is fatal except for slot 1 — NOTE(review): slot 1
	 * appears to share slot 0's PHY (see mt7621_pcie_init_ports);
	 * confirm against the PHY driver.
	 */
	if (IS_ERR(port->phy) && slot != 1)
		return PTR_ERR(port->phy);

	port->gpio_rst = devm_gpiod_get_index_optional(dev, "reset", slot,
						       GPIOD_OUT_LOW);
	if (IS_ERR(port->gpio_rst)) {
		dev_err(dev, "Failed to get GPIO for PCIe%d\n", slot);
		return PTR_ERR(port->gpio_rst);
	}

	port->slot = slot;
	port->pcie = pcie;

	port->irq = platform_get_irq(pdev, slot);
	if (port->irq < 0) {
		dev_err(dev, "Failed to get IRQ for PCIe%d\n", slot);
		return -ENXIO;
	}

	INIT_LIST_HEAD(&port->list);
	list_add_tail(&port->list, &pcie->ports);

	return 0;
}
386
387static int mt7621_pcie_parse_dt(struct mt7621_pcie *pcie)
388{
389 struct device *dev = pcie->dev;
390 struct platform_device *pdev = to_platform_device(dev);
391 struct device_node *node = dev->of_node, *child;
392 int err;
393
394 pcie->base = devm_platform_ioremap_resource(pdev, 0);
395 if (IS_ERR(pcie->base))
396 return PTR_ERR(pcie->base);
397
398 for_each_available_child_of_node(node, child) {
399 int slot;
400
401 err = of_pci_get_devfn(child);
402 if (err < 0) {
403 of_node_put(child);
404 dev_err(dev, "failed to parse devfn: %d\n", err);
405 return err;
406 }
407
408 slot = PCI_SLOT(err);
409
410 err = mt7621_pcie_parse_port(pcie, slot);
411 if (err) {
412 of_node_put(child);
413 return err;
414 }
415 }
416
417 return 0;
418}
419
420static int mt7621_pcie_init_port(struct mt7621_pcie_port *port)
421{
422 struct mt7621_pcie *pcie = port->pcie;
423 struct device *dev = pcie->dev;
424 u32 slot = port->slot;
425 int err;
426
427 err = phy_init(port->phy);
428 if (err) {
429 dev_err(dev, "failed to initialize port%d phy\n", slot);
430 return err;
431 }
432
433 err = phy_power_on(port->phy);
434 if (err) {
435 dev_err(dev, "failed to power on port%d phy\n", slot);
436 phy_exit(port->phy);
437 return err;
438 }
439
440 port->enabled = true;
441
442 return 0;
443}
444
445static void mt7621_pcie_reset_assert(struct mt7621_pcie *pcie)
446{
447 struct mt7621_pcie_port *port;
448
449 list_for_each_entry(port, &pcie->ports, list) {
450
451 mt7621_control_assert(port);
452
453
454 mt7621_rst_gpio_pcie_assert(port);
455 }
456
457 mdelay(PERST_DELAY_MS);
458}
459
460static void mt7621_pcie_reset_rc_deassert(struct mt7621_pcie *pcie)
461{
462 struct mt7621_pcie_port *port;
463
464 list_for_each_entry(port, &pcie->ports, list)
465 mt7621_control_deassert(port);
466}
467
468static void mt7621_pcie_reset_ep_deassert(struct mt7621_pcie *pcie)
469{
470 struct mt7621_pcie_port *port;
471
472 list_for_each_entry(port, &pcie->ports, list)
473 mt7621_rst_gpio_pcie_deassert(port);
474
475 mdelay(PERST_DELAY_MS);
476}
477
/*
 * Reset, power up and link-check all ports. Ports without a link
 * partner are put back into reset with their clock gated off.
 */
static void mt7621_pcie_init_ports(struct mt7621_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct mt7621_pcie_port *port, *tmp;
	int err;

	/* route PERST# through GPIO so it can be driven per port */
	rt_sysc_m32(PERST_MODE_MASK, PERST_MODE_GPIO, MT7621_GPIO_MODE);

	mt7621_pcie_reset_assert(pcie);
	mt7621_pcie_reset_rc_deassert(pcie);

	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		u32 slot = port->slot;

		if (slot == 1) {
			/*
			 * Slot 1 gets no PHY init of its own —
			 * NOTE(review): it appears to share slot 0's
			 * dual-ported PHY (see the phy_power_off pairing
			 * below); confirm against the PHY driver.
			 */
			port->enabled = true;
			continue;
		}

		err = mt7621_pcie_init_port(port);
		if (err) {
			dev_err(dev, "Initiating port %d failed\n", slot);
			list_del(&port->list);
		}
	}

	/* release PERST# and wait, then sample the link state */
	mt7621_pcie_reset_ep_deassert(pcie);

	tmp = NULL;
	list_for_each_entry(port, &pcie->ports, list) {
		u32 slot = port->slot;

		if (!mt7621_pcie_port_is_linkup(port)) {
			dev_err(dev, "pcie%d no card, disable it (RST & CLK)\n",
				slot);
			mt7621_control_assert(port);
			mt7621_pcie_port_clk_disable(port);
			port->enabled = false;

			/* remember a dead slot 0 for the check below */
			if (slot == 0) {
				tmp = port;
				continue;
			}

			/*
			 * Only power the (shared) PHY off when both
			 * slot 0 and slot 1 failed to link up.
			 */
			if (slot == 1 && tmp && !tmp->enabled)
				phy_power_off(tmp->phy);

		}
	}
}
528
529static void mt7621_pcie_enable_port(struct mt7621_pcie_port *port)
530{
531 struct mt7621_pcie *pcie = port->pcie;
532 u32 slot = port->slot;
533 u32 offset = MT7621_PCIE_OFFSET + (slot * MT7621_NEXT_PORT);
534 u32 val;
535
536
537 val = pcie_read(pcie, RALINK_PCI_PCIMSK_ADDR);
538 val |= PCIE_PORT_INT_EN(slot);
539 pcie_write(pcie, val, RALINK_PCI_PCIMSK_ADDR);
540
541
542 pcie_write(pcie, PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE,
543 offset + RALINK_PCI_BAR0SETUP_ADDR);
544
545
546 pcie_write(pcie, PCIE_CLASS_CODE | PCIE_REVISION_ID,
547 offset + RALINK_PCI_CLASS);
548}
549
/*
 * Program the bridge windows, enable every linked-up port, then turn
 * on bus mastering and tune the FTS count on each virtual bridge.
 */
static void mt7621_pcie_enable_ports(struct mt7621_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct mt7621_pcie_port *port;
	u8 num_slots_enabled = 0;
	u32 slot;
	u32 val;

	/* program the host bridge memory and I/O windows */
	pcie_write(pcie, 0xffffffff, RALINK_PCI_MEMBASE);
	pcie_write(pcie, pcie->io.start, RALINK_PCI_IOBASE);

	list_for_each_entry(port, &pcie->ports, list) {
		if (port->enabled) {
			mt7621_pcie_port_clk_enable(port);
			mt7621_pcie_enable_port(port);
			dev_info(dev, "PCIE%d enabled\n", port->slot);
			num_slots_enabled++;
		}
	}

	/*
	 * Looping by count (not by port->slot) is correct here:
	 * mt7621_pcie_init_virtual_bridges() packs the bridges of the
	 * link-up ports into bus 0 device numbers 0..n-1.
	 */
	for (slot = 0; slot < num_slots_enabled; slot++) {
		/* enable bus mastering on the virtual bridge */
		val = read_config(pcie, slot, PCI_COMMAND);
		val |= PCI_COMMAND_MASTER;
		write_config(pcie, slot, PCI_COMMAND, val);

		/* set the Fast Training Sequence count to 0x50 */
		val = read_config(pcie, slot, PCIE_FTS_NUM);
		val &= ~PCIE_FTS_NUM_MASK;
		val |= PCIE_FTS_NUM_L0(0x50);
		write_config(pcie, slot, PCIE_FTS_NUM, val);
	}
}
582
583static int mt7621_pcie_init_virtual_bridges(struct mt7621_pcie *pcie)
584{
585 u32 pcie_link_status = 0;
586 u32 n = 0;
587 int i = 0;
588 u32 p2p_br_devnum[PCIE_P2P_CNT];
589 int irqs[PCIE_P2P_CNT];
590 struct mt7621_pcie_port *port;
591
592 list_for_each_entry(port, &pcie->ports, list) {
593 u32 slot = port->slot;
594
595 irqs[i++] = port->irq;
596 if (port->enabled)
597 pcie_link_status |= BIT(slot);
598 }
599
600 if (pcie_link_status == 0)
601 return -1;
602
603
604
605
606
607
608 for (i = 0; i < PCIE_P2P_CNT; i++)
609 if (pcie_link_status & BIT(i))
610 p2p_br_devnum[i] = n++;
611
612 for (i = 0; i < PCIE_P2P_CNT; i++)
613 if ((pcie_link_status & BIT(i)) == 0)
614 p2p_br_devnum[i] = n++;
615
616 pcie_rmw(pcie, RALINK_PCI_PCICFG_ADDR,
617 PCIE_P2P_BR_DEVNUM_MASK_FULL,
618 (p2p_br_devnum[0] << PCIE_P2P_BR_DEVNUM0_SHIFT) |
619 (p2p_br_devnum[1] << PCIE_P2P_BR_DEVNUM1_SHIFT) |
620 (p2p_br_devnum[2] << PCIE_P2P_BR_DEVNUM2_SHIFT));
621
622
623 n = 0;
624 for (i = 0; i < PCIE_P2P_CNT; i++)
625 if (pcie_link_status & BIT(i))
626 pcie->irq_map[n++] = irqs[i];
627
628 for (i = n; i < PCIE_P2P_CNT; i++)
629 pcie->irq_map[i] = -1;
630
631 return 0;
632}
633
634static int mt7621_pcie_register_host(struct pci_host_bridge *host)
635{
636 struct mt7621_pcie *pcie = pci_host_bridge_priv(host);
637
638 host->ops = &mt7621_pci_ops;
639 host->map_irq = mt7621_map_irq;
640 host->sysdata = pcie;
641
642 return pci_host_probe(host);
643}
644
645static const struct soc_device_attribute mt7621_pci_quirks_match[] = {
646 { .soc_id = "mt7621", .revision = "E2" }
647};
648
/*
 * Probe: allocate the bridge, parse DT, bring up the ports, renumber
 * the virtual bridges and register the host with the PCI core.
 */
static int mt7621_pci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct soc_device_attribute *attr;
	struct mt7621_pcie *pcie;
	struct pci_host_bridge *bridge;
	int err;

	if (!dev->of_node)
		return -ENODEV;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
	if (!bridge)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(bridge);
	pcie->dev = dev;
	platform_set_drvdata(pdev, pcie);
	INIT_LIST_HEAD(&pcie->ports);

	/* MT7621 E2 silicon needs inverted handling of the port resets */
	attr = soc_device_match(mt7621_pci_quirks_match);
	if (attr)
		pcie->resets_inverted = true;

	err = mt7621_pcie_parse_dt(pcie);
	if (err) {
		dev_err(dev, "Parsing DT failed\n");
		return err;
	}

	err = mt7621_pci_parse_request_of_pci_ranges(bridge);
	if (err) {
		dev_err(dev, "Error requesting pci resources from ranges");
		return err;
	}

	/* constrain the global I/O port resource to our window */
	ioport_resource.start = pcie->io.start;
	ioport_resource.end = pcie->io.end;

	mt7621_pcie_init_ports(pcie);

	err = mt7621_pcie_init_virtual_bridges(pcie);
	if (err) {
		/*
		 * No port has a link partner. Returning 0 keeps probe
		 * "successful" so boot continues without PCI —
		 * NOTE(review): presumably deliberate; confirm before
		 * turning this into an error return.
		 */
		dev_err(dev, "Nothing is connected in virtual bridges. Exiting...");
		return 0;
	}

	mt7621_pcie_enable_ports(pcie);

	setup_cm_memory_region(pcie);

	err = mt7621_pcie_register_host(bridge);
	if (err) {
		dev_err(dev, "Error registering host\n");
		return err;
	}

	return 0;
}
709
/* OF match table; exported for module autoloading */
static const struct of_device_id mt7621_pci_ids[] = {
	{ .compatible = "mediatek,mt7621-pci" },
	{},
};
MODULE_DEVICE_TABLE(of, mt7621_pci_ids);
715
static struct platform_driver mt7621_pci_driver = {
	.probe = mt7621_pci_probe,
	.driver = {
		.name = "mt7621-pci",
		.of_match_table = of_match_ptr(mt7621_pci_ids),
	},
};

/* built-in only: no remove callback, so no unbind support */
builtin_platform_driver(mt7621_pci_driver);
725