/*
 * PCIe host controller driver for the MediaTek MT7621 SoC.
 */
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <mt7621.h>
#include <ralink_regs.h>

#include "../../pci/pci.h"

/* System controller (sysc) register offsets */
#define MT7621_CHIP_REV_ID		0x0c
#define MT7621_GPIO_MODE		0x60
#define CHIP_REV_MT7621_E2		0x0101

/* MediaTek-specific configuration registers */
#define PCIE_FTS_NUM			0x70c
#define PCIE_FTS_NUM_MASK		GENMASK(15, 8)
#define PCIE_FTS_NUM_L0(x)		(((x) & 0xff) << 8)

/* rt_sysc_membase relative registers */
#define RALINK_CLKCFG1			0x30
#define RALINK_PCIE_CLK_GEN		0x7c
#define RALINK_PCIE_CLK_GEN1		0x80

/* Host-PCI bridge registers */
#define RALINK_PCI_PCICFG_ADDR		0x0000
#define RALINK_PCI_PCIMSK_ADDR		0x000C
#define RALINK_PCI_CONFIG_ADDR		0x0020
#define RALINK_PCI_CONFIG_DATA		0x0024
#define RALINK_PCI_MEMBASE		0x0028
#define RALINK_PCI_IOBASE		0x002C

/* PCICFG virtual bridge number fields */
#define MT7621_BR0_MASK			GENMASK(19, 16)
#define MT7621_BR1_MASK			GENMASK(23, 20)
#define MT7621_BR2_MASK			GENMASK(27, 24)
#define MT7621_BR_ALL_MASK		GENMASK(27, 16)
#define MT7621_BR0_SHIFT		16
#define MT7621_BR1_SHIFT		20
#define MT7621_BR2_SHIFT		24

/* Per-port register window: base offset and stride */
#define MT7621_PCIE_OFFSET		0x2000
#define MT7621_NEXT_PORT		0x1000

#define RALINK_PCI_BAR0SETUP_ADDR	0x0010
#define RALINK_PCI_IMBASEBAR0_ADDR	0x0018
#define RALINK_PCI_ID			0x0030
#define RALINK_PCI_CLASS		0x0034
#define RALINK_PCI_SUBID		0x0038
#define RALINK_PCI_STATUS		0x0050

/* Register field and bit definitions */
#define PCIE_REVISION_ID		BIT(0)
#define PCIE_CLASS_CODE			(0x60400 << 8)
#define PCIE_BAR_MAP_MAX		GENMASK(30, 16)
#define PCIE_BAR_ENABLE			BIT(0)
#define PCIE_PORT_INT_EN(x)		BIT(20 + (x))
#define PCIE_PORT_CLK_EN(x)		BIT(24 + (x))
#define PCIE_PORT_LINKUP		BIT(0)

#define PCIE_CLK_GEN_EN			BIT(31)
#define PCIE_CLK_GEN_DIS		0
#define PCIE_CLK_GEN1_DIS		GENMASK(30, 24)
#define PCIE_CLK_GEN1_EN		(BIT(27) | BIT(25))
#define MEMORY_BASE			0x0
#define PERST_MODE_MASK			GENMASK(11, 10)
#define PERST_MODE_GPIO			BIT(10)
#define PERST_DELAY_US			1000

/**
 * struct mt7621_pcie_port - PCIe port information
 * @base: I/O mapped register base
 * @list: port list
 * @pcie: pointer to PCIe host info
 * @phy: pointer to the port PHY
 * @pcie_rst: pointer to the port reset control
 * @slot: port slot
 * @enabled: indicates if the port is enabled
 */
struct mt7621_pcie_port {
	void __iomem *base;
	struct list_head list;
	struct mt7621_pcie *pcie;
	struct phy *phy;
	struct reset_control *pcie_rst;
	u32 slot;
	bool enabled;
};

/**
 * struct mt7621_pcie - PCIe host information
 * @base: I/O mapped register base
 * @dev: PCIe device
 * @io: PCI I/O resource
 * @mem: non-prefetchable memory resource
 * @busn: bus range
 * @offset: I/O and memory window offsets
 * @ports: list of PCIe ports
 * @perst: PERST# GPIO
 * @rst: PCIe controller reset control
 */
struct mt7621_pcie {
	void __iomem *base;
	struct device *dev;
	struct resource io;
	struct resource mem;
	struct resource busn;
	struct {
		resource_size_t mem;
		resource_size_t io;
	} offset;
	struct list_head ports;
	struct gpio_desc *perst;
	struct reset_control *rst;
};

static inline u32 pcie_read(struct mt7621_pcie *pcie, u32 reg)
{
	return readl(pcie->base + reg);
}

static inline void pcie_write(struct mt7621_pcie *pcie, u32 val, u32 reg)
{
	writel(val, pcie->base + reg);
}

static inline u32 pcie_port_read(struct mt7621_pcie_port *port, u32 reg)
{
	return readl(port->base + reg);
}

static inline void pcie_port_write(struct mt7621_pcie_port *port,
				   u32 val, u32 reg)
{
	writel(val, port->base + reg);
}

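/*
 * Encode a config-space address for RALINK_PCI_CONFIG_ADDR: bit 31
 * enables the access, bits 24-27 hold the extended register number
 * (bits 8-11 of @where), followed by bus (16-23), device (11-15),
 * function (8-10) and the dword-aligned register offset (2-7).
 */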
static inline u32 mt7621_pci_get_cfgaddr(unsigned int bus, unsigned int slot,
					 unsigned int func, unsigned int where)
{
	return (((where & 0xF00) >> 8) << 24) | (bus << 16) | (slot << 11) |
	       (func << 8) | (where & 0xfc) | 0x80000000;
}

static void __iomem *mt7621_pcie_map_bus(struct pci_bus *bus,
					 unsigned int devfn, int where)
{
	struct mt7621_pcie *pcie = bus->sysdata;
	u32 address = mt7621_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn),
					     PCI_FUNC(devfn), where);

	writel(address, pcie->base + RALINK_PCI_CONFIG_ADDR);

	return pcie->base + RALINK_PCI_CONFIG_DATA + (where & 3);
}

struct pci_ops mt7621_pci_ops = {
	.map_bus = mt7621_pcie_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};

static u32 read_config(struct mt7621_pcie *pcie, unsigned int dev, u32 reg)
{
	u32 address = mt7621_pci_get_cfgaddr(0, dev, 0, reg);

	pcie_write(pcie, address, RALINK_PCI_CONFIG_ADDR);
	return pcie_read(pcie, RALINK_PCI_CONFIG_DATA);
}

static void write_config(struct mt7621_pcie *pcie, unsigned int dev,
			 u32 reg, u32 val)
{
	u32 address = mt7621_pci_get_cfgaddr(0, dev, 0, reg);

	pcie_write(pcie, address, RALINK_PCI_CONFIG_ADDR);
	pcie_write(pcie, val, RALINK_PCI_CONFIG_DATA);
}

static inline void mt7621_perst_gpio_pcie_assert(struct mt7621_pcie *pcie)
{
	gpiod_set_value(pcie->perst, 0);
	mdelay(PERST_DELAY_US);
}

static inline void mt7621_perst_gpio_pcie_deassert(struct mt7621_pcie *pcie)
{
	gpiod_set_value(pcie->perst, 1);
	mdelay(PERST_DELAY_US);
}

static inline bool mt7621_pcie_port_is_linkup(struct mt7621_pcie_port *port)
{
	return (pcie_port_read(port, RALINK_PCI_STATUS) & PCIE_PORT_LINKUP) != 0;
}

static inline void mt7621_pcie_port_clk_disable(struct mt7621_pcie_port *port)
{
	rt_sysc_m32(PCIE_PORT_CLK_EN(port->slot), 0, RALINK_CLKCFG1);
}

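/*
 * The PCIe port reset lines are inverted on chip revisions earlier than
 * MT7621 E2 (0x0101), so assert and deassert are swapped depending on
 * the chip revision read from the system controller.
 */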
static inline void mt7621_control_assert(struct mt7621_pcie_port *port)
{
	u32 chip_rev_id = rt_sysc_r32(MT7621_CHIP_REV_ID);

	if ((chip_rev_id & 0xFFFF) == CHIP_REV_MT7621_E2)
		reset_control_assert(port->pcie_rst);
	else
		reset_control_deassert(port->pcie_rst);
}

static inline void mt7621_control_deassert(struct mt7621_pcie_port *port)
{
	u32 chip_rev_id = rt_sysc_r32(MT7621_CHIP_REV_ID);

	if ((chip_rev_id & 0xFFFF) == CHIP_REV_MT7621_E2)
		reset_control_deassert(port->pcie_rst);
	else
		reset_control_assert(port->pcie_rst);
}

static void mt7621_reset_port(struct mt7621_pcie_port *port)
{
	mt7621_control_assert(port);
	msleep(100);
	mt7621_control_deassert(port);
}

static void setup_cm_memory_region(struct mt7621_pcie *pcie)
{
	struct resource *mem_resource = &pcie->mem;
	struct device *dev = pcie->dev;
	resource_size_t mask;

	if (mips_cps_numiocu(0)) {
		/*
		 * Derive the CM region mask from the memory resource size;
		 * the GCR mask must be a contiguous run of 1s, so the
		 * resource is expected to span a power-of-two-sized region.
		 */
		mask = ~(mem_resource->end - mem_resource->start);

		write_gcr_reg1_base(mem_resource->start);
		write_gcr_reg1_mask(mask | CM_GCR_REGn_MASK_CMTGT_IOCU0);
		dev_info(dev, "PCI coherence region base: 0x%08llx, mask/settings: 0x%08llx\n",
			 (unsigned long long)read_gcr_reg1_base(),
			 (unsigned long long)read_gcr_reg1_mask());
	}
}

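/*
 * Walk the "ranges" property of the controller node and fill in the I/O
 * and memory resources, then parse the "bus-range" property, falling
 * back to buses 0-0xff if it is absent or malformed.
 */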
static int mt7621_pci_parse_request_of_pci_ranges(struct mt7621_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct device_node *node = dev->of_node;
	struct of_pci_range_parser parser;
	struct of_pci_range range;
	int err;

	if (of_pci_range_parser_init(&parser, node)) {
		dev_err(dev, "missing \"ranges\" property\n");
		return -EINVAL;
	}

	for_each_of_pci_range(&parser, &range) {
		struct resource *res = NULL;

		switch (range.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			ioremap(range.cpu_addr, range.size);
			res = &pcie->io;
			pcie->offset.io = 0x00000000UL;
			break;
		case IORESOURCE_MEM:
			res = &pcie->mem;
			pcie->offset.mem = 0x00000000UL;
			break;
		}

		if (res)
			of_pci_range_to_resource(&range, node, res);
	}

	err = of_pci_parse_bus_range(node, &pcie->busn);
	if (err < 0) {
		dev_err(dev, "failed to parse bus ranges property: %d\n", err);
		pcie->busn.name = node->name;
		pcie->busn.start = 0;
		pcie->busn.end = 0xff;
		pcie->busn.flags = IORESOURCE_BUS;
	}

	return 0;
}

static int mt7621_pcie_parse_port(struct mt7621_pcie *pcie,
				  struct device_node *node,
				  int slot)
{
	struct mt7621_pcie_port *port;
	struct device *dev = pcie->dev;
	struct device_node *pnode = dev->of_node;
	struct resource regs;
	char name[10];
	int err;

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	err = of_address_to_resource(pnode, slot + 1, &regs);
	if (err) {
		dev_err(dev, "missing \"reg\" property\n");
		return err;
	}

	port->base = devm_ioremap_resource(dev, &regs);
	if (IS_ERR(port->base))
		return PTR_ERR(port->base);

	snprintf(name, sizeof(name), "pcie%d", slot);
	port->pcie_rst = devm_reset_control_get_exclusive(dev, name);
	if (PTR_ERR(port->pcie_rst) == -EPROBE_DEFER) {
		dev_err(dev, "failed to get pcie%d reset control\n", slot);
		return PTR_ERR(port->pcie_rst);
	}

	snprintf(name, sizeof(name), "pcie-phy%d", slot);
	port->phy = devm_phy_get(dev, name);
	if (IS_ERR(port->phy))
		return PTR_ERR(port->phy);

	port->slot = slot;
	port->pcie = pcie;

	INIT_LIST_HEAD(&port->list);
	list_add_tail(&port->list, &pcie->ports);

	return 0;
}

static int mt7621_pcie_parse_dt(struct mt7621_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct device_node *node = dev->of_node, *child;
	struct resource regs;
	int err;

	pcie->perst = devm_gpiod_get(dev, "perst", GPIOD_OUT_HIGH);
	if (IS_ERR(pcie->perst)) {
		dev_err(dev, "failed to get gpio perst\n");
		return PTR_ERR(pcie->perst);
	}

	err = of_address_to_resource(node, 0, &regs);
	if (err) {
		dev_err(dev, "missing \"reg\" property\n");
		return err;
	}

	pcie->base = devm_ioremap_resource(dev, &regs);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	pcie->rst = devm_reset_control_get_exclusive(dev, "pcie");
	if (PTR_ERR(pcie->rst) == -EPROBE_DEFER) {
		dev_err(dev, "failed to get pcie reset control\n");
		return PTR_ERR(pcie->rst);
	}

	for_each_available_child_of_node(node, child) {
		int slot;

		err = of_pci_get_devfn(child);
		if (err < 0) {
			of_node_put(child);
			dev_err(dev, "failed to parse devfn: %d\n", err);
			return err;
		}

		slot = PCI_SLOT(err);

		err = mt7621_pcie_parse_port(pcie, child, slot);
		if (err) {
			of_node_put(child);
			return err;
		}
	}

	return 0;
}

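/*
 * Bring one port out of reset and initialize and power on its PHY; the
 * port is only marked enabled once the PHY is up.
 */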
static int mt7621_pcie_init_port(struct mt7621_pcie_port *port)
{
	struct mt7621_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;
	u32 slot = port->slot;
	int err;

	/* Pulse the port reset (polarity handled per chip revision) */
	mt7621_reset_port(port);

	err = phy_init(port->phy);
	if (err) {
		dev_err(dev, "failed to initialize port%d phy\n", slot);
		return err;
	}

	err = phy_power_on(port->phy);
	if (err) {
		dev_err(dev, "failed to power on port%d phy\n", slot);
		phy_exit(port->phy);
		return err;
	}

	port->enabled = true;

	return 0;
}

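/*
 * Port bring-up sequence: route the PERST pad to GPIO mode, hold PERST#
 * asserted while each port is reset and its PHY powered on, assert the
 * controller reset, release PERST#, disable any port that did not train
 * a link, then configure the PCIe clock generator and release the
 * controller reset.
 */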
static void mt7621_pcie_init_ports(struct mt7621_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct mt7621_pcie_port *port, *tmp;
	u32 val = 0;
	int err;

	rt_sysc_m32(PERST_MODE_MASK, PERST_MODE_GPIO, MT7621_GPIO_MODE);

	mt7621_perst_gpio_pcie_assert(pcie);

	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		u32 slot = port->slot;

		err = mt7621_pcie_init_port(port);
		if (err) {
			dev_err(dev, "initializing port %d failed\n", slot);
			list_del(&port->list);
		} else {
			val = read_config(pcie, slot, PCIE_FTS_NUM);
			dev_info(dev, "Port %d N_FTS = %x\n", slot,
				 (unsigned int)val);
		}
	}

	reset_control_assert(pcie->rst);

	mt7621_perst_gpio_pcie_deassert(pcie);

	list_for_each_entry(port, &pcie->ports, list) {
		u32 slot = port->slot;

		if (!mt7621_pcie_port_is_linkup(port)) {
			dev_err(dev, "pcie%d no card, disable it (RST & CLK)\n",
				slot);
			phy_power_off(port->phy);
			mt7621_control_assert(port);
			mt7621_pcie_port_clk_disable(port);
			port->enabled = false;
		}
	}

	rt_sysc_m32(0x30, 2 << 4, SYSC_REG_SYSTEM_CONFIG1);
	rt_sysc_m32(PCIE_CLK_GEN_EN, PCIE_CLK_GEN_DIS, RALINK_PCIE_CLK_GEN);
	rt_sysc_m32(PCIE_CLK_GEN1_DIS, PCIE_CLK_GEN1_EN, RALINK_PCIE_CLK_GEN1);
	rt_sysc_m32(PCIE_CLK_GEN_DIS, PCIE_CLK_GEN_EN, RALINK_PCIE_CLK_GEN);
	msleep(50);
	reset_control_deassert(pcie->rst);
}

static void mt7621_pcie_enable_port(struct mt7621_pcie_port *port)
{
	struct mt7621_pcie *pcie = port->pcie;
	u32 slot = port->slot;
	u32 offset = MT7621_PCIE_OFFSET + (slot * MT7621_NEXT_PORT);
	u32 val;

	/* enable pcie interrupt */
	val = pcie_read(pcie, RALINK_PCI_PCIMSK_ADDR);
	val |= PCIE_PORT_INT_EN(slot);
	pcie_write(pcie, val, RALINK_PCI_PCIMSK_ADDR);

	/* map 2G DDR region */
	pcie_write(pcie, PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE,
		   offset + RALINK_PCI_BAR0SETUP_ADDR);
	pcie_write(pcie, MEMORY_BASE,
		   offset + RALINK_PCI_IMBASEBAR0_ADDR);

	/* configure class code and revision ID */
	pcie_write(pcie, PCIE_CLASS_CODE | PCIE_REVISION_ID,
		   offset + RALINK_PCI_CLASS);
}

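/*
 * Program every enabled port and then, through configuration cycles,
 * enable bus mastering and set the number of fast training sequences
 * (N_FTS) to 0x50 for the enabled slots.
 */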
static void mt7621_pcie_enable_ports(struct mt7621_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct mt7621_pcie_port *port;
	u8 num_slots_enabled = 0;
	u32 slot;
	u32 val;

	list_for_each_entry(port, &pcie->ports, list) {
		if (port->enabled) {
			mt7621_pcie_enable_port(port);
			dev_info(dev, "PCIE%d enabled\n", port->slot);
			num_slots_enabled++;
		}
	}

	for (slot = 0; slot < num_slots_enabled; slot++) {
		val = read_config(pcie, slot, PCI_COMMAND);
		val |= PCI_COMMAND_MASTER;
		write_config(pcie, slot, PCI_COMMAND, val);

		val = read_config(pcie, slot, PCIE_FTS_NUM);
		val &= ~PCIE_FTS_NUM_MASK;
		val |= PCIE_FTS_NUM_L0(0x50);
		write_config(pcie, slot, PCIE_FTS_NUM, val);
	}
}

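/*
 * The MT7621 exposes its three PCIe ports behind virtual PCI-to-PCI
 * bridges; reassign the bridge numbers so that ports with an active
 * link end up on the lowest bridge numbers.
 */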
static int mt7621_pcie_init_virtual_bridges(struct mt7621_pcie *pcie)
{
	u32 pcie_link_status = 0;
	u32 val = 0;
	struct mt7621_pcie_port *port;

	list_for_each_entry(port, &pcie->ports, list) {
		u32 slot = port->slot;

		if (port->enabled)
			pcie_link_status |= BIT(slot);
	}

	if (pcie_link_status == 0)
		return -ENODEV;

	/*
	 * Bridge number assignment for each link-status combination
	 * (bit n of pcie_link_status set means port n has a link):
	 *
	 *	link status	pcie2	pcie1	pcie0
	 *	3'b000		x	x	x
	 *	3'b001		x	x	0
	 *	3'b010		x	0	x
	 *	3'b011		x	1	0
	 *	3'b100		0	x	x
	 *	3'b101		1	x	0
	 *	3'b110		1	0	x
	 *	3'b111		2	1	0
	 */
	switch (pcie_link_status) {
	case 2:
		val = pcie_read(pcie, RALINK_PCI_PCICFG_ADDR);
		val &= ~(MT7621_BR0_MASK | MT7621_BR1_MASK);
		val |= 0x1 << MT7621_BR0_SHIFT;
		val |= 0x0 << MT7621_BR1_SHIFT;
		pcie_write(pcie, val, RALINK_PCI_PCICFG_ADDR);
		break;
	case 4:
		val = pcie_read(pcie, RALINK_PCI_PCICFG_ADDR);
		val &= ~MT7621_BR_ALL_MASK;
		val |= 0x1 << MT7621_BR0_SHIFT;
		val |= 0x2 << MT7621_BR1_SHIFT;
		val |= 0x0 << MT7621_BR2_SHIFT;
		pcie_write(pcie, val, RALINK_PCI_PCICFG_ADDR);
		break;
	case 5:
		val = pcie_read(pcie, RALINK_PCI_PCICFG_ADDR);
		val &= ~MT7621_BR_ALL_MASK;
		val |= 0x0 << MT7621_BR0_SHIFT;
		val |= 0x2 << MT7621_BR1_SHIFT;
		val |= 0x1 << MT7621_BR2_SHIFT;
		pcie_write(pcie, val, RALINK_PCI_PCICFG_ADDR);
		break;
	case 6:
		val = pcie_read(pcie, RALINK_PCI_PCICFG_ADDR);
		val &= ~MT7621_BR_ALL_MASK;
		val |= 0x2 << MT7621_BR0_SHIFT;
		val |= 0x0 << MT7621_BR1_SHIFT;
		val |= 0x1 << MT7621_BR2_SHIFT;
		pcie_write(pcie, val, RALINK_PCI_PCICFG_ADDR);
		break;
	}

	return 0;
}

static int mt7621_pcie_request_resources(struct mt7621_pcie *pcie,
					 struct list_head *res)
{
	struct device *dev = pcie->dev;

	pci_add_resource_offset(res, &pcie->io, pcie->offset.io);
	pci_add_resource_offset(res, &pcie->mem, pcie->offset.mem);
	pci_add_resource(res, &pcie->busn);

	return devm_request_pci_bus_resources(dev, res);
}

static int mt7621_pcie_register_host(struct pci_host_bridge *host,
				     struct list_head *res)
{
	struct mt7621_pcie *pcie = pci_host_bridge_priv(host);

	list_splice_init(res, &host->windows);
	host->busnr = pcie->busn.start;
	host->dev.parent = pcie->dev;
	host->ops = &mt7621_pci_ops;
	host->map_irq = of_irq_parse_and_map_pci;
	host->swizzle_irq = pci_common_swizzle;
	host->sysdata = pcie;

	return pci_host_probe(host);
}

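/*
 * Probe: parse the DT (ports, PHYs, resets, PERST GPIO), bring up the
 * ports, set up the virtual bridges and per-port registers, claim the
 * PCI ranges and finally register the host bridge.
 */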
static int mt7621_pci_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mt7621_pcie *pcie;
	struct pci_host_bridge *bridge;
	int err;
	LIST_HEAD(res);

	if (!dev->of_node)
		return -ENODEV;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
	if (!bridge)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(bridge);
	pcie->dev = dev;
	platform_set_drvdata(pdev, pcie);
	INIT_LIST_HEAD(&pcie->ports);

	err = mt7621_pcie_parse_dt(pcie);
	if (err) {
		dev_err(dev, "Parsing DT failed\n");
		return err;
	}

	/* set resource limits */
	iomem_resource.start = 0;
	iomem_resource.end = ~0UL;
	ioport_resource.start = 0;
	ioport_resource.end = ~0UL;

	mt7621_pcie_init_ports(pcie);

	err = mt7621_pcie_init_virtual_bridges(pcie);
	if (err) {
		dev_err(dev, "Nothing is connected in virtual bridges. Exiting...\n");
		return 0;
	}

	mt7621_pcie_enable_ports(pcie);

	err = mt7621_pci_parse_request_of_pci_ranges(pcie);
	if (err) {
		dev_err(dev, "Error requesting pci resources from ranges\n");
		return err;
	}

	setup_cm_memory_region(pcie);

	err = mt7621_pcie_request_resources(pcie, &res);
	if (err) {
		dev_err(dev, "Error requesting resources\n");
		return err;
	}

	err = mt7621_pcie_register_host(bridge, &res);
	if (err) {
		dev_err(dev, "Error registering host\n");
		return err;
	}

	return 0;
}

static const struct of_device_id mt7621_pci_ids[] = {
	{ .compatible = "mediatek,mt7621-pci" },
	{},
};
MODULE_DEVICE_TABLE(of, mt7621_pci_ids);

static struct platform_driver mt7621_pci_driver = {
	.probe = mt7621_pci_probe,
	.driver = {
		.name = "mt7621-pci",
		.of_match_table = of_match_ptr(mt7621_pci_ids),
	},
};

static int __init mt7621_pci_init(void)
{
	return platform_driver_register(&mt7621_pci_driver);
}

module_init(mt7621_pci_init);