1
2
3
4
5#include <linux/clk-provider.h>
6#include <linux/pci.h>
7#include <linux/dmi.h>
8#include "dwmac-intel.h"
9#include "dwmac4.h"
10#include "stmmac.h"
11#include "stmmac_ptp.h"
12
/* Fixed MDIO bus addresses of the SerDes ad-hoc registers and the XPCS */
#define INTEL_MGBE_ADHOC_ADDR 0x15
#define INTEL_MGBE_XPCS_ADDR 0x16


/* PTP clock frequency selection via the GMAC GPO pins. The PSE
 * (Programmable Services Engine) and PCH instances encode the
 * frequency choice differently.
 */
#define PSE_PTP_CLK_FREQ_MASK (GMAC_GPO0 | GMAC_GPO3)
#define PSE_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0)
#define PSE_PTP_CLK_FREQ_200MHZ (GMAC_GPO0 | GMAC_GPO3)
#define PSE_PTP_CLK_FREQ_256MHZ (0)
#define PCH_PTP_CLK_FREQ_MASK (GMAC_GPO0)
#define PCH_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0)
#define PCH_PTP_CLK_FREQ_200MHZ (0)


/* CPUID leaf 0x15 enumerates the ART (Always Running Timer) frequency */
#define ART_CPUID_LEAF 0x15
/* PSE hardware cycle counter rate used for cross-timestamp adjustment */
#define EHL_PSE_ART_MHZ 19200000
28
/* Per-device private state, attached to plat->bsp_priv */
struct intel_priv_data {
	int mdio_adhoc_addr;		/* MDIO address of SerDes ad-hoc regs; 0 = none */
	unsigned long crossts_adj;	/* ART cross-timestamp frequency ratio (1 on PCH) */
	bool is_pse;			/* true for PSE-hosted MAC instances */
};
34
35
36
37
38
39
/* Maps a PCI function number to the PHY address wired to that MAC */
struct stmmac_pci_func_data {
	unsigned int func;
	int phy_addr;
};
44
/* Per-board (DMI-matched) table of PCI-function/PHY-address pairs */
struct stmmac_pci_dmi_data {
	const struct stmmac_pci_func_data *func;
	size_t nfuncs;
};
49
/* Per-SKU hook that fills in platform data during probe */
struct stmmac_pci_info {
	int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
};
53
54static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
55 const struct dmi_system_id *dmi_list)
56{
57 const struct stmmac_pci_func_data *func_data;
58 const struct stmmac_pci_dmi_data *dmi_data;
59 const struct dmi_system_id *dmi_id;
60 int func = PCI_FUNC(pdev->devfn);
61 size_t n;
62
63 dmi_id = dmi_first_match(dmi_list);
64 if (!dmi_id)
65 return -ENODEV;
66
67 dmi_data = dmi_id->driver_data;
68 func_data = dmi_data->func;
69
70 for (n = 0; n < dmi_data->nfuncs; n++, func_data++)
71 if (func_data->func == func)
72 return func_data->phy_addr;
73
74 return -ENODEV;
75}
76
77static int serdes_status_poll(struct stmmac_priv *priv, int phyaddr,
78 int phyreg, u32 mask, u32 val)
79{
80 unsigned int retries = 10;
81 int val_rd;
82
83 do {
84 val_rd = mdiobus_read(priv->mii, phyaddr, phyreg);
85 if ((val_rd & mask) == (val & mask))
86 return 0;
87 udelay(POLL_DELAY_US);
88 } while (--retries);
89
90 return -ETIMEDOUT;
91}
92
/* Bring the SerDes lane up: request the PLL clock, assert the lane
 * reset, then move the lane to power state P0. Each step writes an
 * ad-hoc MDIO register and is confirmed by polling the matching status
 * bit. Returns 0 on success or -ETIMEDOUT when a step never completes.
 */
static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
{
	struct intel_priv_data *intel_priv = priv_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int serdes_phy_addr = 0;
	u32 data = 0;

	/* No ad-hoc address means this SKU has no SerDes to manage */
	if (!intel_priv->mdio_adhoc_addr)
		return 0;

	serdes_phy_addr = intel_priv->mdio_adhoc_addr;

	/* assert clk_req */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data |= SERDES_PLL_CLK;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for clk_ack assertion */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PLL_CLK,
				  SERDES_PLL_CLK);

	if (data) {
		dev_err(priv->device, "Serdes PLL clk request timeout\n");
		return data;
	}

	/* assert lane reset */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data |= SERDES_RST;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for assert lane reset reflection */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_RST,
				  SERDES_RST);

	if (data) {
		dev_err(priv->device, "Serdes assert lane reset timeout\n");
		return data;
	}

	/* move power state to P0 */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);

	data &= ~SERDES_PWR_ST_MASK;
	data |= SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT;

	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* Check for P0 state */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PWR_ST_MASK,
				  SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT);

	if (data) {
		dev_err(priv->device, "Serdes power state P0 timeout.\n");
		return data;
	}

	/* PSE only - ungate the SGMII PHY Rx clock */
	if (intel_priv->is_pse)
		mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
			       0, SERDES_PHY_RX_CLK);

	return 0;
}
163
/* Power the SerDes lane down: the exact inverse of intel_serdes_powerup().
 * Gate the PSE Rx clock, move the lane to power state P3, release the
 * PLL clock request, then de-assert the lane reset. On any poll timeout
 * the remaining steps are skipped (void return, best effort).
 */
static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
{
	struct intel_priv_data *intel_priv = intel_data;
	struct stmmac_priv *priv = netdev_priv(ndev);
	int serdes_phy_addr = 0;
	u32 data = 0;

	/* No ad-hoc address means this SKU has no SerDes to manage */
	if (!intel_priv->mdio_adhoc_addr)
		return;

	serdes_phy_addr = intel_priv->mdio_adhoc_addr;

	/* PSE only - gate the SGMII PHY Rx clock */
	if (intel_priv->is_pse)
		mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
			       SERDES_PHY_RX_CLK, 0);

	/* move power state to P3 */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);

	data &= ~SERDES_PWR_ST_MASK;
	data |= SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT;

	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* Check for P3 state */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PWR_ST_MASK,
				  SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT);

	if (data) {
		dev_err(priv->device, "Serdes power state P3 timeout\n");
		return;
	}

	/* de-assert clk_req */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data &= ~SERDES_PLL_CLK;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for clk_ack de-assert */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_PLL_CLK,
				  (u32)~SERDES_PLL_CLK);

	if (data) {
		dev_err(priv->device, "Serdes PLL clk de-assert timeout\n");
		return;
	}

	/* de-assert lane reset */
	data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
	data &= ~SERDES_RST;
	mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);

	/* check for de-assert lane reset reflection */
	data = serdes_status_poll(priv, serdes_phy_addr,
				  SERDES_GSR0,
				  SERDES_RST,
				  (u32)~SERDES_RST);

	if (data) {
		dev_err(priv->device, "Serdes de-assert lane reset timeout\n");
		return;
	}
}
232
233
234
235
236static void intel_mgbe_ptp_clk_freq_config(void *npriv)
237{
238 struct stmmac_priv *priv = (struct stmmac_priv *)npriv;
239 struct intel_priv_data *intel_priv;
240 u32 gpio_value;
241
242 intel_priv = (struct intel_priv_data *)priv->plat->bsp_priv;
243
244 gpio_value = readl(priv->ioaddr + GMAC_GPIO_STATUS);
245
246 if (intel_priv->is_pse) {
247
248 gpio_value &= ~PSE_PTP_CLK_FREQ_MASK;
249 gpio_value |= PSE_PTP_CLK_FREQ_200MHZ;
250 } else {
251
252 gpio_value &= ~PCH_PTP_CLK_FREQ_MASK;
253 gpio_value |= PCH_PTP_CLK_FREQ_200MHZ;
254 }
255
256 writel(gpio_value, priv->ioaddr + GMAC_GPIO_STATUS);
257}
258
259static void get_arttime(struct mii_bus *mii, int intel_adhoc_addr,
260 u64 *art_time)
261{
262 u64 ns;
263
264 ns = mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE3);
265 ns <<= GMAC4_ART_TIME_SHIFT;
266 ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE2);
267 ns <<= GMAC4_ART_TIME_SHIFT;
268 ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE1);
269 ns <<= GMAC4_ART_TIME_SHIFT;
270 ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE0);
271
272 *art_time = ns;
273}
274
/* intel_crosststamp - PTP/ART cross-timestamp callback
 * @device: filled with the MAC's PTP time
 * @system: filled with the matching ART value converted to TSC units
 * @ctx: the stmmac_priv of this MAC instance
 *
 * Arms an internal auxiliary snapshot, triggers it via a GPO1 rising
 * edge, waits for the time-sync indication, then drains the snapshot
 * FIFO reading paired PTP/ART timestamps.
 * Returns 0 on success or a negative errno.
 */
static int intel_crosststamp(ktime_t *device,
			     struct system_counterval_t *system,
			     void *ctx)
{
	struct intel_priv_data *intel_priv;

	struct stmmac_priv *priv = (struct stmmac_priv *)ctx;
	void __iomem *ptpaddr = priv->ptpaddr;
	void __iomem *ioaddr = priv->hw->pcsr;
	unsigned long flags;
	u64 art_time = 0;
	u64 ptp_time = 0;
	u32 num_snapshot;
	u32 gpio_value;
	u32 acr_value;
	int ret;
	u32 v;
	int i;

	/* Cross-timestamping needs the CPU's Always Running Timer */
	if (!boot_cpu_has(X86_FEATURE_ART))
		return -EOPNOTSUPP;

	intel_priv = priv->plat->bsp_priv;

	/* Internal cross-timestamping and externally-triggered event
	 * timestamping cannot run at the same time.
	 */
	if (priv->plat->ext_snapshot_en)
		return -EBUSY;

	mutex_lock(&priv->aux_ts_lock);

	/* Enable the internal snapshot trigger for the configured slot */
	acr_value = readl(ptpaddr + PTP_ACR);
	acr_value &= ~PTP_ACR_MASK;
	switch (priv->plat->int_snapshot_num) {
	case AUX_SNAPSHOT0:
		acr_value |= PTP_ACR_ATSEN0;
		break;
	case AUX_SNAPSHOT1:
		acr_value |= PTP_ACR_ATSEN1;
		break;
	case AUX_SNAPSHOT2:
		acr_value |= PTP_ACR_ATSEN2;
		break;
	case AUX_SNAPSHOT3:
		acr_value |= PTP_ACR_ATSEN3;
		break;
	default:
		mutex_unlock(&priv->aux_ts_lock);
		return -EINVAL;
	}
	writel(acr_value, ptpaddr + PTP_ACR);

	/* Clear the auxiliary timestamp FIFO of stale entries */
	acr_value = readl(ptpaddr + PTP_ACR);
	acr_value |= PTP_ACR_ATSFC;
	writel(acr_value, ptpaddr + PTP_ACR);

	mutex_unlock(&priv->aux_ts_lock);

	/* Trigger the internal snapshot signal: create a rising edge by
	 * toggling GPO1 low and back to high.
	 */
	gpio_value = readl(ioaddr + GMAC_GPIO_STATUS);
	gpio_value &= ~GMAC_GPO1;
	writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);
	gpio_value |= GMAC_GPO1;
	writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);

	/* Time sync done indication - poll the TS interrupt status bit */
	ret = readl_poll_timeout(priv->ioaddr + GMAC_INT_STATUS, v,
				 (v & GMAC_INT_TSIE), 100, 10000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: Wait for time sync operation timeout\n", __func__);
		return ret;
	}

	num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) &
			GMAC_TIMESTAMP_ATSNS_MASK) >>
			GMAC_TIMESTAMP_ATSNS_SHIFT;

	/* Drain the FIFO; the last pair read is the one reported */
	for (i = 0; i < num_snapshot; i++) {
		spin_lock_irqsave(&priv->ptp_lock, flags);
		stmmac_get_ptptime(priv, ptpaddr, &ptp_time);
		*device = ns_to_ktime(ptp_time);
		spin_unlock_irqrestore(&priv->ptp_lock, flags);
		get_arttime(priv->mii, intel_priv->mdio_adhoc_addr, &art_time);
		*system = convert_art_to_tsc(art_time);
	}

	/* Scale ART cycles by the PSE frequency ratio (1 on PCH) */
	system->cycles *= intel_priv->crossts_adj;

	return 0;
}
372
/* Compute the ART-to-PSE cross-timestamp frequency ratio.
 * @base: PSE hardware counter frequency (EHL_PSE_ART_MHZ, i.e. 19.2 MHz)
 *
 * The ART frequency comes from CPUID leaf 0x15 ECX; the resulting ratio
 * is applied to ART cycle counts in intel_crosststamp().
 * NOTE(review): do_div() is specified for u64 dividends but art_freq is
 * unsigned int here — works on x86 (this driver is x86-only), verify if
 * ever reused elsewhere.
 */
static void intel_mgbe_pse_crossts_adj(struct intel_priv_data *intel_priv,
				       int base)
{
	if (boot_cpu_has(X86_FEATURE_ART)) {
		unsigned int art_freq;

		/* CPUID.15H:ECX = nominal ART frequency in Hz, when enumerated */
		art_freq = cpuid_ecx(ART_CPUID_LEAF);
		do_div(art_freq, base);
		intel_priv->crossts_adj = art_freq;
	}
}
387
/* Defaults shared by the non-mGbE (Quark) SKUs */
static void common_default_data(struct plat_stmmacenet_data *plat)
{
	plat->clk_csr = 2;	/* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
	plat->has_gmac = 1;
	plat->force_sf_dma_mode = 1;

	plat->mdio_bus_data->needs_reset = true;

	/* Set default value for multicast hash bins */
	plat->multicast_filter_bins = HASH_TABLE_SIZE;

	/* Set default value for unicast filter entries */
	plat->unicast_filter_entries = 1;

	/* Set the maxmtu to a default of JUMBO_LEN */
	plat->maxmtu = JUMBO_LEN;

	/* Single RX and TX queue */
	plat->tx_queues_to_use = 1;
	plat->rx_queues_to_use = 1;

	/* Disable Priority config by default */
	plat->tx_queues_cfg[0].use_prio = false;
	plat->rx_queues_cfg[0].use_prio = false;

	/* Disable RX queues routing by default */
	plat->rx_queues_cfg[0].pkt_route = 0x0;
}
416
417static int intel_mgbe_common_data(struct pci_dev *pdev,
418 struct plat_stmmacenet_data *plat)
419{
420 char clk_name[20];
421 int ret;
422 int i;
423
424 plat->pdev = pdev;
425 plat->phy_addr = -1;
426 plat->clk_csr = 5;
427 plat->has_gmac = 0;
428 plat->has_gmac4 = 1;
429 plat->force_sf_dma_mode = 0;
430 plat->tso_en = 1;
431
432 plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
433
434 for (i = 0; i < plat->rx_queues_to_use; i++) {
435 plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
436 plat->rx_queues_cfg[i].chan = i;
437
438
439 plat->rx_queues_cfg[i].use_prio = false;
440
441
442 plat->rx_queues_cfg[i].pkt_route = 0x0;
443 }
444
445 for (i = 0; i < plat->tx_queues_to_use; i++) {
446 plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
447
448
449 plat->tx_queues_cfg[i].use_prio = false;
450
451 if (i > 0)
452 plat->tx_queues_cfg[i].tbs_en = 1;
453 }
454
455
456 plat->tx_fifo_size = plat->tx_queues_to_use * 4096;
457 plat->rx_fifo_size = plat->rx_queues_to_use * 4096;
458
459 plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
460 plat->tx_queues_cfg[0].weight = 0x09;
461 plat->tx_queues_cfg[1].weight = 0x0A;
462 plat->tx_queues_cfg[2].weight = 0x0B;
463 plat->tx_queues_cfg[3].weight = 0x0C;
464 plat->tx_queues_cfg[4].weight = 0x0D;
465 plat->tx_queues_cfg[5].weight = 0x0E;
466 plat->tx_queues_cfg[6].weight = 0x0F;
467 plat->tx_queues_cfg[7].weight = 0x10;
468
469 plat->dma_cfg->pbl = 32;
470 plat->dma_cfg->pblx8 = true;
471 plat->dma_cfg->fixed_burst = 0;
472 plat->dma_cfg->mixed_burst = 0;
473 plat->dma_cfg->aal = 0;
474 plat->dma_cfg->dche = true;
475
476 plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi),
477 GFP_KERNEL);
478 if (!plat->axi)
479 return -ENOMEM;
480
481 plat->axi->axi_lpi_en = 0;
482 plat->axi->axi_xit_frm = 0;
483 plat->axi->axi_wr_osr_lmt = 1;
484 plat->axi->axi_rd_osr_lmt = 1;
485 plat->axi->axi_blen[0] = 4;
486 plat->axi->axi_blen[1] = 8;
487 plat->axi->axi_blen[2] = 16;
488
489 plat->ptp_max_adj = plat->clk_ptp_rate;
490 plat->eee_usecs_rate = plat->clk_ptp_rate;
491
492
493 sprintf(clk_name, "%s-%s", "stmmac", pci_name(pdev));
494
495 plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev,
496 clk_name, NULL, 0,
497 plat->clk_ptp_rate);
498
499 if (IS_ERR(plat->stmmac_clk)) {
500 dev_warn(&pdev->dev, "Fail to register stmmac-clk\n");
501 plat->stmmac_clk = NULL;
502 }
503
504 ret = clk_prepare_enable(plat->stmmac_clk);
505 if (ret) {
506 clk_unregister_fixed_rate(plat->stmmac_clk);
507 return ret;
508 }
509
510 plat->ptp_clk_freq_config = intel_mgbe_ptp_clk_freq_config;
511
512
513 plat->multicast_filter_bins = HASH_TABLE_SIZE;
514
515
516 plat->unicast_filter_entries = 1;
517
518
519 plat->maxmtu = JUMBO_LEN;
520
521 plat->vlan_fail_q_en = true;
522
523
524 plat->vlan_fail_q = plat->rx_queues_to_use - 1;
525
526
527 if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII) {
528 plat->mdio_bus_data->has_xpcs = true;
529 plat->mdio_bus_data->xpcs_an_inband = true;
530 }
531
532
533 plat->mdio_bus_data->phy_mask = 1 << INTEL_MGBE_ADHOC_ADDR;
534 plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR;
535
536 plat->int_snapshot_num = AUX_SNAPSHOT1;
537 plat->ext_snapshot_num = AUX_SNAPSHOT0;
538
539 plat->has_crossts = true;
540 plat->crosststamp = intel_crosststamp;
541
542
543 plat->msi_mac_vec = 29;
544 plat->msi_lpi_vec = 28;
545 plat->msi_sfty_ce_vec = 27;
546 plat->msi_sfty_ue_vec = 26;
547 plat->msi_rx_base_vec = 0;
548 plat->msi_tx_base_vec = 1;
549
550 return 0;
551}
552
553static int ehl_common_data(struct pci_dev *pdev,
554 struct plat_stmmacenet_data *plat)
555{
556 plat->rx_queues_to_use = 8;
557 plat->tx_queues_to_use = 8;
558 plat->clk_ptp_rate = 200000000;
559
560 return intel_mgbe_common_data(pdev, plat);
561}
562
563static int ehl_sgmii_data(struct pci_dev *pdev,
564 struct plat_stmmacenet_data *plat)
565{
566 plat->bus_id = 1;
567 plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
568
569 plat->serdes_powerup = intel_serdes_powerup;
570 plat->serdes_powerdown = intel_serdes_powerdown;
571
572 return ehl_common_data(pdev, plat);
573}
574
575static struct stmmac_pci_info ehl_sgmii1g_info = {
576 .setup = ehl_sgmii_data,
577};
578
579static int ehl_rgmii_data(struct pci_dev *pdev,
580 struct plat_stmmacenet_data *plat)
581{
582 plat->bus_id = 1;
583 plat->phy_interface = PHY_INTERFACE_MODE_RGMII;
584
585 return ehl_common_data(pdev, plat);
586}
587
588static struct stmmac_pci_info ehl_rgmii1g_info = {
589 .setup = ehl_rgmii_data,
590};
591
592static int ehl_pse0_common_data(struct pci_dev *pdev,
593 struct plat_stmmacenet_data *plat)
594{
595 struct intel_priv_data *intel_priv = plat->bsp_priv;
596
597 intel_priv->is_pse = true;
598 plat->bus_id = 2;
599 plat->addr64 = 32;
600
601 intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);
602
603 return ehl_common_data(pdev, plat);
604}
605
606static int ehl_pse0_rgmii1g_data(struct pci_dev *pdev,
607 struct plat_stmmacenet_data *plat)
608{
609 plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
610 return ehl_pse0_common_data(pdev, plat);
611}
612
613static struct stmmac_pci_info ehl_pse0_rgmii1g_info = {
614 .setup = ehl_pse0_rgmii1g_data,
615};
616
617static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
618 struct plat_stmmacenet_data *plat)
619{
620 plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
621 plat->serdes_powerup = intel_serdes_powerup;
622 plat->serdes_powerdown = intel_serdes_powerdown;
623 return ehl_pse0_common_data(pdev, plat);
624}
625
626static struct stmmac_pci_info ehl_pse0_sgmii1g_info = {
627 .setup = ehl_pse0_sgmii1g_data,
628};
629
630static int ehl_pse1_common_data(struct pci_dev *pdev,
631 struct plat_stmmacenet_data *plat)
632{
633 struct intel_priv_data *intel_priv = plat->bsp_priv;
634
635 intel_priv->is_pse = true;
636 plat->bus_id = 3;
637 plat->addr64 = 32;
638
639 intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);
640
641 return ehl_common_data(pdev, plat);
642}
643
644static int ehl_pse1_rgmii1g_data(struct pci_dev *pdev,
645 struct plat_stmmacenet_data *plat)
646{
647 plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
648 return ehl_pse1_common_data(pdev, plat);
649}
650
651static struct stmmac_pci_info ehl_pse1_rgmii1g_info = {
652 .setup = ehl_pse1_rgmii1g_data,
653};
654
655static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
656 struct plat_stmmacenet_data *plat)
657{
658 plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
659 plat->serdes_powerup = intel_serdes_powerup;
660 plat->serdes_powerdown = intel_serdes_powerdown;
661 return ehl_pse1_common_data(pdev, plat);
662}
663
664static struct stmmac_pci_info ehl_pse1_sgmii1g_info = {
665 .setup = ehl_pse1_sgmii1g_data,
666};
667
668static int tgl_common_data(struct pci_dev *pdev,
669 struct plat_stmmacenet_data *plat)
670{
671 plat->rx_queues_to_use = 6;
672 plat->tx_queues_to_use = 4;
673 plat->clk_ptp_rate = 200000000;
674
675 return intel_mgbe_common_data(pdev, plat);
676}
677
678static int tgl_sgmii_phy0_data(struct pci_dev *pdev,
679 struct plat_stmmacenet_data *plat)
680{
681 plat->bus_id = 1;
682 plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
683 plat->serdes_powerup = intel_serdes_powerup;
684 plat->serdes_powerdown = intel_serdes_powerdown;
685 return tgl_common_data(pdev, plat);
686}
687
688static struct stmmac_pci_info tgl_sgmii1g_phy0_info = {
689 .setup = tgl_sgmii_phy0_data,
690};
691
692static int tgl_sgmii_phy1_data(struct pci_dev *pdev,
693 struct plat_stmmacenet_data *plat)
694{
695 plat->bus_id = 2;
696 plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
697 plat->serdes_powerup = intel_serdes_powerup;
698 plat->serdes_powerdown = intel_serdes_powerdown;
699 return tgl_common_data(pdev, plat);
700}
701
702static struct stmmac_pci_info tgl_sgmii1g_phy1_info = {
703 .setup = tgl_sgmii_phy1_data,
704};
705
706static int adls_sgmii_phy0_data(struct pci_dev *pdev,
707 struct plat_stmmacenet_data *plat)
708{
709 plat->bus_id = 1;
710 plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
711
712
713
714 return tgl_common_data(pdev, plat);
715}
716
717static struct stmmac_pci_info adls_sgmii1g_phy0_info = {
718 .setup = adls_sgmii_phy0_data,
719};
720
721static int adls_sgmii_phy1_data(struct pci_dev *pdev,
722 struct plat_stmmacenet_data *plat)
723{
724 plat->bus_id = 2;
725 plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
726
727
728
729 return tgl_common_data(pdev, plat);
730}
731
732static struct stmmac_pci_info adls_sgmii1g_phy1_info = {
733 .setup = adls_sgmii_phy1_data,
734};
/* Galileo boards: only PCI function 6 has a PHY, at address 1 */
static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
	{
		.func = 6,
		.phy_addr = 1,
	},
};

static const struct stmmac_pci_dmi_data galileo_stmmac_dmi_data = {
	.func = galileo_stmmac_func_data,
	.nfuncs = ARRAY_SIZE(galileo_stmmac_func_data),
};
746
/* IOT2040 boards: both PCI functions 6 and 7 have a PHY at address 1 */
static const struct stmmac_pci_func_data iot2040_stmmac_func_data[] = {
	{
		.func = 6,
		.phy_addr = 1,
	},
	{
		.func = 7,
		.phy_addr = 1,
	},
};

static const struct stmmac_pci_dmi_data iot2040_stmmac_dmi_data = {
	.func = iot2040_stmmac_func_data,
	.nfuncs = ARRAY_SIZE(iot2040_stmmac_func_data),
};
762
/* DMI board matches used to resolve the Quark PHY wiring */
static const struct dmi_system_id quark_pci_dmi[] = {
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	/* There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
	 * The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which
	 * has only one PCI network device, while other asset tags are
	 * for IOT2040 which has two. The more specific match must come
	 * first, since dmi_first_match() returns the first hit.
	 */
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
			DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
					"6ES7647-0AA00-0YA2"),
		},
		.driver_data = (void *)&galileo_stmmac_dmi_data,
	},
	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
		},
		.driver_data = (void *)&iot2040_stmmac_dmi_data,
	},
	{}
};
797
/* Platform setup for the Quark X1000 SoC MAC.
 * Resolves the PHY address from the DMI board tables; returns 0 on
 * success or a negative errno when no PHY is wired to this function.
 */
static int quark_default_data(struct pci_dev *pdev,
			      struct plat_stmmacenet_data *plat)
{
	int ret;

	/* Set common default data first */
	common_default_data(plat);

	/* Refuse to load the driver and register net device if MAC controller
	 * does not connect to any PHY interface.
	 */
	ret = stmmac_pci_find_phy_addr(pdev, quark_pci_dmi);
	if (ret < 0) {
		/* Return error to the caller on DMI enabled boards */
		if (dmi_get_system_info(DMI_BOARD_NAME))
			return ret;

		/* Galileo boards with old firmware don't support DMI.
		 * We always return 1 here, so at least the first found
		 * MAC controller gets probed.
		 */
		ret = 1;
	}

	plat->bus_id = pci_dev_id(pdev);
	plat->phy_addr = ret;
	plat->phy_interface = PHY_INTERFACE_MODE_RMII;

	plat->dma_cfg->pbl = 16;
	plat->dma_cfg->pblx8 = true;
	plat->dma_cfg->fixed_burst = 1;

	return 0;
}

static const struct stmmac_pci_info quark_info = {
	.setup = quark_default_data,
};
837
838static int stmmac_config_single_msi(struct pci_dev *pdev,
839 struct plat_stmmacenet_data *plat,
840 struct stmmac_resources *res)
841{
842 int ret;
843
844 ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
845 if (ret < 0) {
846 dev_info(&pdev->dev, "%s: Single IRQ enablement failed\n",
847 __func__);
848 return ret;
849 }
850
851 res->irq = pci_irq_vector(pdev, 0);
852 res->wol_irq = res->irq;
853 plat->multi_msi_en = 0;
854 dev_info(&pdev->dev, "%s: Single IRQ enablement successful\n",
855 __func__);
856
857 return 0;
858}
859
860static int stmmac_config_multi_msi(struct pci_dev *pdev,
861 struct plat_stmmacenet_data *plat,
862 struct stmmac_resources *res)
863{
864 int ret;
865 int i;
866
867 if (plat->msi_rx_base_vec >= STMMAC_MSI_VEC_MAX ||
868 plat->msi_tx_base_vec >= STMMAC_MSI_VEC_MAX) {
869 dev_info(&pdev->dev, "%s: Invalid RX & TX vector defined\n",
870 __func__);
871 return -1;
872 }
873
874 ret = pci_alloc_irq_vectors(pdev, 2, STMMAC_MSI_VEC_MAX,
875 PCI_IRQ_MSI | PCI_IRQ_MSIX);
876 if (ret < 0) {
877 dev_info(&pdev->dev, "%s: multi MSI enablement failed\n",
878 __func__);
879 return ret;
880 }
881
882
883 for (i = 0; i < plat->rx_queues_to_use; i++) {
884 res->rx_irq[i] = pci_irq_vector(pdev,
885 plat->msi_rx_base_vec + i * 2);
886 }
887
888
889 for (i = 0; i < plat->tx_queues_to_use; i++) {
890 res->tx_irq[i] = pci_irq_vector(pdev,
891 plat->msi_tx_base_vec + i * 2);
892 }
893
894 if (plat->msi_mac_vec < STMMAC_MSI_VEC_MAX)
895 res->irq = pci_irq_vector(pdev, plat->msi_mac_vec);
896 if (plat->msi_wol_vec < STMMAC_MSI_VEC_MAX)
897 res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec);
898 if (plat->msi_lpi_vec < STMMAC_MSI_VEC_MAX)
899 res->lpi_irq = pci_irq_vector(pdev, plat->msi_lpi_vec);
900 if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX)
901 res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec);
902 if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX)
903 res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec);
904
905 plat->multi_msi_en = 1;
906 dev_info(&pdev->dev, "%s: multi MSI enablement successful\n", __func__);
907
908 return 0;
909}
910
911
912
913
914
915
916
917
918
919
920
921
922
/**
 * intel_eth_pci_probe - PCI probe for Intel mGbE/Quark stmmac devices
 *
 * @pdev: pci device pointer
 * @id: matched entry of the ID table; its driver_data carries the
 *      per-SKU &struct stmmac_pci_info setup hook.
 *
 * Allocates platform data, enables and maps the PCI device, runs the
 * SKU-specific setup() callback, configures MSI vectors (multi-vector
 * with single-vector fallback) and registers the stmmac driver core.
 * Returns 0 on success or a negative errno.
 */
static int intel_eth_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
	struct intel_priv_data *intel_priv;
	struct plat_stmmacenet_data *plat;
	struct stmmac_resources res;
	int ret;

	intel_priv = devm_kzalloc(&pdev->dev, sizeof(*intel_priv), GFP_KERNEL);
	if (!intel_priv)
		return -ENOMEM;

	plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
	if (!plat)
		return -ENOMEM;

	plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
					   sizeof(*plat->mdio_bus_data),
					   GFP_KERNEL);
	if (!plat->mdio_bus_data)
		return -ENOMEM;

	plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg),
				     GFP_KERNEL);
	if (!plat->dma_cfg)
		return -ENOMEM;

	/* Enable pci device */
	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
			__func__);
		return ret;
	}

	/* Map BAR 0 only; managed, released automatically on detach */
	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
	if (ret)
		return ret;

	pci_set_master(pdev);

	plat->bsp_priv = intel_priv;
	intel_priv->mdio_adhoc_addr = INTEL_MGBE_ADHOC_ADDR;
	intel_priv->crossts_adj = 1;

	/* Initialize all MSI vectors to invalid (STMMAC_MSI_VEC_MAX) so
	 * only the vectors the setup() callback fills in are used later.
	 */
	plat->msi_mac_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_wol_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_lpi_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_sfty_ce_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_sfty_ue_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_rx_base_vec = STMMAC_MSI_VEC_MAX;
	plat->msi_tx_base_vec = STMMAC_MSI_VEC_MAX;

	ret = info->setup(pdev, plat);
	if (ret)
		return ret;

	memset(&res, 0, sizeof(res));
	res.addr = pcim_iomap_table(pdev)[0];

	if (plat->eee_usecs_rate > 0) {
		u32 tx_lpi_usec;

		/* Program the 1us tic counter from the EEE clock rate */
		tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1;
		writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER);
	}

	/* Try multi-vector MSI first, fall back to one shared IRQ */
	ret = stmmac_config_multi_msi(pdev, plat, &res);
	if (ret) {
		ret = stmmac_config_single_msi(pdev, plat, &res);
		if (ret) {
			dev_err(&pdev->dev, "%s: ERROR: failed to enable IRQ\n",
				__func__);
			goto err_alloc_irq;
		}
	}

	ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
	if (ret) {
		goto err_dvr_probe;
	}

	return 0;

err_dvr_probe:
	pci_free_irq_vectors(pdev);
err_alloc_irq:
	/* Undo the clock setup done in intel_mgbe_common_data() */
	clk_disable_unprepare(plat->stmmac_clk);
	clk_unregister_fixed_rate(plat->stmmac_clk);
	return ret;
}
1019
1020
1021
1022
1023
1024
1025
1026
/**
 * intel_eth_pci_remove - PCI remove callback
 *
 * @pdev: pci device pointer
 *
 * Unregisters the stmmac core, drops the fixed-rate clock and unmaps
 * BAR 0. NOTE(review): the clock is only unregistered here, not
 * disabled/unprepared — assumes stmmac_dvr_remove() handles that;
 * verify against the stmmac core.
 */
static void intel_eth_pci_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = dev_get_drvdata(&pdev->dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	stmmac_dvr_remove(&pdev->dev);

	clk_unregister_fixed_rate(priv->plat->stmmac_clk);

	pcim_iounmap_regions(pdev, BIT(0));
}
1038
1039static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
1040{
1041 struct pci_dev *pdev = to_pci_dev(dev);
1042 int ret;
1043
1044 ret = stmmac_suspend(dev);
1045 if (ret)
1046 return ret;
1047
1048 ret = pci_save_state(pdev);
1049 if (ret)
1050 return ret;
1051
1052 pci_wake_from_d3(pdev, true);
1053 return 0;
1054}
1055
1056static int __maybe_unused intel_eth_pci_resume(struct device *dev)
1057{
1058 struct pci_dev *pdev = to_pci_dev(dev);
1059 int ret;
1060
1061 pci_restore_state(pdev);
1062 pci_set_power_state(pdev, PCI_D0);
1063
1064 ret = pcim_enable_device(pdev);
1065 if (ret)
1066 return ret;
1067
1068 pci_set_master(pdev);
1069
1070 return stmmac_resume(dev);
1071}
1072
/* System sleep PM ops; __maybe_unused keeps the callbacks buildable
 * when CONFIG_PM is disabled.
 */
static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
			 intel_eth_pci_resume);

/* PCI device IDs handled by this driver */
#define PCI_DEVICE_ID_INTEL_QUARK 0x0937
#define PCI_DEVICE_ID_INTEL_EHL_RGMII1G 0x4b30
#define PCI_DEVICE_ID_INTEL_EHL_SGMII1G 0x4b31
#define PCI_DEVICE_ID_INTEL_EHL_SGMII2G5 0x4b32

/* EHL Intel(R) Programmable Services Engine (PSE) instances: two MACs
 * (PSE0/PSE1), each offered as RGMII 1G, SGMII 1G and SGMII 2.5G SKUs.
 */
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G 0x4ba0
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G 0x4ba1
#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5 0x4ba2
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G 0x4bb0
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G 0x4bb1
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5 0x4bb2
#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0 0x43ac
#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1 0x43a2
#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G 0xa0ac
#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0 0x7aac
#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1 0x7aad
1094
/* Device-ID to per-SKU setup-info mapping; 2.5G SKUs reuse the 1G setup */
static const struct pci_device_id intel_eth_pci_id_table[] = {
	{ PCI_DEVICE_DATA(INTEL, QUARK, &quark_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_RGMII1G, &ehl_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII1G, &ehl_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5, &ehl_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G, &ehl_pse0_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G, &ehl_pse0_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5, &ehl_pse0_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G, &ehl_pse1_rgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G, &ehl_pse1_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5, &ehl_pse1_sgmii1g_info) },
	{ PCI_DEVICE_DATA(INTEL, TGL_SGMII1G, &tgl_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0, &tgl_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) },
	{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) },
	{ PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) },
	{}
};
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);

static struct pci_driver intel_eth_pci_driver = {
	.name = "intel-eth-pci",
	.id_table = intel_eth_pci_id_table,
	.probe = intel_eth_pci_probe,
	.remove = intel_eth_pci_remove,
	.driver = {
		.pm = &intel_eth_pm_ops,
	},
};
1124
1125module_pci_driver(intel_eth_pci_driver);
1126
1127MODULE_DESCRIPTION("INTEL 10/100/1000 Ethernet PCI driver");
1128MODULE_AUTHOR("Voon Weifeng <weifeng.voon@intel.com>");
1129MODULE_LICENSE("GPL v2");
1130