#include <config.h>
#include <dm.h>
#include <errno.h>
#include <fdt_support.h>
#include <malloc.h>
#include <miiphy.h>
#include <misc.h>
#include <net.h>
#include <netdev.h>
#include <pci.h>
#include <pci_ids.h>
#include <asm/io.h>
#include <asm/arch/board.h>
#include <linux/delay.h>
#include <linux/libfdt.h>

#include "nic_reg.h"
#include "nic.h"
#include "bgx.h"

static const phy_interface_t if_mode[] = {
	[QLM_MODE_SGMII] = PHY_INTERFACE_MODE_SGMII,
	[QLM_MODE_RGMII] = PHY_INTERFACE_MODE_RGMII,
	[QLM_MODE_QSGMII] = PHY_INTERFACE_MODE_QSGMII,
	[QLM_MODE_XAUI] = PHY_INTERFACE_MODE_XAUI,
	[QLM_MODE_RXAUI] = PHY_INTERFACE_MODE_RXAUI,
};

struct lmac {
	struct bgx *bgx;
	int dmac;
	u8 mac[6];
	bool link_up;
	bool init_pend;
	int lmacid;
	int phy_addr;
	struct udevice *dev;
	struct mii_dev *mii_bus;
	struct phy_device *phydev;
	unsigned int last_duplex;
	unsigned int last_link;
	unsigned int last_speed;
	int lane_to_sds;
	int use_training;
	int lmac_type;
	u8 qlm_mode;
	int qlm;
	bool is_1gx;
};

struct bgx {
	u8 bgx_id;
	int node;
	struct lmac lmac[MAX_LMAC_PER_BGX];
	int lmac_count;
	u8 max_lmac;
	void __iomem *reg_base;
	struct pci_dev *pdev;
	bool is_rgx;
};

struct bgx_board_info bgx_board_info[MAX_BGX_PER_NODE];

struct bgx *bgx_vnic[MAX_BGX_PER_NODE];

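/*
 * Per-LMAC CSRs live at a 1 MB stride inside the BGX BAR, so the accessors
 * below fold the LMAC index into bits [23:20] of the register offset.
 */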
static u64 bgx_reg_read(struct bgx *bgx, uint8_t lmac, u64 offset)
{
	u64 addr = (uintptr_t)bgx->reg_base +
			((uint32_t)lmac << 20) + offset;

	return readq((void *)addr);
}

static void bgx_reg_write(struct bgx *bgx, uint8_t lmac,
			  u64 offset, u64 val)
{
	u64 addr = (uintptr_t)bgx->reg_base +
			((uint32_t)lmac << 20) + offset;

	writeq(val, (void *)addr);
}

static void bgx_reg_modify(struct bgx *bgx, uint8_t lmac,
			   u64 offset, u64 val)
{
	u64 addr = (uintptr_t)bgx->reg_base +
			((uint32_t)lmac << 20) + offset;

	writeq(val | bgx_reg_read(bgx, lmac, offset), (void *)addr);
}

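/*
 * Both pollers return 0 once the masked bits reach the wanted state, and a
 * non-zero value if the millisecond-granularity timeout expires first.
 */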
static int bgx_poll_reg(struct bgx *bgx, uint8_t lmac,
			u64 reg, u64 mask, bool zero)
{
	int timeout = 200;
	u64 reg_val;

	while (timeout) {
		reg_val = bgx_reg_read(bgx, lmac, reg);
		if (zero && !(reg_val & mask))
			return 0;
		if (!zero && (reg_val & mask))
			return 0;
		mdelay(1);
		timeout--;
	}
	return 1;
}

static int gser_poll_reg(u64 reg, int bit, u64 mask, u64 expected_val,
			 int timeout)
{
	u64 reg_val;

	debug("%s reg = %#llx, mask = %#llx,", __func__, reg, mask);
	debug(" expected_val = %#llx, bit = %d\n", expected_val, bit);
	while (timeout) {
		reg_val = readq(reg) >> bit;
		if ((reg_val & mask) == expected_val)
			return 0;
		mdelay(1);
		timeout--;
	}
	return 1;
}

static bool is_bgx_port_valid(int bgx, int lmac)
{
	debug("%s bgx %d lmac %d valid %d\n", __func__, bgx, lmac,
	      bgx_board_info[bgx].lmac_reg[lmac]);

	if (bgx_board_info[bgx].lmac_reg[lmac])
		return 1;
	else
		return 0;
}

struct lmac *bgx_get_lmac(int node, int bgx_idx, int lmacid)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];

	if (bgx)
		return &bgx->lmac[lmacid];

	return NULL;
}

const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];

	if (bgx)
		return bgx->lmac[lmacid].mac;

	return NULL;
}

void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];

	if (!bgx)
		return;

	memcpy(bgx->lmac[lmacid].mac, mac, 6);
}

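/*
 * Note: despite the name, *bgx_count is returned as a bitmask of the BGX
 * blocks that were probed on this node, not as a simple count.
 */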
void bgx_get_count(int node, int *bgx_count)
{
	int i;
	struct bgx *bgx;

	*bgx_count = 0;
	for (i = 0; i < MAX_BGX_PER_NODE; i++) {
		bgx = bgx_vnic[node * MAX_BGX_PER_NODE + i];
		debug("bgx_vnic[%u]: %p\n", node * MAX_BGX_PER_NODE + i,
		      bgx);
		if (bgx)
			*bgx_count |= (1 << i);
	}
}

int bgx_get_lmac_count(int node, int bgx_idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
	if (bgx)
		return bgx->lmac_count;

	return 0;
}

void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
	u64 cfg;

	if (!bgx)
		return;

	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	if (enable)
		cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN;
	else
		cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
}

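/* Clear every DMAC CAM filter entry that was programmed for this LMAC. */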
static void bgx_flush_dmac_addrs(struct bgx *bgx, u64 lmac)
{
	u64 dmac = 0x00;
	u64 offset, addr;

	while (bgx->lmac[lmac].dmac > 0) {
		offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(dmac)) +
			 (lmac * MAX_DMAC_PER_LMAC * sizeof(dmac));
		addr = (uintptr_t)bgx->reg_base +
				BGX_CMR_RX_DMACX_CAM + offset;
		writeq(dmac, (void *)addr);
		bgx->lmac[lmac].dmac--;
	}
}

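/*
 * Put an LMAC into internal loopback: SGMII ports loop back at the GMP PCS,
 * every other mode loops back at the SPU.
 */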
void bgx_lmac_internal_loopback(int node, int bgx_idx,
				int lmac_idx, bool enable)
{
	struct bgx *bgx;
	struct lmac *lmac;
	u64 cfg;

	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
	if (!bgx)
		return;

	lmac = &bgx->lmac[lmac_idx];
	if (lmac->qlm_mode == QLM_MODE_SGMII) {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
		if (enable)
			cfg |= PCS_MRX_CTL_LOOPBACK1;
		else
			cfg &= ~PCS_MRX_CTL_LOOPBACK1;
		bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
	} else {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
		if (enable)
			cfg |= SPU_CTL_LOOPBACK;
		else
			cfg &= ~SPU_CTL_LOOPBACK;
		bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
	}
}

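/*
 * Map a BGX/LMAC pair to the QLM (or DLM) that serves it on CN81XX/CN83XX
 * and confirm that the lane module is actually configured for BGX use.
 * Returns the QLM number, or -1 if the serdes is unused or still in reset.
 */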
static int get_qlm_for_bgx(int node, int bgx_id, int index)
{
	int qlm = 0;
	u64 cfg;

	if (otx_is_soc(CN81XX)) {
		qlm = (bgx_id) ? 2 : 0;
		qlm += (index >= 2) ? 1 : 0;
	} else if (otx_is_soc(CN83XX)) {
		switch (bgx_id) {
		case 0:
			qlm = 2;
			break;
		case 1:
			qlm = 3;
			break;
		case 2:
			if (index >= 2)
				qlm = 6;
			else
				qlm = 5;
			break;
		case 3:
			qlm = 4;
			break;
		}
	}

	cfg = readq(GSERX_CFG(qlm)) & GSERX_CFG_BGX;
	debug("%s:qlm%d: cfg = %lld\n", __func__, qlm, cfg);

	if (cfg) {
		/* The QLM is unusable while its PHY is still held in reset */
		if (readq(GSERX_PHY_CTL(qlm)))
			return -1;
		return qlm;
	}
	return -1;
}

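/*
 * Bring up the GMP (SGMII/QSGMII/1000Base-X) side of an LMAC: size the TX
 * threshold and jabber check, take the PCS out of reset and power-down, and
 * set up autonegotiation according to the board configuration.
 */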
static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
{
	u64 cfg;
	struct lmac *lmac;

	lmac = &bgx->lmac[lmacid];

	debug("%s:bgx_id = %d, lmacid = %d\n", __func__, bgx->bgx_id, lmacid);

	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
	/* Max frame size for the jabber check */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);

	/* Disable frame alignment if preamble append is enabled */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
	if (cfg & 1)
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);

	/* Enable the LMAC */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* Reset the PCS and wait for the reset bit to self-clear */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
			 PCS_MRX_CTL_RESET, true)) {
		printf("BGX PCS reset not completed\n");
		return -1;
	}

	/* Power up the PCS; enable autonegotiation unless the board disables it */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
	cfg &= ~PCS_MRX_CTL_PWR_DN;

	if (bgx_board_info[bgx->bgx_id].phy_info[lmacid].autoneg_dis)
		cfg |= PCS_MRX_CTL_RST_AN;
	else
		cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);

	/* QSGMII: disable the PCS disparity check and skip the AN-complete wait */
	if (lmac->qlm_mode == QLM_MODE_QSGMII) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL);
		cfg &= ~PCS_MISCX_CTL_DISP_EN;
		bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, cfg);
		return 0;
	}

	if (lmac->is_1gx) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL);
		cfg |= PCS_MISC_CTL_MODE;
		bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, cfg);
	}

	if (lmac->qlm_mode == QLM_MODE_SGMII) {
		if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
				 PCS_MRX_STATUS_AN_CPT, false)) {
			printf("BGX AN_CPT not completed\n");
			return -1;
		}
	}

	return 0;
}

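/*
 * Propagate the speed/duplex that the PHY negotiated into the GMI port
 * configuration (port speed, slot time, burst size and sampling point),
 * with the LMAC briefly disabled while the registers are rewritten.
 */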
static int bgx_lmac_sgmii_set_link_speed(struct lmac *lmac)
{
	u64 prtx_cfg;
	u64 pcs_miscx_ctl;
	u64 cfg;
	struct bgx *bgx = lmac->bgx;
	unsigned int lmacid = lmac->lmacid;

	debug("%s: lmacid %d\n", __func__, lmac->lmacid);

	/* Disable the LMAC before touching the port configuration */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	prtx_cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_PRTX_CFG);
	pcs_miscx_ctl = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL);

	/* Update the GMX override and duplex according to the link state */
	if (lmac->link_up) {
		pcs_miscx_ctl &= ~PCS_MISC_CTL_GMX_ENO;
		prtx_cfg |= GMI_PORT_CFG_DUPLEX;
	} else {
		pcs_miscx_ctl |= PCS_MISC_CTL_GMX_ENO;
	}

	switch (lmac->last_speed) {
	case 10:
		prtx_cfg &= ~GMI_PORT_CFG_SPEED;
		prtx_cfg |= GMI_PORT_CFG_SPEED_MSB;
		prtx_cfg &= ~GMI_PORT_CFG_SLOT_TIME;
		pcs_miscx_ctl |= 50;	/* sampling point */
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SLOT, 0x40);
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 100:
		prtx_cfg &= ~GMI_PORT_CFG_SPEED;
		prtx_cfg &= ~GMI_PORT_CFG_SPEED_MSB;
		prtx_cfg &= ~GMI_PORT_CFG_SLOT_TIME;
		pcs_miscx_ctl |= 0x5;	/* sampling point */
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SLOT, 0x40);
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 1000:
		prtx_cfg |= GMI_PORT_CFG_SPEED;
		prtx_cfg &= ~GMI_PORT_CFG_SPEED_MSB;
		prtx_cfg |= GMI_PORT_CFG_SLOT_TIME;
		pcs_miscx_ctl |= 0x1;	/* sampling point */
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SLOT, 0x200);
		if (lmac->last_duplex)
			bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		else
			bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_BURST,
				      0x2000);
		break;
	default:
		break;
	}

	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, pcs_miscx_ctl);
	bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_PRTX_CFG, prtx_cfg);

	/* Read back to flush the writes before re-enabling the LMAC */
	bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_PRTX_CFG);

	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg |= CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	return 0;
}

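/*
 * Bring up the SPU/SMU (XAUI/RXAUI/XFI/XLAUI/10G-KR/40G-KR4) side of an
 * LMAC: reset the SPU, optionally enable BASE-R link training, disable FEC,
 * program the autonegotiation advertisement and configure the SMU TX/RX path.
 */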
static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
{
	u64 cfg;
	struct lmac *lmac;

	lmac = &bgx->lmac[lmacid];

	/* Reset the SPU and wait for the reset bit to self-clear */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
		printf("BGX SPU reset not completed\n");
		return -1;
	}

	/* Disable the LMAC while it is being configured */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);

	/* Keep the receiver disabled until the link has been checked */
	if (lmac->qlm_mode != QLM_MODE_RXAUI)
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
	else
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
			       SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP);

	/* Clear pending interrupts by writing them back */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);

	if (lmac->use_training) {
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
		/* Enable BASE-R link training */
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN);
	}

	/* Append FCS on transmit */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);

	/* Disable forward error correction */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
	cfg &= ~SPU_FEC_CTL_FEC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);

	/* Autonegotiation is only used for the KR (link training) modes */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
	cfg = cfg & ~(SPU_AN_CTL_XNP_EN);
	if (lmac->use_training)
		cfg = cfg | (SPU_AN_CTL_AN_EN);
	else
		cfg = cfg & ~(SPU_AN_CTL_AN_EN);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
	/* Clear the tech-ability bits, then advertise the configured KR mode */
	cfg &= ~((0xfULL << 22) | (1ULL << 12));
	if (lmac->qlm_mode == QLM_MODE_10G_KR)
		cfg |= (1 << 23);
	else if (lmac->qlm_mode == QLM_MODE_40G_KR4)
		cfg |= (1 << 24);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);

	cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
	if (lmac->use_training)
		cfg |= SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
	else
		cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
	bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);

	/* Enable the LMAC and take the SPU out of low-power mode */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
	cfg &= ~SPU_CTL_LOW_POWER;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
	cfg &= ~SMU_TX_CTL_UNI_EN;
	cfg |= SMU_TX_CTL_DIC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);

	/* TX FIFO threshold */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
	/* Max packet size for the jabber check */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);

	debug("xaui_init: lmacid = %d, qlm = %d, qlm_mode = %d\n",
	      lmacid, lmac->qlm, lmac->qlm_mode);

	/* RXAUI ports typically sit behind an external PHY; set up its XS side */
	if (lmac->qlm_mode == QLM_MODE_RXAUI) {
		char mii_name[20];
		struct phy_info *phy;

		phy = &bgx_board_info[bgx->bgx_id].phy_info[lmacid];
		snprintf(mii_name, sizeof(mii_name), "smi%d", phy->mdio_bus);

		debug("mii_name: %s\n", mii_name);
		lmac->mii_bus = miiphy_get_dev_by_name(mii_name);
		lmac->phy_addr = phy->phy_addr;
		rxaui_phy_xs_init(lmac->mii_bus, lmac->phy_addr);
	}

	return 0;
}

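/* CN81XX serdes are 2-lane DLMs; on CN83XX only QLM5/QLM6 are 2-lane */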
static int get_qlm_lanes(int qlm)
{
	if (otx_is_soc(CN81XX))
		return 2;
	else if (otx_is_soc(CN83XX))
		return (qlm >= 5) ? 2 : 4;
	else
		return -1;
}

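/*
 * Run manual RX equalization on one lane (or on all lanes when lane == -1)
 * of a QLM/DLM: wait for CDR lock, trigger the equalizer in software mode,
 * then check that each lane reports a valid equalization status.
 */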
int __rx_equalization(int qlm, int lane)
{
	int max_lanes = get_qlm_lanes(qlm);
	int l;
	int fail = 0;

	/*
	 * Before adapting the receiver, the CDR must be locked on every
	 * lane that is about to be trained.
	 */
	if (lane == -1) {
		if (gser_poll_reg(GSER_RX_EIE_DETSTS(qlm), GSER_CDRLOCK, 0xf,
				  (1 << max_lanes) - 1, 100)) {
			debug("ERROR: CDR Lock not detected");
			debug(" on DLM%d for %d lanes\n", qlm, max_lanes);
			return -1;
		}
	} else {
		if (gser_poll_reg(GSER_RX_EIE_DETSTS(qlm), GSER_CDRLOCK,
				  (0xf & (1 << lane)), (1 << lane), 100)) {
			debug("ERROR: DLM%d: CDR Lock not detected", qlm);
			debug(" on lane %d\n", lane);
			return -1;
		}
	}

	for (l = 0; l < max_lanes; l++) {
		u64 rctl, reer;

		if (lane != -1 && lane != l)
			continue;

		/* Enable software-controlled RX equalization */
		rctl = readq(GSER_BR_RXX_CTL(qlm, l));
		rctl |= GSER_BR_RXX_CTL_RXT_SWM;
		writeq(rctl, GSER_BR_RXX_CTL(qlm, l));

		/* Clear the valid bit and request equalization */
		reer = readq(GSER_BR_RXX_EER(qlm, l));
		reer &= ~GSER_BR_RXX_EER_RXT_ESV;
		reer |= GSER_BR_RXX_EER_RXT_EER;
		writeq(reer, GSER_BR_RXX_EER(qlm, l));
	}

	/* Wait for the equalization status to become valid on each lane */
	for (l = 0; l < max_lanes; l++) {
		u64 rctl, reer;

		if (lane != -1 && lane != l)
			continue;

		gser_poll_reg(GSER_BR_RXX_EER(qlm, l), EER_RXT_ESV, 1, 1, 200);
		reer = readq(GSER_BR_RXX_EER(qlm, l));

		/* Switch back to hardware-controlled equalization */
		rctl = readq(GSER_BR_RXX_CTL(qlm, l));
		rctl &= ~GSER_BR_RXX_CTL_RXT_SWM;
		writeq(rctl, GSER_BR_RXX_CTL(qlm, l));

		if (reer & GSER_BR_RXX_EER_RXT_ESV) {
			debug("Rx equalization completed on DLM%d", qlm);
			debug(" lane %d rxt_esm = 0x%llx\n", l, (reer & 0x3fff));
		} else {
			debug("Rx equalization timedout on DLM%d", qlm);
			debug(" lane %d\n", l);
			fail = 1;
		}
	}

	return (fail) ? -1 : 0;
}

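/*
 * Check (and if needed re-train) a non-SGMII link: restart autonegotiation
 * or BASE-R training when required, run RX equalization for the lanes in
 * use, and then verify SPU/SMU block lock, alignment and link status.
 * Returns 0 when the link is usable, -1 if another poll is needed.
 */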
static int bgx_xaui_check_link(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	int lmacid = lmac->lmacid;
	int lmac_type = lmac->lmac_type;
	u64 cfg;

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);

	/* If autonegotiation is enabled, make sure it has completed */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
	if (cfg & SPU_AN_CTL_AN_EN) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_STATUS);
		if (!(cfg & SPU_AN_STS_AN_COMPLETE)) {
			debug("restarting auto-neg\n");
			bgx_reg_modify(bgx, lmacid, BGX_SPUX_AN_CONTROL,
				       SPU_AN_CTL_AN_RESTART);
			return -1;
		}
	}

	debug("%s link use_training %d\n", __func__, lmac->use_training);
	if (lmac->use_training) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
		if (!(cfg & (1ull << 13))) {
			debug("waiting for link training\n");
			/* Clear the training interrupts and kick off training again */
			cfg = (1ull << 13) | (1ull << 14);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);

			udelay(2000);

			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
			cfg |= (1ull << 0);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
			return -1;
		}
	}

	/*
	 * Without link training, RX equalization has to be requested
	 * explicitly for the serdes lanes backing this LMAC.
	 */
	if (!lmac->use_training) {
		int qlm;
		bool use_dlm = 0;

		if (otx_is_soc(CN81XX) || (otx_is_soc(CN83XX) &&
					   bgx->bgx_id == 2))
			use_dlm = 1;
		switch (lmac->lmac_type) {
		default:
		case BGX_MODE_SGMII:
		case BGX_MODE_RGMII:
		case BGX_MODE_XAUI:
			/* Nothing to do here */
			break;
		case BGX_MODE_XLAUI:
			if (use_dlm) {
				if (__rx_equalization(lmac->qlm, -1) ||
				    __rx_equalization(lmac->qlm + 1, -1)) {
					printf("BGX%d:%d", bgx->bgx_id, lmacid);
					printf(" Waiting for RX Equalization");
					printf(" on DLM%d/DLM%d\n",
					       lmac->qlm, lmac->qlm + 1);
					return -1;
				}
			} else {
				if (__rx_equalization(lmac->qlm, -1)) {
					printf("BGX%d:%d", bgx->bgx_id, lmacid);
					printf(" Waiting for RX Equalization");
					printf(" on QLM%d\n", lmac->qlm);
					return -1;
				}
			}
			break;
		case BGX_MODE_RXAUI:
			/* RXAUI uses lanes 0 and 1 of the QLM */
			qlm = lmac->qlm;
			if (__rx_equalization(qlm, 0)) {
				printf("BGX%d:%d", bgx->bgx_id, lmacid);
				printf(" Waiting for RX Equalization");
				printf(" on QLM%d, Lane0\n", qlm);
				return -1;
			}
			if (__rx_equalization(qlm, 1)) {
				printf("BGX%d:%d", bgx->bgx_id, lmacid);
				printf(" Waiting for RX Equalization");
				printf(" on QLM%d, Lane1\n", qlm);
				return -1;
			}
			break;
		case BGX_MODE_XFI:
		{
			int lid;
			bool altpkg = otx_is_altpkg();

			if (bgx->bgx_id == 0 && altpkg && lmacid)
				lid = 0;
			else if ((lmacid >= 2) && use_dlm)
				lid = lmacid - 2;
			else
				lid = lmacid;

			if (__rx_equalization(lmac->qlm, lid)) {
				printf("BGX%d:%d", bgx->bgx_id, lid);
				printf(" Waiting for RX Equalization");
				printf(" on QLM%d\n", lmac->qlm);
			}
		}
		break;
		}
	}

	/* Wait for the SPU to come out of reset */
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
		printf("BGX SPU reset not completed\n");
		return -1;
	}

	if (lmac_type == 3 || lmac_type == 4) {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
				 SPU_BR_STATUS_BLK_LOCK, false)) {
			printf("SPU_BR_STATUS_BLK_LOCK not completed\n");
			return -1;
		}
	} else {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
				 SPU_BX_STATUS_RX_ALIGN, false)) {
			printf("SPU_BX_STATUS_RX_ALIGN not completed\n");
			return -1;
		}
	}

	/* Clear the latched receive-fault bit and re-check it */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
		printf("Receive fault, retry training\n");
		if (lmac->use_training) {
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
			if (!(cfg & (1ull << 13))) {
				cfg = (1ull << 13) | (1ull << 14);
				bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
				cfg = bgx_reg_read(bgx, lmacid,
						   BGX_SPUX_BR_PMD_CRTL);
				cfg |= (1ull << 0);
				bgx_reg_write(bgx, lmacid,
					      BGX_SPUX_BR_PMD_CRTL, cfg);
				return -1;
			}
		}
		return -1;
	}

	/* Wait for the SMU RX link to come up */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
			 SMU_RX_CTL_STATUS, true)) {
		printf("SMU RX link not okay\n");
		return -1;
	}

	/* Both SMU RX and TX paths must be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
		printf("SMU RX not idle\n");
		return -1;
	}

	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) {
		printf("SMU TX not idle\n");
		return -1;
	}

	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
		printf("Receive fault\n");
		return -1;
	}

	/* Receive link is latching low; force it high and then verify it */
	if (!(bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS1) &
	      SPU_STATUS1_RCV_LNK))
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1,
			       SPU_STATUS1_RCV_LNK);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
			 SPU_STATUS1_RCV_LNK, false)) {
		printf("SPU receive link down\n");
		return -1;
	}

	/* Link is good: re-enable the SPU receiver */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
	cfg &= ~SPU_MISC_CTL_RX_DIS;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
	return 0;
}

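/*
 * One-time LMAC bring-up: run the SGMII or XAUI init path, enable FCS/pad
 * append on transmit, set the minimum packet size, and finally enable the
 * LMAC with packet RX/TX turned on.
 */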
static int bgx_lmac_enable(struct bgx *bgx, int8_t lmacid)
{
	struct lmac *lmac;
	u64 cfg;

	lmac = &bgx->lmac[lmacid];

	debug("%s: lmac: %p, lmacid = %d\n", __func__, lmac, lmacid);

	if (lmac->qlm_mode == QLM_MODE_SGMII ||
	    lmac->qlm_mode == QLM_MODE_RGMII ||
	    lmac->qlm_mode == QLM_MODE_QSGMII) {
		if (bgx_lmac_sgmii_init(bgx, lmacid)) {
			debug("bgx_lmac_sgmii_init failed\n");
			return -1;
		}
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1));	/* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
	} else {
		if (bgx_lmac_xaui_init(bgx, lmacid, lmac->lmac_type))
			return -1;
		cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1));	/* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
	}

	/* Enable the LMAC with both packet RX and TX */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG,
		       CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN);

	return 0;
}

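/*
 * Poll the link for one LMAC. On first use this also finishes the deferred
 * LMAC initialization. SGMII-class ports go through the PHY framework (or
 * are treated as a fixed 1 Gbps link when no PHY address is given); the
 * 10G/40G modes read link state from the SPU/SMU status registers.
 */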
int bgx_poll_for_link(int node, int bgx_idx, int lmacid)
{
	int ret;
	struct lmac *lmac = bgx_get_lmac(node, bgx_idx, lmacid);
	char mii_name[10];
	struct phy_info *phy;

	if (!lmac) {
		printf("LMAC %d/%d/%d is disabled or doesn't exist\n",
		       node, bgx_idx, lmacid);
		return 0;
	}

	debug("%s: %d, lmac: %d/%d/%d %p\n",
	      __FILE__, __LINE__,
	      node, bgx_idx, lmacid, lmac);
	if (lmac->init_pend) {
		ret = bgx_lmac_enable(lmac->bgx, lmacid);
		if (ret < 0) {
			printf("BGX%d LMAC%d lmac_enable failed\n", bgx_idx,
			       lmacid);
			return ret;
		}
		lmac->init_pend = 0;
		mdelay(100);
	}
	if (lmac->qlm_mode == QLM_MODE_SGMII ||
	    lmac->qlm_mode == QLM_MODE_RGMII ||
	    lmac->qlm_mode == QLM_MODE_QSGMII) {
		/* No PHY on this port: assume a fixed 1 Gbps full-duplex link */
		if (bgx_board_info[bgx_idx].phy_info[lmacid].phy_addr == -1) {
			lmac->link_up = 1;
			lmac->last_speed = 1000;
			lmac->last_duplex = 1;
			printf("BGX%d:LMAC %u link up\n", bgx_idx, lmacid);
			return lmac->link_up;
		}
		snprintf(mii_name, sizeof(mii_name), "smi%d",
			 bgx_board_info[bgx_idx].phy_info[lmacid].mdio_bus);

		debug("mii_name: %s\n", mii_name);

		lmac->mii_bus = miiphy_get_dev_by_name(mii_name);
		phy = &bgx_board_info[bgx_idx].phy_info[lmacid];
		lmac->phy_addr = phy->phy_addr;

		debug("lmac->mii_bus: %p\n", lmac->mii_bus);
		if (!lmac->mii_bus) {
			printf("MDIO device %s not found\n", mii_name);
			ret = -ENODEV;
			return ret;
		}

		lmac->phydev = phy_connect(lmac->mii_bus, lmac->phy_addr,
					   lmac->dev,
					   if_mode[lmac->qlm_mode]);

		if (!lmac->phydev) {
			printf("%s: No PHY device\n", __func__);
			return -1;
		}

		ret = phy_config(lmac->phydev);
		if (ret) {
			printf("%s: Could not initialize PHY %s\n",
			       __func__, lmac->phydev->dev->name);
			return ret;
		}

		ret = phy_startup(lmac->phydev);
		debug("%s: %d\n", __FILE__, __LINE__);
		if (ret) {
			printf("%s: Could not start PHY %s\n",
			       __func__, lmac->phydev->dev->name);
		}

#ifdef OCTEONTX_XCV
		if (lmac->qlm_mode == QLM_MODE_RGMII)
			xcv_setup_link(lmac->phydev->link, lmac->phydev->speed);
#endif

		lmac->link_up = lmac->phydev->link;
		lmac->last_speed = lmac->phydev->speed;
		lmac->last_duplex = lmac->phydev->duplex;

		debug("%s qlm_mode %d phy link status 0x%x,last speed 0x%x,",
		      __func__, lmac->qlm_mode, lmac->link_up,
		      lmac->last_speed);
		debug(" duplex 0x%x\n", lmac->last_duplex);

		if (lmac->qlm_mode != QLM_MODE_RGMII)
			bgx_lmac_sgmii_set_link_speed(lmac);

	} else {
		u64 status1;
		u64 tx_ctl;
		u64 rx_ctl;

		status1 = bgx_reg_read(lmac->bgx, lmac->lmacid,
				       BGX_SPUX_STATUS1);
		tx_ctl = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_TX_CTL);
		rx_ctl = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);

		debug("BGX%d LMAC%d BGX_SPUX_STATUS2: %lx\n", bgx_idx, lmacid,
		      (unsigned long)bgx_reg_read(lmac->bgx, lmac->lmacid,
						  BGX_SPUX_STATUS2));
		debug("BGX%d LMAC%d BGX_SPUX_STATUS1: %lx\n", bgx_idx, lmacid,
		      (unsigned long)bgx_reg_read(lmac->bgx, lmac->lmacid,
						  BGX_SPUX_STATUS1));
		debug("BGX%d LMAC%d BGX_SMUX_RX_CTL: %lx\n", bgx_idx, lmacid,
		      (unsigned long)bgx_reg_read(lmac->bgx, lmac->lmacid,
						  BGX_SMUX_RX_CTL));
		debug("BGX%d LMAC%d BGX_SMUX_TX_CTL: %lx\n", bgx_idx, lmacid,
		      (unsigned long)bgx_reg_read(lmac->bgx, lmac->lmacid,
						  BGX_SMUX_TX_CTL));

		/* Link is up only if the SPU sees receive link and SMU reports no faults */
		if ((status1 & SPU_STATUS1_RCV_LNK) &&
		    ((tx_ctl & SMU_TX_CTL_LNK_STATUS) == 0) &&
		    ((rx_ctl & SMU_RX_CTL_STATUS) == 0)) {
			lmac->link_up = 1;
			if (lmac->lmac_type == 4)
				lmac->last_speed = 40000;
			else
				lmac->last_speed = 10000;
			lmac->last_duplex = 1;
		} else {
			lmac->link_up = 0;
			lmac->last_speed = 0;
			lmac->last_duplex = 0;
			return bgx_xaui_check_link(lmac);
		}

		lmac->last_link = lmac->link_up;
	}

	printf("BGX%d:LMAC %u link %s\n", bgx_idx, lmacid,
	       (lmac->link_up) ? "up" : "down");

	return lmac->link_up;
}

void bgx_lmac_disable(struct bgx *bgx, uint8_t lmacid)
{
	struct lmac *lmac;
	u64 cmrx_cfg;

	lmac = &bgx->lmac[lmacid];

	/* Disable the LMAC and drop any programmed DMAC CAM filters */
	cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cmrx_cfg &= ~(1 << 15);
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
	bgx_flush_dmac_addrs(bgx, lmacid);

	if (lmac->phydev)
		phy_shutdown(lmac->phydev);

	lmac->phydev = NULL;
}

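/*
 * Discover and program every LMAC on this BGX: derive the LMAC type and
 * lane-to-serdes mapping from the QLM mode, write them to CMRX_CFG, and
 * then set up the common CMR state (LMAC count, FCS stripping, channel
 * masks, DMAC CAM and steering defaults).
 */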
static void bgx_init_hw(struct bgx *bgx)
{
	struct lmac *lmac;
	int i, lmacid, count = 0, inc = 0;
	char buf[40];
	static int qsgmii_configured;

	for (lmacid = 0; lmacid < MAX_LMAC_PER_BGX; lmacid++) {
		struct lmac *tlmac;

		lmac = &bgx->lmac[lmacid];
		debug("%s: lmacid = %d, qlm = %d, mode = %d\n",
		      __func__, lmacid, lmac->qlm, lmac->qlm_mode);

		/* If no QLM is assigned to this LMAC, skip it */
		if (lmac->qlm == -1)
			continue;

		switch (lmac->qlm_mode) {
		case QLM_MODE_SGMII:
		{
			/* On the alternate package, BGX0 only uses even LMAC indices */
			if (bgx->bgx_id == 0 && otx_is_altpkg()) {
				if (lmacid % 2)
					continue;
			}
			lmac->lane_to_sds = lmacid;
			lmac->lmac_type = 0;
			snprintf(buf, sizeof(buf),
				 "BGX%d QLM%d LMAC%d mode: %s\n",
				 bgx->bgx_id, lmac->qlm, lmacid,
				 lmac->is_1gx ? "1000Base-X" : "SGMII");
			break;
		}
		case QLM_MODE_XAUI:
			if (lmacid != 0)
				continue;
			lmac->lmac_type = 1;
			lmac->lane_to_sds = 0xE4;
			snprintf(buf, sizeof(buf),
				 "BGX%d QLM%d LMAC%d mode: XAUI\n",
				 bgx->bgx_id, lmac->qlm, lmacid);
			break;
		case QLM_MODE_RXAUI:
			if (lmacid == 0) {
				lmac->lmac_type = 2;
				lmac->lane_to_sds = 0x4;
			} else if (lmacid == 1) {
				struct lmac *tlmac;

				tlmac = &bgx->lmac[2];
				if (tlmac->qlm_mode == QLM_MODE_RXAUI) {
					lmac->lmac_type = 2;
					lmac->lane_to_sds = 0xe;
					lmac->qlm = tlmac->qlm;
				}
			} else {
				continue;
			}
			snprintf(buf, sizeof(buf),
				 "BGX%d QLM%d LMAC%d mode: RXAUI\n",
				 bgx->bgx_id, lmac->qlm, lmacid);
			break;
		case QLM_MODE_XFI:
			/* On the alternate package, BGX0 only uses even LMAC indices */
			if (bgx->bgx_id == 0 && otx_is_altpkg()) {
				if (lmacid % 2)
					continue;
			}
			lmac->lane_to_sds = lmacid;
			lmac->lmac_type = 3;
			snprintf(buf, sizeof(buf),
				 "BGX%d QLM%d LMAC%d mode: XFI\n",
				 bgx->bgx_id, lmac->qlm, lmacid);
			break;
		case QLM_MODE_XLAUI:
			if (lmacid != 0)
				continue;
			lmac->lmac_type = 4;
			lmac->lane_to_sds = 0xE4;
			snprintf(buf, sizeof(buf),
				 "BGX%d QLM%d LMAC%d mode: XLAUI\n",
				 bgx->bgx_id, lmac->qlm, lmacid);
			break;
		case QLM_MODE_10G_KR:
			/* On the alternate package, BGX0 only uses even LMAC indices */
			if (bgx->bgx_id == 0 && otx_is_altpkg()) {
				if (lmacid % 2)
					continue;
			}
			lmac->lane_to_sds = lmacid;
			lmac->lmac_type = 3;
			lmac->use_training = 1;
			snprintf(buf, sizeof(buf),
				 "BGX%d QLM%d LMAC%d mode: 10G-KR\n",
				 bgx->bgx_id, lmac->qlm, lmacid);
			break;
		case QLM_MODE_40G_KR4:
			if (lmacid != 0)
				continue;
			lmac->lmac_type = 4;
			lmac->lane_to_sds = 0xE4;
			lmac->use_training = 1;
			snprintf(buf, sizeof(buf),
				 "BGX%d QLM%d LMAC%d mode: 40G-KR4\n",
				 bgx->bgx_id, lmac->qlm, lmacid);
			break;
		case QLM_MODE_RGMII:
			if (lmacid != 0)
				continue;
			lmac->lmac_type = 5;
			lmac->lane_to_sds = 0xE4;
			snprintf(buf, sizeof(buf),
				 "BGX%d LMAC%d mode: RGMII\n",
				 bgx->bgx_id, lmacid);
			break;
		case QLM_MODE_QSGMII:
			if (qsgmii_configured)
				continue;
			if (lmacid == 0 || lmacid == 2) {
				count = 4;
				printf("BGX%d QLM%d LMAC%d mode: QSGMII\n",
				       bgx->bgx_id, lmac->qlm, lmacid);
				for (i = 0; i < count; i++) {
					struct lmac *l;
					int type;

					l = &bgx->lmac[i];
					l->lmac_type = 6;
					type = l->lmac_type;
					l->qlm_mode = QLM_MODE_QSGMII;
					l->lane_to_sds = lmacid + i;
					if (is_bgx_port_valid(bgx->bgx_id, i))
						bgx_reg_write(bgx, i,
							      BGX_CMRX_CFG,
							      (type << 8) |
							      l->lane_to_sds);
				}
				qsgmii_configured = 1;
			}
			continue;
		default:
			continue;
		}

		/* Program the LMAC only if the board marks this port as present */
		if (is_bgx_port_valid(bgx->bgx_id, count) &&
		    lmac->qlm_mode != QLM_MODE_QSGMII) {
			int lmac_en = 0;
			int tmp, idx;

			tlmac = &bgx->lmac[count];
			tlmac->lmac_type = lmac->lmac_type;
			idx = bgx->bgx_id;
			tmp = count + inc;
			/*
			 * If this LMAC slot is disabled in the board table,
			 * look ahead for the next enabled one and offset the
			 * lane mapping accordingly.
			 */
			for (; tmp < MAX_LMAC_PER_BGX; inc++) {
				lmac_en = bgx_board_info[idx].lmac_enable[tmp];
				if (lmac_en)
					break;
				tmp = count + inc;
			}

			if (inc != 0 && inc < MAX_LMAC_PER_BGX &&
			    lmac_en && inc != count)
				tlmac->lane_to_sds =
					lmac->lane_to_sds + abs(inc - count);
			else
				tlmac->lane_to_sds = lmac->lane_to_sds;
			tlmac->qlm = lmac->qlm;
			tlmac->qlm_mode = lmac->qlm_mode;

			printf("%s", buf);
			/* Program the LMAC type and lane-to-serdes mapping */
			bgx_reg_write(bgx, count, BGX_CMRX_CFG,
				      (tlmac->lmac_type << 8) |
				      tlmac->lane_to_sds);

			if (tlmac->lmac_type == BGX_MODE_SGMII) {
				if (tlmac->is_1gx) {
					/* Select 1000Base-X PCS mode */
					bgx_reg_modify(bgx, count,
						       BGX_GMP_PCS_MISCX_CTL,
						       PCS_MISC_CTL_MODE);
				}

				if (!bgx_board_info[bgx->bgx_id].phy_info[lmacid].autoneg_dis) {
					/* Enable PCS autonegotiation unless the board disables it */
					bgx_reg_modify(bgx, count,
						       BGX_GMP_PCS_MRX_CTL,
						       PCS_MRX_CTL_AN_EN);
				}
			}

			count += 1;
		}
	}

	/* Reset the static flag so a later BGX can configure its own QSGMII group */
	qsgmii_configured = 0;

	printf("BGX%d LMACs: %d\n", bgx->bgx_id, count);
	bgx->lmac_count = count;
	bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, count);
	bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, count);

	bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
	if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
		printf("BGX%d BIST failed\n", bgx->bgx_id);

	/* Set the backpressure channel mask for each LMAC */
	for (i = 0; i < bgx->lmac_count; i++)
		bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
			       ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
			       (i * MAX_BGX_CHANS_PER_LMAC));

	/* Disable all MAC (DMAC CAM) filtering */
	for (i = 0; i < RX_DMAC_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);

	/* Disable all RX traffic steering rules */
	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
}

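/*
 * Read back the LMAC types programmed into CMRX_CFG (typically by earlier
 * boot firmware) and translate them, together with the GSER training
 * scratch value, into the qlm_mode used by the rest of the driver.
 */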
static void bgx_get_qlm_mode(struct bgx *bgx)
{
	struct lmac *lmac;
	int lmacid;

	/* Read the LMAC type and training enable for each LMAC */
	for (lmacid = 0; lmacid < MAX_LMAC_PER_BGX; lmacid++) {
		int lmac_type;
		int train_en;
		int index = 0;

		/* On 2-lane DLMs, the type is read from LMAC0 (lanes 0/1) or LMAC2 (lanes 2/3) */
		if (otx_is_soc(CN81XX) || (otx_is_soc(CN83XX) &&
					   bgx->bgx_id == 2))
			index = (lmacid < 2) ? 0 : 2;

		lmac = &bgx->lmac[lmacid];

		/* If no QLM is assigned to this LMAC, skip it */
		if (lmac->qlm == -1)
			continue;

		lmac_type = bgx_reg_read(bgx, index, BGX_CMRX_CFG);
		lmac->lmac_type = (lmac_type >> 8) & 0x07;
		debug("%s:%d:%d: lmac_type = %d, altpkg = %d\n", __func__,
		      bgx->bgx_id, lmacid, lmac->lmac_type, otx_is_altpkg());

		train_en = (readq(GSERX_SCRATCH(lmac->qlm))) & 0xf;
		lmac->is_1gx = bgx_reg_read(bgx, index, BGX_GMP_PCS_MISCX_CTL)
				& (PCS_MISC_CTL_MODE) ? true : false;

		switch (lmac->lmac_type) {
		case BGX_MODE_SGMII:
			if (bgx->is_rgx) {
				if (lmacid == 0) {
					lmac->qlm_mode = QLM_MODE_RGMII;
					debug("BGX%d LMAC%d mode: RGMII\n",
					      bgx->bgx_id, lmacid);
				}
				continue;
			} else {
				if (bgx->bgx_id == 0 && otx_is_altpkg()) {
					if (lmacid % 2)
						continue;
				}
				lmac->qlm_mode = QLM_MODE_SGMII;
				debug("BGX%d QLM%d LMAC%d mode: %s\n",
				      bgx->bgx_id, lmac->qlm, lmacid,
				      lmac->is_1gx ? "1000Base-X" : "SGMII");
			}
			break;
		case BGX_MODE_XAUI:
			if (bgx->bgx_id == 0 && otx_is_altpkg())
				continue;
			lmac->qlm_mode = QLM_MODE_XAUI;
			if (lmacid != 0)
				continue;
			debug("BGX%d QLM%d LMAC%d mode: XAUI\n",
			      bgx->bgx_id, lmac->qlm, lmacid);
			break;
		case BGX_MODE_RXAUI:
			if (bgx->bgx_id == 0 && otx_is_altpkg())
				continue;
			lmac->qlm_mode = QLM_MODE_RXAUI;
			if (index == lmacid) {
				debug("BGX%d QLM%d LMAC%d mode: RXAUI\n",
				      bgx->bgx_id, lmac->qlm, (index ? 1 : 0));
			}
			break;
		case BGX_MODE_XFI:
			if (bgx->bgx_id == 0 && otx_is_altpkg()) {
				if (lmacid % 2)
					continue;
			}
			/* Training enabled on this lane means 10G-KR rather than XFI */
			if ((lmacid < 2 && (train_en & (1 << lmacid))) ||
			    (train_en & (1 << (lmacid - 2)))) {
				lmac->qlm_mode = QLM_MODE_10G_KR;
				debug("BGX%d QLM%d LMAC%d mode: 10G_KR\n",
				      bgx->bgx_id, lmac->qlm, lmacid);
			} else {
				lmac->qlm_mode = QLM_MODE_XFI;
				debug("BGX%d QLM%d LMAC%d mode: XFI\n",
				      bgx->bgx_id, lmac->qlm, lmacid);
			}
			break;
		case BGX_MODE_XLAUI:
			if (bgx->bgx_id == 0 && otx_is_altpkg())
				continue;
			if (train_en) {
				lmac->qlm_mode = QLM_MODE_40G_KR4;
				if (lmacid != 0)
					break;
				debug("BGX%d QLM%d LMAC%d mode: 40G_KR4\n",
				      bgx->bgx_id, lmac->qlm, lmacid);
			} else {
				lmac->qlm_mode = QLM_MODE_XLAUI;
				if (lmacid != 0)
					break;
				debug("BGX%d QLM%d LMAC%d mode: XLAUI\n",
				      bgx->bgx_id, lmac->qlm, lmacid);
			}
			break;
		case BGX_MODE_QSGMII:
			if (otx_is_soc(CN83XX) && lmacid == 2 &&
			    bgx->bgx_id != 2) {
				continue;
			}

			if (lmacid == 0 || lmacid == 2) {
				lmac->qlm_mode = QLM_MODE_QSGMII;
				debug("BGX%d QLM%d LMAC%d mode: QSGMII\n",
				      bgx->bgx_id, lmac->qlm, lmacid);
			}
			break;
		default:
			break;
		}
	}
}

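/*
 * Called by the board code to hand over the per-LMAC PHY address, MDIO bus,
 * autoneg and enable/present flags (typically parsed from the board device
 * tree).
 */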
void bgx_set_board_info(int bgx_id, int *mdio_bus,
			int *phy_addr, bool *autoneg_dis, bool *lmac_reg,
			bool *lmac_enable)
{
	unsigned int i;

	for (i = 0; i < MAX_LMAC_PER_BGX; i++) {
		bgx_board_info[bgx_id].phy_info[i].phy_addr = phy_addr[i];
		bgx_board_info[bgx_id].phy_info[i].mdio_bus = mdio_bus[i];
		bgx_board_info[bgx_id].phy_info[i].autoneg_dis = autoneg_dis[i];
		bgx_board_info[bgx_id].lmac_reg[i] = lmac_reg[i];
		bgx_board_info[bgx_id].lmac_enable[i] = lmac_enable[i];
		debug("%s bgx_id %d lmac %d\n", __func__, bgx_id, i);
		debug("phy addr %x mdio bus %d autoneg_dis %d lmac_reg %d\n",
		      bgx_board_info[bgx_id].phy_info[i].phy_addr,
		      bgx_board_info[bgx_id].phy_info[i].mdio_bus,
		      bgx_board_info[bgx_id].phy_info[i].autoneg_dis,
		      bgx_board_info[bgx_id].lmac_reg[i]);
		debug("lmac_enable = %x\n",
		      bgx_board_info[bgx_id].lmac_enable[i]);
	}
}

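/*
 * Driver remove: stop packet RX/TX on every LMAC, power the SGMII/QSGMII
 * PCS back down and shut down any attached PHY before handing off to the OS.
 */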
int octeontx_bgx_remove(struct udevice *dev)
{
	int lmacid;
	u64 cfg;
	int count = MAX_LMAC_PER_BGX;
	struct bgx *bgx = dev_get_priv(dev);

	if (!bgx->reg_base)
		return 0;

	if (bgx->is_rgx)
		count = 1;

	for (lmacid = 0; lmacid < count; lmacid++) {
		struct lmac *lmac;

		lmac = &bgx->lmac[lmacid];
		cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
		cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
		bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

		/* Power down the PCS for SGMII/QSGMII ports */
		if (lmac->lmac_type == BGX_MODE_SGMII ||
		    lmac->lmac_type == BGX_MODE_QSGMII) {
			cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
			cfg |= PCS_MRX_CTL_PWR_DN;
			bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
		}

		debug("%s disabling bgx%d lmacid%d\n", __func__, bgx->bgx_id,
		      lmacid);
		bgx_lmac_disable(bgx, lmacid);
	}
	return 0;
}

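/*
 * Driver probe: map BAR0, detect the RGX block, work out which QLM/DLM
 * feeds each LMAC, then derive the QLM modes and run the common hardware
 * init. Actual LMAC bring-up is deferred until the first
 * bgx_poll_for_link() call.
 */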
int octeontx_bgx_probe(struct udevice *dev)
{
	struct bgx *bgx = dev_get_priv(dev);
	u8 lmac = 0;
	int qlm[4] = {-1, -1, -1, -1};
	int bgx_idx, node;
	int inc = 1;

	bgx->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
				       PCI_REGION_MEM);
	if (!bgx->reg_base) {
		debug("No PCI region found\n");
		return 0;
	}

#ifdef OCTEONTX_XCV
	/* The RGX block is distinguished from BGX by bits [27:24] of its BAR address */
	if ((((uintptr_t)bgx->reg_base >> 24) & 0xf) == 0x8) {
		bgx->bgx_id = 2;
		bgx->is_rgx = true;
		for (lmac = 0; lmac < MAX_LMAC_PER_BGX; lmac++) {
			if (lmac == 0) {
				bgx->lmac[lmac].lmacid = 0;
				bgx->lmac[lmac].qlm = 0;
			} else {
				bgx->lmac[lmac].qlm = -1;
			}
		}
		xcv_init_hw();
		goto skip_qlm_config;
	}
#endif

	node = node_id(bgx->reg_base);
	bgx_idx = ((uintptr_t)bgx->reg_base >> 24) & 3;
	bgx->bgx_id = (node * MAX_BGX_PER_NODE) + bgx_idx;
	/* On CN81XX (and BGX2 of CN83XX) each 2-lane DLM serves a pair of LMACs */
	if (otx_is_soc(CN81XX))
		inc = 2;
	else if (otx_is_soc(CN83XX) && (bgx_idx == 2))
		inc = 2;

	for (lmac = 0; lmac < MAX_LMAC_PER_BGX; lmac += inc) {
		/* BGX3 on CN83XX only supports two LMACs */
		if (otx_is_soc(CN83XX) && bgx_idx == 3 && lmac >= 2)
			continue;
		qlm[lmac + 0] = get_qlm_for_bgx(node, bgx_idx, lmac);
		/* Both LMACs of a DLM share the same QLM number */
		if (inc == 2)
			qlm[lmac + 1] = qlm[lmac];
		debug("qlm[%d] = %d\n", lmac, qlm[lmac]);
	}

	/* On CN81XX a BGX is backed by two DLMs; bail out only if neither is wired to BGX */
	if (otx_is_soc(CN81XX))
		if ((qlm[0] == -1) && (qlm[2] == -1))
			return -ENODEV;

	for (lmac = 0; lmac < MAX_LMAC_PER_BGX; lmac++) {
		bgx->lmac[lmac].qlm = qlm[lmac];
		bgx->lmac[lmac].lmacid = lmac;
	}

#ifdef OCTEONTX_XCV
skip_qlm_config:
#endif
	bgx_vnic[bgx->bgx_id] = bgx;
	bgx_get_qlm_mode(bgx);
	debug("bgx_vnic[%u]: %p\n", bgx->bgx_id, bgx);

	bgx_init_hw(bgx);

	/* Defer the per-LMAC bring-up until the link is first polled */
	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
		struct lmac *tlmac = &bgx->lmac[lmac];

		tlmac->dev = dev;
		tlmac->init_pend = 1;
		tlmac->bgx = bgx;
	}

	return 0;
}

U_BOOT_DRIVER(octeontx_bgx) = {
	.name	= "octeontx_bgx",
	.id	= UCLASS_MISC,
	.probe	= octeontx_bgx_probe,
	.remove	= octeontx_bgx_remove,
	.priv_auto_alloc_size = sizeof(struct bgx),
	.flags	= DM_FLAG_OS_PREPARE,
};

static struct pci_device_id octeontx_bgx_supported[] = {
	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_BGX) },
	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_RGX) },
	{}
};

U_BOOT_PCI_DEVICE(octeontx_bgx, octeontx_bgx_supported);