1
2
3
4
5
6#include <dm.h>
7#include <time.h>
8#include <linux/delay.h>
9
10#include <mach/cvmx-regs.h>
11#include <mach/octeon-model.h>
12#include <mach/cvmx-fuse.h>
13#include <mach/cvmx-qlm.h>
14#include <mach/octeon_qlm.h>
15#include <mach/cvmx-pcie.h>
16
17#include <mach/cvmx-bgxx-defs.h>
18#include <mach/cvmx-ciu-defs.h>
19#include <mach/cvmx-gmxx-defs.h>
20#include <mach/cvmx-gserx-defs.h>
21#include <mach/cvmx-mio-defs.h>
22#include <mach/cvmx-pciercx-defs.h>
23#include <mach/cvmx-pemx-defs.h>
24#include <mach/cvmx-pexp-defs.h>
25#include <mach/cvmx-rst-defs.h>
26#include <mach/cvmx-sata-defs.h>
27#include <mach/cvmx-sli-defs.h>
28#include <mach/cvmx-sriomaintx-defs.h>
29#include <mach/cvmx-sriox-defs.h>
30
31DECLARE_GLOBAL_DATA_PTR;
32
33
34#define R_2_5G_REFCLK100 0x0
35
36#define R_5G_REFCLK100 0x1
37
38#define R_8G_REFCLK100 0x2
39
40#define R_125G_REFCLK15625_KX 0x3
41
42#define R_3125G_REFCLK15625_XAUI 0x4
43
44#define R_103125G_REFCLK15625_KR 0x5
45
46#define R_125G_REFCLK15625_SGMII 0x6
47
48#define R_5G_REFCLK15625_QSGMII 0x7
49
50#define R_625G_REFCLK15625_RXAUI 0x8
51
52#define R_2_5G_REFCLK125 0x9
53
54#define R_5G_REFCLK125 0xa
55
56#define R_8G_REFCLK125 0xb
57
58#define R_NUM_LANE_MODES 0xc
59
/**
 * Check whether a QLM's measured reference clock matches an expected rate.
 *
 * The measured clock is accepted if it lies within +/-10% of the
 * expected rate.
 *
 * @param qlm            QLM whose reference clock is measured
 * @param reference_mhz  expected reference clock in MHz (e.g. 100, 125, 156)
 *
 * @return non-zero if the measured clock is within tolerance, 0 otherwise
 */
int cvmx_qlm_is_ref_clock(int qlm, int reference_mhz)
{
	int measured_mhz = cvmx_qlm_measure_clock(qlm) / 1000000;
	int tolerance = reference_mhz / 10;

	return (measured_mhz >= reference_mhz - tolerance) &&
	       (measured_mhz <= reference_mhz + tolerance);
}
68
/**
 * Map a (reference clock, baud rate) pair to the MIO_QLMX_CFG[qlm_spd]
 * encoding.
 *
 * The reference clock candidates are probed in order (100, 125, 156,
 * then 161 MHz) and the requested speed is looked up within the first
 * matching clock's table.
 *
 * @param qlm    QLM whose reference clock is probed
 * @param speed  baud rate in Mbaud (e.g. 1250, 2500, 3125, 5000, 6250)
 *
 * @return qlm_spd encoding, or 0xf if the combination is unsupported
 */
static int __get_qlm_spd(int qlm, int speed)
{
	if (cvmx_qlm_is_ref_clock(qlm, 100)) {
		switch (speed) {
		case 1250:
			return 0x3;
		case 2500:
			return 0x2;
		case 5000:
			return 0x0;
		default:
			return 0xf;
		}
	}

	if (cvmx_qlm_is_ref_clock(qlm, 125)) {
		switch (speed) {
		case 1250:
			return 0xa;
		case 2500:
			return 0x9;
		case 3125:
			return 0x8;
		case 5000:
			return 0x6;
		case 6250:
			return 0x5;
		default:
			return 0xf;
		}
	}

	if (cvmx_qlm_is_ref_clock(qlm, 156)) {
		switch (speed) {
		case 1250:
			return 0x4;
		case 2500:
			return 0x7;
		case 3125:
			return 0xe;
		case 3750:
			return 0xd;
		case 5000:
			return 0xb;
		case 6250:
			return 0xc;
		default:
			return 0xf;
		}
	}

	if (cvmx_qlm_is_ref_clock(qlm, 161) && speed == 6316)
		return 0xc;

	/* No supported reference clock / speed combination found */
	return 0xf;
}
116
/**
 * Configure a CN61xx PCIe port as either root complex or endpoint by
 * sequencing the CIU soft reset and MIO reset-control registers.
 *
 * @param pcie_port     PCIe port to configure (0 or 1)
 * @param root_complex  non-zero: configure the port as root complex;
 *                      zero: configure it as endpoint
 */
static void __set_qlm_pcie_mode_61xx(int pcie_port, int root_complex)
{
	int rc = root_complex ? 1 : 0;
	int ep = root_complex ? 0 : 1;
	cvmx_ciu_soft_prst1_t soft_prst1;
	cvmx_ciu_soft_prst_t soft_prst;
	cvmx_mio_rst_ctlx_t rst_ctl;

	/* Assert the port's soft reset while the mode is being changed.
	 * Port 1 is controlled through CIU_SOFT_PRST1, port 0 through
	 * CIU_SOFT_PRST.
	 */
	if (pcie_port) {
		soft_prst1.u64 = csr_rd(CVMX_CIU_SOFT_PRST1);
		soft_prst1.s.soft_prst = 1;
		csr_wr(CVMX_CIU_SOFT_PRST1, soft_prst1.u64);
	} else {
		soft_prst.u64 = csr_rd(CVMX_CIU_SOFT_PRST);
		soft_prst.s.soft_prst = 1;
		csr_wr(CVMX_CIU_SOFT_PRST, soft_prst.u64);
	}

	rst_ctl.u64 = csr_rd(CVMX_MIO_RST_CTLX(pcie_port));

	/* RC and EP select complementary reset roles: prst_link/prtmode/
	 * rst_drv are set only in RC mode, rst_link/rst_chip only in EP
	 * mode, and rst_rcv is always cleared.
	 * NOTE(review): exact field semantics per MIO_RST_CTL in the
	 * CN61XX HRM -- confirm there.
	 */
	rst_ctl.s.prst_link = rc;
	rst_ctl.s.rst_link = ep;
	rst_ctl.s.prtmode = rc;
	rst_ctl.s.rst_drv = rc;
	rst_ctl.s.rst_rcv = 0;
	rst_ctl.s.rst_chip = ep;
	csr_wr(CVMX_MIO_RST_CTLX(pcie_port), rst_ctl.u64);

	/* In endpoint mode, release the soft reset again right away;
	 * in root-complex mode the reset stays asserted here (released
	 * later by the PCIe init code -- presumably, not visible in this
	 * function).
	 */
	if (root_complex == 0) {
		if (pcie_port) {
			soft_prst1.u64 = csr_rd(CVMX_CIU_SOFT_PRST1);
			soft_prst1.s.soft_prst = 0;
			csr_wr(CVMX_CIU_SOFT_PRST1, soft_prst1.u64);
		} else {
			soft_prst.u64 = csr_rd(CVMX_CIU_SOFT_PRST);
			soft_prst.s.soft_prst = 0;
			csr_wr(CVMX_CIU_SOFT_PRST, soft_prst.u64);
		}
	}
}
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
/**
 * Configure a CN61xx QLM's mode and speed via MIO_QLMX_CFG.
 *
 * @param qlm      QLM to configure (0-2; others rejected)
 * @param speed    baud rate in Mbaud (e.g. 2500, 5000)
 * @param mode     value programmed into MIO_QLMX_CFG[qlm_cfg]
 *                 NOTE(review): mode encoding (PCIe vs. networking) is
 *                 defined by the MIO_QLMX_CFG register -- confirm against
 *                 the CN61XX HRM
 * @param rc       for PCIe modes: non-zero = root complex, 0 = endpoint
 * @param pcie2x1  for QLM1 mode 1: lane-split selection used to pick the
 *                 qlm_spd encoding -- NOTE(review): encoding assumed from
 *                 the tables below, confirm with the HRM
 *
 * @return 0 on success, -1 on error (wrong model or invalid QLM)
 */
static int octeon_configure_qlm_cn61xx(int qlm, int speed, int mode, int rc, int pcie2x1)
{
	cvmx_mio_qlmx_cfg_t qlm_cfg;

	/* This path only applies to CN61xx parts */
	if (!OCTEON_IS_MODEL(OCTEON_CN61XX))
		return -1;

	if (qlm < 3) {
		qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(qlm));
	} else {
		debug("WARNING: Invalid QLM(%d) passed\n", qlm);
		return -1;
	}

	switch (qlm) {
	/* QLM2: only non-PCIe modes (>= 2) are valid */
	case 2: {
		if (mode < 2) {
			qlm_cfg.s.qlm_spd = 0xf;	/* invalid speed marker */
			break;
		}
		qlm_cfg.s.qlm_spd = __get_qlm_spd(qlm, speed);
		qlm_cfg.s.qlm_cfg = mode;
		break;
	}
	case 1: {
		/* Mode 1: QLM1 lanes are combined with QLM0 for PCIe */
		if (mode == 1) {
			cvmx_mio_qlmx_cfg_t qlm0;

			/* QLM0 must not already be running standalone PCIe
			 * (cfg == 0 with a valid speed) when combining.
			 */
			qlm0.u64 = csr_rd(CVMX_MIO_QLMX_CFG(0));
			if (qlm0.s.qlm_spd != 0xf && qlm0.s.qlm_cfg == 0) {
				debug("Invalid mode(%d) for QLM(%d) as QLM1 is PCIe mode\n",
				      mode, qlm);
				qlm_cfg.s.qlm_spd = 0xf;
				break;
			}

			/* Pick qlm_spd from the reference clock and the
			 * requested 2x1 lane split.
			 */
			if (cvmx_qlm_is_ref_clock(qlm, 100)) {
				if (pcie2x1 == 0x3)
					qlm_cfg.s.qlm_spd = 0x0;
				else if (pcie2x1 == 0x1)
					qlm_cfg.s.qlm_spd = 0x2;
				else if (pcie2x1 == 0x2)
					qlm_cfg.s.qlm_spd = 0x1;
				else if (pcie2x1 == 0x0)
					qlm_cfg.s.qlm_spd = 0x3;
				else
					qlm_cfg.s.qlm_spd = 0xf;
			} else if (cvmx_qlm_is_ref_clock(qlm, 125)) {
				if (pcie2x1 == 0x3)
					qlm_cfg.s.qlm_spd = 0x4;
				else if (pcie2x1 == 0x1)
					qlm_cfg.s.qlm_spd = 0x6;
				else if (pcie2x1 == 0x2)
					qlm_cfg.s.qlm_spd = 0x9;
				else if (pcie2x1 == 0x0)
					qlm_cfg.s.qlm_spd = 0x7;
				else
					qlm_cfg.s.qlm_spd = 0xf;
			}
			qlm_cfg.s.qlm_cfg = mode;
			csr_wr(CVMX_MIO_QLMX_CFG(qlm), qlm_cfg.u64);

			/* Combined mode touches both PCIe ports */
			__set_qlm_pcie_mode_61xx(0, rc);
			__set_qlm_pcie_mode_61xx(1, rc);
			return 0;
		} else if (mode > 1) {
			debug("Invalid mode(%d) for QLM(%d).\n", mode, qlm);
			qlm_cfg.s.qlm_spd = 0xf;
			break;
		}

		/* Mode 0 (or negative): standalone PCIe on QLM1. Only 100
		 * and 125 MHz reference clocks are supported here.
		 */
		if (cvmx_qlm_is_ref_clock(qlm, 100)) {
			if (speed == 5000)
				qlm_cfg.s.qlm_spd = 0x1;
			else if (speed == 2500)
				qlm_cfg.s.qlm_spd = 0x2;
			else
				qlm_cfg.s.qlm_spd = 0xf;
		} else if (cvmx_qlm_is_ref_clock(qlm, 125)) {
			if (speed == 5000)
				qlm_cfg.s.qlm_spd = 0x4;
			else if (speed == 2500)
				qlm_cfg.s.qlm_spd = 0x6;
			else
				qlm_cfg.s.qlm_spd = 0xf;
		} else {
			qlm_cfg.s.qlm_spd = 0xf;
		}

		qlm_cfg.s.qlm_cfg = mode;
		csr_wr(CVMX_MIO_QLMX_CFG(qlm), qlm_cfg.u64);

		/* QLM1 drives PCIe port 1 */
		__set_qlm_pcie_mode_61xx(1, rc);
		return 0;
		/* Every path above either returns or breaks out of the
		 * switch -- there is no fall-through into case 0.
		 */
	}
	case 0: {
		/* QLM0 cannot be put into the combined-PCIe mode */
		if (mode == 1) {
			qlm_cfg.s.qlm_spd = 0xf;
			break;
		}

		/* PCIe (mode 0) only supports 2500 and 5000 Mbaud */
		if (mode == 0 && speed != 5000 && speed != 2500) {
			qlm_cfg.s.qlm_spd = 0xf;
			break;
		}

		qlm_cfg.s.qlm_spd = __get_qlm_spd(qlm, speed);
		qlm_cfg.s.qlm_cfg = mode;
		csr_wr(CVMX_MIO_QLMX_CFG(qlm), qlm_cfg.u64);

		/* QLM0 drives PCIe port 0 */
		if (mode == 0)
			__set_qlm_pcie_mode_61xx(0, rc);

		return 0;
	}
	default:
		debug("WARNING: Invalid QLM(%d) passed\n", qlm);
		qlm_cfg.s.qlm_spd = 0xf;
	}
	/* Reached only via break (error/simple paths): commit the config */
	csr_wr(CVMX_MIO_QLMX_CFG(qlm), qlm_cfg.u64);
	return 0;
}
319
320
321
322
323
324
325
326
327
328
329
330
331
332
/**
 * Bring up the PHY PLL of a CN70xx DLM.
 *
 * Sequences TX amplitude/pre-emphasis and RX EQ settings, reference
 * clock selection, PHY reset, MPLL enable and multiplier programming,
 * then waits for PLL lock.
 *
 * @param qlm               DLM to configure
 * @param baud_mhz          baud rate (used for the final multiplier via
 *                          __cvmx_qlm_set_mult())
 * @param ref_clk_sel       0 = 100 MHz, 1 = 125 MHz, 2 = 156 MHz
 * @param ref_clk_input     reference clock input pin select; input 2 is
 *                          only valid for DLM0
 * @param is_sff7000_rxaui  non-zero selects the SFF7000-board RXAUI TX
 *                          amplitude/pre-emphasis tuning
 *
 * @return 0 on success, -1 on error (bad clock input or PLL lock timeout)
 */
static int __dlm_setup_pll_cn70xx(int qlm, int baud_mhz, int ref_clk_sel, int ref_clk_input,
				  int is_sff7000_rxaui)
{
	cvmx_gserx_dlmx_test_powerdown_t dlmx_test_powerdown;
	cvmx_gserx_dlmx_ref_ssp_en_t dlmx_ref_ssp_en;
	cvmx_gserx_dlmx_mpll_en_t dlmx_mpll_en;
	cvmx_gserx_dlmx_phy_reset_t dlmx_phy_reset;
	cvmx_gserx_dlmx_tx_amplitude_t tx_amplitude;
	cvmx_gserx_dlmx_tx_preemph_t tx_preemph;
	cvmx_gserx_dlmx_rx_eq_t rx_eq;
	cvmx_gserx_dlmx_ref_clkdiv2_t ref_clkdiv2;
	cvmx_gserx_dlmx_mpll_multiplier_t mpll_multiplier;
	int gmx_ref_clk = 100;	/* reference clock in MHz */

	debug("%s(%d, %d, %d, %d, %d)\n", __func__, qlm, baud_mhz, ref_clk_sel, ref_clk_input,
	      is_sff7000_rxaui);
	if (ref_clk_sel == 1)
		gmx_ref_clk = 125;
	else if (ref_clk_sel == 2)
		gmx_ref_clk = 156;

	/* Reference clock input 2 (pad) only exists for DLM0 */
	if (qlm != 0 && ref_clk_input == 2) {
		printf("%s: Error: can only use reference clock inputs 0 or 1 for DLM %d\n",
		       __func__, qlm);
		return -1;
	}

	/* TX amplitude: higher drive for the SFF7000 RXAUI board tuning */
	tx_amplitude.u64 = csr_rd(CVMX_GSERX_DLMX_TX_AMPLITUDE(qlm, 0));
	if (is_sff7000_rxaui) {
		tx_amplitude.s.tx0_amplitude = 100;
		tx_amplitude.s.tx1_amplitude = 100;
	} else {
		tx_amplitude.s.tx0_amplitude = 65;
		tx_amplitude.s.tx1_amplitude = 65;
	}

	csr_wr(CVMX_GSERX_DLMX_TX_AMPLITUDE(qlm, 0), tx_amplitude.u64);

	tx_preemph.u64 = csr_rd(CVMX_GSERX_DLMX_TX_PREEMPH(qlm, 0));

	/* TX pre-emphasis off for SFF7000 RXAUI, default 22 otherwise */
	if (is_sff7000_rxaui) {
		tx_preemph.s.tx0_preemph = 0;
		tx_preemph.s.tx1_preemph = 0;
	} else {
		tx_preemph.s.tx0_preemph = 22;
		tx_preemph.s.tx1_preemph = 22;
	}
	csr_wr(CVMX_GSERX_DLMX_TX_PREEMPH(qlm, 0), tx_preemph.u64);

	/* Disable RX equalization on both lanes */
	rx_eq.u64 = csr_rd(CVMX_GSERX_DLMX_RX_EQ(qlm, 0));
	rx_eq.s.rx0_eq = 0;
	rx_eq.s.rx1_eq = 0;
	csr_wr(CVMX_GSERX_DLMX_RX_EQ(qlm, 0), rx_eq.u64);

	/* Select pad reference clock only when DLM0 uses input 2.
	 * NOTE(review): REF_USE_PAD and REFCLK_SEL are addressed with a
	 * hard-coded DLM index 0 while most other registers below use
	 * (qlm, 0) -- presumably these clock-select registers exist only
	 * on DLM0; confirm against the CN70XX HRM.
	 */
	csr_wr(CVMX_GSERX_DLMX_REF_USE_PAD(0, 0), ((ref_clk_input == 2) && (qlm == 0)) ? 1 : 0);

	/* Select between reference clock inputs 0 and 1 */
	csr_wr(CVMX_GSERX_DLMX_REFCLK_SEL(0, 0), ref_clk_input & 1);

	/* Divide the reference clock by 2 for clocks above 100 MHz */
	ref_clkdiv2.u64 = csr_rd(CVMX_GSERX_DLMX_REF_CLKDIV2(qlm, 0));
	if (gmx_ref_clk == 100)
		ref_clkdiv2.s.ref_clkdiv2 = 0;
	else
		ref_clkdiv2.s.ref_clkdiv2 = 1;
	csr_wr(CVMX_GSERX_DLMX_REF_CLKDIV2(qlm, 0), ref_clkdiv2.u64);

	/* Hold the PHY in reset while the PLL is configured */
	dlmx_phy_reset.u64 = csr_rd(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0));
	dlmx_phy_reset.s.phy_reset = 1;
	csr_wr(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0), dlmx_phy_reset.u64);

	/* Enable the MPLL.
	 * NOTE(review): read/write use DLM index 0, not qlm -- same
	 * hard-coded pattern as the clock-select registers above; confirm.
	 */
	dlmx_mpll_en.u64 = csr_rd(CVMX_GSERX_DLMX_MPLL_EN(0, 0));
	dlmx_mpll_en.s.mpll_en = 1;
	csr_wr(CVMX_GSERX_DLMX_MPLL_EN(0, 0), dlmx_mpll_en.u64);

	/* Program an interim multiplier per reference clock; the final,
	 * baud-dependent value is applied further below via
	 * __cvmx_qlm_set_mult().
	 */
	mpll_multiplier.u64 = csr_rd(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0));
	if (gmx_ref_clk == 100)
		mpll_multiplier.s.mpll_multiplier = 35;
	else if (gmx_ref_clk == 125)
		mpll_multiplier.s.mpll_multiplier = 56;
	else
		mpll_multiplier.s.mpll_multiplier = 45;
	debug("%s: Setting mpll multiplier to %u for DLM%d, baud %d, clock rate %uMHz\n",
	      __func__, mpll_multiplier.s.mpll_multiplier, qlm, baud_mhz, gmx_ref_clk);

	csr_wr(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0), mpll_multiplier.u64);

	/* Take the DLM out of test power-down */
	dlmx_test_powerdown.u64 = csr_rd(CVMX_GSERX_DLMX_TEST_POWERDOWN(qlm, 0));
	dlmx_test_powerdown.s.test_powerdown = 0;
	csr_wr(CVMX_GSERX_DLMX_TEST_POWERDOWN(qlm, 0), dlmx_test_powerdown.u64);

	/* Enable the reference-clock SSP.
	 * NOTE(review): read uses (qlm, 0) but the write goes to (0, 0) --
	 * inconsistent indices; flag for verification against the HRM.
	 */
	dlmx_ref_ssp_en.u64 = csr_rd(CVMX_GSERX_DLMX_REF_SSP_EN(qlm, 0));
	dlmx_ref_ssp_en.s.ref_ssp_en = 1;
	csr_wr(CVMX_GSERX_DLMX_REF_SSP_EN(0, 0), dlmx_ref_ssp_en.u64);

	/* Release the PHY reset */
	dlmx_phy_reset.u64 = csr_rd(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0));
	dlmx_phy_reset.s.phy_reset = 0;
	csr_wr(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0), dlmx_phy_reset.u64);

	/* Apply the final, baud-rate-specific multiplier */
	mpll_multiplier.u64 = csr_rd(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0));
	__cvmx_qlm_set_mult(qlm, baud_mhz, mpll_multiplier.s.mpll_multiplier);

	/* Wait (up to 10000 us) for the MPLL to lock */
	if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_DLMX_MPLL_STATUS(qlm, 0),
				  cvmx_gserx_dlmx_mpll_status_t, mpll_status, ==, 1, 10000)) {
		printf("PLL for DLM%d failed to lock\n", qlm);
		return -1;
	}
	return 0;
}
490
/**
 * Bring up the TX side of CN70xx DLM0 based on the GMX interface modes.
 *
 * Lane 0 is needed when GMX0 is enabled; lane 1 is needed when GMX1 is
 * enabled or when GMX0 runs RXAUI (RXAUI uses both lanes).
 *
 * @param speed        baud rate in Mbaud (1250/2500/3125/5000/6250)
 * @param ref_clk_sel  OCTEON_QLM_REF_CLK_* reference clock select
 *
 * @return 0 on success, -1 on invalid speed/clock or TX status timeout
 */
static int __dlm0_setup_tx_cn70xx(int speed, int ref_clk_sel)
{
	int need0, need1;
	cvmx_gmxx_inf_mode_t mode0, mode1;
	cvmx_gserx_dlmx_tx_rate_t rate;
	cvmx_gserx_dlmx_tx_en_t en;
	cvmx_gserx_dlmx_tx_cm_en_t cm_en;
	cvmx_gserx_dlmx_tx_data_en_t data_en;
	cvmx_gserx_dlmx_tx_reset_t tx_reset;

	debug("%s(%d, %d)\n", __func__, speed, ref_clk_sel);
	mode0.u64 = csr_rd(CVMX_GMXX_INF_MODE(0));
	mode1.u64 = csr_rd(CVMX_GMXX_INF_MODE(1));

	/* Decide which lanes must be powered up */
	need0 = (mode0.s.mode != CVMX_GMX_INF_MODE_DISABLED);
	need1 = (mode1.s.mode != CVMX_GMX_INF_MODE_DISABLED) ||
		(mode0.s.mode == CVMX_GMX_INF_MODE_RXAUI);

	/* Pick the TX rate divider per speed/interface mode.
	 * NOTE(review): rate values 0/1/2 presumably select full, half
	 * and quarter rate -- confirm with the CN70XX HRM.
	 */
	rate.u64 = csr_rd(CVMX_GSERX_DLMX_TX_RATE(0, 0));
	debug("%s: speed: %d\n", __func__, speed);
	switch (speed) {
	case 1250:
	case 2500:
		switch (ref_clk_sel) {
		case OCTEON_QLM_REF_CLK_100MHZ:
		case OCTEON_QLM_REF_CLK_125MHZ:
		case OCTEON_QLM_REF_CLK_156MHZ:
			rate.s.tx0_rate = (mode0.s.mode == CVMX_GMX_INF_MODE_SGMII) ? 2 : 0;
			rate.s.tx1_rate = (mode1.s.mode == CVMX_GMX_INF_MODE_SGMII) ? 2 : 0;
			break;
		default:
			printf("Invalid reference clock select %d\n", ref_clk_sel);
			return -1;
		}
		break;
	case 3125:
		switch (ref_clk_sel) {
		/* 3.125 Gbaud is not supported from a 100 MHz reference */
		case OCTEON_QLM_REF_CLK_125MHZ:
		case OCTEON_QLM_REF_CLK_156MHZ:
			rate.s.tx0_rate = (mode0.s.mode == CVMX_GMX_INF_MODE_SGMII) ? 1 : 0;
			rate.s.tx1_rate = (mode1.s.mode == CVMX_GMX_INF_MODE_SGMII) ? 1 : 0;
			break;
		default:
			printf("Invalid reference clock select %d\n", ref_clk_sel);
			return -1;
		}
		break;
	case 5000:
		/* Full rate for every supported reference clock; the two
		 * branches only exist to reject invalid selects.
		 */
		switch (ref_clk_sel) {
		case OCTEON_QLM_REF_CLK_100MHZ:
			rate.s.tx0_rate = 0;
			rate.s.tx1_rate = 0;
			break;
		case OCTEON_QLM_REF_CLK_125MHZ:
		case OCTEON_QLM_REF_CLK_156MHZ:
			rate.s.tx0_rate = 0;
			rate.s.tx1_rate = 0;
			break;
		default:
			printf("Invalid reference clock select %d\n", ref_clk_sel);
			return -1;
		}
		break;
	case 6250:
		switch (ref_clk_sel) {
		case OCTEON_QLM_REF_CLK_125MHZ:
		case OCTEON_QLM_REF_CLK_156MHZ:
			rate.s.tx0_rate = 0;
			rate.s.tx1_rate = 0;
			break;
		default:
			printf("Invalid reference clock select %d\n", ref_clk_sel);
			return -1;
		}
		break;
	default:
		printf("%s: Invalid rate %d\n", __func__, speed);
		return -1;
	}
	debug("%s: tx 0 rate: %d, tx 1 rate: %d\n", __func__, rate.s.tx0_rate, rate.s.tx1_rate);
	csr_wr(CVMX_GSERX_DLMX_TX_RATE(0, 0), rate.u64);

	/* Enable the TX drivers for the needed lanes */
	en.u64 = csr_rd(CVMX_GSERX_DLMX_TX_EN(0, 0));
	en.s.tx0_en = need0;
	en.s.tx1_en = need1;
	csr_wr(CVMX_GSERX_DLMX_TX_EN(0, 0), en.u64);

	/* Enable the TX common-mode drivers */
	cm_en.u64 = csr_rd(CVMX_GSERX_DLMX_TX_CM_EN(0, 0));
	cm_en.s.tx0_cm_en = need0;
	cm_en.s.tx1_cm_en = need1;
	csr_wr(CVMX_GSERX_DLMX_TX_CM_EN(0, 0), cm_en.u64);

	/* Enable the TX data paths */
	data_en.u64 = csr_rd(CVMX_GSERX_DLMX_TX_DATA_EN(0, 0));
	data_en.s.tx0_data_en = need0;
	data_en.s.tx1_data_en = need1;
	csr_wr(CVMX_GSERX_DLMX_TX_DATA_EN(0, 0), data_en.u64);

	/* Keep unused lanes in reset, release the needed ones */
	tx_reset.u64 = csr_rd(CVMX_GSERX_DLMX_TX_RESET(0, 0));
	tx_reset.s.tx0_reset = !need0;
	tx_reset.s.tx1_reset = !need1;
	csr_wr(CVMX_GSERX_DLMX_TX_RESET(0, 0), tx_reset.u64);

	/* Poll (up to 10000 us each) for lane and common-mode ready
	 * status on every enabled lane.
	 */
	if (need0) {
		if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_DLMX_TX_STATUS(0, 0),
					  cvmx_gserx_dlmx_tx_status_t, tx0_status, ==, 1, 10000)) {
			printf("DLM0 TX0 status fail\n");
			return -1;
		}
		if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_DLMX_TX_STATUS(0, 0),
					  cvmx_gserx_dlmx_tx_status_t, tx0_cm_status, ==, 1,
					  10000)) {
			printf("DLM0 TX0 CM status fail\n");
			return -1;
		}
	}
	if (need1) {
		if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_DLMX_TX_STATUS(0, 0),
					  cvmx_gserx_dlmx_tx_status_t, tx1_status, ==, 1, 10000)) {
			printf("DLM0 TX1 status fail\n");
			return -1;
		}
		if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_DLMX_TX_STATUS(0, 0),
					  cvmx_gserx_dlmx_tx_status_t, tx1_cm_status, ==, 1,
					  10000)) {
			printf("DLM0 TX1 CM status fail\n");
			return -1;
		}
	}
	return 0;
}
633
/**
 * Bring up the RX side of CN70xx DLM0 based on the GMX interface modes.
 *
 * Mirrors __dlm0_setup_tx_cn70xx(): lane 0 is needed when GMX0 is
 * enabled; lane 1 when GMX1 is enabled or GMX0 runs RXAUI. Unlike the
 * TX path, no ready-status polling is done here.
 *
 * @param speed        baud rate in Mbaud (1250/2500/3125/5000/6250)
 * @param ref_clk_sel  OCTEON_QLM_REF_CLK_* reference clock select
 *
 * @return 0 on success, -1 on invalid speed or clock select
 */
static int __dlm0_setup_rx_cn70xx(int speed, int ref_clk_sel)
{
	int need0, need1;
	cvmx_gmxx_inf_mode_t mode0, mode1;
	cvmx_gserx_dlmx_rx_rate_t rate;
	cvmx_gserx_dlmx_rx_pll_en_t pll_en;
	cvmx_gserx_dlmx_rx_data_en_t data_en;
	cvmx_gserx_dlmx_rx_reset_t rx_reset;

	debug("%s(%d, %d)\n", __func__, speed, ref_clk_sel);
	mode0.u64 = csr_rd(CVMX_GMXX_INF_MODE(0));
	mode1.u64 = csr_rd(CVMX_GMXX_INF_MODE(1));

	/* Decide which lanes must be powered up */
	need0 = (mode0.s.mode != CVMX_GMX_INF_MODE_DISABLED);
	need1 = (mode1.s.mode != CVMX_GMX_INF_MODE_DISABLED) ||
		(mode0.s.mode == CVMX_GMX_INF_MODE_RXAUI);

	/* Pick the RX rate divider per speed/interface mode (same encoding
	 * as the TX side).
	 */
	rate.u64 = csr_rd(CVMX_GSERX_DLMX_RX_RATE(0, 0));
	switch (speed) {
	case 1250:
	case 2500:
		switch (ref_clk_sel) {
		case OCTEON_QLM_REF_CLK_100MHZ:
		case OCTEON_QLM_REF_CLK_125MHZ:
		case OCTEON_QLM_REF_CLK_156MHZ:
			rate.s.rx0_rate = (mode0.s.mode == CVMX_GMX_INF_MODE_SGMII) ? 2 : 0;
			rate.s.rx1_rate = (mode1.s.mode == CVMX_GMX_INF_MODE_SGMII) ? 2 : 0;
			break;
		default:
			printf("Invalid reference clock select %d\n", ref_clk_sel);
			return -1;
		}
		break;
	case 3125:
		switch (ref_clk_sel) {
		/* 3.125 Gbaud is not supported from a 100 MHz reference */
		case OCTEON_QLM_REF_CLK_125MHZ:
		case OCTEON_QLM_REF_CLK_156MHZ:
			rate.s.rx0_rate = (mode0.s.mode == CVMX_GMX_INF_MODE_SGMII) ? 1 : 0;
			rate.s.rx1_rate = (mode1.s.mode == CVMX_GMX_INF_MODE_SGMII) ? 1 : 0;
			break;
		default:
			printf("Invalid reference clock select %d\n", ref_clk_sel);
			return -1;
		}
		break;
	case 5000:
		switch (ref_clk_sel) {
		case OCTEON_QLM_REF_CLK_100MHZ:
		case OCTEON_QLM_REF_CLK_125MHZ:
		case OCTEON_QLM_REF_CLK_156MHZ:
			rate.s.rx0_rate = 0;
			rate.s.rx1_rate = 0;
			break;
		default:
			printf("Invalid reference clock select %d\n", ref_clk_sel);
			return -1;
		}
		break;
	case 6250:
		switch (ref_clk_sel) {
		case OCTEON_QLM_REF_CLK_125MHZ:
		case OCTEON_QLM_REF_CLK_156MHZ:
			rate.s.rx0_rate = 0;
			rate.s.rx1_rate = 0;
			break;
		default:
			printf("Invalid reference clock select %d\n", ref_clk_sel);
			return -1;
		}
		break;
	default:
		printf("%s: Invalid rate %d\n", __func__, speed);
		return -1;
	}
	debug("%s: rx 0 rate: %d, rx 1 rate: %d\n", __func__, rate.s.rx0_rate, rate.s.rx1_rate);
	csr_wr(CVMX_GSERX_DLMX_RX_RATE(0, 0), rate.u64);

	/* Enable the per-lane RX PLLs for the needed lanes */
	pll_en.u64 = csr_rd(CVMX_GSERX_DLMX_RX_PLL_EN(0, 0));
	pll_en.s.rx0_pll_en = need0;
	pll_en.s.rx1_pll_en = need1;
	csr_wr(CVMX_GSERX_DLMX_RX_PLL_EN(0, 0), pll_en.u64);

	/* Enable the RX data paths */
	data_en.u64 = csr_rd(CVMX_GSERX_DLMX_RX_DATA_EN(0, 0));
	data_en.s.rx0_data_en = need0;
	data_en.s.rx1_data_en = need1;
	csr_wr(CVMX_GSERX_DLMX_RX_DATA_EN(0, 0), data_en.u64);

	/* Keep unused lanes in reset, release the needed ones */
	rx_reset.u64 = csr_rd(CVMX_GSERX_DLMX_RX_RESET(0, 0));
	rx_reset.s.rx0_reset = !need0;
	rx_reset.s.rx1_reset = !need1;
	csr_wr(CVMX_GSERX_DLMX_RX_RESET(0, 0), rx_reset.u64);

	return 0;
}
737
/* SATA ACLK rate in Hz, computed from gd->bus_clk by
 * __dlm2_sata_uctl_init_cn70xx() and later used to program the AHCI
 * 1 ms timer (SATA_UAHC_GBL_TIMER1MS).
 */
static int a_clk;
739
/**
 * Initialize the CN70xx SATA UCTL bridge clocking.
 *
 * Puts the UAHC/UCTL blocks into reset, then derives the largest ACLK
 * divisor that keeps the ACLK at or below 333 MHz from the coprocessor
 * bus clock, programs it, and releases the divider reset. The resulting
 * ACLK rate is stored in the file-scope variable a_clk.
 *
 * @return 0 on success, -1 if no suitable clock divisor exists
 */
static int __dlm2_sata_uctl_init_cn70xx(void)
{
	cvmx_sata_uctl_ctl_t uctl_ctl;
	const int MAX_A_CLK = 333000000;	/* maximum ACLK rate in Hz */
	int divisor, a_clkdiv;

	/* Hold both the AHCI controller (UAHC) and the UCTL bridge in
	 * reset while the clocking is set up; they are released later in
	 * the SATA DLM init.
	 */
	uctl_ctl.u64 = csr_rd(CVMX_SATA_UCTL_CTL);
	uctl_ctl.s.sata_uahc_rst = 1;
	uctl_ctl.s.sata_uctl_rst = 1;
	csr_wr(CVMX_SATA_UCTL_CTL, uctl_ctl.u64);

	/* Hold the ACLK divider in reset while it is reprogrammed */
	uctl_ctl.u64 = csr_rd(CVMX_SATA_UCTL_CTL);
	uctl_ctl.s.a_clkdiv_rst = 1;
	csr_wr(CVMX_SATA_UCTL_CTL, uctl_ctl.u64);

	uctl_ctl.u64 = csr_rd(CVMX_SATA_UCTL_CTL);

	/* Smallest divisor (rounded up) that brings bus_clk <= 333 MHz,
	 * then mapped onto the discrete divisors the hardware supports
	 * (1-4, 6, 8, 16, 24).
	 * NOTE(review): a_clkdiv_sel encoding assumed from this mapping --
	 * confirm against SATA_UCTL_CTL in the HRM.
	 */
	divisor = (gd->bus_clk + MAX_A_CLK - 1) / MAX_A_CLK;
	if (divisor <= 4) {
		a_clkdiv = divisor - 1;
	} else if (divisor <= 6) {
		a_clkdiv = 4;
		divisor = 6;
	} else if (divisor <= 8) {
		a_clkdiv = 5;
		divisor = 8;
	} else if (divisor <= 16) {
		a_clkdiv = 6;
		divisor = 16;
	} else if (divisor <= 24) {
		a_clkdiv = 7;
		divisor = 24;
	} else {
		printf("Unable to determine SATA clock divisor\n");
		return -1;
	}

	/* Remember the resulting ACLK rate for the AHCI timer setup */
	a_clk = gd->bus_clk / divisor;

	uctl_ctl.s.a_clkdiv_sel = a_clkdiv;
	uctl_ctl.s.a_clk_en = 1;
	uctl_ctl.s.a_clk_byp_sel = 0;
	csr_wr(CVMX_SATA_UCTL_CTL, uctl_ctl.u64);

	/* Release the divider reset */
	uctl_ctl.u64 = csr_rd(CVMX_SATA_UCTL_CTL);
	uctl_ctl.s.a_clkdiv_rst = 0;
	csr_wr(CVMX_SATA_UCTL_CTL, uctl_ctl.u64);

	udelay(1);

	return 0;
}
824
/**
 * Bring up a CN70xx DLM for SATA operation.
 *
 * Enables SATA mode in the GSER, selects the reference clock input,
 * sequences the PHY/lane resets, programs the MPLL multiplier (interim
 * value, then final value), waits for PLL and RX ready, and finally
 * releases the UCTL/UAHC resets and configures the shim endianness.
 *
 * @param qlm            DLM to configure for SATA
 * @param baud_mhz       baud rate (only used in the debug trace here;
 *                       the multiplier is chosen from the reference clock)
 * @param ref_clk_sel    0 = 100 MHz, 1 = 125 MHz, 2 = 156 MHz
 * @param ref_clk_input  clock input select: 0/1 = internal inputs,
 *                       2 = external pad
 *
 * @return 0 on success, -1 on bad clock select or ready/lock timeout
 */
static int __sata_dlm_init_cn70xx(int qlm, int baud_mhz, int ref_clk_sel, int ref_clk_input)
{
	cvmx_gserx_sata_cfg_t sata_cfg;
	cvmx_gserx_sata_lane_rst_t sata_lane_rst;
	cvmx_gserx_dlmx_phy_reset_t dlmx_phy_reset;
	cvmx_gserx_dlmx_test_powerdown_t dlmx_test_powerdown;
	cvmx_gserx_sata_ref_ssp_en_t ref_ssp_en;
	cvmx_gserx_dlmx_mpll_multiplier_t mpll_multiplier;
	cvmx_gserx_dlmx_ref_clkdiv2_t ref_clkdiv2;
	cvmx_sata_uctl_shim_cfg_t shim_cfg;
	cvmx_gserx_phyx_ovrd_in_lo_t ovrd_in;
	cvmx_sata_uctl_ctl_t uctl_ctl;
	int sata_ref_clk;	/* reference clock in MHz */

	debug("%s(%d, %d, %d, %d)\n", __func__, qlm, baud_mhz, ref_clk_sel, ref_clk_input);

	switch (ref_clk_sel) {
	case 0:
		sata_ref_clk = 100;
		break;
	case 1:
		sata_ref_clk = 125;
		break;
	case 2:
		sata_ref_clk = 156;
		break;
	default:
		printf("%s: Invalid reference clock select %d for qlm %d\n", __func__,
		       ref_clk_sel, qlm);
		return -1;
	}

	/* Switch the GSER into SATA mode */
	sata_cfg.u64 = csr_rd(CVMX_GSERX_SATA_CFG(0));
	sata_cfg.s.sata_en = 1;
	csr_wr(CVMX_GSERX_SATA_CFG(0), sata_cfg.u64);

	/* Select internal clock input 0/1, or the external pad for
	 * input 2.
	 */
	if (ref_clk_input < 2) {
		csr_wr(CVMX_GSERX_DLMX_REFCLK_SEL(qlm, 0), ref_clk_input);
		csr_wr(CVMX_GSERX_DLMX_REF_USE_PAD(qlm, 0), 0);
	} else {
		csr_wr(CVMX_GSERX_DLMX_REF_USE_PAD(qlm, 0), 1);
	}

	/* Enable the SATA reference-clock SSP */
	ref_ssp_en.u64 = csr_rd(CVMX_GSERX_SATA_REF_SSP_EN(0));
	ref_ssp_en.s.ref_ssp_en = 1;
	csr_wr(CVMX_GSERX_SATA_REF_SSP_EN(0), ref_ssp_en.u64);

	/* Divide the reference clock by 2 for clocks above 100 MHz */
	ref_clkdiv2.u64 = csr_rd(CVMX_GSERX_DLMX_REF_CLKDIV2(qlm, 0));
	if (sata_ref_clk == 100)
		ref_clkdiv2.s.ref_clkdiv2 = 0;
	else
		ref_clkdiv2.s.ref_clkdiv2 = 1;
	csr_wr(CVMX_GSERX_DLMX_REF_CLKDIV2(qlm, 0), ref_clkdiv2.u64);

	/* Hold the PHY in reset while the PLL is configured */
	dlmx_phy_reset.u64 = csr_rd(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0));
	dlmx_phy_reset.s.phy_reset = 1;
	csr_wr(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0), dlmx_phy_reset.u64);

	/* Interim multiplier chosen from the reference clock; the final
	 * SATA value is written further below once the PHY is out of
	 * reset.
	 */
	mpll_multiplier.u64 = csr_rd(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0));
	if (sata_ref_clk == 100)
		mpll_multiplier.s.mpll_multiplier = 35;
	else
		mpll_multiplier.s.mpll_multiplier = 56;
	csr_wr(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0), mpll_multiplier.u64);

	/* Take the DLM out of test power-down */
	dlmx_test_powerdown.u64 = csr_rd(CVMX_GSERX_DLMX_TEST_POWERDOWN(qlm, 0));
	dlmx_test_powerdown.s.test_powerdown = 0;
	csr_wr(CVMX_GSERX_DLMX_TEST_POWERDOWN(qlm, 0), dlmx_test_powerdown.u64);

	/* Release the resets of both SATA lanes */
	sata_lane_rst.u64 = csr_rd(CVMX_GSERX_SATA_LANE_RST(0));
	sata_lane_rst.s.l0_rst = 0;
	sata_lane_rst.s.l1_rst = 0;
	csr_wr(CVMX_GSERX_SATA_LANE_RST(0), sata_lane_rst.u64);

	udelay(1);

	/* Release the PHY reset */
	dlmx_phy_reset.u64 = csr_rd(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0));
	dlmx_phy_reset.s.phy_reset = 0;
	csr_wr(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0), dlmx_phy_reset.u64);

	/* Force the MPLL on via the PHY override register */
	ovrd_in.u64 = csr_rd(CVMX_GSERX_PHYX_OVRD_IN_LO(qlm, 0));
	ovrd_in.s.mpll_en = 1;
	ovrd_in.s.mpll_en_ovrd = 1;
	csr_wr(CVMX_GSERX_PHYX_OVRD_IN_LO(qlm, 0), ovrd_in.u64);

	/* Final SATA MPLL multiplier: 0x1e for a 100 MHz reference,
	 * 0x30 otherwise.
	 */
	mpll_multiplier.u64 = csr_rd(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0));
	if (sata_ref_clk == 100)
		mpll_multiplier.s.mpll_multiplier = 0x1e;
	else
		mpll_multiplier.s.mpll_multiplier = 0x30;
	csr_wr(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0), mpll_multiplier.u64);

	/* Wait (up to 10000 us each) for MPLL lock and both RX lanes */
	if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_DLMX_MPLL_STATUS(qlm, 0),
				  cvmx_gserx_dlmx_mpll_status_t, mpll_status, ==, 1, 10000)) {
		printf("ERROR: SATA MPLL failed to set\n");
		return -1;
	}

	if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_DLMX_RX_STATUS(qlm, 0), cvmx_gserx_dlmx_rx_status_t,
				  rx0_status, ==, 1, 10000)) {
		printf("ERROR: SATA RX0_STATUS failed to set\n");
		return -1;
	}
	if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_DLMX_RX_STATUS(qlm, 0), cvmx_gserx_dlmx_rx_status_t,
				  rx1_status, ==, 1, 10000)) {
		printf("ERROR: SATA RX1_STATUS failed to set\n");
		return -1;
	}

	/* PHY is up: release the UCTL/UAHC resets asserted earlier in
	 * __dlm2_sata_uctl_init_cn70xx().
	 */
	uctl_ctl.u64 = csr_rd(CVMX_SATA_UCTL_CTL);
	uctl_ctl.s.sata_uctl_rst = 0;
	uctl_ctl.s.sata_uahc_rst = 0;
	csr_wr(CVMX_SATA_UCTL_CTL, uctl_ctl.u64);

	udelay(1);

	/* Enable the UCTL CSR clock */
	uctl_ctl.u64 = csr_rd(CVMX_SATA_UCTL_CTL);
	uctl_ctl.s.csclk_en = 1;
	csr_wr(CVMX_SATA_UCTL_CTL, uctl_ctl.u64);

	/* Configure the shim endianness modes.
	 * NOTE(review): mode values (1 for DMA, 3 for CSR) per
	 * SATA_UCTL_SHIM_CFG -- confirm encoding in the HRM.
	 */
	shim_cfg.u64 = csr_rd(CVMX_SATA_UCTL_SHIM_CFG);
	shim_cfg.s.dma_endian_mode = 1;
	shim_cfg.s.csr_endian_mode = 3;
	csr_wr(CVMX_SATA_UCTL_SHIM_CFG, shim_cfg.u64);

	return 0;
}
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
/**
 * Bring up a CN73xx QLM for SATA operation.
 *
 * Selects the common reference clock, resets the PHY, switches the GSER
 * into SATA mode, releases lane and PHY resets, then programs the
 * per-slice, per-lane and per-PLL-mode tuning registers before waiting
 * for both SATA ports to report ready and finally releasing the
 * UCTL/UAHC resets and configuring the shim endianness.
 *
 * @param qlm            QLM to configure (expected to be QLM4 per the
 *                       error messages below)
 * @param baud_mhz       baud rate -- NOTE(review): not used in this
 *                       function's visible body; the PLL values below are
 *                       fixed. Confirm whether this is intentional.
 * @param ref_clk_sel    must be 0 (100 MHz); anything else is rejected
 * @param ref_clk_input  0/1 = common clock 0/1, otherwise common clock 1
 *                       from the second pad
 *
 * @return 0 on success, -1 on error or ready timeout
 */
static int __sata_dlm_init_cn73xx(int qlm, int baud_mhz, int ref_clk_sel, int ref_clk_input)
{
	cvmx_sata_uctl_shim_cfg_t shim_cfg;
	cvmx_gserx_refclk_sel_t refclk_sel;
	cvmx_gserx_phy_ctl_t phy_ctl;
	cvmx_gserx_rx_pwr_ctrl_p2_t pwr_ctrl_p2;
	cvmx_gserx_lanex_misc_cfg_0_t misc_cfg_0;
	cvmx_gserx_sata_lane_rst_t lane_rst;
	cvmx_gserx_pll_px_mode_0_t pmode_0;
	cvmx_gserx_pll_px_mode_1_t pmode_1;
	cvmx_gserx_lane_px_mode_0_t lane_pmode_0;
	cvmx_gserx_lane_px_mode_1_t lane_pmode_1;
	cvmx_gserx_cfg_t gserx_cfg;
	cvmx_sata_uctl_ctl_t uctl_ctl;
	int l;
	int i;

	/* Map the clock input onto the common-clock select fields */
	refclk_sel.u64 = 0;
	if (ref_clk_input == 0) {
		refclk_sel.s.com_clk_sel = 0;
		refclk_sel.s.use_com1 = 0;
	} else if (ref_clk_input == 1) {
		refclk_sel.s.com_clk_sel = 1;
		refclk_sel.s.use_com1 = 0;
	} else {
		refclk_sel.s.com_clk_sel = 1;
		refclk_sel.s.use_com1 = 1;
	}

	/* SATA on this QLM requires the 100 MHz reference (sel 0) */
	if (ref_clk_sel != 0) {
		printf("Wrong reference clock selected for QLM4\n");
		return -1;
	}

	csr_wr(CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);

	/* Assert the PHY reset while the QLM is reconfigured */
	phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
	phy_ctl.s.phy_reset = 1;
	csr_wr(CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);

	udelay(1);

	/* Select SATA as the sole protocol for this QLM */
	gserx_cfg.u64 = 0;
	gserx_cfg.s.sata = 1;
	csr_wr(CVMX_GSERX_CFG(qlm), gserx_cfg.u64);

	/* Release both SATA lane resets; the read-back flushes the write */
	lane_rst.u64 = csr_rd(CVMX_GSERX_SATA_LANE_RST(qlm));
	lane_rst.s.l0_rst = 0;
	lane_rst.s.l1_rst = 0;
	csr_wr(CVMX_GSERX_SATA_LANE_RST(qlm), lane_rst.u64);
	csr_rd(CVMX_GSERX_SATA_LANE_RST(qlm));

	udelay(1);

	/* Release the PHY reset */
	phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
	phy_ctl.s.phy_reset = 0;
	csr_wr(CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);

	/* Wait (up to 10000 us) for the QLM to come out of reset */
	if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_QLM_STAT(qlm), cvmx_gserx_qlm_stat_t,
				  rst_rdy, ==, 1, 10000)) {
		printf("QLM%d: Timeout waiting for GSERX_QLM_STAT[rst_rdy]\n", qlm);
		return -1;
	}

	/* Per-slice RX bandwidth selects for all three PCIE-mode register
	 * sets (also used in SATA mode on this QLM).
	 */
	for (i = 0; i < 2; i++) {
		cvmx_gserx_slicex_pcie1_mode_t pcie1;
		cvmx_gserx_slicex_pcie2_mode_t pcie2;
		cvmx_gserx_slicex_pcie3_mode_t pcie3;

		pcie1.u64 = csr_rd(CVMX_GSERX_SLICEX_PCIE1_MODE(i, qlm));
		pcie1.s.rx_pi_bwsel = 1;
		pcie1.s.rx_ldll_bwsel = 1;
		pcie1.s.rx_sdll_bwsel = 1;
		csr_wr(CVMX_GSERX_SLICEX_PCIE1_MODE(i, qlm), pcie1.u64);

		pcie2.u64 = csr_rd(CVMX_GSERX_SLICEX_PCIE2_MODE(i, qlm));
		pcie2.s.rx_pi_bwsel = 1;
		pcie2.s.rx_ldll_bwsel = 1;
		pcie2.s.rx_sdll_bwsel = 1;
		csr_wr(CVMX_GSERX_SLICEX_PCIE2_MODE(i, qlm), pcie2.u64);

		pcie3.u64 = csr_rd(CVMX_GSERX_SLICEX_PCIE3_MODE(i, qlm));
		pcie3.s.rx_pi_bwsel = 1;
		pcie3.s.rx_ldll_bwsel = 1;
		pcie3.s.rx_sdll_bwsel = 1;
		csr_wr(CVMX_GSERX_SLICEX_PCIE3_MODE(i, qlm), pcie3.u64);
	}

	/* Clear bit 0 of the P2 power-down mask (keep that sub-block
	 * powered in power state P2).
	 */
	pwr_ctrl_p2.u64 = csr_rd(CVMX_GSERX_RX_PWR_CTRL_P2(qlm));
	pwr_ctrl_p2.s.p2_rx_subblk_pd &= 0x1e;
	csr_wr(CVMX_GSERX_RX_PWR_CTRL_P2(qlm), pwr_ctrl_p2.u64);

	/* Electrical-idle detector settle time for both lanes */
	for (i = 0; i < 2; i++) {
		misc_cfg_0.u64 = csr_rd(CVMX_GSERX_LANEX_MISC_CFG_0(i, qlm));
		misc_cfg_0.s.eie_det_stl_on_time = 4;
		csr_wr(CVMX_GSERX_LANEX_MISC_CFG_0(i, qlm), misc_cfg_0.u64);
	}

	/* Program PLL and lane tuning for the three SATA rate modes
	 * (indices 0-2 correspond to R_2_5G_REFCLK100, R_5G_REFCLK100
	 * and R_8G_REFCLK100 per the comparisons below).
	 */
	for (l = 0; l < 3; l++) {
		pmode_1.u64 = csr_rd(CVMX_GSERX_PLL_PX_MODE_1(l, qlm));
		lane_pmode_0.u64 = csr_rd(CVMX_GSERX_LANE_PX_MODE_0(l, qlm));
		lane_pmode_1.u64 = csr_rd(CVMX_GSERX_LANE_PX_MODE_1(l, qlm));

		pmode_1.s.pll_cpadj = 0x2;
		pmode_1.s.pll_opr = 0x0;
		pmode_1.s.pll_div = 0x1e;
		pmode_1.s.pll_pcie3en = 0x0;
		pmode_1.s.pll_16p5en = 0x0;

		lane_pmode_0.s.ctle = 0x0;
		lane_pmode_0.s.pcie = 0x0;
		lane_pmode_0.s.tx_ldiv = 0x0;
		lane_pmode_0.s.srate = 0;
		lane_pmode_0.s.tx_mode = 0x3;
		lane_pmode_0.s.rx_mode = 0x3;

		lane_pmode_1.s.vma_mm = 1;
		lane_pmode_1.s.vma_fine_cfg_sel = 0;
		lane_pmode_1.s.cdr_fgain = 0xa;
		lane_pmode_1.s.ph_acc_adj = 0x15;

		/* RX lane divider depends on the rate mode */
		if (l == R_2_5G_REFCLK100)
			lane_pmode_0.s.rx_ldiv = 0x2;
		else if (l == R_5G_REFCLK100)
			lane_pmode_0.s.rx_ldiv = 0x1;
		else
			lane_pmode_0.s.rx_ldiv = 0x0;

		csr_wr(CVMX_GSERX_PLL_PX_MODE_1(l, qlm), pmode_1.u64);
		csr_wr(CVMX_GSERX_LANE_PX_MODE_0(l, qlm), lane_pmode_0.u64);
		csr_wr(CVMX_GSERX_LANE_PX_MODE_1(l, qlm), lane_pmode_1.u64);
	}

	/* Common PLL loop settings for all three rate modes */
	for (l = 0; l < 3; l++) {
		pmode_0.u64 = csr_rd(CVMX_GSERX_PLL_PX_MODE_0(l, qlm));
		pmode_0.s.pll_icp = 0x1;
		pmode_0.s.pll_rloop = 0x3;
		pmode_0.s.pll_pcs_div = 0x5;
		csr_wr(CVMX_GSERX_PLL_PX_MODE_0(l, qlm), pmode_0.u64);
	}

	/* Per-slice RX SDLL / OOB clock control */
	for (i = 0; i < 2; i++) {
		cvmx_gserx_slicex_rx_sdll_ctrl_t rx_sdll;

		rx_sdll.u64 = csr_rd(CVMX_GSERX_SLICEX_RX_SDLL_CTRL(i, qlm));
		rx_sdll.s.pcs_sds_oob_clk_ctrl = 2;
		rx_sdll.s.pcs_sds_rx_sdll_tune = 0;
		rx_sdll.s.pcs_sds_rx_sdll_swsel = 0;
		csr_wr(CVMX_GSERX_SLICEX_RX_SDLL_CTRL(i, qlm), rx_sdll.u64);
	}

	/* Per-lane misc configuration: clear overrides/loopback, set bit
	 * order, and keep the EIE settle time programmed above.
	 */
	for (i = 0; i < 2; i++) {
		cvmx_gserx_lanex_misc_cfg_0_t misc_cfg;

		misc_cfg.u64 = csr_rd(CVMX_GSERX_LANEX_MISC_CFG_0(i, qlm));
		misc_cfg.s.use_pma_polarity = 0;
		misc_cfg.s.cfg_pcs_loopback = 0;
		misc_cfg.s.pcs_tx_mode_ovrrd_en = 0;
		misc_cfg.s.pcs_rx_mode_ovrrd_en = 0;
		misc_cfg.s.cfg_eie_det_cnt = 0;
		misc_cfg.s.eie_det_stl_on_time = 4;
		misc_cfg.s.eie_det_stl_off_time = 0;
		misc_cfg.s.tx_bit_order = 1;
		misc_cfg.s.rx_bit_order = 1;
		csr_wr(CVMX_GSERX_LANEX_MISC_CFG_0(i, qlm), misc_cfg.u64);
	}

	/* Re-check reset-ready after all the tuning writes */
	if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_QLM_STAT(qlm), cvmx_gserx_qlm_stat_t,
				  rst_rdy, ==, 1, 10000)) {
		printf("QLM%d: Timeout waiting for GSERX_QLM_STAT[rst_rdy]\n", qlm);
		return -1;
	}

	/* Wait for both SATA ports to report ready */
	if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_SATA_STATUS(qlm), cvmx_gserx_sata_status_t,
				  p0_rdy, ==, 1, 10000)) {
		printf("QLM4: Timeout waiting for GSERX_SATA_STATUS[p0_rdy]\n");
		return -1;
	}

	if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_SATA_STATUS(qlm), cvmx_gserx_sata_status_t,
				  p1_rdy, ==, 1, 10000)) {
		printf("QLM4: Timeout waiting for GSERX_SATA_STATUS[p1_rdy]\n");
		return -1;
	}

	udelay(2000);

	/* PHY is up: release the UCTL/UAHC resets */
	uctl_ctl.u64 = csr_rd(CVMX_SATA_UCTL_CTL);
	uctl_ctl.s.sata_uctl_rst = 0;
	uctl_ctl.s.sata_uahc_rst = 0;
	csr_wr(CVMX_SATA_UCTL_CTL, uctl_ctl.u64);

	udelay(1);

	/* Enable the UCTL CSR clock */
	uctl_ctl.u64 = csr_rd(CVMX_SATA_UCTL_CTL);
	uctl_ctl.s.csclk_en = 1;
	csr_wr(CVMX_SATA_UCTL_CTL, uctl_ctl.u64);

	/* Configure the shim endianness modes (same values as the
	 * CN70xx path).
	 */
	shim_cfg.u64 = csr_rd(CVMX_SATA_UCTL_SHIM_CFG);
	shim_cfg.s.dma_endian_mode = 1;
	shim_cfg.s.csr_endian_mode = 3;
	csr_wr(CVMX_SATA_UCTL_SHIM_CFG, shim_cfg.u64);

	return 0;
}
1276
/*
 * Initialize the SATA AHCI host controller (UAHC) behind DLM2 and bring
 * up both SATA ports.
 *
 * For each port the sequence is: clear latched errors/interrupts, enable
 * FIS receive / spin-up / power-on and start the port (PxCMD), issue a
 * COMRESET via PxSCTL[DET], poll PxSSTS for an established PHY link
 * (IPM=1, DET=3), then poll PxTFD until the device clears BSY/DRQ/ERR.
 * If the task-file never clears, the COMRESET is retried up to three
 * times per port.
 *
 * @param baud_mhz  maximum SATA baud rate: 1500 -> Gen1 limit,
 *                  3000 -> Gen2 limit, anything else -> Gen3 limit
 *
 * @return 0 always; per-port availability is only reported on the console.
 */
static int __dlm2_sata_uahc_init_cn70xx(int baud_mhz)
{
	cvmx_sata_uahc_gbl_cap_t gbl_cap;
	cvmx_sata_uahc_px_sctl_t sctl;
	cvmx_sata_uahc_gbl_pi_t pi;
	cvmx_sata_uahc_px_cmd_t cmd;
	cvmx_sata_uahc_px_sctl_t sctl0, sctl1;
	cvmx_sata_uahc_px_ssts_t ssts;
	cvmx_sata_uahc_px_tfd_t tfd;
	cvmx_sata_uahc_gbl_timer1ms_t gbl_timer1ms;
	u64 done;
	int result = -1;
	int retry_count = 0;
	int spd;

	/*
	 * Program the AHCI 1ms tick from the controller clock.
	 * NOTE(review): a_clk is not declared in this function — presumably
	 * a file-scope variable holding the UCTL a-clock frequency in Hz;
	 * confirm against the earlier UCTL init code.
	 */
	gbl_timer1ms.u32 = csr_rd32(CVMX_SATA_UAHC_GBL_TIMER1MS);
	gbl_timer1ms.s.timv = a_clk / 1000;
	csr_wr32(CVMX_SATA_UAHC_GBL_TIMER1MS, gbl_timer1ms.u32);
	/* Read back to ensure the write has taken effect */
	gbl_timer1ms.u32 = csr_rd32(CVMX_SATA_UAHC_GBL_TIMER1MS);

	/* Advertise staggered spin-up and mechanical presence switch */
	gbl_cap.u32 = csr_rd32(CVMX_SATA_UAHC_GBL_CAP);
	debug("%s: SATA_UAHC_GBL_CAP before: 0x%x\n", __func__, gbl_cap.u32);
	gbl_cap.s.sss = 1;
	gbl_cap.s.smps = 1;
	csr_wr32(CVMX_SATA_UAHC_GBL_CAP, gbl_cap.u32);
	gbl_cap.u32 = csr_rd32(CVMX_SATA_UAHC_GBL_CAP);
	debug("%s: SATA_UAHC_GBL_CAP after: 0x%x\n", __func__, gbl_cap.u32);

	/*
	 * Map the baud rate to the PxSCTL[SPD] speed limit:
	 * 1 = Gen1 (1.5Gb/s), 2 = Gen2 (3Gb/s), 3 = Gen3 (6Gb/s).
	 */
	if (baud_mhz == 1500)
		spd = 1;
	else if (baud_mhz == 3000)
		spd = 2;
	else
		spd = 3;

	/* Apply the speed limit to both ports */
	sctl.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SCTL(0));
	debug("%s: SATA_UAHC_P0_SCTL before: 0x%x\n", __func__, sctl.u32);
	sctl.s.spd = spd;
	csr_wr32(CVMX_SATA_UAHC_PX_SCTL(0), sctl.u32);
	sctl.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SCTL(0));
	debug("%s: SATA_UAHC_P0_SCTL after: 0x%x\n", __func__, sctl.u32);
	sctl.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SCTL(1));
	debug("%s: SATA_UAHC_P1_SCTL before: 0x%x\n", __func__, sctl.u32);
	sctl.s.spd = spd;
	csr_wr32(CVMX_SATA_UAHC_PX_SCTL(1), sctl.u32);
	sctl.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SCTL(1));
	debug("%s: SATA_UAHC_P1_SCTL after: 0x%x\n", __func__, sctl.u32);

	/* Mark both ports implemented (bitmask: ports 0 and 1) */
	pi.u32 = csr_rd32(CVMX_SATA_UAHC_GBL_PI);
	debug("%s: SATA_UAHC_GBL_PI before: 0x%x\n", __func__, pi.u32);
	pi.s.pi = 3;
	csr_wr32(CVMX_SATA_UAHC_GBL_PI, pi.u32);
	pi.u32 = csr_rd32(CVMX_SATA_UAHC_GBL_PI);
	debug("%s: SATA_UAHC_GBL_PI after: 0x%x\n", __func__, pi.u32);

retry0:
	/* Clear any latched SERR/interrupt status on port 0 (write-1-to-clear) */
	csr_wr32(CVMX_SATA_UAHC_PX_SERR(0), csr_rd32(CVMX_SATA_UAHC_PX_SERR(0)));
	csr_wr32(CVMX_SATA_UAHC_PX_IS(0), csr_rd32(CVMX_SATA_UAHC_PX_IS(0)));

	/* Enable FIS receive, spin-up, power-on; start the port (active ICC) */
	cmd.u32 = csr_rd32(CVMX_SATA_UAHC_PX_CMD(0));
	debug("%s: SATA_UAHC_P0_CMD before: 0x%x\n", __func__, cmd.u32);
	cmd.s.fre = 1;
	cmd.s.sud = 1;
	cmd.s.pod = 1;
	cmd.s.st = 1;
	cmd.s.icc = 1;
	cmd.s.fbscp = 1;
	csr_wr32(CVMX_SATA_UAHC_PX_CMD(0), cmd.u32);
	cmd.u32 = csr_rd32(CVMX_SATA_UAHC_PX_CMD(0));
	debug("%s: SATA_UAHC_P0_CMD after: 0x%x\n", __func__, cmd.u32);

	/*
	 * Issue a COMRESET on port 0.
	 * NOTE(review): DET is set to 1 here but only cleared back to 0 in
	 * the retry path below — confirm this matches the controller's
	 * expected COMRESET handshake.
	 */
	sctl0.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SCTL(0));
	sctl0.s.det = 1;
	csr_wr32(CVMX_SATA_UAHC_PX_SCTL(0), sctl0.u32);

	/* Wait for PHY link-up: IPM=1 (active), DET=3 (device + comms) */
	done = get_timer(0);
	while (1) {
		ssts.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SSTS(0));

		if (ssts.s.ipm == 1 && ssts.s.det == 3) {
			result = 0;
			break;
		} else if (get_timer(done) > 100) {
			/*
			 * NOTE(review): port 0 uses a 100ms link timeout while
			 * port 1 below uses 1000ms — confirm the asymmetry is
			 * intentional.
			 */
			result = -1;
			break;
		}

		udelay(100);
	}

	if (result != -1) {
		/* Clear all latched port errors accumulated during link-up */
		csr_wr32(CVMX_SATA_UAHC_PX_SERR(0), -1);

		/*
		 * Wait for the device to become ready: task-file status must
		 * have BSY (0x80), DRQ (0x08) and ERR (0x01) all clear.
		 */
		done = get_timer(0);
		while (1) {
			tfd.u32 = csr_rd32(CVMX_SATA_UAHC_PX_TFD(0));
			if ((tfd.s.sts & 0x89) == 0) {
				result = 0;
				break;
			} else if (get_timer(done) > 500) {
				if (retry_count < 3) {
					/* Re-issue COMRESET: pulse DET 1 -> 0 */
					sctl0.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SCTL(0));
					sctl0.s.det = 1;
					csr_wr32(CVMX_SATA_UAHC_PX_SCTL(0), sctl0.u32);
					udelay(1000);
					sctl0.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SCTL(0));
					sctl0.s.det = 0;
					csr_wr32(CVMX_SATA_UAHC_PX_SCTL(0), sctl0.u32);
					retry_count++;
					goto retry0;
				}
				result = -1;
				break;
			}

			udelay(100);
		}
	}

	if (result == -1)
		printf("SATA0: not available\n");
	else
		printf("SATA0: available\n");

	/* Begin COMRESET on port 1 before starting its bring-up sequence */
	sctl1.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SCTL(1));
	sctl1.s.det = 1;
	csr_wr32(CVMX_SATA_UAHC_PX_SCTL(1), sctl1.u32);

	result = -1;
	retry_count = 0;

retry1:
	/* Clear any latched SERR/interrupt status on port 1 (write-1-to-clear) */
	csr_wr32(CVMX_SATA_UAHC_PX_SERR(1), csr_rd32(CVMX_SATA_UAHC_PX_SERR(1)));
	csr_wr32(CVMX_SATA_UAHC_PX_IS(1), csr_rd32(CVMX_SATA_UAHC_PX_IS(1)));

	/* Enable FIS receive, spin-up, power-on; start the port (active ICC) */
	cmd.u32 = csr_rd32(CVMX_SATA_UAHC_PX_CMD(1));
	debug("%s: SATA_UAHC_P1_CMD before: 0x%x\n", __func__, cmd.u32);
	cmd.s.fre = 1;
	cmd.s.sud = 1;
	cmd.s.pod = 1;
	cmd.s.st = 1;
	cmd.s.icc = 1;
	cmd.s.fbscp = 1;
	csr_wr32(CVMX_SATA_UAHC_PX_CMD(1), cmd.u32);
	cmd.u32 = csr_rd32(CVMX_SATA_UAHC_PX_CMD(1));
	debug("%s: SATA_UAHC_P1_CMD after: 0x%x\n", __func__, cmd.u32);

	/* Wait for PHY link-up on port 1 (1000ms timeout) */
	done = get_timer(0);
	while (1) {
		ssts.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SSTS(1));

		if (ssts.s.ipm == 1 && ssts.s.det == 3) {
			result = 0;
			break;
		} else if (get_timer(done) > 1000) {
			result = -1;
			break;
		}

		udelay(100);
	}

	if (result != -1) {
		/* Clear latched port errors accumulated during link-up */
		csr_wr32(CVMX_SATA_UAHC_PX_SERR(1), csr_rd32(CVMX_SATA_UAHC_PX_SERR(1)));

		/* Wait for BSY/DRQ/ERR clear in the task file, with retries */
		done = get_timer(0);
		while (1) {
			tfd.u32 = csr_rd32(CVMX_SATA_UAHC_PX_TFD(1));
			if ((tfd.s.sts & 0x89) == 0) {
				result = 0;
				break;
			} else if (get_timer(done) > 500) {
				if (retry_count < 3) {
					/* Re-issue COMRESET: pulse DET 1 -> 0 */
					sctl0.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SCTL(1));
					sctl0.s.det = 1;
					csr_wr32(CVMX_SATA_UAHC_PX_SCTL(1), sctl0.u32);
					udelay(1000);
					sctl0.u32 = csr_rd32(CVMX_SATA_UAHC_PX_SCTL(1));
					sctl0.s.det = 0;
					csr_wr32(CVMX_SATA_UAHC_PX_SCTL(1), sctl0.u32);
					retry_count++;
					goto retry1;
				}
				result = -1;
				break;
			}

			udelay(100);
		}
	}

	if (result == -1)
		printf("SATA1: not available\n");
	else
		printf("SATA1: available\n");

	return 0;
}
1516
/*
 * Initialize the SATA block and run the built-in self test (BIST) on the
 * UCTL/UAHC RAMs, then initialize the AHCI controller.
 *
 * Sequence: bring the UCTL out of reset, configure the DLM/GSER lanes
 * for SATA, start BIST and poll SATA_UCTL_BIST_STATUS until all six
 * not-done flags clear (1s timeout), run the UAHC port bring-up, and
 * finally set the CSR endian shim mode.
 *
 * @param qlm           QLM/DLM to configure
 * @param baud_mhz      SATA baud rate in MHz
 * @param ref_clk_sel   reference clock selection
 * @param ref_clk_input reference clock input pin selection
 *
 * @return 0 on success, -1 on failure
 */
static int __sata_bist_cn70xx(int qlm, int baud_mhz, int ref_clk_sel, int ref_clk_input)
{
	cvmx_sata_uctl_bist_status_t bist_status;
	cvmx_sata_uctl_ctl_t uctl_ctl;
	cvmx_sata_uctl_shim_cfg_t shim_cfg;
	u64 done;
	int result = -1;

	debug("%s(%d, %d, %d, %d)\n", __func__, qlm, baud_mhz, ref_clk_sel, ref_clk_input);
	/*
	 * NOTE(review): this initial read result is overwritten before use —
	 * presumably it latches/clears prior BIST state; confirm against the
	 * hardware manual before removing.
	 */
	bist_status.u64 = csr_rd(CVMX_SATA_UCTL_BIST_STATUS);

	{
		/* Bring the SATA UCTL out of reset and enable its clocks */
		if (__dlm2_sata_uctl_init_cn70xx()) {
			printf("ERROR: Failed to initialize SATA UCTL CSRs\n");
			return -1;
		}
		/* Configure the SerDes lanes for SATA (model-specific path) */
		if (OCTEON_IS_MODEL(OCTEON_CN73XX))
			result = __sata_dlm_init_cn73xx(qlm, baud_mhz, ref_clk_sel, ref_clk_input);
		else
			result = __sata_dlm_init_cn70xx(qlm, baud_mhz, ref_clk_sel, ref_clk_input);
		if (result) {
			printf("ERROR: Failed to initialize SATA GSER CSRs\n");
			return -1;
		}

		/* Kick off BIST on the UCTL/UAHC memories */
		uctl_ctl.u64 = csr_rd(CVMX_SATA_UCTL_CTL);
		uctl_ctl.s.start_bist = 1;
		csr_wr(CVMX_SATA_UCTL_CTL, uctl_ctl.u64);

		/* Poll until every BIST "not done" flag clears (1s timeout) */
		done = get_timer(0);
		while (1) {
			bist_status.u64 = csr_rd(CVMX_SATA_UCTL_BIST_STATUS);
			if ((bist_status.s.uctl_xm_r_bist_ndone |
			     bist_status.s.uctl_xm_w_bist_ndone |
			     bist_status.s.uahc_p0_rxram_bist_ndone |
			     bist_status.s.uahc_p1_rxram_bist_ndone |
			     bist_status.s.uahc_p0_txram_bist_ndone |
			     bist_status.s.uahc_p1_txram_bist_ndone) == 0) {
				result = 0;
				break;
			} else if (get_timer(done) > 1000) {
				result = -1;
				break;
			}

			udelay(100);
		}
		if (result == -1) {
			printf("ERROR: SATA_UCTL_BIST_STATUS = 0x%llx\n",
			       (unsigned long long)bist_status.u64);
			return -1;
		}

		/* BIST passed: bring up the AHCI controller and its ports */
		debug("%s: Initializing UAHC\n", __func__);
		if (__dlm2_sata_uahc_init_cn70xx(baud_mhz)) {
			printf("ERROR: Failed to initialize SATA UAHC CSRs\n");
			return -1;
		}
	}

	/*
	 * Set the CSR endian shim mode (mode 1) for subsequent AHCI
	 * register accesses.
	 */
	shim_cfg.u64 = csr_rd(CVMX_SATA_UCTL_SHIM_CFG);
	shim_cfg.s.csr_endian_mode = 1;
	csr_wr(CVMX_SATA_UCTL_SHIM_CFG, shim_cfg.u64);

	return 0;
}
1587
/*
 * Configure a DLM for SATA operation and run the controller self-test
 * and bring-up. Thin wrapper around __sata_bist_cn70xx().
 *
 * @param qlm           QLM/DLM to configure
 * @param baud_mhz      SATA baud rate in MHz
 * @param ref_clk_sel   reference clock selection
 * @param ref_clk_input reference clock input pin selection
 *
 * @return 0 on success, -1 on failure
 */
static int __setup_sata(int qlm, int baud_mhz, int ref_clk_sel, int ref_clk_input)
{
	int ret;

	debug("%s(%d, %d, %d, %d)\n", __func__, qlm, baud_mhz, ref_clk_sel, ref_clk_input);

	ret = __sata_bist_cn70xx(qlm, baud_mhz, ref_clk_sel, ref_clk_input);

	return ret;
}
1593
/*
 * Configure a CN70XX DLM for PCIe operation (root-complex mode only).
 *
 * Sequence: select the reference clock input, (for DLM1) program the
 * PIPE port mapping, program the reference clock divider and MPLL
 * multiplier around a PHY reset pulse, then configure the PEM(s)
 * attached to this DLM: mode/lane width, drive the reset, release the
 * PIPE reset and turn the PEM on.
 *
 * @param qlm            DLM to configure (1 or 2)
 * @param mode           desired PCIe mode (CVMX_QLM_MODE_PCIE*)
 * @param gen2           non-zero to configure Gen2, otherwise Gen1
 * @param rc             non-zero for root complex; 0 (endpoint) is a no-op
 * @param ref_clk_sel    0 = 100 MHz, 1 = 125 MHz reference clock
 * @param ref_clk_input  reference clock input pin (0 or 1 for DLM1/2)
 *
 * @return 0 on success, -1 on parameter error
 */
static int __dlmx_setup_pcie_cn70xx(int qlm, enum cvmx_qlm_mode mode, int gen2, int rc,
				    int ref_clk_sel, int ref_clk_input)
{
	cvmx_gserx_dlmx_phy_reset_t dlmx_phy_reset;
	cvmx_gserx_dlmx_test_powerdown_t dlmx_test_powerdown;
	cvmx_gserx_dlmx_mpll_multiplier_t mpll_multiplier;
	cvmx_gserx_dlmx_ref_clkdiv2_t ref_clkdiv2;
	/* MPLL multipliers indexed by ref_clk_sel: 100 MHz -> 35, 125 MHz -> 56 */
	static const u8 ref_clk_mult[2] = { 35, 56 };

	debug("%s(%d, %d, %d, %d, %d, %d)\n", __func__, qlm, mode, gen2, rc, ref_clk_sel,
	      ref_clk_input);
	if (rc == 0) {
		/* Endpoint mode is configured by the remote host, not here */
		debug("Skipping initializing PCIe dlm %d in endpoint mode\n", qlm);
		return 0;
	}

	if (qlm > 0 && ref_clk_input > 1) {
		printf("%s: Error: ref_clk_input can only be 0 or 1 for QLM %d\n",
		       __func__, qlm);
		return -1;
	}

	if (ref_clk_sel > OCTEON_QLM_REF_CLK_125MHZ) {
		printf("%s: Error: ref_clk_sel can only be 100 or 125 MHZ.\n", __func__);
		return -1;
	}

	/* Select which reference clock input pin feeds this DLM */
	csr_wr(CVMX_GSERX_DLMX_REFCLK_SEL(qlm, 0), ref_clk_input);

	/*
	 * DLM1 drives PEM0 (and optionally PEM1): program the PIPE port
	 * mapping according to the requested lane split.
	 */
	if (qlm == 1) {
		cvmx_gserx_pcie_pipe_port_sel_t pipe_port;

		pipe_port.u64 = csr_rd(CVMX_GSERX_PCIE_PIPE_PORT_SEL(0));
		pipe_port.s.cfg_pem1_dlm2 = (mode == CVMX_QLM_MODE_PCIE_1X1) ? 1 : 0;
		pipe_port.s.pipe_port_sel =
			(mode == CVMX_QLM_MODE_PCIE) ? 1 :
			(mode == CVMX_QLM_MODE_PCIE_1X2) ? 2 :
			(mode == CVMX_QLM_MODE_PCIE_1X1) ? 3 :
			(mode == CVMX_QLM_MODE_PCIE_2X1) ? 3 :
			0;
		csr_wr(CVMX_GSERX_PCIE_PIPE_PORT_SEL(0), pipe_port.u64);
	}

	/* Divide the reference clock by 2 for the 125 MHz input */
	ref_clkdiv2.u64 = csr_rd(CVMX_GSERX_DLMX_REF_CLKDIV2(qlm, 0));
	ref_clkdiv2.s.ref_clkdiv2 = ref_clk_sel > 0;
	csr_wr(CVMX_GSERX_DLMX_REF_CLKDIV2(qlm, 0), ref_clkdiv2.u64);

	/* Assert PHY reset while reprogramming the PLL */
	dlmx_phy_reset.u64 = csr_rd(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0));
	dlmx_phy_reset.s.phy_reset = 1;
	csr_wr(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0), dlmx_phy_reset.u64);

	/* Program the MPLL multiplier for the selected reference clock */
	mpll_multiplier.u64 = csr_rd(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0));
	mpll_multiplier.s.mpll_multiplier = ref_clk_mult[ref_clk_sel];
	debug("%s: Setting MPLL multiplier to %d\n", __func__,
	      (int)mpll_multiplier.s.mpll_multiplier);
	csr_wr(CVMX_GSERX_DLMX_MPLL_MULTIPLIER(qlm, 0), mpll_multiplier.u64);

	/* Power the lanes up (leave test powerdown) */
	dlmx_test_powerdown.u64 = csr_rd(CVMX_GSERX_DLMX_TEST_POWERDOWN(qlm, 0));
	dlmx_test_powerdown.s.test_powerdown = 0;
	csr_wr(CVMX_GSERX_DLMX_TEST_POWERDOWN(qlm, 0), dlmx_test_powerdown.u64);

	/* Release the PHY reset now that the PLL is configured */
	dlmx_phy_reset.u64 = csr_rd(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0));
	dlmx_phy_reset.s.phy_reset = 0;
	csr_wr(CVMX_GSERX_DLMX_PHY_RESET(qlm, 0), dlmx_phy_reset.u64);

	/*
	 * Configure the PEM controller(s) behind this DLM.  DLM1 serves
	 * PEM0 (x4/x2/x1) and, in 2x1 mode, PEM1 as well; DLM2 serves
	 * PEM1 (x2) or PEM1+PEM2 (2x1).
	 */
	if (qlm == 1) {
		cvmx_pemx_cfg_t pemx_cfg;
		cvmx_pemx_on_t pemx_on;
		cvmx_gserx_pcie_pipe_rst_t pipe_rst;
		cvmx_rst_ctlx_t rst_ctl;

		switch (mode) {
		case CVMX_QLM_MODE_PCIE:
		case CVMX_QLM_MODE_PCIE_1X2:
		case CVMX_QLM_MODE_PCIE_1X1:
			/* Single PEM0: set host mode and generation/lane width */
			pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(0));
			pemx_cfg.cn70xx.hostmd = rc;
			if (mode == CVMX_QLM_MODE_PCIE_1X1) {
				pemx_cfg.cn70xx.md =
					gen2 ? CVMX_PEM_MD_GEN2_1LANE : CVMX_PEM_MD_GEN1_1LANE;
			} else if (mode == CVMX_QLM_MODE_PCIE) {
				pemx_cfg.cn70xx.md =
					gen2 ? CVMX_PEM_MD_GEN2_4LANE : CVMX_PEM_MD_GEN1_4LANE;
			} else {
				pemx_cfg.cn70xx.md =
					gen2 ? CVMX_PEM_MD_GEN2_2LANE : CVMX_PEM_MD_GEN1_2LANE;
			}
			csr_wr(CVMX_PEMX_CFG(0), pemx_cfg.u64);

			/* Drive PERST# from this port */
			rst_ctl.u64 = csr_rd(CVMX_RST_CTLX(0));
			rst_ctl.s.rst_drv = 1;
			csr_wr(CVMX_RST_CTLX(0), rst_ctl.u64);

			/* Release the PIPE0 reset, then enable the PEM */
			pipe_rst.u64 = csr_rd(CVMX_GSERX_PCIE_PIPE_RST(0));
			pipe_rst.s.pipe0_rst = 0;
			csr_wr(CVMX_GSERX_PCIE_PIPE_RST(0), pipe_rst.u64);

			pemx_on.u64 = csr_rd(CVMX_PEMX_ON(0));
			pemx_on.s.pemon = 1;
			csr_wr(CVMX_PEMX_ON(0), pemx_on.u64);
			break;
		case CVMX_QLM_MODE_PCIE_2X1:
			/* Two x1 ports: configure PEM0 on pipe0 ... */
			pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(0));
			pemx_cfg.cn70xx.hostmd = rc;
			pemx_cfg.cn70xx.md = gen2 ? CVMX_PEM_MD_GEN2_1LANE : CVMX_PEM_MD_GEN1_1LANE;
			csr_wr(CVMX_PEMX_CFG(0), pemx_cfg.u64);

			rst_ctl.u64 = csr_rd(CVMX_RST_CTLX(0));
			rst_ctl.s.rst_drv = 1;
			csr_wr(CVMX_RST_CTLX(0), rst_ctl.u64);

			pipe_rst.u64 = csr_rd(CVMX_GSERX_PCIE_PIPE_RST(0));
			pipe_rst.s.pipe0_rst = 0;
			csr_wr(CVMX_GSERX_PCIE_PIPE_RST(0), pipe_rst.u64);

			pemx_on.u64 = csr_rd(CVMX_PEMX_ON(0));
			pemx_on.s.pemon = 1;
			csr_wr(CVMX_PEMX_ON(0), pemx_on.u64);

			/* ... and PEM1 on pipe1 */
			pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(1));
			pemx_cfg.cn70xx.hostmd = 1;
			pemx_cfg.cn70xx.md = gen2 ? CVMX_PEM_MD_GEN2_1LANE : CVMX_PEM_MD_GEN1_1LANE;
			csr_wr(CVMX_PEMX_CFG(1), pemx_cfg.u64);
			rst_ctl.u64 = csr_rd(CVMX_RST_CTLX(1));
			rst_ctl.s.rst_drv = 1;
			csr_wr(CVMX_RST_CTLX(1), rst_ctl.u64);

			pipe_rst.u64 = csr_rd(CVMX_GSERX_PCIE_PIPE_RST(0));
			pipe_rst.s.pipe1_rst = 0;
			csr_wr(CVMX_GSERX_PCIE_PIPE_RST(0), pipe_rst.u64);
			pemx_on.u64 = csr_rd(CVMX_PEMX_ON(1));
			pemx_on.s.pemon = 1;
			csr_wr(CVMX_PEMX_ON(1), pemx_on.u64);
			break;
		default:
			break;
		}
	} else {
		cvmx_pemx_cfg_t pemx_cfg;
		cvmx_pemx_on_t pemx_on;
		cvmx_gserx_pcie_pipe_rst_t pipe_rst;
		cvmx_rst_ctlx_t rst_ctl;

		switch (mode) {
		case CVMX_QLM_MODE_PCIE_1X2:
			/* PEM1 as a single x2 port on pipe1 */
			pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(1));
			pemx_cfg.cn70xx.hostmd = 1;
			pemx_cfg.cn70xx.md = gen2 ? CVMX_PEM_MD_GEN2_2LANE : CVMX_PEM_MD_GEN1_2LANE;
			csr_wr(CVMX_PEMX_CFG(1), pemx_cfg.u64);

			rst_ctl.u64 = csr_rd(CVMX_RST_CTLX(1));
			rst_ctl.s.rst_drv = 1;
			csr_wr(CVMX_RST_CTLX(1), rst_ctl.u64);

			pipe_rst.u64 = csr_rd(CVMX_GSERX_PCIE_PIPE_RST(0));
			pipe_rst.s.pipe1_rst = 0;
			csr_wr(CVMX_GSERX_PCIE_PIPE_RST(0), pipe_rst.u64);

			pemx_on.u64 = csr_rd(CVMX_PEMX_ON(1));
			pemx_on.s.pemon = 1;
			csr_wr(CVMX_PEMX_ON(1), pemx_on.u64);
			break;
		case CVMX_QLM_MODE_PCIE_2X1:
			/* Two x1 ports: PEM1 on pipe2 ... */
			pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(1));
			pemx_cfg.cn70xx.hostmd = 1;
			pemx_cfg.cn70xx.md = gen2 ? CVMX_PEM_MD_GEN2_1LANE : CVMX_PEM_MD_GEN1_1LANE;
			csr_wr(CVMX_PEMX_CFG(1), pemx_cfg.u64);

			rst_ctl.u64 = csr_rd(CVMX_RST_CTLX(1));
			rst_ctl.s.rst_drv = 1;
			csr_wr(CVMX_RST_CTLX(1), rst_ctl.u64);

			pipe_rst.u64 = csr_rd(CVMX_GSERX_PCIE_PIPE_RST(0));
			pipe_rst.s.pipe2_rst = 0;
			csr_wr(CVMX_GSERX_PCIE_PIPE_RST(0), pipe_rst.u64);

			pemx_on.u64 = csr_rd(CVMX_PEMX_ON(1));
			pemx_on.s.pemon = 1;
			csr_wr(CVMX_PEMX_ON(1), pemx_on.u64);

			/* ... and PEM2 on pipe3 */
			pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(2));
			pemx_cfg.cn70xx.hostmd = 1;
			pemx_cfg.cn70xx.md = gen2 ? CVMX_PEM_MD_GEN2_1LANE : CVMX_PEM_MD_GEN1_1LANE;
			csr_wr(CVMX_PEMX_CFG(2), pemx_cfg.u64);

			rst_ctl.u64 = csr_rd(CVMX_RST_CTLX(2));
			rst_ctl.s.rst_drv = 1;
			csr_wr(CVMX_RST_CTLX(2), rst_ctl.u64);

			pipe_rst.u64 = csr_rd(CVMX_GSERX_PCIE_PIPE_RST(0));
			pipe_rst.s.pipe3_rst = 0;
			csr_wr(CVMX_GSERX_PCIE_PIPE_RST(0), pipe_rst.u64);

			pemx_on.u64 = csr_rd(CVMX_PEMX_ON(2));
			pemx_on.s.pemon = 1;
			csr_wr(CVMX_PEMX_ON(2), pemx_on.u64);
			break;
		default:
			break;
		}
	}
	return 0;
}
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887static int octeon_configure_qlm_cn70xx(int qlm, int speed, int mode, int rc, int gen2,
1888 int ref_clk_sel, int ref_clk_input)
1889{
1890 debug("%s(%d, %d, %d, %d, %d, %d, %d)\n", __func__, qlm, speed, mode, rc, gen2, ref_clk_sel,
1891 ref_clk_input);
1892 switch (qlm) {
1893 case 0: {
1894 int is_sff7000_rxaui = 0;
1895 cvmx_gmxx_inf_mode_t inf_mode0, inf_mode1;
1896
1897 inf_mode0.u64 = csr_rd(CVMX_GMXX_INF_MODE(0));
1898 inf_mode1.u64 = csr_rd(CVMX_GMXX_INF_MODE(1));
1899 if (inf_mode0.s.en || inf_mode1.s.en) {
1900 debug("DLM0 already configured\n");
1901 return -1;
1902 }
1903
1904 switch (mode) {
1905 case CVMX_QLM_MODE_SGMII_SGMII:
1906 debug(" Mode SGMII SGMII\n");
1907 inf_mode0.s.mode = CVMX_GMX_INF_MODE_SGMII;
1908 inf_mode1.s.mode = CVMX_GMX_INF_MODE_SGMII;
1909 break;
1910 case CVMX_QLM_MODE_SGMII_QSGMII:
1911 debug(" Mode SGMII QSGMII\n");
1912 inf_mode0.s.mode = CVMX_GMX_INF_MODE_SGMII;
1913 inf_mode1.s.mode = CVMX_GMX_INF_MODE_QSGMII;
1914 break;
1915 case CVMX_QLM_MODE_SGMII_DISABLED:
1916 debug(" Mode SGMII Disabled\n");
1917 inf_mode0.s.mode = CVMX_GMX_INF_MODE_SGMII;
1918 inf_mode1.s.mode = CVMX_GMX_INF_MODE_DISABLED;
1919 break;
1920 case CVMX_QLM_MODE_DISABLED_SGMII:
1921 debug("Mode Disabled SGMII\n");
1922 inf_mode0.s.mode = CVMX_GMX_INF_MODE_DISABLED;
1923 inf_mode1.s.mode = CVMX_GMX_INF_MODE_SGMII;
1924 break;
1925 case CVMX_QLM_MODE_QSGMII_SGMII:
1926 debug(" Mode QSGMII SGMII\n");
1927 inf_mode0.s.mode = CVMX_GMX_INF_MODE_QSGMII;
1928 inf_mode1.s.mode = CVMX_GMX_INF_MODE_SGMII;
1929 break;
1930 case CVMX_QLM_MODE_QSGMII_QSGMII:
1931 debug(" Mode QSGMII QSGMII\n");
1932 inf_mode0.s.mode = CVMX_GMX_INF_MODE_QSGMII;
1933 inf_mode1.s.mode = CVMX_GMX_INF_MODE_QSGMII;
1934 break;
1935 case CVMX_QLM_MODE_QSGMII_DISABLED:
1936 debug(" Mode QSGMII Disabled\n");
1937 inf_mode0.s.mode = CVMX_GMX_INF_MODE_QSGMII;
1938 inf_mode1.s.mode = CVMX_GMX_INF_MODE_DISABLED;
1939 break;
1940 case CVMX_QLM_MODE_DISABLED_QSGMII:
1941 debug("Mode Disabled QSGMII\n");
1942 inf_mode0.s.mode = CVMX_GMX_INF_MODE_DISABLED;
1943 inf_mode1.s.mode = CVMX_GMX_INF_MODE_QSGMII;
1944 break;
1945 case CVMX_QLM_MODE_RXAUI:
1946 debug(" Mode RXAUI\n");
1947 inf_mode0.s.mode = CVMX_GMX_INF_MODE_RXAUI;
1948 inf_mode1.s.mode = CVMX_GMX_INF_MODE_DISABLED;
1949
1950 break;
1951 default:
1952 debug(" Mode Disabled Disabled\n");
1953 inf_mode0.s.mode = CVMX_GMX_INF_MODE_DISABLED;
1954 inf_mode1.s.mode = CVMX_GMX_INF_MODE_DISABLED;
1955 break;
1956 }
1957 csr_wr(CVMX_GMXX_INF_MODE(0), inf_mode0.u64);
1958 csr_wr(CVMX_GMXX_INF_MODE(1), inf_mode1.u64);
1959
1960
1961 if (__dlm_setup_pll_cn70xx(qlm, speed, ref_clk_sel, ref_clk_input,
1962 is_sff7000_rxaui))
1963 return -1;
1964
1965
1966 if (__dlm0_setup_tx_cn70xx(speed, ref_clk_sel))
1967 return -1;
1968
1969
1970 if (__dlm0_setup_rx_cn70xx(speed, ref_clk_sel))
1971 return -1;
1972
1973
1974 inf_mode0.u64 = csr_rd(CVMX_GMXX_INF_MODE(0));
1975 if (inf_mode0.s.mode != CVMX_GMX_INF_MODE_DISABLED)
1976 inf_mode0.s.en = 1;
1977 csr_wr(CVMX_GMXX_INF_MODE(0), inf_mode0.u64);
1978 inf_mode1.u64 = csr_rd(CVMX_GMXX_INF_MODE(1));
1979 if (inf_mode1.s.mode != CVMX_GMX_INF_MODE_DISABLED)
1980 inf_mode1.s.en = 1;
1981 csr_wr(CVMX_GMXX_INF_MODE(1), inf_mode1.u64);
1982 break;
1983 }
1984 case 1:
1985 switch (mode) {
1986 case CVMX_QLM_MODE_PCIE:
1987 debug(" Mode PCIe\n");
1988 if (__dlmx_setup_pcie_cn70xx(1, mode, gen2, rc, ref_clk_sel, ref_clk_input))
1989 return -1;
1990 if (__dlmx_setup_pcie_cn70xx(2, mode, gen2, rc, ref_clk_sel, ref_clk_input))
1991 return -1;
1992 break;
1993 case CVMX_QLM_MODE_PCIE_1X2:
1994 case CVMX_QLM_MODE_PCIE_2X1:
1995 case CVMX_QLM_MODE_PCIE_1X1:
1996 debug(" Mode PCIe 1x2, 2x1 or 1x1\n");
1997 if (__dlmx_setup_pcie_cn70xx(qlm, mode, gen2, rc, ref_clk_sel,
1998 ref_clk_input))
1999 return -1;
2000 break;
2001 case CVMX_QLM_MODE_DISABLED:
2002 debug(" Mode disabled\n");
2003 break;
2004 default:
2005 debug("DLM1 illegal mode specified\n");
2006 return -1;
2007 }
2008 break;
2009 case 2:
2010 switch (mode) {
2011 case CVMX_QLM_MODE_SATA_2X1:
2012 debug("%s: qlm 2, mode is SATA 2x1\n", __func__);
2013
2014 if (__setup_sata(qlm, speed, ref_clk_sel, ref_clk_input))
2015 return -1;
2016 break;
2017 case CVMX_QLM_MODE_PCIE:
2018 debug(" Mode PCIe\n");
2019
2020
2021 break;
2022 case CVMX_QLM_MODE_PCIE_1X2:
2023 case CVMX_QLM_MODE_PCIE_2X1:
2024 debug(" Mode PCIe 1x2 or 2x1\n");
2025 if (__dlmx_setup_pcie_cn70xx(qlm, mode, gen2, rc, ref_clk_sel,
2026 ref_clk_input))
2027 return -1;
2028 break;
2029 case CVMX_QLM_MODE_DISABLED:
2030 debug(" Mode Disabled\n");
2031 break;
2032 default:
2033 debug("DLM2 illegal mode specified\n");
2034 return -1;
2035 }
2036 default:
2037 return -1;
2038 }
2039
2040 return 0;
2041}
2042
2043
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053void octeon_qlm_dfe_disable(int node, int qlm, int lane, int baud_mhz, int mode)
2054{
2055 int num_lanes = cvmx_qlm_get_lanes(qlm);
2056 int l;
2057 cvmx_gserx_lanex_rx_loop_ctrl_t loop_ctrl;
2058 cvmx_gserx_lanex_rx_valbbd_ctrl_0_t ctrl_0;
2059 cvmx_gserx_lanex_rx_valbbd_ctrl_1_t ctrl_1;
2060 cvmx_gserx_lanex_rx_valbbd_ctrl_2_t ctrl_2;
2061 cvmx_gserx_lane_vma_fine_ctrl_2_t lane_vma_fine_ctrl_2;
2062
2063
2064 if (baud_mhz < 5000)
2065 return;
2066
2067
2068 switch (mode) {
2069 case CVMX_QLM_MODE_10G_KR_1X2:
2070 case CVMX_QLM_MODE_10G_KR:
2071 case CVMX_QLM_MODE_40G_KR4:
2072 return;
2073 case CVMX_QLM_MODE_PCIE_1X1:
2074 case CVMX_QLM_MODE_PCIE_2X1:
2075 case CVMX_QLM_MODE_PCIE_1X2:
2076 case CVMX_QLM_MODE_PCIE:
2077 case CVMX_QLM_MODE_PCIE_1X8:
2078 return;
2079 case CVMX_QLM_MODE_SATA_2X1:
2080 return;
2081 default:
2082 break;
2083 }
2084
2085
2086 lane_vma_fine_ctrl_2.u64 = csr_rd_node(node, CVMX_GSERX_LANE_VMA_FINE_CTRL_2(qlm));
2087 lane_vma_fine_ctrl_2.s.rx_prectle_gain_min_fine = 0;
2088 csr_wr_node(node, CVMX_GSERX_LANE_VMA_FINE_CTRL_2(qlm), lane_vma_fine_ctrl_2.u64);
2089
2090 for (l = 0; l < num_lanes; l++) {
2091 if (lane != -1 && lane != l)
2092 continue;
2093
2094
2095
2096
2097
2098 loop_ctrl.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_LOOP_CTRL(l, qlm));
2099 loop_ctrl.s.cfg_rx_lctrl = loop_ctrl.s.cfg_rx_lctrl & 0x3fd;
2100 csr_wr_node(node, CVMX_GSERX_LANEX_RX_LOOP_CTRL(l, qlm), loop_ctrl.u64);
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110 ctrl_1.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_VALBBD_CTRL_1(l, qlm));
2111 ctrl_1.s.dfe_c3_mval = 0;
2112 ctrl_1.s.dfe_c3_msgn = 0;
2113 ctrl_1.s.dfe_c2_mval = 0;
2114 ctrl_1.s.dfe_c2_msgn = 0;
2115 ctrl_1.s.dfe_c2_mval = 0;
2116 ctrl_1.s.dfe_c1_mval = 0;
2117 ctrl_1.s.dfe_c1_msgn = 0;
2118 csr_wr_node(node, CVMX_GSERX_LANEX_RX_VALBBD_CTRL_1(l, qlm), ctrl_1.u64);
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128 ctrl_0.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_VALBBD_CTRL_0(l, qlm));
2129 ctrl_0.s.dfe_gain = 0x1;
2130 ctrl_0.s.dfe_c5_mval = 0;
2131 ctrl_0.s.dfe_c5_msgn = 0;
2132 ctrl_0.s.dfe_c4_mval = 0;
2133 ctrl_0.s.dfe_c4_msgn = 0;
2134 csr_wr_node(node, CVMX_GSERX_LANEX_RX_VALBBD_CTRL_0(l, qlm), ctrl_0.u64);
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145 ctrl_2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_VALBBD_CTRL_2(l, qlm));
2146 ctrl_2.s.dfe_ovrd_en = 0x1;
2147 ctrl_2.s.dfe_c5_ovrd_val = 0x1;
2148 ctrl_2.s.dfe_c4_ovrd_val = 0x1;
2149 ctrl_2.s.dfe_c3_ovrd_val = 0x1;
2150 ctrl_2.s.dfe_c2_ovrd_val = 0x1;
2151 ctrl_2.s.dfe_c1_ovrd_val = 0x1;
2152 csr_wr_node(node, CVMX_GSERX_LANEX_RX_VALBBD_CTRL_2(l, qlm), ctrl_2.u64);
2153 }
2154}
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174int octeon_qlm_dfe_disable_ctle_agc(int node, int qlm, int lane, int baud_mhz, int mode,
2175 int ctle_zero, int agc_pre_ctle, int agc_post_ctle)
2176{
2177 int num_lanes = cvmx_qlm_get_lanes(qlm);
2178 int l;
2179 cvmx_gserx_lanex_rx_loop_ctrl_t loop_ctrl;
2180 cvmx_gserx_lanex_rx_valbbd_ctrl_0_t ctrl_0;
2181 cvmx_gserx_lanex_pwr_ctrl_t lanex_pwr_ctrl;
2182 cvmx_gserx_lane_mode_t lmode;
2183 cvmx_gserx_lane_px_mode_1_t px_mode_1;
2184 cvmx_gserx_lanex_rx_cfg_5_t rx_cfg_5;
2185 cvmx_gserx_lanex_rx_cfg_2_t rx_cfg_2;
2186 cvmx_gserx_lanex_rx_ctle_ctrl_t ctle_ctrl;
2187
2188
2189 if (ctle_zero < 0 || ctle_zero > 15) {
2190 printf("Error: N%d.QLM%d: Invalid CTLE_ZERO(%d). Must be between -1 and 15.\n",
2191 node, qlm, ctle_zero);
2192 return -1;
2193 }
2194 if (agc_pre_ctle < 0 || agc_pre_ctle > 15) {
2195 printf("Error: N%d.QLM%d: Invalid AGC_Pre_CTLE(%d)\n",
2196 node, qlm, agc_pre_ctle);
2197 return -1;
2198 }
2199
2200 if (agc_post_ctle < 0 || agc_post_ctle > 15) {
2201 printf("Error: N%d.QLM%d: Invalid AGC_Post_CTLE(%d)\n",
2202 node, qlm, agc_post_ctle);
2203 return -1;
2204 }
2205
2206
2207 if (baud_mhz < 5000)
2208 return 0;
2209
2210
2211 switch (mode) {
2212 case CVMX_QLM_MODE_10G_KR_1X2:
2213 case CVMX_QLM_MODE_10G_KR:
2214 case CVMX_QLM_MODE_40G_KR4:
2215 return 0;
2216 case CVMX_QLM_MODE_PCIE_1X1:
2217 case CVMX_QLM_MODE_PCIE_2X1:
2218 case CVMX_QLM_MODE_PCIE_1X2:
2219 case CVMX_QLM_MODE_PCIE:
2220 case CVMX_QLM_MODE_PCIE_1X8:
2221 return 0;
2222 case CVMX_QLM_MODE_SATA_2X1:
2223 return 0;
2224 default:
2225 break;
2226 }
2227
2228 lmode.u64 = csr_rd_node(node, CVMX_GSERX_LANE_MODE(qlm));
2229
2230
2231 px_mode_1.u64 = csr_rd_node(node, CVMX_GSERX_LANE_PX_MODE_1(lmode.s.lmode, qlm));
2232 px_mode_1.s.vma_mm = 1;
2233 csr_wr_node(node, CVMX_GSERX_LANE_PX_MODE_1(lmode.s.lmode, qlm), px_mode_1.u64);
2234
2235
2236 octeon_qlm_dfe_disable(node, qlm, lane, baud_mhz, mode);
2237
2238 for (l = 0; l < num_lanes; l++) {
2239 if (lane != -1 && lane != l)
2240 continue;
2241
2242
2243 ctrl_0.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_VALBBD_CTRL_0(l, qlm));
2244 ctrl_0.s.agc_gain = 0x2;
2245 csr_wr_node(node, CVMX_GSERX_LANEX_RX_VALBBD_CTRL_0(l, qlm), ctrl_0.u64);
2246
2247
2248
2249
2250
2251 loop_ctrl.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_LOOP_CTRL(l, qlm));
2252 loop_ctrl.s.cfg_rx_lctrl = loop_ctrl.s.cfg_rx_lctrl | 0x101;
2253 csr_wr_node(node, CVMX_GSERX_LANEX_RX_LOOP_CTRL(l, qlm), loop_ctrl.u64);
2254
2255
2256
2257
2258
2259
2260 lanex_pwr_ctrl.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PWR_CTRL(l, qlm));
2261 lanex_pwr_ctrl.s.rx_lctrl_ovrrd_en = 1;
2262 csr_wr_node(node, CVMX_GSERX_LANEX_PWR_CTRL(l, qlm), lanex_pwr_ctrl.u64);
2263
2264
2265 rx_cfg_5.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_CFG_5(l, qlm));
2266 rx_cfg_5.s.rx_agc_men_ovrrd_val = 1;
2267 rx_cfg_5.s.rx_agc_men_ovrrd_en = 1;
2268 csr_wr_node(node, CVMX_GSERX_LANEX_RX_CFG_5(l, qlm), rx_cfg_5.u64);
2269
2270 ctle_ctrl.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_CTLE_CTRL(l, qlm));
2271 ctle_ctrl.s.pcs_sds_rx_ctle_zero = ctle_zero;
2272 csr_wr_node(node, CVMX_GSERX_LANEX_RX_CTLE_CTRL(l, qlm), ctle_ctrl.u64);
2273
2274 rx_cfg_2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_CFG_2(l, qlm));
2275 rx_cfg_2.s.rx_sds_rx_agc_mval = (agc_pre_ctle << 4) | agc_post_ctle;
2276 csr_wr_node(node, CVMX_GSERX_LANEX_RX_CFG_2(l, qlm), rx_cfg_2.u64);
2277 }
2278 return 0;
2279}
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295void octeon_qlm_tune_per_lane_v3(int node, int qlm, int baud_mhz, int lane, int tx_swing,
2296 int tx_pre, int tx_post, int tx_gain, int tx_vboost)
2297{
2298 cvmx_gserx_cfg_t gserx_cfg;
2299 cvmx_gserx_lanex_tx_cfg_0_t tx_cfg0;
2300 cvmx_gserx_lanex_tx_pre_emphasis_t pre_emphasis;
2301 cvmx_gserx_lanex_tx_cfg_1_t tx_cfg1;
2302 cvmx_gserx_lanex_tx_cfg_3_t tx_cfg3;
2303 cvmx_bgxx_spux_br_pmd_control_t pmd_control;
2304 cvmx_gserx_lanex_pcs_ctlifc_0_t pcs_ctlifc_0;
2305 cvmx_gserx_lanex_pcs_ctlifc_2_t pcs_ctlifc_2;
2306 int bgx, lmac;
2307
2308
2309 gserx_cfg.u64 = csr_rd_node(node, CVMX_GSERX_CFG(qlm));
2310 if (gserx_cfg.s.pcie)
2311 return;
2312
2313
2314 if (OCTEON_IS_MODEL(OCTEON_CN78XX))
2315 bgx = (qlm < 2) ? qlm : (qlm - 2);
2316 else if (OCTEON_IS_MODEL(OCTEON_CN73XX))
2317 bgx = (qlm < 4) ? (qlm - 2) : 2;
2318 else if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
2319 bgx = 0;
2320 else
2321 return;
2322
2323 if ((OCTEON_IS_MODEL(OCTEON_CN73XX) && qlm == 6) ||
2324 (OCTEON_IS_MODEL(OCTEON_CNF75XX) && qlm == 5))
2325 lmac = 2;
2326 else
2327 lmac = lane;
2328
2329
2330 pmd_control.u64 = csr_rd_node(node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(lmac, bgx));
2331 if (pmd_control.s.train_en)
2332 return;
2333
2334 if (tx_pre != -1 && tx_post == -1)
2335 tx_post = 0;
2336
2337 if (tx_post != -1 && tx_pre == -1)
2338 tx_pre = 0;
2339
2340
2341 if (tx_swing < -1 || tx_swing > 25) {
2342 printf("ERROR: N%d:QLM%d: Lane %d: Invalid TX_SWING(%d). TX_SWING must be <= 25.\n",
2343 node, qlm, lane, tx_swing);
2344 return;
2345 }
2346
2347 if (tx_pre < -1 || tx_pre > 10) {
2348 printf("ERROR: N%d:QLM%d: Lane %d: Invalid TX_PRE(%d). TX_PRE must be <= 10.\n",
2349 node, qlm, lane, tx_swing);
2350 return;
2351 }
2352
2353 if (tx_post < -1 || tx_post > 31) {
2354 printf("ERROR: N%d:QLM%d: Lane %d: Invalid TX_POST(%d). TX_POST must be <= 15.\n",
2355 node, qlm, lane, tx_swing);
2356 return;
2357 }
2358
2359 if (tx_pre >= 0 && tx_post >= 0 && tx_swing >= 0 &&
2360 tx_pre + tx_post - tx_swing > 2) {
2361 printf("ERROR: N%d.QLM%d: Lane %d: TX_PRE(%d) + TX_POST(%d) - TX_SWING(%d) must be <= 2\n",
2362 node, qlm, lane, tx_pre, tx_post, tx_swing);
2363 return;
2364 }
2365
2366 if (tx_pre >= 0 && tx_post >= 0 && tx_swing >= 0 &&
2367 tx_pre + tx_post + tx_swing > 35) {
2368 printf("ERROR: N%d.QLM%d: Lane %d: TX_PRE(%d) + TX_POST(%d) + TX_SWING(%d) must be <= 35\n",
2369 node, qlm, lane, tx_pre, tx_post, tx_swing);
2370 return;
2371 }
2372
2373 if (tx_gain < -1 || tx_gain > 7) {
2374 printf("ERROR: N%d.QLM%d: Lane %d: Invalid TX_GAIN(%d). TX_GAIN must be between 0 and 7\n",
2375 node, qlm, lane, tx_gain);
2376 return;
2377 }
2378
2379 if (tx_vboost < -1 || tx_vboost > 1) {
2380 printf("ERROR: N%d.QLM%d: Lane %d: Invalid TX_VBOOST(%d). TX_VBOOST must be 0 or 1.\n",
2381 node, qlm, lane, tx_vboost);
2382 return;
2383 }
2384
2385 debug("N%d.QLM%d: Lane %d: TX_SWING=%d, TX_PRE=%d, TX_POST=%d, TX_GAIN=%d, TX_VBOOST=%d\n",
2386 node, qlm, lane, tx_swing, tx_pre, tx_post, tx_gain, tx_vboost);
2387
2388
2389
2390 tx_cfg1.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_TX_CFG_1(lane, qlm));
2391 tx_cfg1.s.tx_swing_ovrrd_en = (tx_swing != -1);
2392 tx_cfg1.s.tx_premptap_ovrrd_val = (tx_pre != -1) && (tx_post != -1);
2393 tx_cfg1.s.tx_vboost_en_ovrrd_en = (tx_vboost != -1);
2394 ;
2395 csr_wr_node(node, CVMX_GSERX_LANEX_TX_CFG_1(lane, qlm), tx_cfg1.u64);
2396
2397
2398
2399 if (tx_swing != -1) {
2400 tx_cfg0.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_TX_CFG_0(lane, qlm));
2401 tx_cfg0.s.cfg_tx_swing = tx_swing;
2402 csr_wr_node(node, CVMX_GSERX_LANEX_TX_CFG_0(lane, qlm), tx_cfg0.u64);
2403 }
2404
2405 if ((tx_pre != -1) && (tx_post != -1)) {
2406 pre_emphasis.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_TX_PRE_EMPHASIS(lane, qlm));
2407 pre_emphasis.s.cfg_tx_premptap = (tx_post << 4) | tx_pre;
2408 csr_wr_node(node, CVMX_GSERX_LANEX_TX_PRE_EMPHASIS(lane, qlm), pre_emphasis.u64);
2409 }
2410
2411
2412 if (tx_gain != -1) {
2413 tx_cfg3.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_TX_CFG_3(lane, qlm));
2414 tx_cfg3.s.pcs_sds_tx_gain = tx_gain;
2415 csr_wr_node(node, CVMX_GSERX_LANEX_TX_CFG_3(lane, qlm), tx_cfg3.u64);
2416 }
2417
2418
2419 if (tx_vboost != -1) {
2420 tx_cfg3.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_TX_CFG_3(lane, qlm));
2421 tx_cfg3.s.cfg_tx_vboost_en = tx_vboost;
2422 csr_wr_node(node, CVMX_GSERX_LANEX_TX_CFG_3(lane, qlm), tx_cfg3.u64);
2423 }
2424
2425
2426 pcs_ctlifc_0.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_0(lane, qlm));
2427 if (((tx_pre != -1) && (tx_post != -1)) || (tx_swing != -1))
2428 pcs_ctlifc_0.s.cfg_tx_coeff_req_ovrrd_val = 0x1;
2429 if (tx_vboost != -1)
2430 pcs_ctlifc_0.s.cfg_tx_vboost_en_ovrrd_val = 1;
2431 csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_0(lane, qlm), pcs_ctlifc_0.u64);
2432
2433
2434 pcs_ctlifc_2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm));
2435 if (((tx_pre != -1) && (tx_post != -1)) || (tx_swing != -1))
2436 pcs_ctlifc_2.s.cfg_tx_coeff_req_ovrrd_en = 0x1;
2437 if (tx_vboost != -1)
2438 pcs_ctlifc_2.s.cfg_tx_vboost_en_ovrrd_en = 1;
2439 csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm), pcs_ctlifc_2.u64);
2440
2441
2442 pcs_ctlifc_2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm));
2443 pcs_ctlifc_2.s.ctlifc_ovrrd_req = 0x1;
2444 csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm), pcs_ctlifc_2.u64);
2445
2446
2447 udelay(1000);
2448
2449
2450
2451 pcs_ctlifc_2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm));
2452 pcs_ctlifc_2.s.cfg_tx_coeff_req_ovrrd_en = 0;
2453 csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm), pcs_ctlifc_2.u64);
2454
2455 pcs_ctlifc_2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm));
2456 pcs_ctlifc_2.s.ctlifc_ovrrd_req = 0x1;
2457 csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm), pcs_ctlifc_2.u64);
2458}
2459
2460
2461
2462
2463
2464
2465
2466
2467
2468
2469
2470
2471
2472
2473
/**
 * Tune every lane of a QLM/DLM for the given baud rate.
 *
 * @param node	      node number
 * @param qlm	      QLM/DLM to tune
 * @param baud_mhz    link speed in Mbps
 * @param tx_swing    TX swing value, or -1 to leave unchanged
 * @param tx_premptap combined TX pre-tap (bits [3:0]) and post-tap
 *		      (bits [8:4]) value, or -1 to leave both unchanged
 * @param tx_gain     TX gain value, or -1 to leave unchanged
 * @param tx_vboost   TX voltage boost enable, or -1 to leave unchanged
 */
void octeon_qlm_tune_v3(int node, int qlm, int baud_mhz, int tx_swing, int tx_premptap, int tx_gain,
			int tx_vboost)
{
	int num_lanes = cvmx_qlm_get_lanes(qlm);
	int pre = -1;
	int post = -1;
	int lane;

	/* The combined premptap value is identical for all lanes, so split
	 * it into its pre-tap and post-tap fields once up front.
	 */
	if (tx_premptap != -1) {
		pre = tx_premptap & 0xf;
		post = (tx_premptap >> 4) & 0x1f;
	}

	for (lane = 0; lane < num_lanes; lane++)
		octeon_qlm_tune_per_lane_v3(node, qlm, baud_mhz, lane, tx_swing, pre, post,
					    tx_gain, tx_vboost);
}
2488
2489
2490
2491
2492
2493
2494
2495
2496void octeon_qlm_set_channel_v3(int node, int qlm, int pre_ctle)
2497{
2498 cvmx_gserx_lane_vma_fine_ctrl_2_t lane_vma_fine_ctrl_2;
2499
2500 lane_vma_fine_ctrl_2.u64 = csr_rd_node(node, CVMX_GSERX_LANE_VMA_FINE_CTRL_2(qlm));
2501 lane_vma_fine_ctrl_2.s.rx_prectle_gain_min_fine = pre_ctle;
2502 csr_wr_node(node, CVMX_GSERX_LANE_VMA_FINE_CTRL_2(qlm), lane_vma_fine_ctrl_2.u64);
2503}
2504
/**
 * Workaround for errata 20844, applied to CN78xx pass 1.0 only: for each
 * of the 4 lanes of the QLM, enable the RX CDR coast request override,
 * pulse the electrical-idle-exit (EIE) detector override low and then
 * back high, and finally issue a control-interface override request to
 * latch the new settings.
 *
 * @param node node number
 * @param qlm  QLM to apply the workaround to
 */
static void __qlm_init_errata_20844(int node, int qlm)
{
	int lane;

	/* The errata only affects CN78xx pass 1.0 silicon */
	if (!OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0))
		return;

	for (lane = 0; lane < 4; lane++) {
		cvmx_gserx_lanex_rx_misc_ovrrd_t misc_ovrrd;
		cvmx_gserx_lanex_pcs_ctlifc_2_t ctlifc_2;

		/* Enable override of the RX CDR coast request */
		ctlifc_2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm));
		ctlifc_2.s.cfg_rx_cdr_coast_req_ovrrd_en = 1;
		csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm), ctlifc_2.u64);

		/* Force the EIE detector output low ... */
		misc_ovrrd.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_MISC_OVRRD(lane, qlm));
		misc_ovrrd.s.cfg_rx_eie_det_ovrrd_en = 1;
		misc_ovrrd.s.cfg_rx_eie_det_ovrrd_val = 0;
		csr_wr_node(node, CVMX_GSERX_LANEX_RX_MISC_OVRRD(lane, qlm), misc_ovrrd.u64);

		udelay(1);

		/* ... then force it back high */
		misc_ovrrd.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_MISC_OVRRD(lane, qlm));
		misc_ovrrd.s.cfg_rx_eie_det_ovrrd_en = 1;
		misc_ovrrd.s.cfg_rx_eie_det_ovrrd_val = 1;
		csr_wr_node(node, CVMX_GSERX_LANEX_RX_MISC_OVRRD(lane, qlm), misc_ovrrd.u64);
		/* Request the PCS layer to apply the new override values */
		ctlifc_2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm));
		ctlifc_2.s.ctlifc_ovrrd_req = 1;
		csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(lane, qlm), ctlifc_2.u64);
	}
}
2549
2550
/* PLL and lane register settings for one (lane mode, reference clock)
 * combination on CN78xx-class GSER blocks.
 */
struct refclk_settings_cn78xx {
	bool valid;	/* true if this ref clock is supported for the lane mode */
	union cvmx_gserx_pll_px_mode_0 mode_0;	/* GSERX_PLL_Pn_MODE_0 settings */
	union cvmx_gserx_pll_px_mode_1 mode_1;	/* GSERX_PLL_Pn_MODE_1 settings */
	union cvmx_gserx_lane_px_mode_0 pmode_0;	/* GSERX_LANE_Pn_MODE_0 settings */
	union cvmx_gserx_lane_px_mode_1 pmode_1;	/* GSERX_LANE_Pn_MODE_1 settings */
};
2558
2559
/* Default reference clock select for each lane mode: 0 = 100 MHz,
 * 1 = 125 MHz, 2 = 156.25 MHz (matching the REFCLK suffix of each R_*
 * lane-mode name above).
 */
static const u8 def_ref_clk_cn78xx[R_NUM_LANE_MODES] = { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 };
2561
2562
2563
2564
2565
2566
2567
/* Currently-selected reference clock for each [node][qlm][lane mode].
 * Initialized to the same defaults as def_ref_clk_cn78xx and updated at
 * runtime by __set_qlm_ref_clk_cn78xx().
 */
static u8 ref_clk_cn78xx[CVMX_MAX_NODES][8][R_NUM_LANE_MODES] = {
	{ { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 } },
	{ { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 } },
	{ { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 } },
	{ { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 },
	  { 0, 0, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1 } }
};
2602
2603
2604
2605
2606
2607
2608static const struct refclk_settings_cn78xx refclk_settings_cn78xx[R_NUM_LANE_MODES][4] = {
2609 {
2610 {
2611 .valid = true,
2612 .mode_0.s = { .pll_icp = 0x4, .pll_rloop = 0x3, .pll_pcs_div = 0x5 },
2613 .mode_1.s = { .pll_16p5en = 0x0,
2614 .pll_cpadj = 0x2,
2615 .pll_pcie3en = 0x0,
2616 .pll_opr = 0x0,
2617 .pll_div = 0x19 },
2618 .pmode_0.s = { .ctle = 0x0,
2619 .pcie = 0x1,
2620 .tx_ldiv = 0x1,
2621 .rx_ldiv = 0x1,
2622 .srate = 0x0,
2623 .tx_mode = 0x3,
2624 .rx_mode = 0x3 },
2625 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2626 .vma_mm = 0x1,
2627 .cdr_fgain = 0xa,
2628 .ph_acc_adj = 0x14 } },
2629 {
2630 .valid = true,
2631 .mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x3, .pll_pcs_div = 0x5 },
2632 .mode_1.s = { .pll_16p5en = 0x0,
2633 .pll_cpadj = 0x1,
2634 .pll_pcie3en = 0x0,
2635 .pll_opr = 0x0,
2636 .pll_div = 0x14 },
2637 .pmode_0.s = { .ctle = 0x0,
2638 .pcie = 0x1,
2639 .tx_ldiv = 0x1,
2640 .rx_ldiv = 0x1,
2641 .srate = 0x0,
2642 .tx_mode = 0x3,
2643 .rx_mode = 0x3 },
2644 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2645 .vma_mm = 0x1,
2646 .cdr_fgain = 0xa,
2647 .ph_acc_adj = 0x14 } },
2648 {
2649 .valid = true,
2650 .mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x3, .pll_pcs_div = 0x5 },
2651 .mode_1.s = { .pll_16p5en = 0x0,
2652 .pll_cpadj = 0x2,
2653 .pll_pcie3en = 0x0,
2654 .pll_opr = 0x0,
2655 .pll_div = 0x10 },
2656 .pmode_0.s = { .ctle = 0x0,
2657 .pcie = 0x1,
2658 .tx_ldiv = 0x1,
2659 .rx_ldiv = 0x1,
2660 .srate = 0x0,
2661 .tx_mode = 0x3,
2662 .rx_mode = 0x3 },
2663 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2664 .vma_mm = 0x1,
2665 .cdr_fgain = 0xa,
2666 .ph_acc_adj = 0x14 } },
2667 {
2668
2669 .valid = false,
2670 } },
2671 {
2672
2673 {
2674 .valid = true,
2675 .mode_0.s = { .pll_icp = 0x4, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
2676 .mode_1.s = { .pll_16p5en = 0x0,
2677 .pll_cpadj = 0x2,
2678 .pll_pcie3en = 0x0,
2679 .pll_opr = 0x0,
2680 .pll_div = 0x19 },
2681 .pmode_0.s = { .ctle = 0x0,
2682 .pcie = 0x1,
2683 .tx_ldiv = 0x0,
2684 .rx_ldiv = 0x0,
2685 .srate = 0x0,
2686 .tx_mode = 0x3,
2687 .rx_mode = 0x3 },
2688 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2689 .vma_mm = 0x0,
2690 .cdr_fgain = 0xa,
2691 .ph_acc_adj = 0x14 } },
2692 {
2693 .valid = true,
2694 .mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
2695 .mode_1.s = { .pll_16p5en = 0x0,
2696 .pll_cpadj = 0x1,
2697 .pll_pcie3en = 0x0,
2698 .pll_opr = 0x0,
2699 .pll_div = 0x14 },
2700 .pmode_0.s = { .ctle = 0x0,
2701 .pcie = 0x1,
2702 .tx_ldiv = 0x0,
2703 .rx_ldiv = 0x0,
2704 .srate = 0x0,
2705 .tx_mode = 0x3,
2706 .rx_mode = 0x3 },
2707 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2708 .vma_mm = 0x0,
2709 .cdr_fgain = 0xa,
2710 .ph_acc_adj = 0x14 } },
2711 {
2712 .valid = true,
2713 .mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
2714 .mode_1.s = { .pll_16p5en = 0x0,
2715 .pll_cpadj = 0x2,
2716 .pll_pcie3en = 0x0,
2717 .pll_opr = 0x0,
2718 .pll_div = 0x10 },
2719 .pmode_0.s = { .ctle = 0x0,
2720 .pcie = 0x1,
2721 .tx_ldiv = 0x0,
2722 .rx_ldiv = 0x0,
2723 .srate = 0x0,
2724 .tx_mode = 0x3,
2725 .rx_mode = 0x3 },
2726 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2727 .vma_mm = 0x0,
2728 .cdr_fgain = 0xa,
2729 .ph_acc_adj = 0x14 } },
2730 {
2731
2732 .valid = false,
2733 },
2734 },
2735 {
2736 {
2737 .valid = true,
2738 .mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x5, .pll_pcs_div = 0xa },
2739 .mode_1.s = { .pll_16p5en = 0x0,
2740 .pll_cpadj = 0x2,
2741 .pll_pcie3en = 0x1,
2742 .pll_opr = 0x1,
2743 .pll_div = 0x28 },
2744 .pmode_0.s = { .ctle = 0x3,
2745 .pcie = 0x0,
2746 .tx_ldiv = 0x0,
2747 .rx_ldiv = 0x0,
2748 .srate = 0x0,
2749 .tx_mode = 0x3,
2750 .rx_mode = 0x3 },
2751 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2752 .vma_mm = 0x0,
2753 .cdr_fgain = 0xb,
2754 .ph_acc_adj = 0x23 } },
2755 {
2756 .valid = true,
2757 .mode_0.s = { .pll_icp = 0x2, .pll_rloop = 0x5, .pll_pcs_div = 0xa },
2758 .mode_1.s = { .pll_16p5en = 0x0,
2759 .pll_cpadj = 0x1,
2760 .pll_pcie3en = 0x1,
2761 .pll_opr = 0x1,
2762 .pll_div = 0x20 },
2763 .pmode_0.s = { .ctle = 0x3,
2764 .pcie = 0x0,
2765 .tx_ldiv = 0x0,
2766 .rx_ldiv = 0x0,
2767 .srate = 0x0,
2768 .tx_mode = 0x3,
2769 .rx_mode = 0x3 },
2770 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2771 .vma_mm = 0x0,
2772 .cdr_fgain = 0xb,
2773 .ph_acc_adj = 0x23 } },
2774 {
2775 .valid = false } },
2776 {
2777
2778 {
2779 .valid = true,
2780 .mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0x28 },
2781 .mode_1.s = { .pll_16p5en = 0x1,
2782 .pll_cpadj = 0x2,
2783 .pll_pcie3en = 0x0,
2784 .pll_opr = 0x0,
2785 .pll_div = 0x19 },
2786 .pmode_0.s = { .ctle = 0x0,
2787 .pcie = 0x0,
2788 .tx_ldiv = 0x2,
2789 .rx_ldiv = 0x2,
2790 .srate = 0x0,
2791 .tx_mode = 0x3,
2792 .rx_mode = 0x3 },
2793 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2794 .vma_mm = 0x1,
2795 .cdr_fgain = 0xc,
2796 .ph_acc_adj = 0x1e } },
2797 {
2798 .valid = true,
2799 .mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0x28 },
2800 .mode_1.s = { .pll_16p5en = 0x1,
2801 .pll_cpadj = 0x2,
2802 .pll_pcie3en = 0x0,
2803 .pll_opr = 0x0,
2804 .pll_div = 0x14 },
2805 .pmode_0.s = { .ctle = 0x0,
2806 .pcie = 0x0,
2807 .tx_ldiv = 0x2,
2808 .rx_ldiv = 0x2,
2809 .srate = 0x0,
2810 .tx_mode = 0x3,
2811 .rx_mode = 0x3 },
2812 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2813 .vma_mm = 0x1,
2814 .cdr_fgain = 0xc,
2815 .ph_acc_adj = 0x1e } },
2816 {
2817 .valid = true,
2818 .mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0x28 },
2819 .mode_1.s = { .pll_16p5en = 0x1,
2820 .pll_cpadj = 0x3,
2821 .pll_pcie3en = 0x0,
2822 .pll_opr = 0x0,
2823 .pll_div = 0x10 },
2824 .pmode_0.s = { .ctle = 0x0,
2825 .pcie = 0x0,
2826 .tx_ldiv = 0x2,
2827 .rx_ldiv = 0x2,
2828 .srate = 0x0,
2829 .tx_mode = 0x3,
2830 .rx_mode = 0x3 },
2831 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2832 .vma_mm = 0x1,
2833 .cdr_fgain = 0xc,
2834 .ph_acc_adj = 0x1e } },
2835 {
2836
2837 .valid = false,
2838 },
2839 },
2840 {
2841 {
2842 .valid = false },
2843 {
2844 .valid = true,
2845 .mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0x14 },
2846 .mode_1.s = { .pll_16p5en = 0x1,
2847 .pll_cpadj = 0x2,
2848 .pll_pcie3en = 0x0,
2849 .pll_opr = 0x0,
2850 .pll_div = 0x19 },
2851 .pmode_0.s = { .ctle = 0x0,
2852 .pcie = 0x0,
2853 .tx_ldiv = 0x1,
2854 .rx_ldiv = 0x1,
2855 .srate = 0x0,
2856 .tx_mode = 0x3,
2857 .rx_mode = 0x3 },
2858 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2859 .vma_mm = 0x1,
2860 .cdr_fgain = 0xc,
2861 .ph_acc_adj = 0x1e } },
2862 {
2863 .valid = true,
2864 .mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0x14 },
2865 .mode_1.s = { .pll_16p5en = 0x1,
2866 .pll_cpadj = 0x2,
2867 .pll_pcie3en = 0x0,
2868 .pll_opr = 0x0,
2869 .pll_div = 0x14 },
2870 .pmode_0.s = { .ctle = 0x0,
2871 .pcie = 0x0,
2872 .tx_ldiv = 0x1,
2873 .rx_ldiv = 0x1,
2874 .srate = 0x0,
2875 .tx_mode = 0x3,
2876 .rx_mode = 0x3 },
2877 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2878 .vma_mm = 0x1,
2879 .cdr_fgain = 0xc,
2880 .ph_acc_adj = 0x1e } },
2881 {
2882
2883 .valid = false,
2884 } },
2885 {
2886 {
2887 .valid = false },
2888 {
2889 .valid = false },
2890 {
2891 .valid = true,
2892 .mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x5, .pll_pcs_div = 0xa },
2893 .mode_1.s = { .pll_16p5en = 0x1,
2894 .pll_cpadj = 0x2,
2895 .pll_pcie3en = 0x0,
2896 .pll_opr = 0x1,
2897 .pll_div = 0x21 },
2898 .pmode_0.s = { .ctle = 0x3,
2899 .pcie = 0x0,
2900 .tx_ldiv = 0x0,
2901 .rx_ldiv = 0x0,
2902 .srate = 0x0,
2903 .tx_mode = 0x3,
2904 .rx_mode = 0x3 },
2905 .pmode_1.s = { .vma_fine_cfg_sel = 0x1,
2906 .vma_mm = 0x0,
2907 .cdr_fgain = 0xa,
2908 .ph_acc_adj = 0xf } },
2909 {
2910 .valid = true,
2911 .mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x5, .pll_pcs_div = 0xa },
2912 .mode_1.s = { .pll_16p5en = 0x1,
2913 .pll_cpadj = 0x2,
2914 .pll_pcie3en = 0x0,
2915 .pll_opr = 0x1,
2916 .pll_div = 0x20 },
2917 .pmode_0.s = { .ctle = 0x3,
2918 .pcie = 0x0,
2919 .tx_ldiv = 0x0,
2920 .rx_ldiv = 0x0,
2921 .srate = 0x0,
2922 .tx_mode = 0x3,
2923 .rx_mode = 0x3 },
2924 .pmode_1.s = { .vma_fine_cfg_sel = 0x1,
2925 .vma_mm = 0x0,
2926 .cdr_fgain = 0xa,
2927 .ph_acc_adj = 0xf } } },
2928 {
2929 {
2930 .valid = 1,
2931 .mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0x28 },
2932 .mode_1.s = { .pll_16p5en = 0x1,
2933 .pll_cpadj = 0x2,
2934 .pll_pcie3en = 0x0,
2935 .pll_opr = 0x0,
2936 .pll_div = 0x19 },
2937 .pmode_0.s = { .ctle = 0x0,
2938 .pcie = 0x0,
2939 .tx_ldiv = 0x2,
2940 .rx_ldiv = 0x2,
2941 .srate = 0x0,
2942 .tx_mode = 0x3,
2943 .rx_mode = 0x3 },
2944 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2945 .vma_mm = 0x1,
2946 .cdr_fgain = 0xc,
2947 .ph_acc_adj = 0x1e } },
2948 {
2949 .valid = 1,
2950 .mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0x28 },
2951 .mode_1.s = { .pll_16p5en = 0x1,
2952 .pll_cpadj = 0x2,
2953 .pll_pcie3en = 0x0,
2954 .pll_opr = 0x0,
2955 .pll_div = 0x14 },
2956 .pmode_0.s = { .ctle = 0x0,
2957 .pcie = 0x0,
2958 .tx_ldiv = 0x2,
2959 .rx_ldiv = 0x2,
2960 .srate = 0x0,
2961 .tx_mode = 0x3,
2962 .rx_mode = 0x3 },
2963 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2964 .vma_mm = 0x0,
2965 .cdr_fgain = 0xc,
2966 .ph_acc_adj = 0x1e } },
2967 {
2968 .valid = 1,
2969 .mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0x28 },
2970 .mode_1.s = { .pll_16p5en = 0x1,
2971 .pll_cpadj = 0x3,
2972 .pll_pcie3en = 0x0,
2973 .pll_opr = 0x0,
2974 .pll_div = 0x10 },
2975 .pmode_0.s = { .ctle = 0x0,
2976 .pcie = 0x0,
2977 .tx_ldiv = 0x2,
2978 .rx_ldiv = 0x2,
2979 .srate = 0x0,
2980 .tx_mode = 0x3,
2981 .rx_mode = 0x3 },
2982 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
2983 .vma_mm = 0x1,
2984 .cdr_fgain = 0xc,
2985 .ph_acc_adj = 0x1e } } },
2986 {
2987 {
2988 .valid = true,
2989 .mode_0.s = { .pll_icp = 0x4, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
2990 .mode_1.s = { .pll_16p5en = 0x0, .pll_cpadj = 0x2, .pll_pcie3en = 0x0,
2991 .pll_div = 0x19 },
2992 .pmode_0.s = { .ctle = 0x0,
2993 .pcie = 0x0,
2994 .tx_ldiv = 0x0,
2995 .rx_ldiv = 0x0,
2996 .srate = 0x0,
2997 .tx_mode = 0x3,
2998 .rx_mode = 0x3 },
2999 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
3000 .vma_mm = 0x1,
3001 .cdr_fgain = 0xc,
3002 .ph_acc_adj = 0x1e } },
3003 {
3004 .valid = true,
3005 .mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
3006 .mode_1.s = { .pll_16p5en = 0x0, .pll_cpadj = 0x1, .pll_pcie3en = 0x0,
3007 .pll_div = 0x14 },
3008 .pmode_0.s = { .ctle = 0x0,
3009 .pcie = 0x0,
3010 .tx_ldiv = 0x0,
3011 .rx_ldiv = 0x0,
3012 .srate = 0x0,
3013 .tx_mode = 0x3,
3014 .rx_mode = 0x3 },
3015 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
3016 .vma_mm = 0x1,
3017 .cdr_fgain = 0xc,
3018 .ph_acc_adj = 0x1e } },
3019 {
3020 .valid = true,
3021 .mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
3022 .mode_1.s = { .pll_16p5en = 0x0, .pll_cpadj = 0x2, .pll_pcie3en = 0x0,
3023 .pll_div = 0x10 },
3024 .pmode_0.s = { .ctle = 0x0,
3025 .pcie = 0x0,
3026 .tx_ldiv = 0x0,
3027 .rx_ldiv = 0x0,
3028 .srate = 0x0,
3029 .tx_mode = 0x3,
3030 .rx_mode = 0x3 },
3031 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
3032 .vma_mm = 0x1,
3033 .cdr_fgain = 0xc,
3034 .ph_acc_adj = 0x1e } },
3035 {
3036
3037 .valid = false,
3038 } },
3039 {
3040 {
3041 .valid = false },
3042 {
3043 .valid = true,
3044 .mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
3045 .mode_1.s = { .pll_16p5en = 0x0,
3046 .pll_cpadj = 0x2,
3047 .pll_pcie3en = 0x0,
3048 .pll_opr = 0x0,
3049 .pll_div = 0x19 },
3050 .pmode_0.s = { .ctle = 0x0,
3051 .pcie = 0x0,
3052 .tx_ldiv = 0x0,
3053 .rx_ldiv = 0x0,
3054 .srate = 0x0,
3055 .tx_mode = 0x3,
3056 .rx_mode = 0x3 },
3057 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
3058 .vma_mm = 0x0,
3059 .cdr_fgain = 0xa,
3060 .ph_acc_adj = 0x14 } },
3061 {
3062 .valid = true,
3063 .mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
3064 .mode_1.s = { .pll_16p5en = 0x0,
3065 .pll_cpadj = 0x2,
3066 .pll_pcie3en = 0x0,
3067 .pll_opr = 0x0,
3068 .pll_div = 0x14 },
3069 .pmode_0.s = { .ctle = 0x0,
3070 .pcie = 0x0,
3071 .tx_ldiv = 0x0,
3072 .rx_ldiv = 0x0,
3073 .srate = 0x0,
3074 .tx_mode = 0x3,
3075 .rx_mode = 0x3 },
3076 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
3077 .vma_mm = 0x0,
3078 .cdr_fgain = 0xa,
3079 .ph_acc_adj = 0x14 } },
3080 {
3081 .valid = true,
3082 .mode_0.s = { .pll_icp = 0x1, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
3083 .mode_1.s = { .pll_16p5en = 0x0,
3084 .pll_cpadj = 0x2,
3085 .pll_pcie3en = 0x0,
3086 .pll_opr = 0x0,
3087 .pll_div = 0x14 },
3088 .pmode_0.s = { .ctle = 0x0,
3089 .pcie = 0x0,
3090 .tx_ldiv = 0x0,
3091 .rx_ldiv = 0x0,
3092 .srate = 0x0,
3093 .tx_mode = 0x3,
3094 .rx_mode = 0x3 },
3095 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
3096 .vma_mm = 0x0,
3097 .cdr_fgain = 0xa,
3098 .ph_acc_adj = 0x14 } } },
3099 {
3100 {
3101 .valid = true,
3102 .mode_0.s = { .pll_icp = 0x4, .pll_rloop = 0x3, .pll_pcs_div = 0x5 },
3103 .mode_1.s = { .pll_16p5en = 0x0,
3104 .pll_cpadj = 0x2,
3105 .pll_pcie3en = 0x0,
3106 .pll_opr = 0x0,
3107 .pll_div = 0x19 },
3108 .pmode_0.s = { .ctle = 0x0,
3109 .pcie = 0x1,
3110 .tx_ldiv = 0x1,
3111 .rx_ldiv = 0x1,
3112 .srate = 0x0,
3113 .tx_mode = 0x3,
3114 .rx_mode = 0x3 },
3115 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
3116 .vma_mm = 0x1,
3117 .cdr_fgain = 0xa,
3118 .ph_acc_adj = 0x14 } },
3119 {
3120 .valid = true,
3121 .mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x3, .pll_pcs_div = 0x5 },
3122 .mode_1.s = { .pll_16p5en = 0x0,
3123 .pll_cpadj = 0x1,
3124 .pll_pcie3en = 0x0,
3125 .pll_opr = 0x0,
3126 .pll_div = 0x14 },
3127 .pmode_0.s = { .ctle = 0x0,
3128 .pcie = 0x1,
3129 .tx_ldiv = 0x1,
3130 .rx_ldiv = 0x1,
3131 .srate = 0x0,
3132 .tx_mode = 0x3,
3133 .rx_mode = 0x3 },
3134 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
3135 .vma_mm = 0x1,
3136 .cdr_fgain = 0xa,
3137 .ph_acc_adj = 0x14 } },
3138 {
3139 .valid = true,
3140 .mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x3, .pll_pcs_div = 0x5 },
3141 .mode_1.s = { .pll_16p5en = 0x0,
3142 .pll_cpadj = 0x2,
3143 .pll_pcie3en = 0x0,
3144 .pll_opr = 0x0,
3145 .pll_div = 0x10 },
3146 .pmode_0.s = { .ctle = 0x0,
3147 .pcie = 0x1,
3148 .tx_ldiv = 0x1,
3149 .rx_ldiv = 0x1,
3150 .srate = 0x0,
3151 .tx_mode = 0x3,
3152 .rx_mode = 0x3 },
3153 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
3154 .vma_mm = 0x1,
3155 .cdr_fgain = 0xa,
3156 .ph_acc_adj = 0x14 } },
3157 {
3158
3159 .valid = false,
3160 } },
3161 {
3162 {
3163 .valid = true,
3164 .mode_0.s = { .pll_icp = 0x4, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
3165 .mode_1.s = { .pll_16p5en = 0x0,
3166 .pll_cpadj = 0x2,
3167 .pll_pcie3en = 0x0,
3168 .pll_opr = 0x0,
3169 .pll_div = 0x19 },
3170 .pmode_0.s = { .ctle = 0x0,
3171 .pcie = 0x1,
3172 .tx_ldiv = 0x0,
3173 .rx_ldiv = 0x0,
3174 .srate = 0x0,
3175 .tx_mode = 0x3,
3176 .rx_mode = 0x3 },
3177 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
3178 .vma_mm = 0x0,
3179 .cdr_fgain = 0xa,
3180 .ph_acc_adj = 0x14 } },
3181 {
3182 .valid = true,
3183 .mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
3184 .mode_1.s = { .pll_16p5en = 0x0,
3185 .pll_cpadj = 0x1,
3186 .pll_pcie3en = 0x0,
3187 .pll_opr = 0x0,
3188 .pll_div = 0x14 },
3189 .pmode_0.s = { .ctle = 0x0,
3190 .pcie = 0x1,
3191 .tx_ldiv = 0x0,
3192 .rx_ldiv = 0x0,
3193 .srate = 0x0,
3194 .tx_mode = 0x3,
3195 .rx_mode = 0x3 },
3196 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
3197 .vma_mm = 0x0,
3198 .cdr_fgain = 0xa,
3199 .ph_acc_adj = 0x14 } },
3200 {
3201 .valid = true,
3202 .mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x3, .pll_pcs_div = 0xa },
3203 .mode_1.s = { .pll_16p5en = 0x0,
3204 .pll_cpadj = 0x2,
3205 .pll_pcie3en = 0x0,
3206 .pll_opr = 0x0,
3207 .pll_div = 0x10 },
3208 .pmode_0.s = { .ctle = 0x0,
3209 .pcie = 0x1,
3210 .tx_ldiv = 0x0,
3211 .rx_ldiv = 0x0,
3212 .srate = 0x0,
3213 .tx_mode = 0x3,
3214 .rx_mode = 0x3 },
3215 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
3216 .vma_mm = 0x0,
3217 .cdr_fgain = 0xa,
3218 .ph_acc_adj = 0x14 } },
3219 {
3220
3221 .valid = false,
3222 } },
3223 {
3224 {
3225 .valid = true,
3226 .mode_0.s = { .pll_icp = 0x3, .pll_rloop = 0x5, .pll_pcs_div = 0xa },
3227 .mode_1.s = { .pll_16p5en = 0x0,
3228 .pll_cpadj = 0x2,
3229 .pll_pcie3en = 0x1,
3230 .pll_opr = 0x1,
3231 .pll_div = 0x28 },
3232 .pmode_0.s = { .ctle = 0x3,
3233 .pcie = 0x0,
3234 .tx_ldiv = 0x0,
3235 .rx_ldiv = 0x0,
3236 .srate = 0x0,
3237 .tx_mode = 0x3,
3238 .rx_mode = 0x3 },
3239 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
3240 .vma_mm = 0x0,
3241 .cdr_fgain = 0xb,
3242 .ph_acc_adj = 0x23 } },
3243 {
3244 .valid = true,
3245 .mode_0.s = { .pll_icp = 0x2, .pll_rloop = 0x5, .pll_pcs_div = 0xa },
3246 .mode_1.s = { .pll_16p5en = 0x0,
3247 .pll_cpadj = 0x1,
3248 .pll_pcie3en = 0x1,
3249 .pll_opr = 0x1,
3250 .pll_div = 0x20 },
3251 .pmode_0.s = { .ctle = 0x3,
3252 .pcie = 0x0,
3253 .tx_ldiv = 0x0,
3254 .rx_ldiv = 0x0,
3255 .srate = 0x0,
3256 .tx_mode = 0x3,
3257 .rx_mode = 0x3 },
3258 .pmode_1.s = { .vma_fine_cfg_sel = 0x0,
3259 .vma_mm = 0x0,
3260 .cdr_fgain = 0xb,
3261 .ph_acc_adj = 0x23 } },
3262 {
3263 .valid = false },
3264 {
3265
3266 .valid = false,
3267 } }
3268};
3269
3270
3271
3272
3273
3274
3275
3276
3277
3278
3279
3280
3281
3282
3283
3284
3285static int __set_qlm_ref_clk_cn78xx(int node, int qlm, int lane_mode, int ref_clk_sel)
3286{
3287 if (ref_clk_sel > 3 || ref_clk_sel < 0 ||
3288 !refclk_settings_cn78xx[lane_mode][ref_clk_sel].valid) {
3289 debug("%s: Invalid reference clock %d for lane mode %d for node %d, QLM %d\n",
3290 __func__, ref_clk_sel, lane_mode, node, qlm);
3291 return -1;
3292 }
3293 debug("%s(%d, %d, 0x%x, %d)\n", __func__, node, qlm, lane_mode, ref_clk_sel);
3294 ref_clk_cn78xx[node][qlm][lane_mode] = ref_clk_sel;
3295 return 0;
3296}
3297
3298
3299
3300
3301
3302
3303
3304
3305
3306
3307static void __qlm_kr_inc_dec_gser26636(int node, int qlm)
3308{
3309 cvmx_gserx_rx_txdir_ctrl_1_t rx_txdir_ctrl;
3310
3311
3312
3313
3314 rx_txdir_ctrl.u64 = csr_rd_node(node, CVMX_GSERX_RX_TXDIR_CTRL_1(qlm));
3315 rx_txdir_ctrl.s.rx_precorr_chg_dir = 1;
3316 rx_txdir_ctrl.s.rx_tap1_chg_dir = 1;
3317 csr_wr_node(node, CVMX_GSERX_RX_TXDIR_CTRL_1(qlm), rx_txdir_ctrl.u64);
3318}
3319
3320
3321
3322
3323
3324
3325
3326
/**
 * Workaround for errata GSER-27140: update the RX equalizer default
 * settings (per-lane AGC/DFE gains, VMA fine-control windows, TX-direction
 * thresholds and equalization wait time) for the QLM.
 *
 * @param node node number
 * @param qlm  QLM to configure
 */
static void __qlm_rx_eq_temp_gser27140(int node, int qlm)
{
	int lane;
	int num_lanes = cvmx_qlm_get_lanes(qlm);
	cvmx_gserx_lanex_rx_valbbd_ctrl_0_t rx_valbbd_ctrl_0;
	cvmx_gserx_lane_vma_fine_ctrl_2_t lane_vma_fine_ctrl_2;
	cvmx_gserx_lane_vma_fine_ctrl_0_t lane_vma_fine_ctrl_0;
	cvmx_gserx_rx_txdir_ctrl_1_t rx_txdir_ctrl_1;
	cvmx_gserx_eq_wait_time_t eq_wait_time;
	cvmx_gserx_rx_txdir_ctrl_2_t rx_txdir_ctrl_2;
	cvmx_gserx_rx_txdir_ctrl_0_t rx_txdir_ctrl_0;

	/* Per-lane: set the AGC and DFE loop gains */
	for (lane = 0; lane < num_lanes; lane++) {
		rx_valbbd_ctrl_0.u64 =
			csr_rd_node(node, CVMX_GSERX_LANEX_RX_VALBBD_CTRL_0(lane, qlm));
		rx_valbbd_ctrl_0.s.agc_gain = 3;
		rx_valbbd_ctrl_0.s.dfe_gain = 2;
		csr_wr_node(node, CVMX_GSERX_LANEX_RX_VALBBD_CTRL_0(lane, qlm),
			    rx_valbbd_ctrl_0.u64);
	}

	/* Narrow the fine pre-CTLE gain window used by the VMA */
	lane_vma_fine_ctrl_2.u64 = csr_rd_node(node, CVMX_GSERX_LANE_VMA_FINE_CTRL_2(qlm));
	/* Maximum fine pre-CTLE gain */
	lane_vma_fine_ctrl_2.s.rx_prectle_gain_max_fine = 11;
	/* Minimum fine pre-CTLE gain */
	lane_vma_fine_ctrl_2.s.rx_prectle_gain_min_fine = 6;
	csr_wr_node(node, CVMX_GSERX_LANE_VMA_FINE_CTRL_2(qlm), lane_vma_fine_ctrl_2.u64);

	/* Update the RX boost thresholds and high value */
	rx_txdir_ctrl_0.u64 = csr_rd_node(node, CVMX_GSERX_RX_TXDIR_CTRL_0(qlm));
	rx_txdir_ctrl_0.s.rx_boost_hi_thrs = 11;
	rx_txdir_ctrl_0.s.rx_boost_lo_thrs = 4;
	rx_txdir_ctrl_0.s.rx_boost_hi_val = 15;
	csr_wr_node(node, CVMX_GSERX_RX_TXDIR_CTRL_0(qlm), rx_txdir_ctrl_0.u64);

	/* Configure the sampling DLL IQ fine-adaptation window and step */
	lane_vma_fine_ctrl_0.u64 = csr_rd_node(node, CVMX_GSERX_LANE_VMA_FINE_CTRL_0(qlm));
	lane_vma_fine_ctrl_0.s.rx_sdll_iq_max_fine = 14;
	lane_vma_fine_ctrl_0.s.rx_sdll_iq_min_fine = 8;
	lane_vma_fine_ctrl_0.s.rx_sdll_iq_step_fine = 2;
	/* VMA window and LMS wait times (fine adaptation) */
	lane_vma_fine_ctrl_0.s.vma_window_wait_fine = 5;
	lane_vma_fine_ctrl_0.s.lms_wait_time_fine = 5;

	csr_wr_node(node, CVMX_GSERX_LANE_VMA_FINE_CTRL_0(qlm), lane_vma_fine_ctrl_0.u64);

	/* Set the first-tap low/high thresholds */
	rx_txdir_ctrl_1.u64 = csr_rd_node(node, CVMX_GSERX_RX_TXDIR_CTRL_1(qlm));
	rx_txdir_ctrl_1.s.rx_tap1_lo_thrs = 8;
	rx_txdir_ctrl_1.s.rx_tap1_hi_thrs = 0x17;
	csr_wr_node(node, CVMX_GSERX_RX_TXDIR_CTRL_1(qlm), rx_txdir_ctrl_1.u64);

	/* Shorten the RX equalization wait count */
	eq_wait_time.u64 = csr_rd_node(node, CVMX_GSERX_EQ_WAIT_TIME(qlm));
	eq_wait_time.s.rxeq_wait_cnt = 6;
	csr_wr_node(node, CVMX_GSERX_EQ_WAIT_TIME(qlm), eq_wait_time.u64);

	/* Set the pre-correlation low/high thresholds */
	rx_txdir_ctrl_2.u64 = csr_rd_node(node, CVMX_GSERX_RX_TXDIR_CTRL_2(qlm));
	rx_txdir_ctrl_2.s.rx_precorr_hi_thrs = 0xc0;
	rx_txdir_ctrl_2.s.rx_precorr_lo_thrs = 0x40;
	csr_wr_node(node, CVMX_GSERX_RX_TXDIR_CTRL_2(qlm), rx_txdir_ctrl_2.u64);
}
3392
3393
3394
3395
3396
/**
 * Workaround for errata GSER-26150: apply updated PLL settings (VCO
 * amplitude = 0, LCVCO control-voltage select = 2, charge-pump trim = 2).
 * For PCIe the settings are simply written.  Otherwise each lane is first
 * placed in a low-power state via control-interface power-state overrides
 * (request value 0x3), the PLL settings are applied, the lanes are
 * brought back to the active state (request value 0x0), and the function
 * waits for PLL lock (and, for non-PCIe, reset-ready).
 *
 * @param node	  node number
 * @param qlm	  QLM to apply the workaround to
 * @param is_pcie non-zero if the QLM is configured for PCIe
 *
 * @return 0 on success, -1 on timeout waiting for PLL lock or reset-ready
 */
static int __qlm_errata_gser_26150(int node, int qlm, int is_pcie)
{
	int num_lanes = 4;
	int i;
	cvmx_gserx_glbl_pll_cfg_3_t pll_cfg_3;
	cvmx_gserx_glbl_misc_config_1_t misc_config_1;

	/* For PCIe, just write the new PLL values; no lane power-state
	 * dance is needed.
	 */
	if (is_pcie) {
		/* Update the PLL VCO amplitude and control-voltage select */
		pll_cfg_3.u64 = csr_rd_node(node, CVMX_GSERX_GLBL_PLL_CFG_3(qlm));
		pll_cfg_3.s.pcs_sds_pll_vco_amp = 0;
		pll_cfg_3.s.pll_vctrl_sel_lcvco_val = 2;
		csr_wr_node(node, CVMX_GSERX_GLBL_PLL_CFG_3(qlm), pll_cfg_3.u64);

		/* Update the charge-pump trim */
		misc_config_1.u64 = csr_rd_node(node, CVMX_GSERX_GLBL_MISC_CONFIG_1(qlm));
		misc_config_1.s.pcs_sds_trim_chp_reg = 2;
		csr_wr_node(node, CVMX_GSERX_GLBL_MISC_CONFIG_1(qlm), misc_config_1.u64);
		return 0;
	}

	/* If the new LCVCO value is already programmed, the workaround has
	 * been applied before; nothing to do.
	 */
	pll_cfg_3.u64 = csr_rd_node(node, CVMX_GSERX_GLBL_PLL_CFG_3(qlm));
	if (pll_cfg_3.s.pll_vctrl_sel_lcvco_val == 0x2)
		return 0;

	/* CN73xx DLM5 and DLM6 only have two lanes */
	if (OCTEON_IS_MODEL(OCTEON_CN73XX) && (qlm == 5 || qlm == 6))
		num_lanes = 2;

	/* Step 1: force every lane into the low-power state (TX/RX power
	 * state request 0x3) via control-interface overrides.
	 */
	for (i = 0; i < num_lanes; i++) {
		cvmx_gserx_lanex_pcs_ctlifc_0_t ctlifc0;
		cvmx_gserx_lanex_pcs_ctlifc_1_t ctlifc1;
		cvmx_gserx_lanex_pcs_ctlifc_2_t ctlifc2;

		/* TX power-state override value = 0x3 */
		ctlifc0.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_0(i, qlm));
		ctlifc0.s.cfg_tx_pstate_req_ovrrd_val = 0x3;
		csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_0(i, qlm), ctlifc0.u64);

		/* RX power-state override value = 0x3 */
		ctlifc1.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_1(i, qlm));
		ctlifc1.s.cfg_rx_pstate_req_ovrrd_val = 0x3;
		csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_1(i, qlm), ctlifc1.u64);

		/* Enable both power-state overrides */
		ctlifc2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(i, qlm));
		ctlifc2.s.cfg_tx_pstate_req_ovrrd_en = 0x1;
		ctlifc2.s.cfg_rx_pstate_req_ovrrd_en = 0x1;
		csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(i, qlm), ctlifc2.u64);

		/* Latch the overrides with a control-interface request */
		ctlifc2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(i, qlm));
		ctlifc2.s.ctlifc_ovrrd_req = 0x1;
		csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(i, qlm), ctlifc2.u64);
	}

	/* Step 2: apply the errata PLL settings while the lanes are in the
	 * low-power state.
	 */
	pll_cfg_3.u64 = csr_rd_node(node, CVMX_GSERX_GLBL_PLL_CFG_3(qlm));
	pll_cfg_3.s.pcs_sds_pll_vco_amp = 0;
	pll_cfg_3.s.pll_vctrl_sel_lcvco_val = 2;
	csr_wr_node(node, CVMX_GSERX_GLBL_PLL_CFG_3(qlm), pll_cfg_3.u64);

	/* Update the charge-pump trim */
	misc_config_1.u64 = csr_rd_node(node, CVMX_GSERX_GLBL_MISC_CONFIG_1(qlm));
	misc_config_1.s.pcs_sds_trim_chp_reg = 2;
	csr_wr_node(node, CVMX_GSERX_GLBL_MISC_CONFIG_1(qlm), misc_config_1.u64);

	/* Step 3: bring the lanes back to the active state (TX/RX power
	 * state request 0x0), again via overrides.
	 */
	for (i = 0; i < num_lanes; i++) {
		cvmx_gserx_lanex_pcs_ctlifc_0_t ctlifc0;
		cvmx_gserx_lanex_pcs_ctlifc_1_t ctlifc1;
		cvmx_gserx_lanex_pcs_ctlifc_2_t ctlifc2;

		/* TX power-state override value = 0x0 */
		ctlifc0.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_0(i, qlm));
		ctlifc0.s.cfg_tx_pstate_req_ovrrd_val = 0x0;
		csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_0(i, qlm), ctlifc0.u64);

		/* RX power-state override value = 0x0 */
		ctlifc1.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_1(i, qlm));
		ctlifc1.s.cfg_rx_pstate_req_ovrrd_val = 0x0;
		csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_1(i, qlm), ctlifc1.u64);

		/* Keep the overrides enabled while requesting the new state */
		ctlifc2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(i, qlm));
		ctlifc2.s.cfg_tx_pstate_req_ovrrd_en = 0x1;
		ctlifc2.s.cfg_rx_pstate_req_ovrrd_en = 0x1;
		csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(i, qlm), ctlifc2.u64);

		/* Latch the overrides with a control-interface request */
		ctlifc2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(i, qlm));
		ctlifc2.s.ctlifc_ovrrd_req = 0x1;
		csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(i, qlm), ctlifc2.u64);
	}

	/* Give the lanes time to reach the active state */
	mdelay(10);

	/* Step 4: release the power-state overrides on every lane */
	for (i = 0; i < num_lanes; i++) {
		cvmx_gserx_lanex_pcs_ctlifc_2_t ctlifc2;

		ctlifc2.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(i, qlm));
		ctlifc2.s.cfg_tx_pstate_req_ovrrd_en = 0x0;
		ctlifc2.s.cfg_rx_pstate_req_ovrrd_en = 0x0;
		csr_wr_node(node, CVMX_GSERX_LANEX_PCS_CTLIFC_2(i, qlm), ctlifc2.u64);
	}

	/* Wait for the PLL to re-lock with the new settings */
	if (CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_GSERX_PLL_STAT(qlm), cvmx_gserx_pll_stat_t,
				       pll_lock, ==, 1, 10000)) {
		printf("%d:QLM%d: Timeout waiting for GSERX_PLL_STAT[pll_lock]\n", node, qlm);
		return -1;
	}

	/* For non-PCIe protocols also wait for the QLM to report
	 * reset-ready.
	 */
	if (is_pcie == 0 &&
	    CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_GSERX_QLM_STAT(qlm), cvmx_gserx_qlm_stat_t,
				       rst_rdy, ==, 1, 10000)) {
		printf("%d:QLM%d: Timeout waiting for GSERX_QLM_STAT[rst_rdy]\n", node, qlm);
		return -1;
	}

	return 0;
}
3561
3562
3563
3564
3565
3566
3567
3568
/**
 * Program the PLL and lane-mode registers for every lane mode of a CN78xx
 * QLM, using the reference clock previously recorded for that
 * node/QLM/lane mode in ref_clk_cn78xx[] and the values from
 * refclk_settings_cn78xx[].  Lane modes whose settings table entry is not
 * valid are reported and skipped.
 *
 * @param node node number
 * @param qlm  QLM to configure
 */
static void __qlm_setup_pll_cn78xx(int node, int qlm)
{
	cvmx_gserx_pll_px_mode_0_t mode_0;
	cvmx_gserx_pll_px_mode_1_t mode_1;
	cvmx_gserx_lane_px_mode_0_t pmode_0;
	cvmx_gserx_lane_px_mode_1_t pmode_1;
	int lane_mode;
	int ref_clk;
	const struct refclk_settings_cn78xx *clk_settings;

	for (lane_mode = 0; lane_mode < R_NUM_LANE_MODES; lane_mode++) {
		/* Start from the current PLL mode registers (only selected
		 * fields are overwritten); the lane mode registers are
		 * rebuilt from scratch.
		 */
		mode_0.u64 = csr_rd_node(node, CVMX_GSERX_PLL_PX_MODE_0(lane_mode, qlm));
		mode_1.u64 = csr_rd_node(node, CVMX_GSERX_PLL_PX_MODE_1(lane_mode, qlm));
		pmode_0.u64 = 0;
		pmode_1.u64 = 0;
		ref_clk = ref_clk_cn78xx[node][qlm][lane_mode];
		clk_settings = &refclk_settings_cn78xx[lane_mode][ref_clk];
		debug("%s(%d, %d): lane_mode: 0x%x, ref_clk: %d\n", __func__, node, qlm, lane_mode,
		      ref_clk);

		if (!clk_settings->valid) {
			printf("%s: Error: reference clock %d is not supported for lane mode %d on qlm %d\n",
			       __func__, ref_clk, lane_mode, qlm);
			continue;
		}

		/* PLL mode 0: charge-pump current, loop resistor, PCS divider */
		mode_0.s.pll_icp = clk_settings->mode_0.s.pll_icp;
		mode_0.s.pll_rloop = clk_settings->mode_0.s.pll_rloop;
		mode_0.s.pll_pcs_div = clk_settings->mode_0.s.pll_pcs_div;

		/* PLL mode 1: fractional enable, CP adjust, PCIe3, op range, divider */
		mode_1.s.pll_16p5en = clk_settings->mode_1.s.pll_16p5en;
		mode_1.s.pll_cpadj = clk_settings->mode_1.s.pll_cpadj;
		mode_1.s.pll_pcie3en = clk_settings->mode_1.s.pll_pcie3en;
		mode_1.s.pll_opr = clk_settings->mode_1.s.pll_opr;
		mode_1.s.pll_div = clk_settings->mode_1.s.pll_div;

		/* Lane mode registers are taken verbatim from the table */
		pmode_0.u64 = clk_settings->pmode_0.u64;

		pmode_1.u64 = clk_settings->pmode_1.u64;

		/* Note: PLL_PX_MODE_0 is deliberately written last */
		csr_wr_node(node, CVMX_GSERX_PLL_PX_MODE_1(lane_mode, qlm), mode_1.u64);
		csr_wr_node(node, CVMX_GSERX_LANE_PX_MODE_0(lane_mode, qlm), pmode_0.u64);
		csr_wr_node(node, CVMX_GSERX_LANE_PX_MODE_1(lane_mode, qlm), pmode_1.u64);
		csr_wr_node(node, CVMX_GSERX_PLL_PX_MODE_0(lane_mode, qlm), mode_0.u64);
	}
}
3615
3616
3617
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
3629
3630
3631static int __get_lane_mode_for_speed_and_ref_clk(int ref_clk_sel, int baud_mhz,
3632 bool *alt_pll_settings)
3633{
3634 if (alt_pll_settings)
3635 *alt_pll_settings = false;
3636 switch (baud_mhz) {
3637 case 98304:
3638 case 49152:
3639 case 24576:
3640 case 12288:
3641 if (ref_clk_sel != 3) {
3642 printf("Error: Invalid ref clock\n");
3643 return -1;
3644 }
3645 return 0x5;
3646 case 6144:
3647 case 3072:
3648 if (ref_clk_sel != 3) {
3649 printf("Error: Invalid ref clock\n");
3650 return -1;
3651 }
3652 return 0x8;
3653 case 1250:
3654 if (alt_pll_settings)
3655 *alt_pll_settings = (ref_clk_sel != 2);
3656 return R_125G_REFCLK15625_SGMII;
3657 case 2500:
3658 if (ref_clk_sel == 0)
3659 return R_2_5G_REFCLK100;
3660
3661 if (alt_pll_settings)
3662 *alt_pll_settings = (ref_clk_sel != 1);
3663 return R_2_5G_REFCLK125;
3664 case 3125:
3665 if (ref_clk_sel == 2) {
3666 return R_3125G_REFCLK15625_XAUI;
3667 } else if (ref_clk_sel == 1) {
3668 if (alt_pll_settings)
3669 *alt_pll_settings = true;
3670 return R_3125G_REFCLK15625_XAUI;
3671 }
3672
3673 printf("Error: Invalid speed\n");
3674 return -1;
3675 case 5000:
3676 if (ref_clk_sel == 0) {
3677 return R_5G_REFCLK100;
3678 } else if (ref_clk_sel == 1) {
3679 if (alt_pll_settings)
3680 *alt_pll_settings = (ref_clk_sel != 1);
3681 return R_5G_REFCLK125;
3682 } else {
3683 return R_5G_REFCLK15625_QSGMII;
3684 }
3685 case 6250:
3686 if (ref_clk_sel != 0) {
3687 if (alt_pll_settings)
3688 *alt_pll_settings = (ref_clk_sel != 2);
3689 return R_625G_REFCLK15625_RXAUI;
3690 }
3691
3692 printf("Error: Invalid speed\n");
3693 return -1;
3694 case 6316:
3695 if (ref_clk_sel != 3) {
3696 printf("Error: Invalid speed\n");
3697 } else {
3698 *alt_pll_settings = true;
3699 return R_625G_REFCLK15625_RXAUI;
3700 }
3701 case 8000:
3702 if (ref_clk_sel == 0)
3703 return R_8G_REFCLK100;
3704 else if (ref_clk_sel == 1)
3705 return R_8G_REFCLK125;
3706
3707 printf("Error: Invalid speed\n");
3708 return -1;
3709 case 103125:
3710 if (ref_clk_sel == 3 && alt_pll_settings)
3711 *alt_pll_settings = true;
3712
3713 if (ref_clk_sel == 2 || ref_clk_sel == 3)
3714 return R_103125G_REFCLK15625_KR;
3715
3716 default:
3717 printf("Error: Invalid speed\n");
3718 return -1;
3719 }
3720
3721 return -1;
3722}
3723
3724
3725
3726
3727
3728static void __set_sli_window_ctl_errata_31375(int node)
3729{
3730 if (OCTEON_IS_MODEL(OCTEON_CN78XX) || OCTEON_IS_MODEL(OCTEON_CN73XX) ||
3731 OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
3732 cvmx_sli_window_ctl_t window_ctl;
3733
3734 window_ctl.u64 = csr_rd_node(node, CVMX_PEXP_SLI_WINDOW_CTL);
3735
3736 if (window_ctl.s.time != 8191)
3737 return;
3738
3739 window_ctl.s.time = gd->bus_clk * 525ull / 1000000;
3740 csr_wr_node(node, CVMX_PEXP_SLI_WINDOW_CTL, window_ctl.u64);
3741 }
3742}
3743
/*
 * CN78XX pass 1.x PCIe endpoint errata workaround: wait for the host to
 * release the EP from reset, force the link to retrain at Gen3, and pulse
 * per-lane RX DLL-lock / RX-reset overrides on the QLM(s) backing the PEM.
 *
 * @param node  OCI node the PEM lives on
 * @param pem   PEM (PCIe MAC) index
 */
static void __cvmx_qlm_pcie_errata_ep_cn78xx(int node, int pem)
{
	cvmx_pciercx_cfg031_t cfg031;
	cvmx_pciercx_cfg032_t cfg032;
	cvmx_pciercx_cfg040_t cfg040;
	cvmx_pemx_cfg_t pemx_cfg;
	cvmx_pemx_on_t pemx_on;
	int low_qlm, high_qlm;
	int qlm, lane;
	u64 start_cycle;

	pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(pem));

	/*
	 * Poll until PEMX_ON[PEMOOR] indicates the EP is out of reset.
	 * NOTE(review): there is no timeout here - this loops forever if
	 * the remote host never de-asserts reset. Confirm this is intended.
	 */
	printf("PCIe%d: Waiting for EP out of reset\n", pem);
	while (pemx_on.s.pemoor == 0) {
		udelay(1000);
		pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(pem));
	}

	/* Advertise and request Gen3: MD=2 (8 GT/s), MLS=2, TLS=3 */
	printf("PCIe%d: Enabling Gen3 for EP\n", pem);

	pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(pem));
	pemx_cfg.s.md = 2;
	csr_wr_node(node, CVMX_PEMX_CFG(pem), pemx_cfg.u64);
	cfg031.u32 = cvmx_pcie_cfgx_read_node(node, pem, CVMX_PCIERCX_CFG031(pem));
	cfg031.s.mls = 2;
	cvmx_pcie_cfgx_write_node(node, pem, CVMX_PCIERCX_CFG031(pem), cfg031.u32);
	cfg040.u32 = cvmx_pcie_cfgx_read_node(node, pem, CVMX_PCIERCX_CFG040(pem));
	cfg040.s.tls = 3;
	cvmx_pcie_cfgx_write_node(node, pem, CVMX_PCIERCX_CFG040(pem), cfg040.u32);

	/* Wait up to 10 ms for the link to reach Gen3 (CFG032[LS] == 3);
	 * give up silently on timeout. */
	start_cycle = get_timer(0);
	do {
		if (get_timer(start_cycle) > 10)
			return;

		mdelay(1);
		cfg032.u32 = cvmx_pcie_cfgx_read_node(node, pem, CVMX_PCIERCX_CFG032(pem));
	} while (cfg032.s.ls != 3);

	/* An 8-lane PEM spans two consecutive QLMs */
	pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(pem));
	low_qlm = pem;
	high_qlm = (pemx_cfg.cn78xx.lanes8) ? low_qlm + 1 : low_qlm;

	/*
	 * Assert, then de-assert, the per-lane RX DLL-lock and RX-reset
	 * override enables on every lane of the affected QLM(s).
	 * NOTE(review): the register index passed is "pem", not "qlm",
	 * even though the loop variable is qlm - confirm this is correct.
	 */
	for (qlm = low_qlm; qlm <= high_qlm; qlm++) {
		for (lane = 0; lane < 4; lane++) {
			cvmx_gserx_lanex_rx_misc_ovrrd_t misc_ovrrd;
			cvmx_gserx_lanex_pwr_ctrl_t pwr_ctrl;

			misc_ovrrd.u64 =
				csr_rd_node(node, CVMX_GSERX_LANEX_RX_MISC_OVRRD(lane, pem));
			misc_ovrrd.s.cfg_rx_dll_locken_ovrrd_en = 1;
			csr_wr_node(node, CVMX_GSERX_LANEX_RX_MISC_OVRRD(lane, pem),
				    misc_ovrrd.u64);
			pwr_ctrl.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PWR_CTRL(lane, pem));
			pwr_ctrl.s.rx_resetn_ovrrd_en = 1;
			csr_wr_node(node, CVMX_GSERX_LANEX_PWR_CTRL(lane, pem), pwr_ctrl.u64);
		}
	}
	for (qlm = low_qlm; qlm <= high_qlm; qlm++) {
		for (lane = 0; lane < 4; lane++) {
			cvmx_gserx_lanex_rx_misc_ovrrd_t misc_ovrrd;
			cvmx_gserx_lanex_pwr_ctrl_t pwr_ctrl;

			misc_ovrrd.u64 =
				csr_rd_node(node, CVMX_GSERX_LANEX_RX_MISC_OVRRD(lane, pem));
			misc_ovrrd.s.cfg_rx_dll_locken_ovrrd_en = 0;
			csr_wr_node(node, CVMX_GSERX_LANEX_RX_MISC_OVRRD(lane, pem),
				    misc_ovrrd.u64);
			pwr_ctrl.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PWR_CTRL(lane, pem));
			pwr_ctrl.s.rx_resetn_ovrrd_en = 0;
			csr_wr_node(node, CVMX_GSERX_LANEX_PWR_CTRL(lane, pem), pwr_ctrl.u64);
		}
	}

	/* Wait up to 1 s for the link to come back up after the override
	 * pulse */
	if (CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_PEMX_ON(pem), cvmx_pemx_on_t, pemoor, ==, 1,
				       1000000)) {
		printf("PCIe%d: Timeout waiting for EP link up at Gen3\n", pem);
		return;
	}
}
3834
/*
 * Apply CN78XX pass 1.x PCIe errata workarounds for one QLM.
 *
 * Maps the QLM to its PEM, then (host mode, pass 1.0) resets the PHY and
 * programs slice/power overrides; (endpoint mode) tunes the RC config
 * timeout registers and optionally hands off to the Gen3 EP monitor;
 * (host mode, pass 1.0) finally pulses PERST to the downstream device.
 *
 * @param node  OCI node number
 * @param qlm   QLM to process
 */
static void __cvmx_qlm_pcie_errata_cn78xx(int node, int qlm)
{
	int pem, i, q;
	int is_8lanes;
	int is_high_lanes;
	int low_qlm, high_qlm, is_host;
	int need_ep_monitor;
	cvmx_pemx_cfg_t pem_cfg, pem3_cfg;
	cvmx_gserx_slice_cfg_t slice_cfg;
	cvmx_gserx_rx_pwr_ctrl_p1_t pwr_ctrl_p1;
	cvmx_rst_soft_prstx_t soft_prst;

	/* These workarounds only apply to CN78XX pass 1.x silicon */
	if (!OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
		return;

	/*
	 * Determine which PEM drives this QLM, whether the link is 8 lanes
	 * (spanning two QLMs) and whether this QLM is the upper half.
	 */
	switch (qlm) {
	case 0:
		pem_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(0));
		pem = 0;
		is_8lanes = pem_cfg.cn78xx.lanes8;
		is_high_lanes = 0;
		break;
	case 1:
		/* QLM1 belongs to PEM0 when PEM0 is 8 lanes, else PEM1 */
		pem_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(0));
		pem = (pem_cfg.cn78xx.lanes8) ? 0 : 1;
		is_8lanes = pem_cfg.cn78xx.lanes8;
		is_high_lanes = is_8lanes;
		break;
	case 2:
		pem_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(2));
		pem = 2;
		is_8lanes = pem_cfg.cn78xx.lanes8;
		is_high_lanes = 0;
		break;
	case 3:
		/* QLM3 belongs to PEM2 when PEM2 is 8 lanes, else PEM3 */
		pem_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(2));
		pem3_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(3));
		pem = (pem_cfg.cn78xx.lanes8) ? 2 : 3;
		is_8lanes = (pem == 2) ? pem_cfg.cn78xx.lanes8 : pem3_cfg.cn78xx.lanes8;
		is_high_lanes = (pem == 2) && is_8lanes;
		break;
	case 4:
		/* QLM4 is always the upper half of an 8-lane PEM3 */
		pem = 3;
		is_8lanes = 1;
		is_high_lanes = 1;
		break;
	default:
		return;
	}

	/*
	 * For an 8-lane link, run the workaround once - when processing
	 * the upper (second) QLM, so both QLMs are configured together.
	 */
	if (is_8lanes && !is_high_lanes)
		return;

	pem_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(pem));
	is_host = pem_cfg.cn78xx.hostmd;
	low_qlm = (is_8lanes) ? qlm - 1 : qlm;
	high_qlm = qlm;
	qlm = -1;	/* guard: use q/low_qlm/high_qlm from here on */

	if (!is_host) {
		/*
		 * Endpoint on pass 1.0: if the slice detect level is
		 * already 7 the workaround has been applied (e.g. by a
		 * previous boot stage) - nothing more to do.
		 */
		slice_cfg.u64 = csr_rd_node(node, CVMX_GSERX_SLICE_CFG(low_qlm));
		if (slice_cfg.s.tx_rx_detect_lvl_enc == 7 && OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0))
			return;
	}

	if (is_host && OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0)) {
		/* Host on pass 1.0: hard-reset the PHY(s), then release */
		cvmx_gserx_phy_ctl_t phy_ctl;

		for (q = low_qlm; q <= high_qlm; q++) {
			phy_ctl.u64 = csr_rd_node(node, CVMX_GSERX_PHY_CTL(q));
			phy_ctl.s.phy_reset = 1;
			csr_wr_node(node, CVMX_GSERX_PHY_CTL(q), phy_ctl.u64);
		}
		udelay(5);

		for (q = low_qlm; q <= high_qlm; q++) {
			phy_ctl.u64 = csr_rd_node(node, CVMX_GSERX_PHY_CTL(q));
			phy_ctl.s.phy_reset = 0;
			csr_wr_node(node, CVMX_GSERX_PHY_CTL(q), phy_ctl.u64);
		}
		udelay(5);
	}

	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0)) {
		/* Raise the TX/RX detect level on every affected QLM */
		for (q = low_qlm; q <= high_qlm; q++) {
			slice_cfg.u64 = csr_rd_node(node, CVMX_GSERX_SLICE_CFG(q));
			slice_cfg.s.tx_rx_detect_lvl_enc = 7;
			csr_wr_node(node, CVMX_GSERX_SLICE_CFG(q), slice_cfg.u64);
		}

		/*
		 * Clear bit 2 of the P1 RX sub-block power-down mask so
		 * that block stays powered in power state P1.
		 */
		for (q = low_qlm; q <= high_qlm; q++) {
			pwr_ctrl_p1.u64 = csr_rd_node(node, CVMX_GSERX_RX_PWR_CTRL_P1(q));
			pwr_ctrl_p1.s.p1_rx_subblk_pd &= ~4;
			csr_wr_node(node, CVMX_GSERX_RX_PWR_CTRL_P1(q), pwr_ctrl_p1.u64);
		}

		/*
		 * Override the per-lane TX reset so the transmitter is
		 * held out of reset regardless of the power state machine.
		 */
		for (q = low_qlm; q <= high_qlm; q++) {
			for (i = 0; i < 4; i++) {
				cvmx_gserx_lanex_tx_cfg_0_t tx_cfg;
				cvmx_gserx_lanex_pwr_ctrl_t pwr_ctrl;

				tx_cfg.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_TX_CFG_0(i, q));
				tx_cfg.s.tx_resetn_ovrrd_val = 1;
				csr_wr_node(node, CVMX_GSERX_LANEX_TX_CFG_0(i, q), tx_cfg.u64);
				pwr_ctrl.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_PWR_CTRL(i, q));
				pwr_ctrl.s.tx_p2s_resetn_ovrrd_en = 1;
				csr_wr_node(node, CVMX_GSERX_LANEX_PWR_CTRL(i, q), pwr_ctrl.u64);
			}
		}
	}

	if (!is_host) {
		cvmx_pciercx_cfg089_t cfg089;
		cvmx_pciercx_cfg090_t cfg090;
		cvmx_pciercx_cfg091_t cfg091;
		cvmx_pciercx_cfg092_t cfg092;
		cvmx_pciercx_cfg548_t cfg548;
		cvmx_pciercx_cfg554_t cfg554;

		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0)) {
			/*
			 * Endpoint on pass 1.0: program the lane 0-7
			 * upstream RX/TX preset hints in CFG089..CFG092
			 * (2/7 for each lane) before link training.
			 */
			cfg089.u32 = cvmx_pcie_cfgx_read_node(node, pem, CVMX_PCIERCX_CFG089(pem));
			cfg089.s.l1urph = 2;
			cfg089.s.l1utp = 7;
			cfg089.s.l0urph = 2;
			cfg089.s.l0utp = 7;
			cvmx_pcie_cfgx_write_node(node, pem, CVMX_PCIERCX_CFG089(pem), cfg089.u32);
			cfg090.u32 = cvmx_pcie_cfgx_read_node(node, pem, CVMX_PCIERCX_CFG090(pem));
			cfg090.s.l3urph = 2;
			cfg090.s.l3utp = 7;
			cfg090.s.l2urph = 2;
			cfg090.s.l2utp = 7;
			cvmx_pcie_cfgx_write_node(node, pem, CVMX_PCIERCX_CFG090(pem), cfg090.u32);
			cfg091.u32 = cvmx_pcie_cfgx_read_node(node, pem, CVMX_PCIERCX_CFG091(pem));
			cfg091.s.l5urph = 2;
			cfg091.s.l5utp = 7;
			cfg091.s.l4urph = 2;
			cfg091.s.l4utp = 7;
			cvmx_pcie_cfgx_write_node(node, pem, CVMX_PCIERCX_CFG091(pem), cfg091.u32);
			cfg092.u32 = cvmx_pcie_cfgx_read_node(node, pem, CVMX_PCIERCX_CFG092(pem));
			cfg092.s.l7urph = 2;
			cfg092.s.l7utp = 7;
			cfg092.s.l6urph = 2;
			cfg092.s.l6utp = 7;
			cvmx_pcie_cfgx_write_node(node, pem, CVMX_PCIERCX_CFG092(pem), cfg092.u32);

			cfg548.u32 = cvmx_pcie_cfgx_read_node(node, pem, CVMX_PCIERCX_CFG548(pem));
			cfg548.s.ep2p3d = 1;
			cvmx_pcie_cfgx_write_node(node, pem, CVMX_PCIERCX_CFG548(pem), cfg548.u32);
		}

		/* Gen3 equalization tweaks in CFG554 (all silicon passes) */
		cfg554.u32 = cvmx_pcie_cfgx_read_node(node, pem, CVMX_PCIERCX_CFG554(pem));
		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0))
			cfg554.s.p23td = 1;
		cfg554.s.prv = 0x3ff;
		cvmx_pcie_cfgx_write_node(node, pem, CVMX_PCIERCX_CFG554(pem), cfg554.u32);

		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0)) {
			/* EP monitor needed only when Gen3 was requested */
			need_ep_monitor = (pem_cfg.s.md == 2);
			if (need_ep_monitor) {
				cvmx_pciercx_cfg031_t cfg031;
				cvmx_pciercx_cfg040_t cfg040;

				/*
				 * Temporarily downgrade the link to Gen1
				 * (MD/MLS = 0, TLS = 1); the EP monitor
				 * then walks the link back up to Gen3.
				 */
				pem_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(pem));
				pem_cfg.s.md = 0;
				csr_wr_node(node, CVMX_PEMX_CFG(pem), pem_cfg.u64);
				cfg031.u32 = cvmx_pcie_cfgx_read_node(node, pem,
								      CVMX_PCIERCX_CFG031(pem));
				cfg031.s.mls = 0;
				cvmx_pcie_cfgx_write_node(node, pem, CVMX_PCIERCX_CFG031(pem),
							  cfg031.u32);
				cfg040.u32 = cvmx_pcie_cfgx_read_node(node, pem,
								      CVMX_PCIERCX_CFG040(pem));
				cfg040.s.tls = 1;
				cvmx_pcie_cfgx_write_node(node, pem, CVMX_PCIERCX_CFG040(pem),
							  cfg040.u32);
				__cvmx_qlm_pcie_errata_ep_cn78xx(node, pem);
			}
			return;
		}
	}

	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0)) {
		/*
		 * Host on pass 1.0: pulse PERST to the downstream device
		 * (assert for ~1 us, then de-assert) so it retrains
		 * against the reworked PHY settings.
		 */
		soft_prst.u64 = csr_rd_node(node, CVMX_RST_SOFT_PRSTX(pem));
		soft_prst.s.soft_prst = 0;
		csr_wr_node(node, CVMX_RST_SOFT_PRSTX(pem), soft_prst.u64);
		udelay(1);

		/*
		 * NOTE(review): soft_prst = 1 asserts reset here; the
		 * polarity relative to the 0-write above should be
		 * confirmed against the RST_SOFT_PRST definition.
		 */
		soft_prst.u64 = csr_rd_node(node, CVMX_RST_SOFT_PRSTX(pem));
		soft_prst.s.soft_prst = 1;
		csr_wr_node(node, CVMX_RST_SOFT_PRSTX(pem), soft_prst.u64);
	}
	udelay(1);
}
4062
4063
4064
4065
4066
4067
4068
4069
4070
4071static void __setup_pem_reset(int node, int pem, int is_endpoint)
4072{
4073 cvmx_rst_ctlx_t rst_ctl;
4074
4075
4076 is_endpoint = (is_endpoint != 0);
4077 rst_ctl.u64 = csr_rd_node(node, CVMX_RST_CTLX(pem));
4078 rst_ctl.s.prst_link = 0;
4079 rst_ctl.s.rst_link = is_endpoint;
4080 rst_ctl.s.rst_drv = !is_endpoint;
4081 rst_ctl.s.rst_rcv = is_endpoint;
4082 rst_ctl.s.rst_chip = 0;
4083 csr_wr_node(node, CVMX_RST_CTLX(pem), rst_ctl.u64);
4084}
4085
4086
4087
4088
4089
4090
4091
4092
4093
4094
4095
4096
4097
4098
4099
4100
4101
4102
4103
4104
4105
4106
4107
/**
 * Configure a CN78XX QLM/serdes for the requested mode.
 *
 * @param node           OCI node number
 * @param qlm            QLM to configure
 * @param baud_mhz       desired link speed in Mbaud (e.g. 1250, 103125)
 * @param mode           CVMX_QLM_MODE_xxx (PCIe, SGMII, XAUI, RXAUI, XFI,
 *                       XLAUI, KR variants, ILK, DISABLED)
 * @param rc             PCIe only: 1 = root complex, 0 = endpoint
 * @param gen3           PCIe only: 0 = gen1, 1 = gen2, 2 = gen3
 * @param ref_clk_sel    reference clock select: 0 = 100 MHz, 1 = 125 MHz,
 *                       2 = 156.25 MHz
 * @param ref_clk_input  which reference clock input pin pair to use
 *                       (0/1 = common clock 0/1, other = QLMC_REF_CLK1)
 *
 * Return: 0 on success, -1 on error.
 */
int octeon_configure_qlm_cn78xx(int node, int qlm, int baud_mhz, int mode, int rc, int gen3,
				int ref_clk_sel, int ref_clk_input)
{
	cvmx_gserx_phy_ctl_t phy_ctl;
	cvmx_gserx_lane_mode_t lmode;
	cvmx_gserx_cfg_t cfg;
	cvmx_gserx_refclk_sel_t refclk_sel;

	int is_pcie = 0;
	int is_ilk = 0;
	int is_bgx = 0;
	int lane_mode = 0;
	int lmac_type = 0;
	bool alt_pll = false;
	int num_ports = 0;
	int lane_to_sds = 0;

	debug("%s(node: %d, qlm: %d, baud_mhz: %d, mode: %d, rc: %d, gen3: %d, ref_clk_sel: %d, ref_clk_input: %d\n",
	      __func__, node, qlm, baud_mhz, mode, rc, gen3, ref_clk_sel, ref_clk_input);
	/* CN76XX is a reduced-pin variant without the upper QLMs */
	if (OCTEON_IS_MODEL(OCTEON_CN76XX) && qlm > 4) {
		debug("%s: qlm %d not present on CN76XX\n", __func__, qlm);
		return -1;
	}

	/* Apply the SLI window-control errata fix (SLI-31375) before
	 * touching any PCIe configuration */
	__set_sli_window_ctl_errata_31375(node);

	cfg.u64 = csr_rd_node(node, CVMX_GSERX_CFG(qlm));

	/* If the QLM is already strapped for PCIe endpoint mode, leave it
	 * alone - the remote host owns the configuration */
	if (cfg.s.pcie && rc == 0) {
		debug("%s: node %d, qlm %d is in PCIe endpoint mode, returning\n",
		      __func__, node, qlm);
		return 0;
	}

	/* Select which reference clock input pin pair feeds this QLM */
	refclk_sel.u64 = 0;
	if (ref_clk_input == 0) {
		refclk_sel.s.com_clk_sel = 0;
		refclk_sel.s.use_com1 = 0;
	} else if (ref_clk_input == 1) {
		refclk_sel.s.com_clk_sel = 1;
		refclk_sel.s.use_com1 = 0;
	} else {
		refclk_sel.s.com_clk_sel = 1;
		refclk_sel.s.use_com1 = 1;
	}

	csr_wr_node(node, CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);

	/* Hold the PHY in reset and power it down while reconfiguring */
	phy_ctl.u64 = csr_rd_node(node, CVMX_GSERX_PHY_CTL(qlm));
	phy_ctl.s.phy_reset = 1;
	phy_ctl.s.phy_pd = 1;
	csr_wr_node(node, CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);

	udelay(1000);

	/* Start from the default per-lane-mode reference clock table */
	memcpy(ref_clk_cn78xx[node][qlm], def_ref_clk_cn78xx, sizeof(def_ref_clk_cn78xx));
	switch (mode) {
	case CVMX_QLM_MODE_PCIE:
	case CVMX_QLM_MODE_PCIE_1X8: {
		cvmx_pemx_cfg_t pemx_cfg;
		cvmx_pemx_on_t pemx_on;

		is_pcie = 1;

		/* Pick the PCIe lane mode from the gen and the 100/125 MHz
		 * reference clock; 156.25 MHz is not valid for PCIe */
		if (ref_clk_sel == 0) {
			refclk_sel.u64 = csr_rd_node(node, CVMX_GSERX_REFCLK_SEL(qlm));
			refclk_sel.s.pcie_refclk125 = 0;
			csr_wr_node(node, CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);
			if (gen3 == 0)
				lane_mode = R_2_5G_REFCLK100;
			else if (gen3 == 1)
				lane_mode = R_5G_REFCLK100;
			else
				lane_mode = R_8G_REFCLK100;
		} else if (ref_clk_sel == 1) {
			refclk_sel.u64 = csr_rd_node(node, CVMX_GSERX_REFCLK_SEL(qlm));
			refclk_sel.s.pcie_refclk125 = 1;
			csr_wr_node(node, CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);
			if (gen3 == 0)
				lane_mode = R_2_5G_REFCLK125;
			else if (gen3 == 1)
				lane_mode = R_5G_REFCLK125;
			else
				lane_mode = R_8G_REFCLK125;
		} else {
			printf("Invalid reference clock for PCIe on QLM%d\n", qlm);
			return -1;
		}

		/* Per-QLM PEM wiring: QLM0/1 -> PEM0/1, QLM2/3 -> PEM2/3,
		 * QLM4 -> PEM3; an x8 link pairs two adjacent QLMs */
		switch (qlm) {
		case 0:
		{
			cvmx_rst_soft_prstx_t rst_prst;

			rst_prst.u64 = csr_rd_node(node, CVMX_RST_SOFT_PRSTX(0));
			rst_prst.s.soft_prst = rc;
			csr_wr_node(node, CVMX_RST_SOFT_PRSTX(0), rst_prst.u64);
			__setup_pem_reset(node, 0, !rc);

			pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(0));
			pemx_cfg.cn78xx.lanes8 = (mode == CVMX_QLM_MODE_PCIE_1X8);
			pemx_cfg.cn78xx.hostmd = rc;
			pemx_cfg.cn78xx.md = gen3;
			csr_wr_node(node, CVMX_PEMX_CFG(0), pemx_cfg.u64);

			/* For x8 the PEM is enabled when the second (upper)
			 * QLM is configured, not here */
			if (mode == CVMX_QLM_MODE_PCIE) {
				pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(0));
				pemx_on.s.pemon = 1;
				csr_wr_node(node, CVMX_PEMX_ON(0), pemx_on.u64);
			}
			break;
		}
		case 1:
		{
			if (mode == CVMX_QLM_MODE_PCIE) {
				/* QLM1 as independent x4 on PEM1 */
				cvmx_rst_soft_prstx_t rst_prst;
				cvmx_pemx_cfg_t pemx_cfg;

				rst_prst.u64 = csr_rd_node(node, CVMX_RST_SOFT_PRSTX(1));
				rst_prst.s.soft_prst = rc;
				csr_wr_node(node, CVMX_RST_SOFT_PRSTX(1), rst_prst.u64);
				__setup_pem_reset(node, 1, !rc);

				pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(1));
				pemx_cfg.cn78xx.lanes8 = 0;
				pemx_cfg.cn78xx.hostmd = rc;
				pemx_cfg.cn78xx.md = gen3;
				csr_wr_node(node, CVMX_PEMX_CFG(1), pemx_cfg.u64);

				pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(1));
				pemx_on.s.pemon = 1;
				csr_wr_node(node, CVMX_PEMX_ON(1), pemx_on.u64);
			} else {
				/* Upper half of a PEM0 x8 - enable PEM0 now */
				pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(0));
				pemx_on.s.pemon = 1;
				csr_wr_node(node, CVMX_PEMX_ON(0), pemx_on.u64);
			}
			break;
		}
		case 2:
		{
			cvmx_rst_soft_prstx_t rst_prst;

			rst_prst.u64 = csr_rd_node(node, CVMX_RST_SOFT_PRSTX(2));
			rst_prst.s.soft_prst = rc;
			csr_wr_node(node, CVMX_RST_SOFT_PRSTX(2), rst_prst.u64);
			__setup_pem_reset(node, 2, !rc);

			pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(2));
			pemx_cfg.cn78xx.lanes8 = (mode == CVMX_QLM_MODE_PCIE_1X8);
			pemx_cfg.cn78xx.hostmd = rc;
			pemx_cfg.cn78xx.md = gen3;
			csr_wr_node(node, CVMX_PEMX_CFG(2), pemx_cfg.u64);

			if (mode == CVMX_QLM_MODE_PCIE) {
				pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(2));
				pemx_on.s.pemon = 1;
				csr_wr_node(node, CVMX_PEMX_ON(2), pemx_on.u64);
			}
			break;
		}
		case 3:
		{
			/* If PEM2 is x8, QLM3 is its upper half - enable
			 * PEM2 now that both halves are configured */
			pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(2));
			if (pemx_cfg.cn78xx.lanes8) {
				pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(2));
				pemx_on.s.pemon = 1;
				csr_wr_node(node, CVMX_PEMX_ON(2), pemx_on.u64);
			}

			if (mode == CVMX_QLM_MODE_PCIE) {
				/* QLM3 as independent x4 on PEM3 */
				cvmx_rst_soft_prstx_t rst_prst;

				rst_prst.u64 = csr_rd_node(node, CVMX_RST_SOFT_PRSTX(3));
				rst_prst.s.soft_prst = rc;
				csr_wr_node(node, CVMX_RST_SOFT_PRSTX(3), rst_prst.u64);
				__setup_pem_reset(node, 3, !rc);

				pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(3));
				pemx_cfg.cn78xx.lanes8 = 0;
				pemx_cfg.cn78xx.hostmd = rc;
				pemx_cfg.cn78xx.md = gen3;
				csr_wr_node(node, CVMX_PEMX_CFG(3), pemx_cfg.u64);

				pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(3));
				pemx_on.s.pemon = 1;
				csr_wr_node(node, CVMX_PEMX_ON(3), pemx_on.u64);
			}
			break;
		}
		case 4:
		{
			if (mode == CVMX_QLM_MODE_PCIE_1X8) {
				/* Upper half of a PEM3 x8 (QLM3+QLM4) -
				 * enable PEM3 now */
				pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(3));
				pemx_on.s.pemon = 1;
				csr_wr_node(node, CVMX_PEMX_ON(3), pemx_on.u64);
			} else {
				/* QLM4 as independent x4 on PEM3: steer
				 * PEM3 to QLM4 via PEMX_QLM */
				cvmx_pemx_qlm_t pemx_qlm;
				cvmx_rst_soft_prstx_t rst_prst;

				rst_prst.u64 = csr_rd_node(node, CVMX_RST_SOFT_PRSTX(3));
				rst_prst.s.soft_prst = rc;
				csr_wr_node(node, CVMX_RST_SOFT_PRSTX(3), rst_prst.u64);
				__setup_pem_reset(node, 3, !rc);

				pemx_cfg.u64 = csr_rd_node(node, CVMX_PEMX_CFG(3));
				pemx_cfg.cn78xx.lanes8 = 0;
				pemx_cfg.cn78xx.hostmd = rc;
				pemx_cfg.cn78xx.md = gen3;
				csr_wr_node(node, CVMX_PEMX_CFG(3), pemx_cfg.u64);

				pemx_qlm.u64 = csr_rd_node(node, CVMX_PEMX_QLM(3));
				pemx_qlm.cn78xx.pem3qlm = 1;
				csr_wr_node(node, CVMX_PEMX_QLM(3), pemx_qlm.u64);
				pemx_on.u64 = csr_rd_node(node, CVMX_PEMX_ON(3));
				pemx_on.s.pemon = 1;
				csr_wr_node(node, CVMX_PEMX_ON(3), pemx_on.u64);
			}
			break;
		}
		default:
			break;
		}
		break;
	}
	case CVMX_QLM_MODE_ILK:
		is_ilk = 1;
		lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz, &alt_pll);
		if (lane_mode == -1)
			return -1;

		break;
	case CVMX_QLM_MODE_SGMII:
		/* is_bgx encodes: bit0 = BGX, bit1 = dual, bit2 = quad */
		is_bgx = 1;
		lmac_type = 0;
		lane_to_sds = 1;
		num_ports = 4;
		lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz, &alt_pll);
		debug("%s: SGMII lane mode: %d, alternate PLL: %s\n", __func__, lane_mode,
		      alt_pll ? "true" : "false");
		if (lane_mode == -1)
			return -1;
		break;
	case CVMX_QLM_MODE_XAUI:
		is_bgx = 5;
		lmac_type = 1;
		lane_to_sds = 0xe4;
		num_ports = 1;
		lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz, &alt_pll);
		debug("%s: XAUI lane mode: %d\n", __func__, lane_mode);
		if (lane_mode == -1)
			return -1;
		break;
	case CVMX_QLM_MODE_RXAUI:
		is_bgx = 3;
		lmac_type = 2;
		lane_to_sds = 0;
		num_ports = 2;
		/* NOTE(review): this debug prints lane_mode before it is
		 * assigned on the next line (stale value 0) - the other
		 * cases print it after; confirm and reorder upstream. */
		debug("%s: RXAUI lane mode: %d\n", __func__, lane_mode);
		lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz, &alt_pll);
		if (lane_mode == -1)
			return -1;
		break;
	case CVMX_QLM_MODE_XFI:
	case CVMX_QLM_MODE_10G_KR:
		is_bgx = 1;
		lmac_type = 3;
		lane_to_sds = 1;
		num_ports = 4;
		lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz, &alt_pll);
		debug("%s: XFI/10G_KR lane mode: %d\n", __func__, lane_mode);
		if (lane_mode == -1)
			return -1;
		break;
	case CVMX_QLM_MODE_XLAUI:
	case CVMX_QLM_MODE_40G_KR4:
		is_bgx = 5;
		lmac_type = 4;
		lane_to_sds = 0xe4;
		num_ports = 1;
		lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz, &alt_pll);
		debug("%s: XLAUI/40G_KR4 lane mode: %d\n", __func__, lane_mode);
		if (lane_mode == -1)
			return -1;
		break;
	case CVMX_QLM_MODE_DISABLED:
		/* Power down and disable the serdes entirely */
		phy_ctl.u64 = csr_rd_node(node, CVMX_GSERX_PHY_CTL(qlm));
		phy_ctl.s.phy_pd = 1;
		phy_ctl.s.phy_reset = 1;
		csr_wr_node(node, CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);

		csr_wr_node(node, CVMX_GSERX_CFG(qlm), 0);

		return 0;
	default:
		break;
	}

	/* Reprogram the per-lane-mode reference clock table when the
	 * chosen mode/ref-clock pair needs the alternate PLL settings */
	if (alt_pll) {
		debug("%s: alternate PLL settings used for node %d, qlm %d, lane mode %d, reference clock %d\n",
		      __func__, node, qlm, lane_mode, ref_clk_sel);
		if (__set_qlm_ref_clk_cn78xx(node, qlm, lane_mode, ref_clk_sel)) {
			printf("%s: Error: reference clock %d is not supported for node %d, qlm %d\n",
			       __func__, ref_clk_sel, node, qlm);
			return -1;
		}
	}

	/* Power the PHY back up but keep it in reset while the mode
	 * registers are written */
	phy_ctl.u64 = csr_rd_node(node, CVMX_GSERX_PHY_CTL(qlm));
	phy_ctl.s.phy_pd = 0;
	phy_ctl.s.phy_reset = 1;
	csr_wr_node(node, CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);

	/* CN78XX pass 1.x does not support the quad (bit 2) BGX
	 * configuration - strip it */
	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
		is_bgx &= 3;

	/* Program the serdes protocol selection */
	cfg.u64 = csr_rd_node(node, CVMX_GSERX_CFG(qlm));
	cfg.s.ila = is_ilk;
	cfg.s.bgx = is_bgx & 1;
	cfg.s.bgx_quad = (is_bgx >> 2) & 1;
	cfg.s.bgx_dual = (is_bgx >> 1) & 1;
	cfg.s.pcie = is_pcie;
	csr_wr_node(node, CVMX_GSERX_CFG(qlm), cfg.u64);

	/* Select the lane mode determined above */
	lmode.u64 = csr_rd_node(node, CVMX_GSERX_LANE_MODE(qlm));
	lmode.s.lmode = lane_mode;
	csr_wr_node(node, CVMX_GSERX_LANE_MODE(qlm), lmode.u64);

	/* QLMs 0-3 mux onto BGX0/BGX1: odd/even selects the BGX, the
	 * upper bit selects which serdes feeds it */
	if (qlm < 4 && is_bgx) {
		int bgx = qlm & 1;
		int use_upper = (qlm >> 1) & 1;
		cvmx_bgxx_cmr_global_config_t global_cfg;

		global_cfg.u64 = csr_rd_node(node, CVMX_BGXX_CMR_GLOBAL_CONFIG(bgx));
		global_cfg.s.pmux_sds_sel = use_upper;
		csr_wr_node(node, CVMX_BGXX_CMR_GLOBAL_CONFIG(bgx), global_cfg.u64);
	}

	/* Release the PHY reset; the read-back flushes the write */
	phy_ctl.u64 = csr_rd_node(node, CVMX_GSERX_PHY_CTL(qlm));
	phy_ctl.s.phy_reset = 0;
	csr_wr_node(node, CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);
	csr_rd_node(node, CVMX_GSERX_PHY_CTL(qlm));

	/* Short settle delay after reset release */
	udelay(1);

	if (is_bgx) {
		int bgx = (qlm < 2) ? qlm : qlm - 2;
		cvmx_bgxx_cmrx_config_t cmr_config;
		int index;

		/* Program each LMAC: type and lane-to-serdes mapping,
		 * with TX/RX disabled until the interface is brought up */
		for (index = 0; index < num_ports; index++) {
			cmr_config.u64 = csr_rd_node(node, CVMX_BGXX_CMRX_CONFIG(index, bgx));
			cmr_config.s.enable = 0;
			cmr_config.s.data_pkt_tx_en = 0;
			cmr_config.s.data_pkt_rx_en = 0;
			cmr_config.s.lmac_type = lmac_type;
			/* lane_to_sds == 1: one lane per port (index);
			 * lane_to_sds == 0: RXAUI pairs (0x4 / 0xe);
			 * otherwise: fixed map (e.g. 0xe4 for XAUI) */
			cmr_config.s.lane_to_sds = ((lane_to_sds == 1) ?
						    index : ((lane_to_sds == 0) ?
							     (index ? 0xe : 4) :
							     lane_to_sds));
			csr_wr_node(node, CVMX_BGXX_CMRX_CONFIG(index, bgx), cmr_config.u64);
		}
		csr_wr_node(node, CVMX_BGXX_CMR_TX_LMACS(bgx), num_ports);
		csr_wr_node(node, CVMX_BGXX_CMR_RX_LMACS(bgx), num_ports);

		/* Enable link training only for the KR (backplane) modes */
		for (index = 0; index < num_ports; index++) {
			cvmx_bgxx_spux_br_pmd_control_t spu_pmd_control;

			spu_pmd_control.u64 =
				csr_rd_node(node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, bgx));

			if (mode == CVMX_QLM_MODE_10G_KR || mode == CVMX_QLM_MODE_40G_KR4)
				spu_pmd_control.s.train_en = 1;
			else if (mode == CVMX_QLM_MODE_XFI || mode == CVMX_QLM_MODE_XLAUI)
				spu_pmd_control.s.train_en = 0;

			csr_wr_node(node, CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, bgx),
				    spu_pmd_control.u64);
		}
	}

	/* Non-PCIe modes: program the PLL coefficient tables */
	if (!is_pcie)
		__qlm_setup_pll_cn78xx(node, qlm);

	/* Wait up to 10 ms for the serdes PLL to lock */
	if (CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_GSERX_PLL_STAT(qlm),
				       cvmx_gserx_pll_stat_t,
				       pll_lock, ==, 1, 10000)) {
		printf("%d:QLM%d: Timeout waiting for GSERX_PLL_STAT[pll_lock]\n",
		       node, qlm);
		return -1;
	}

	/* Apply mode-specific errata workarounds */
	if (is_pcie)
		__cvmx_qlm_pcie_errata_cn78xx(node, qlm);
	else
		__qlm_init_errata_20844(node, qlm);

	/* Non-PCIe: wait up to 10 ms for the QLM reset state machine
	 * to report ready */
	if (!is_pcie && CVMX_WAIT_FOR_FIELD64_NODE(node, CVMX_GSERX_QLM_STAT(qlm),
						   cvmx_gserx_qlm_stat_t, rst_rdy,
						   ==, 1, 10000)) {
		printf("%d:QLM%d: Timeout waiting for GSERX_QLM_STAT[rst_rdy]\n",
		       node, qlm);
		return -1;
	}

	/* Errata GSER-26150: applies on pass 1.x at 10.3125G or PCIe gen3.
	 * NOTE(review): the first argument is a literal 0, not "node",
	 * unlike every other errata call in this function - likely a bug
	 * on multi-node systems; confirm against the SDK sources. */
	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) &&
	    (baud_mhz == 103125 || (is_pcie && gen3 == 2)))
		__qlm_errata_gser_26150(0, qlm, is_pcie);

	/* Errata GSER-26636: KR-specific workaround on pass 1.x */
	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) && baud_mhz == 103125)
		__qlm_kr_inc_dec_gser26636(node, qlm);

	/* Errata GSER-25992: pass 1.x at 10.3125G or PCIe gen3 */
	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X) &&
	    (baud_mhz == 103125 || (is_pcie && gen3 == 2)))
		cvmx_qlm_gser_errata_25992(node, qlm);

	/* Errata GSER-27140: RX equalizer tuning at 10.3125G (all passes) */
	if (baud_mhz == 103125)
		__qlm_rx_eq_temp_gser27140(node, qlm);

	/* RXAUI: tune the per-lane error-detect control and TX swing */
	if (is_bgx && mode == CVMX_QLM_MODE_RXAUI) {
		int l;

		for (l = 0; l < 4; l++) {
			cvmx_gserx_lanex_rx_cfg_4_t cfg4;
			cvmx_gserx_lanex_tx_cfg_0_t cfg0;

			cfg4.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_RX_CFG_4(l, qlm));
			cfg4.s.cfg_rx_errdet_ctrl = 0xcf6f;
			csr_wr_node(node, CVMX_GSERX_LANEX_RX_CFG_4(l, qlm), cfg4.u64);

			cfg0.u64 = csr_rd_node(node, CVMX_GSERX_LANEX_TX_CFG_0(l, qlm));
			cfg0.s.cfg_tx_swing = 0x12;
			csr_wr_node(node, CVMX_GSERX_LANEX_TX_CFG_0(l, qlm), cfg0.u64);
		}
	}

	return 0;
}
4601
/*
 * Check a CN73XX QLM index against the set usable for BGX networking.
 *
 * NOTE(review): despite the name, the return sense is inverted relative
 * to "valid" - the caller treats a non-zero return as an error.
 *
 * Return: 0 for QLMs 2, 3, 5 and 6; 1 for any other index.
 */
static int __is_qlm_valid_bgx_cn73xx(int qlm)
{
	switch (qlm) {
	case 2:
	case 3:
	case 5:
	case 6:
		return 0;
	default:
		return 1;
	}
}
4608
4609
4610
4611
4612
4613
4614
4615
4616
4617
4618
4619
4620
4621
4622
4623
4624
4625
4626
4627
4628
4629
4630
4631
4632
4633
4634static int octeon_configure_qlm_cn73xx(int qlm, int baud_mhz, int mode, int rc, int gen3,
4635 int ref_clk_sel, int ref_clk_input)
4636{
4637 cvmx_gserx_phy_ctl_t phy_ctl;
4638 cvmx_gserx_lane_mode_t lmode;
4639 cvmx_gserx_cfg_t cfg;
4640 cvmx_gserx_refclk_sel_t refclk_sel;
4641 int is_pcie = 0;
4642 int is_bgx = 0;
4643 int lane_mode = 0;
4644 short lmac_type[4] = { 0 };
4645 short sds_lane[4] = { 0 };
4646 bool alt_pll = false;
4647 int enable_training = 0;
4648 int additional_lmacs = 0;
4649
4650 debug("%s(qlm: %d, baud_mhz: %d, mode: %d, rc: %d, gen3: %d, ref_clk_sel: %d, ref_clk_input: %d\n",
4651 __func__, qlm, baud_mhz, mode, rc, gen3, ref_clk_sel, ref_clk_input);
4652
4653
4654 if (qlm == 4) {
4655 if (mode == CVMX_QLM_MODE_SATA_2X1)
4656 return __setup_sata(qlm, baud_mhz, ref_clk_sel, ref_clk_input);
4657
4658 printf("Invalid mode for QLM4\n");
4659 return 0;
4660 }
4661
4662 cfg.u64 = csr_rd(CVMX_GSERX_CFG(qlm));
4663
4664
4665
4666
4667 __set_sli_window_ctl_errata_31375(0);
4668
4669 if (cfg.s.pcie && rc == 0 &&
4670 (mode == CVMX_QLM_MODE_PCIE || mode == CVMX_QLM_MODE_PCIE_1X8 ||
4671 mode == CVMX_QLM_MODE_PCIE_1X2)) {
4672 debug("%s: qlm %d is in PCIe endpoint mode, returning\n", __func__, qlm);
4673 return 0;
4674 }
4675
4676
4677 refclk_sel.u64 = 0;
4678 if (ref_clk_input == 0) {
4679 refclk_sel.s.com_clk_sel = 0;
4680 refclk_sel.s.use_com1 = 0;
4681 } else if (ref_clk_input == 1) {
4682 refclk_sel.s.com_clk_sel = 1;
4683 refclk_sel.s.use_com1 = 0;
4684 } else {
4685 refclk_sel.s.com_clk_sel = 1;
4686 refclk_sel.s.use_com1 = 1;
4687 }
4688
4689 csr_wr(CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);
4690
4691
4692 phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
4693 phy_ctl.s.phy_reset = 1;
4694 phy_ctl.s.phy_pd = 1;
4695 csr_wr(CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);
4696
4697 udelay(1000);
4698
4699
4700 if (mode != CVMX_QLM_MODE_PCIE && mode != CVMX_QLM_MODE_PCIE_1X2 &&
4701 mode != CVMX_QLM_MODE_PCIE_1X8) {
4702 if (__is_qlm_valid_bgx_cn73xx(qlm))
4703 return -1;
4704 }
4705
4706 switch (mode) {
4707 case CVMX_QLM_MODE_PCIE:
4708 case CVMX_QLM_MODE_PCIE_1X2:
4709 case CVMX_QLM_MODE_PCIE_1X8: {
4710 cvmx_pemx_cfg_t pemx_cfg;
4711 cvmx_pemx_on_t pemx_on;
4712 cvmx_pemx_qlm_t pemx_qlm;
4713 cvmx_rst_soft_prstx_t rst_prst;
4714 int port = 0;
4715
4716 is_pcie = 1;
4717
4718 if (qlm < 5 && mode == CVMX_QLM_MODE_PCIE_1X2) {
4719 printf("Invalid PCIe mode(%d) for QLM%d\n", mode, qlm);
4720 return -1;
4721 }
4722
4723 if (ref_clk_sel == 0) {
4724 refclk_sel.u64 = csr_rd(CVMX_GSERX_REFCLK_SEL(qlm));
4725 refclk_sel.s.pcie_refclk125 = 0;
4726 csr_wr(CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);
4727 if (gen3 == 0)
4728 lane_mode = R_2_5G_REFCLK100;
4729 else if (gen3 == 1)
4730 lane_mode = R_5G_REFCLK100;
4731 else
4732 lane_mode = R_8G_REFCLK100;
4733 } else if (ref_clk_sel == 1) {
4734 refclk_sel.u64 = csr_rd(CVMX_GSERX_REFCLK_SEL(qlm));
4735 refclk_sel.s.pcie_refclk125 = 1;
4736 csr_wr(CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);
4737 if (gen3 == 0)
4738 lane_mode = R_2_5G_REFCLK125;
4739 else if (gen3 == 1)
4740 lane_mode = R_5G_REFCLK125;
4741 else
4742 lane_mode = R_8G_REFCLK125;
4743 } else {
4744 printf("Invalid reference clock for PCIe on QLM%d\n", qlm);
4745 return -1;
4746 }
4747
4748 switch (qlm) {
4749 case 0:
4750 rst_prst.u64 = csr_rd(CVMX_RST_SOFT_PRSTX(0));
4751 rst_prst.s.soft_prst = rc;
4752 csr_wr(CVMX_RST_SOFT_PRSTX(0), rst_prst.u64);
4753 __setup_pem_reset(0, 0, !rc);
4754
4755 pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(0));
4756 pemx_cfg.cn78xx.lanes8 = (mode == CVMX_QLM_MODE_PCIE_1X8);
4757 pemx_cfg.cn78xx.hostmd = rc;
4758 pemx_cfg.cn78xx.md = gen3;
4759 csr_wr(CVMX_PEMX_CFG(0), pemx_cfg.u64);
4760
4761 if (mode == CVMX_QLM_MODE_PCIE) {
4762 pemx_on.u64 = csr_rd(CVMX_PEMX_ON(0));
4763 pemx_on.s.pemon = 1;
4764 csr_wr(CVMX_PEMX_ON(0), pemx_on.u64);
4765 }
4766 break;
4767 case 1:
4768 if (mode == CVMX_QLM_MODE_PCIE) {
4769 rst_prst.u64 = csr_rd(CVMX_RST_SOFT_PRSTX(1));
4770 rst_prst.s.soft_prst = rc;
4771 csr_wr(CVMX_RST_SOFT_PRSTX(1), rst_prst.u64);
4772 __setup_pem_reset(0, 1, !rc);
4773
4774 pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(1));
4775 pemx_cfg.cn78xx.lanes8 = 0;
4776 pemx_cfg.cn78xx.hostmd = rc;
4777 pemx_cfg.cn78xx.md = gen3;
4778 csr_wr(CVMX_PEMX_CFG(1), pemx_cfg.u64);
4779
4780 pemx_on.u64 = csr_rd(CVMX_PEMX_ON(1));
4781 pemx_on.s.pemon = 1;
4782 csr_wr(CVMX_PEMX_ON(1), pemx_on.u64);
4783 } else {
4784 pemx_on.u64 = csr_rd(CVMX_PEMX_ON(0));
4785 pemx_on.s.pemon = 1;
4786 csr_wr(CVMX_PEMX_ON(0), pemx_on.u64);
4787 }
4788 break;
4789 case 2:
4790 {
4791 pemx_qlm.u64 = csr_rd(CVMX_PEMX_QLM(2));
4792 pemx_qlm.cn73xx.pemdlmsel = 0;
4793 csr_wr(CVMX_PEMX_QLM(2), pemx_qlm.u64);
4794
4795 rst_prst.u64 = csr_rd(CVMX_RST_SOFT_PRSTX(2));
4796 rst_prst.s.soft_prst = rc;
4797 csr_wr(CVMX_RST_SOFT_PRSTX(2), rst_prst.u64);
4798 __setup_pem_reset(0, 2, !rc);
4799
4800 pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(2));
4801 pemx_cfg.cn78xx.lanes8 = (mode == CVMX_QLM_MODE_PCIE_1X8);
4802 pemx_cfg.cn78xx.hostmd = rc;
4803 pemx_cfg.cn78xx.md = gen3;
4804 csr_wr(CVMX_PEMX_CFG(2), pemx_cfg.u64);
4805
4806 if (mode == CVMX_QLM_MODE_PCIE) {
4807 pemx_on.u64 = csr_rd(CVMX_PEMX_ON(2));
4808 pemx_on.s.pemon = 1;
4809 csr_wr(CVMX_PEMX_ON(2), pemx_on.u64);
4810 }
4811 break;
4812 }
4813 case 3:
4814
4815 pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(2));
4816 if (pemx_cfg.cn78xx.lanes8) {
4817
4818
4819 pemx_on.u64 = csr_rd(CVMX_PEMX_ON(2));
4820 pemx_on.s.pemon = 1;
4821 csr_wr(CVMX_PEMX_ON(2), pemx_on.u64);
4822 }
4823
4824 if (mode == CVMX_QLM_MODE_PCIE) {
4825 pemx_qlm.u64 = csr_rd(CVMX_PEMX_QLM(3));
4826 pemx_qlm.cn73xx.pemdlmsel = 0;
4827 csr_wr(CVMX_PEMX_QLM(3), pemx_qlm.u64);
4828
4829 rst_prst.u64 = csr_rd(CVMX_RST_SOFT_PRSTX(3));
4830 rst_prst.s.soft_prst = rc;
4831 csr_wr(CVMX_RST_SOFT_PRSTX(3), rst_prst.u64);
4832 __setup_pem_reset(0, 3, !rc);
4833
4834 pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(3));
4835 pemx_cfg.cn78xx.lanes8 = 0;
4836 pemx_cfg.cn78xx.hostmd = rc;
4837 pemx_cfg.cn78xx.md = gen3;
4838 csr_wr(CVMX_PEMX_CFG(3), pemx_cfg.u64);
4839
4840 pemx_on.u64 = csr_rd(CVMX_PEMX_ON(3));
4841 pemx_on.s.pemon = 1;
4842 csr_wr(CVMX_PEMX_ON(3), pemx_on.u64);
4843 }
4844 break;
4845 case 5:
4846 case 6:
4847 port = (qlm == 5) ? 2 : 3;
4848 if (mode == CVMX_QLM_MODE_PCIE_1X2) {
4849
4850 pemx_qlm.u64 = csr_rd(CVMX_PEMX_QLM(port));
4851 pemx_qlm.cn73xx.pemdlmsel = 1;
4852 csr_wr(CVMX_PEMX_QLM(port), pemx_qlm.u64);
4853
4854 rst_prst.u64 = csr_rd(CVMX_RST_SOFT_PRSTX(port));
4855 rst_prst.s.soft_prst = rc;
4856 csr_wr(CVMX_RST_SOFT_PRSTX(port), rst_prst.u64);
4857 __setup_pem_reset(0, port, !rc);
4858
4859 pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(port));
4860 pemx_cfg.cn78xx.lanes8 = 0;
4861 pemx_cfg.cn78xx.hostmd = rc;
4862 pemx_cfg.cn78xx.md = gen3;
4863 csr_wr(CVMX_PEMX_CFG(port), pemx_cfg.u64);
4864
4865 pemx_on.u64 = csr_rd(CVMX_PEMX_ON(port));
4866 pemx_on.s.pemon = 1;
4867 csr_wr(CVMX_PEMX_ON(port), pemx_on.u64);
4868 }
4869 break;
4870 default:
4871 break;
4872 }
4873 break;
4874 }
4875 case CVMX_QLM_MODE_SGMII:
4876 is_bgx = 1;
4877 lmac_type[0] = 0;
4878 lmac_type[1] = 0;
4879 lmac_type[2] = 0;
4880 lmac_type[3] = 0;
4881 sds_lane[0] = 0;
4882 sds_lane[1] = 1;
4883 sds_lane[2] = 2;
4884 sds_lane[3] = 3;
4885 break;
4886 case CVMX_QLM_MODE_SGMII_2X1:
4887 if (qlm == 5) {
4888 is_bgx = 1;
4889 lmac_type[0] = 0;
4890 lmac_type[1] = 0;
4891 lmac_type[2] = -1;
4892 lmac_type[3] = -1;
4893 sds_lane[0] = 0;
4894 sds_lane[1] = 1;
4895 } else if (qlm == 6) {
4896 is_bgx = 1;
4897 lmac_type[0] = -1;
4898 lmac_type[1] = -1;
4899 lmac_type[2] = 0;
4900 lmac_type[3] = 0;
4901 sds_lane[2] = 2;
4902 sds_lane[3] = 3;
4903 additional_lmacs = 2;
4904 }
4905 break;
4906 case CVMX_QLM_MODE_XAUI:
4907 is_bgx = 5;
4908 lmac_type[0] = 1;
4909 lmac_type[1] = -1;
4910 lmac_type[2] = -1;
4911 lmac_type[3] = -1;
4912 sds_lane[0] = 0xe4;
4913 break;
4914 case CVMX_QLM_MODE_RXAUI:
4915 is_bgx = 3;
4916 lmac_type[0] = 2;
4917 lmac_type[1] = 2;
4918 lmac_type[2] = -1;
4919 lmac_type[3] = -1;
4920 sds_lane[0] = 0x4;
4921 sds_lane[1] = 0xe;
4922 break;
4923 case CVMX_QLM_MODE_RXAUI_1X2:
4924 if (qlm == 5) {
4925 is_bgx = 3;
4926 lmac_type[0] = 2;
4927 lmac_type[1] = -1;
4928 lmac_type[2] = -1;
4929 lmac_type[3] = -1;
4930 sds_lane[0] = 0x4;
4931 }
4932 if (qlm == 6) {
4933 is_bgx = 3;
4934 lmac_type[0] = -1;
4935 lmac_type[1] = -1;
4936 lmac_type[2] = 2;
4937 lmac_type[3] = -1;
4938 sds_lane[2] = 0xe;
4939 additional_lmacs = 2;
4940 }
4941 break;
4942 case CVMX_QLM_MODE_10G_KR:
4943 enable_training = 1;
4944 case CVMX_QLM_MODE_XFI:
4945 is_bgx = 1;
4946 lmac_type[0] = 3;
4947 lmac_type[1] = 3;
4948 lmac_type[2] = 3;
4949 lmac_type[3] = 3;
4950 sds_lane[0] = 0;
4951 sds_lane[1] = 1;
4952 sds_lane[2] = 2;
4953 sds_lane[3] = 3;
4954 break;
4955 case CVMX_QLM_MODE_10G_KR_1X2:
4956 enable_training = 1;
4957 case CVMX_QLM_MODE_XFI_1X2:
4958 if (qlm == 5) {
4959 is_bgx = 1;
4960 lmac_type[0] = 3;
4961 lmac_type[1] = 3;
4962 lmac_type[2] = -1;
4963 lmac_type[3] = -1;
4964 sds_lane[0] = 0;
4965 sds_lane[1] = 1;
4966 } else if (qlm == 6) {
4967 is_bgx = 1;
4968 lmac_type[0] = -1;
4969 lmac_type[1] = -1;
4970 lmac_type[2] = 3;
4971 lmac_type[3] = 3;
4972 sds_lane[2] = 2;
4973 sds_lane[3] = 3;
4974 additional_lmacs = 2;
4975 }
4976 break;
4977 case CVMX_QLM_MODE_40G_KR4:
4978 enable_training = 1;
4979 case CVMX_QLM_MODE_XLAUI:
4980 is_bgx = 5;
4981 lmac_type[0] = 4;
4982 lmac_type[1] = -1;
4983 lmac_type[2] = -1;
4984 lmac_type[3] = -1;
4985 sds_lane[0] = 0xe4;
4986 break;
4987 case CVMX_QLM_MODE_RGMII_SGMII:
4988 is_bgx = 1;
4989 lmac_type[0] = 5;
4990 lmac_type[1] = 0;
4991 lmac_type[2] = 0;
4992 lmac_type[3] = 0;
4993 sds_lane[0] = 0;
4994 sds_lane[1] = 1;
4995 sds_lane[2] = 2;
4996 sds_lane[3] = 3;
4997 break;
4998 case CVMX_QLM_MODE_RGMII_SGMII_1X1:
4999 if (qlm == 5) {
5000 is_bgx = 1;
5001 lmac_type[0] = 5;
5002 lmac_type[1] = 0;
5003 lmac_type[2] = -1;
5004 lmac_type[3] = -1;
5005 sds_lane[0] = 0;
5006 sds_lane[1] = 1;
5007 }
5008 break;
5009 case CVMX_QLM_MODE_RGMII_SGMII_2X1:
5010 if (qlm == 6) {
5011 is_bgx = 1;
5012 lmac_type[0] = 5;
5013 lmac_type[1] = -1;
5014 lmac_type[2] = 0;
5015 lmac_type[3] = 0;
5016 sds_lane[0] = 0;
5017 sds_lane[2] = 0;
5018 sds_lane[3] = 1;
5019 }
5020 break;
5021 case CVMX_QLM_MODE_RGMII_10G_KR:
5022 enable_training = 1;
5023 case CVMX_QLM_MODE_RGMII_XFI:
5024 is_bgx = 1;
5025 lmac_type[0] = 5;
5026 lmac_type[1] = 3;
5027 lmac_type[2] = 3;
5028 lmac_type[3] = 3;
5029 sds_lane[0] = 0;
5030 sds_lane[1] = 1;
5031 sds_lane[2] = 2;
5032 sds_lane[3] = 3;
5033 break;
5034 case CVMX_QLM_MODE_RGMII_10G_KR_1X1:
5035 enable_training = 1;
5036 case CVMX_QLM_MODE_RGMII_XFI_1X1:
5037 if (qlm == 5) {
5038 is_bgx = 3;
5039 lmac_type[0] = 5;
5040 lmac_type[1] = 3;
5041 lmac_type[2] = -1;
5042 lmac_type[3] = -1;
5043 sds_lane[0] = 0;
5044 sds_lane[1] = 1;
5045 }
5046 break;
5047 case CVMX_QLM_MODE_RGMII_40G_KR4:
5048 enable_training = 1;
5049 case CVMX_QLM_MODE_RGMII_XLAUI:
5050 is_bgx = 5;
5051 lmac_type[0] = 5;
5052 lmac_type[1] = 4;
5053 lmac_type[2] = -1;
5054 lmac_type[3] = -1;
5055 sds_lane[0] = 0x0;
5056 sds_lane[1] = 0xe4;
5057 break;
5058 case CVMX_QLM_MODE_RGMII_RXAUI:
5059 is_bgx = 3;
5060 lmac_type[0] = 5;
5061 lmac_type[1] = 2;
5062 lmac_type[2] = 2;
5063 lmac_type[3] = -1;
5064 sds_lane[0] = 0x0;
5065 sds_lane[1] = 0x4;
5066 sds_lane[2] = 0xe;
5067 break;
5068 case CVMX_QLM_MODE_RGMII_XAUI:
5069 is_bgx = 5;
5070 lmac_type[0] = 5;
5071 lmac_type[1] = 1;
5072 lmac_type[2] = -1;
5073 lmac_type[3] = -1;
5074 sds_lane[0] = 0;
5075 sds_lane[1] = 0xe4;
5076 break;
5077 default:
5078 break;
5079 }
5080
5081 if (is_pcie == 0)
5082 lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz, &alt_pll);
5083 debug("%s: %d lane mode: %d, alternate PLL: %s\n", __func__, mode, lane_mode,
5084 alt_pll ? "true" : "false");
5085 if (lane_mode == -1)
5086 return -1;
5087
5088 if (alt_pll) {
5089 debug("%s: alternate PLL settings used for qlm %d, lane mode %d, reference clock %d\n",
5090 __func__, qlm, lane_mode, ref_clk_sel);
5091 if (__set_qlm_ref_clk_cn78xx(0, qlm, lane_mode, ref_clk_sel)) {
5092 printf("%s: Error: reference clock %d is not supported for qlm %d, lane mode: 0x%x\n",
5093 __func__, ref_clk_sel, qlm, lane_mode);
5094 return -1;
5095 }
5096 }
5097
5098
5099 phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
5100 phy_ctl.s.phy_pd = 0;
5101 phy_ctl.s.phy_reset = 1;
5102 csr_wr(CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);
5103
5104
5105 cfg.u64 = csr_rd(CVMX_GSERX_CFG(qlm));
5106 cfg.s.bgx = is_bgx & 1;
5107 cfg.s.bgx_quad = (is_bgx >> 2) & 1;
5108 cfg.s.bgx_dual = (is_bgx >> 1) & 1;
5109 cfg.s.pcie = is_pcie;
5110 csr_wr(CVMX_GSERX_CFG(qlm), cfg.u64);
5111
5112
5113 lmode.u64 = csr_rd(CVMX_GSERX_LANE_MODE(qlm));
5114 lmode.s.lmode = lane_mode;
5115 csr_wr(CVMX_GSERX_LANE_MODE(qlm), lmode.u64);
5116
5117
5118 if (is_bgx) {
5119 int bgx = (qlm < 4) ? qlm - 2 : 2;
5120 cvmx_bgxx_cmrx_config_t cmr_config;
5121 cvmx_bgxx_cmr_rx_lmacs_t rx_lmacs;
5122 cvmx_bgxx_spux_br_pmd_control_t spu_pmd_control;
5123 int index, total_lmacs = 0;
5124
5125 for (index = 0; index < 4; index++) {
5126 cmr_config.u64 = csr_rd(CVMX_BGXX_CMRX_CONFIG(index, bgx));
5127 cmr_config.s.enable = 0;
5128 cmr_config.s.data_pkt_rx_en = 0;
5129 cmr_config.s.data_pkt_tx_en = 0;
5130 if (lmac_type[index] != -1) {
5131 cmr_config.s.lmac_type = lmac_type[index];
5132 cmr_config.s.lane_to_sds = sds_lane[index];
5133 total_lmacs++;
5134
5135 if (lmac_type[index] == 2)
5136 total_lmacs += 1;
5137 }
5138 csr_wr(CVMX_BGXX_CMRX_CONFIG(index, bgx), cmr_config.u64);
5139
5140
5141
5142
5143 if (lmac_type[index] == 5) {
5144 cvmx_bgxx_cmr_global_config_t global_config;
5145
5146 global_config.u64 = csr_rd(CVMX_BGXX_CMR_GLOBAL_CONFIG(bgx));
5147 global_config.s.bgx_clk_enable = 1;
5148 csr_wr(CVMX_BGXX_CMR_GLOBAL_CONFIG(bgx), global_config.u64);
5149 }
5150
5151
5152 if (enable_training == 1 &&
5153 (lmac_type[index] == 3 || lmac_type[index] == 4)) {
5154 spu_pmd_control.u64 =
5155 csr_rd(CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, bgx));
5156 spu_pmd_control.s.train_en = 1;
5157 csr_wr(CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, bgx),
5158 spu_pmd_control.u64);
5159 }
5160 }
5161
5162
5163 rx_lmacs.u64 = csr_rd(CVMX_BGXX_CMR_RX_LMACS(bgx));
5164 rx_lmacs.s.lmacs = total_lmacs + additional_lmacs;
5165 csr_wr(CVMX_BGXX_CMR_RX_LMACS(bgx), rx_lmacs.u64);
5166 csr_wr(CVMX_BGXX_CMR_TX_LMACS(bgx), rx_lmacs.u64);
5167 }
5168
5169
5170 phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
5171 phy_ctl.s.phy_reset = 0;
5172 csr_wr(CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);
5173
5174
5175
5176
5177
5178 udelay(1);
5179
5180
5181
5182
5183
5184 if (!is_pcie && CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_QLM_STAT(qlm),
5185 cvmx_gserx_qlm_stat_t,
5186 rst_rdy, ==, 1, 10000)) {
5187 printf("QLM%d: Timeout waiting for GSERX_QLM_STAT[rst_rdy]\n", qlm);
5188 return -1;
5189 }
5190
5191
5192 if (!is_pcie)
5193 __qlm_setup_pll_cn78xx(0, qlm);
5194
5195
5196 if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_PLL_STAT(qlm), cvmx_gserx_pll_stat_t,
5197 pll_lock, ==, 1, 10000)) {
5198 printf("QLM%d: Timeout waiting for GSERX_PLL_STAT[pll_lock]\n", qlm);
5199 return -1;
5200 }
5201
5202
5203
5204
5205
5206
5207 if (OCTEON_IS_MODEL(OCTEON_CN73XX_PASS1_0) &&
5208 (baud_mhz == 103125 || (is_pcie && gen3 == 2)))
5209 __qlm_errata_gser_26150(0, qlm, is_pcie);
5210
5211
5212
5213
5214
5215 if (baud_mhz == 103125)
5216 __qlm_kr_inc_dec_gser26636(0, qlm);
5217
5218
5219
5220
5221
5222 if (baud_mhz == 103125 || (is_pcie && gen3 == 2))
5223 cvmx_qlm_gser_errata_25992(0, qlm);
5224
5225
5226
5227
5228
5229 if (baud_mhz == 103125)
5230 __qlm_rx_eq_temp_gser27140(0, qlm);
5231
5232
5233
5234
5235 if (is_bgx) {
5236 int l;
5237
5238 for (l = 0; l < 4; l++) {
5239 cvmx_gserx_lanex_rx_cfg_4_t cfg4;
5240 cvmx_gserx_lanex_tx_cfg_0_t cfg0;
5241
5242 if (lmac_type[l] == 2) {
5243
5244 cfg4.u64 = csr_rd(CVMX_GSERX_LANEX_RX_CFG_4(l, qlm));
5245 cfg4.s.cfg_rx_errdet_ctrl = 0xcf6f;
5246 csr_wr(CVMX_GSERX_LANEX_RX_CFG_4(l, qlm), cfg4.u64);
5247
5248 cfg0.u64 = csr_rd(CVMX_GSERX_LANEX_TX_CFG_0(l, qlm));
5249 cfg0.s.cfg_tx_swing = 0x12;
5250 csr_wr(CVMX_GSERX_LANEX_TX_CFG_0(l, qlm), cfg0.u64);
5251 }
5252 }
5253 }
5254
5255 return 0;
5256}
5257
5258static int __rmac_pll_config(int baud_mhz, int qlm, int mode)
5259{
5260 cvmx_gserx_pll_px_mode_0_t pmode0;
5261 cvmx_gserx_pll_px_mode_1_t pmode1;
5262 cvmx_gserx_lane_px_mode_0_t lmode0;
5263 cvmx_gserx_lane_px_mode_1_t lmode1;
5264 cvmx_gserx_lane_mode_t lmode;
5265
5266 switch (baud_mhz) {
5267 case 98304:
5268 pmode0.u64 = 0x1a0a;
5269 pmode1.u64 = 0x3228;
5270 lmode0.u64 = 0x600f;
5271 lmode1.u64 = 0xa80f;
5272 break;
5273 case 49152:
5274 if (mode == CVMX_QLM_MODE_SDL) {
5275 pmode0.u64 = 0x3605;
5276 pmode1.u64 = 0x0814;
5277 lmode0.u64 = 0x000f;
5278 lmode1.u64 = 0x6814;
5279 } else {
5280 pmode0.u64 = 0x1a0a;
5281 pmode1.u64 = 0x3228;
5282 lmode0.u64 = 0x650f;
5283 lmode1.u64 = 0xe80f;
5284 }
5285 break;
5286 case 24576:
5287 pmode0.u64 = 0x1a0a;
5288 pmode1.u64 = 0x3228;
5289 lmode0.u64 = 0x6a0f;
5290 lmode1.u64 = 0xe80f;
5291 break;
5292 case 12288:
5293 pmode0.u64 = 0x1a0a;
5294 pmode1.u64 = 0x3228;
5295 lmode0.u64 = 0x6f0f;
5296 lmode1.u64 = 0xe80f;
5297 break;
5298 case 6144:
5299 pmode0.u64 = 0x160a;
5300 pmode1.u64 = 0x1019;
5301 lmode0.u64 = 0x000f;
5302 lmode1.u64 = 0x2814;
5303 break;
5304 case 3072:
5305 pmode0.u64 = 0x160a;
5306 pmode1.u64 = 0x1019;
5307 lmode0.u64 = 0x050f;
5308 lmode1.u64 = 0x6814;
5309 break;
5310 default:
5311 printf("Invalid speed for CPRI/SDL configuration\n");
5312 return -1;
5313 }
5314
5315 lmode.u64 = csr_rd(CVMX_GSERX_LANE_MODE(qlm));
5316 csr_wr(CVMX_GSERX_PLL_PX_MODE_0(lmode.s.lmode, qlm), pmode0.u64);
5317 csr_wr(CVMX_GSERX_PLL_PX_MODE_1(lmode.s.lmode, qlm), pmode1.u64);
5318 csr_wr(CVMX_GSERX_LANE_PX_MODE_0(lmode.s.lmode, qlm), lmode0.u64);
5319 csr_wr(CVMX_GSERX_LANE_PX_MODE_1(lmode.s.lmode, qlm), lmode1.u64);
5320 return 0;
5321}
5322
5323
5324
5325
5326
5327
5328
5329
5330
5331
5332
5333
5334
5335
5336
5337
5338
5339
5340
5341
5342
5343
/**
 * Configure a QLM/DLM on a CNF75XX for the requested mode.
 *
 * @param qlm		QLM/DLM to configure (0-8)
 * @param baud_mhz	Desired lane speed
 * @param mode		CVMX_QLM_MODE_xxx to configure the lanes for
 * @param rc		Only used for PCIe/SRIO: 1 for root complex/host,
 *			0 for endpoint
 * @param gen3		Only used for PCIe: PEM "md" field (0 = gen1,
 *			1 = gen2, else gen3 lane rates are selected below)
 * @param ref_clk_sel	Reference clock: 0 selects the 100MHz lane modes,
 *			1 selects the 125MHz lane modes; 2 is accepted by the
 *			SRIO speed table (presumably 156.25MHz — TODO confirm)
 * @param ref_clk_input	0 = common clock 0, 1 = common clock 1,
 *			other values select common clock 1 via USE_COM1
 *
 * @return 0 on success, -1 on failure
 */
static int octeon_configure_qlm_cnf75xx(int qlm, int baud_mhz, int mode, int rc, int gen3,
					int ref_clk_sel, int ref_clk_input)
{
	cvmx_gserx_phy_ctl_t phy_ctl;
	cvmx_gserx_lane_mode_t lmode;
	cvmx_gserx_cfg_t cfg;
	cvmx_gserx_refclk_sel_t refclk_sel;
	int is_pcie = 0;
	int is_bgx = 0;
	int is_srio = 0;
	int is_rmac = 0;
	int is_rmac_pipe = 0;
	int lane_mode = 0;
	short lmac_type[4] = { 0 };	/* BGX LMAC type per index, -1 = unused */
	short sds_lane[4] = { 0 };	/* BGX lane-to-SerDes mapping per index */
	bool alt_pll = false;
	int enable_training = 0;	/* Set for the KR (link training) modes */
	int additional_lmacs = 0;
	int port = (qlm == 3) ? 1 : 0;	/* SRIO port for this QLM */
	cvmx_sriox_status_reg_t status_reg;

	debug("%s(qlm: %d, baud_mhz: %d, mode: %d, rc: %d, gen3: %d, ref_clk_sel: %d, ref_clk_input: %d\n",
	      __func__, qlm, baud_mhz, mode, rc, gen3, ref_clk_sel, ref_clk_input);
	if (qlm > 8) {
		printf("Invalid qlm%d passed\n", qlm);
		return -1;
	}

	/*
	 * Errata (SLI-31375): program the SLI window control before touching
	 * the SerDes configuration.
	 */
	__set_sli_window_ctl_errata_31375(0);

	cfg.u64 = csr_rd(CVMX_GSERX_CFG(qlm));

	/* Don't reconfigure a QLM already running as a PCIe endpoint */
	if (cfg.s.pcie && rc == 0) {
		debug("%s: qlm %d is in PCIe endpoint mode, returning\n", __func__, qlm);
		return 0;
	}

	/* Don't reconfigure a QLM already running as an SRIO endpoint */
	if (cfg.s.srio && rc == 0) {
		debug("%s: qlm %d is in SRIO endpoint mode, returning\n", __func__, qlm);
		return 0;
	}

	/* Select the reference clock input pins for this QLM */
	refclk_sel.u64 = 0;
	if (ref_clk_input == 0) {
		refclk_sel.s.com_clk_sel = 0;
		refclk_sel.s.use_com1 = 0;
	} else if (ref_clk_input == 1) {
		refclk_sel.s.com_clk_sel = 1;
		refclk_sel.s.use_com1 = 0;
	} else {
		refclk_sel.s.com_clk_sel = 1;
		refclk_sel.s.use_com1 = 1;
	}

	csr_wr(CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);

	/* Hold the PHY in reset and power it down while reconfiguring */
	phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
	phy_ctl.s.phy_reset = 1;
	phy_ctl.s.phy_pd = 1;
	csr_wr(CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);

	udelay(1000);

	switch (mode) {
	case CVMX_QLM_MODE_PCIE:
	case CVMX_QLM_MODE_PCIE_1X2:
	case CVMX_QLM_MODE_PCIE_2X1: {
		cvmx_pemx_cfg_t pemx_cfg;
		cvmx_pemx_on_t pemx_on;
		cvmx_rst_soft_prstx_t rst_prst;

		is_pcie = 1;

		/* Only QLM0/1 support PCIe on this SoC */
		if (qlm > 1) {
			printf("Invalid PCIe mode for QLM%d\n", qlm);
			return -1;
		}

		/* Pick a lane mode matching the reference clock and PCIe gen */
		if (ref_clk_sel == 0) {
			refclk_sel.u64 = csr_rd(CVMX_GSERX_REFCLK_SEL(qlm));
			refclk_sel.s.pcie_refclk125 = 0;
			csr_wr(CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);
			if (gen3 == 0)
				lane_mode = R_2_5G_REFCLK100;
			else if (gen3 == 1)
				lane_mode = R_5G_REFCLK100;
			else
				lane_mode = R_8G_REFCLK100;
		} else if (ref_clk_sel == 1) {
			refclk_sel.u64 = csr_rd(CVMX_GSERX_REFCLK_SEL(qlm));
			refclk_sel.s.pcie_refclk125 = 1;
			csr_wr(CVMX_GSERX_REFCLK_SEL(qlm), refclk_sel.u64);
			if (gen3 == 0)
				lane_mode = R_2_5G_REFCLK125;
			else if (gen3 == 1)
				lane_mode = R_5G_REFCLK125;
			else
				lane_mode = R_8G_REFCLK125;
		} else {
			printf("Invalid reference clock for PCIe on QLM%d\n", qlm);
			return -1;
		}

		switch (qlm) {
		case 0:
			/* Release/assert PERST per requested RC/EP role */
			rst_prst.u64 = csr_rd(CVMX_RST_SOFT_PRSTX(0));
			rst_prst.s.soft_prst = rc;
			csr_wr(CVMX_RST_SOFT_PRSTX(0), rst_prst.u64);
			__setup_pem_reset(0, 0, !rc);

			pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(0));
			pemx_cfg.cnf75xx.hostmd = rc;
			/* Plain PCIE mode spans both DLMs (wide link) */
			pemx_cfg.cnf75xx.lanes8 = (mode == CVMX_QLM_MODE_PCIE);
			pemx_cfg.cnf75xx.md = gen3;
			csr_wr(CVMX_PEMX_CFG(0), pemx_cfg.u64);

			/* Narrow-link modes: PEM0 is complete, enable it now */
			if (mode == CVMX_QLM_MODE_PCIE_1X2 || mode == CVMX_QLM_MODE_PCIE_2X1) {
				pemx_on.u64 = csr_rd(CVMX_PEMX_ON(0));
				pemx_on.s.pemon = 1;
				csr_wr(CVMX_PEMX_ON(0), pemx_on.u64);
			}
			break;
		case 1:
			if (mode == CVMX_QLM_MODE_PCIE_1X2 || mode == CVMX_QLM_MODE_PCIE_2X1) {
				/* QLM1 hosts its own PEM1 link */
				rst_prst.u64 = csr_rd(CVMX_RST_SOFT_PRSTX(1));
				rst_prst.s.soft_prst = rc;
				csr_wr(CVMX_RST_SOFT_PRSTX(1), rst_prst.u64);
				__setup_pem_reset(0, 1, !rc);

				pemx_cfg.u64 = csr_rd(CVMX_PEMX_CFG(1));
				pemx_cfg.cnf75xx.hostmd = rc;
				pemx_cfg.cnf75xx.md = gen3;
				csr_wr(CVMX_PEMX_CFG(1), pemx_cfg.u64);

				pemx_on.u64 = csr_rd(CVMX_PEMX_ON(1));
				pemx_on.s.pemon = 1;
				csr_wr(CVMX_PEMX_ON(1), pemx_on.u64);
			} else {
				/* QLM1 finishes the wide PEM0 link started on QLM0 */
				pemx_on.u64 = csr_rd(CVMX_PEMX_ON(0));
				pemx_on.s.pemon = 1;
				csr_wr(CVMX_PEMX_ON(0), pemx_on.u64);
			}
			break;
		default:
			break;
		}
		break;
	}
	case CVMX_QLM_MODE_SRIO_1X4:
	case CVMX_QLM_MODE_SRIO_2X2:
	case CVMX_QLM_MODE_SRIO_4X1: {
		int spd = 0xf;	/* 0xf = invalid speed sentinel */

		/* Fuse 1601 set means SRIO is disabled on this part */
		if (cvmx_fuse_read(1601)) {
			debug("SRIO is not supported on cnf73xx model\n");
			return -1;
		}

		/* Map (baud rate, reference clock) to the SRIO speed code */
		switch (baud_mhz) {
		case 1250:
			switch (ref_clk_sel) {
			case 0:
				spd = 0x3;
				break;
			case 1:
				spd = 0xa;
				break;
			case 2:
				spd = 0x4;
				break;
			default:
				spd = 0xf;
				break;
			}
			break;
		case 2500:
			switch (ref_clk_sel) {
			case 0:
				spd = 0x2;
				break;
			case 1:
				spd = 0x9;
				break;
			case 2:
				spd = 0x7;
				break;
			default:
				spd = 0xf;
				break;
			}
			break;
		case 3125:
			/* 3.125G is not available from the 100MHz reference */
			switch (ref_clk_sel) {
			case 1:
				spd = 0x8;
				break;
			case 2:
				spd = 0xe;
				break;
			default:
				spd = 0xf;
				break;
			}
			break;
		case 5000:
			switch (ref_clk_sel) {
			case 0:
				spd = 0x0;
				break;
			case 1:
				spd = 0x6;
				break;
			case 2:
				spd = 0xb;
				break;
			default:
				spd = 0xf;
				break;
			}
			break;
		default:
			spd = 0xf;
			break;
		}

		if (spd == 0xf) {
			printf("ERROR: Invalid SRIO speed (%d) configured for QLM%d\n", baud_mhz,
			       qlm);
			return -1;
		}

		status_reg.u64 = csr_rd(CVMX_SRIOX_STATUS_REG(port));
		status_reg.s.spd = spd;
		csr_wr(CVMX_SRIOX_STATUS_REG(port), status_reg.u64);
		is_srio = 1;
		break;
	}

	case CVMX_QLM_MODE_SGMII_2X1:
		if (qlm == 4) {
			/* BGX0 LMACs 0/1 on DLM4 lanes 0/1 */
			is_bgx = 1;
			lmac_type[0] = 0;
			lmac_type[1] = 0;
			lmac_type[2] = -1;
			lmac_type[3] = -1;
			sds_lane[0] = 0;
			sds_lane[1] = 1;
		} else if (qlm == 5) {
			/* BGX0 LMACs 2/3 on DLM5 lanes 2/3 */
			is_bgx = 1;
			lmac_type[0] = -1;
			lmac_type[1] = -1;
			lmac_type[2] = 0;
			lmac_type[3] = 0;
			sds_lane[2] = 2;
			sds_lane[3] = 3;
			additional_lmacs = 2;
		}
		break;
	case CVMX_QLM_MODE_10G_KR_1X2:
		enable_training = 1;
		/* fall through: KR is XFI plus link training */
	case CVMX_QLM_MODE_XFI_1X2:
		if (qlm == 5) {
			is_bgx = 1;
			lmac_type[0] = -1;
			lmac_type[1] = -1;
			lmac_type[2] = 3;
			lmac_type[3] = 3;
			sds_lane[2] = 2;
			sds_lane[3] = 3;
			additional_lmacs = 2;
		}
		break;
	case CVMX_QLM_MODE_CPRI:
		is_rmac = 1;
		break;
	case CVMX_QLM_MODE_SDL:
		is_rmac = 1;
		is_rmac_pipe = 1;
		lane_mode = 1;
		break;
	default:
		break;
	}

	/* For non-fixed modes, derive the lane mode from speed + ref clock */
	if (is_rmac_pipe == 0 && is_pcie == 0) {
		lane_mode = __get_lane_mode_for_speed_and_ref_clk(ref_clk_sel, baud_mhz,
								  &alt_pll);
	}

	debug("%s: %d lane mode: %d, alternate PLL: %s\n", __func__, mode, lane_mode,
	      alt_pll ? "true" : "false");
	if (lane_mode == -1)
		return -1;

	/* Reprogram the reference clock when alternate PLL settings apply */
	if (alt_pll) {
		debug("%s: alternate PLL settings used for qlm %d, lane mode %d, reference clock %d\n",
		      __func__, qlm, lane_mode, ref_clk_sel);
		if (__set_qlm_ref_clk_cn78xx(0, qlm, lane_mode, ref_clk_sel)) {
			printf("%s: Error: reference clock %d is not supported for qlm %d\n",
			       __func__, ref_clk_sel, qlm);
			return -1;
		}
	}

	/* Power the PHY back up, still held in reset */
	phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
	phy_ctl.s.phy_pd = 0;
	phy_ctl.s.phy_reset = 1;
	csr_wr(CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);

	/* Select the protocol this QLM's lanes will carry */
	cfg.u64 = csr_rd(CVMX_GSERX_CFG(qlm));
	cfg.s.bgx = is_bgx & 1;
	cfg.s.bgx_quad = (is_bgx >> 2) & 1;
	cfg.s.bgx_dual = (is_bgx >> 1) & 1;
	cfg.s.pcie = is_pcie;
	cfg.s.srio = is_srio;
	cfg.s.rmac = is_rmac;
	cfg.s.rmac_pipe = is_rmac_pipe;
	csr_wr(CVMX_GSERX_CFG(qlm), cfg.u64);

	/* Program the selected lane mode */
	lmode.u64 = csr_rd(CVMX_GSERX_LANE_MODE(qlm));
	lmode.s.lmode = lane_mode;
	csr_wr(CVMX_GSERX_LANE_MODE(qlm), lmode.u64);

	/*
	 * For BGX modes, set up the per-LMAC type and lane mapping chosen
	 * above, and optionally enable KR link training.
	 */
	if (is_bgx) {
		int bgx = 0;
		cvmx_bgxx_cmrx_config_t cmr_config;
		cvmx_bgxx_cmr_rx_lmacs_t rx_lmacs;
		cvmx_bgxx_spux_br_pmd_control_t spu_pmd_control;
		int index, total_lmacs = 0;

		for (index = 0; index < 4; index++) {
			cmr_config.u64 = csr_rd(CVMX_BGXX_CMRX_CONFIG(index, bgx));
			cmr_config.s.enable = 0;
			cmr_config.s.data_pkt_rx_en = 0;
			cmr_config.s.data_pkt_tx_en = 0;
			if (lmac_type[index] != -1) {
				cmr_config.s.lmac_type = lmac_type[index];
				cmr_config.s.lane_to_sds = sds_lane[index];
				total_lmacs++;
			}
			csr_wr(CVMX_BGXX_CMRX_CONFIG(index, bgx), cmr_config.u64);

			/* Enable training for 10G_KR/40G_KR4 LMAC types */
			if (enable_training == 1 &&
			    (lmac_type[index] == 3 || lmac_type[index] == 4)) {
				spu_pmd_control.u64 =
					csr_rd(CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, bgx));
				spu_pmd_control.s.train_en = 1;
				csr_wr(CVMX_BGXX_SPUX_BR_PMD_CONTROL(index, bgx),
				       spu_pmd_control.u64);
			}
		}

		/* Publish the total LMAC count for both RX and TX */
		rx_lmacs.u64 = csr_rd(CVMX_BGXX_CMR_RX_LMACS(bgx));
		rx_lmacs.s.lmacs = total_lmacs + additional_lmacs;
		csr_wr(CVMX_BGXX_CMR_RX_LMACS(bgx), rx_lmacs.u64);
		csr_wr(CVMX_BGXX_CMR_TX_LMACS(bgx), rx_lmacs.u64);
	}

	/* Release the PHY from reset */
	phy_ctl.u64 = csr_rd(CVMX_GSERX_PHY_CTL(qlm));
	phy_ctl.s.phy_reset = 0;
	csr_wr(CVMX_GSERX_PHY_CTL(qlm), phy_ctl.u64);

	/* Short settle time after deasserting reset */
	udelay(1);

	/* SRIO only needs its status flag set; the rest is PCIe/BGX-specific */
	if (is_srio) {
		status_reg.u64 = csr_rd(CVMX_SRIOX_STATUS_REG(port));
		status_reg.s.srio = 1;
		csr_wr(CVMX_SRIOX_STATUS_REG(port), status_reg.u64);
		return 0;
	}

	/*
	 * Wait for the QLM to come out of reset. PCIe is excluded: its reset
	 * handshake is handled by the PEM logic.
	 */
	if (!is_pcie && CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_QLM_STAT(qlm), cvmx_gserx_qlm_stat_t,
					      rst_rdy, ==, 1, 10000)) {
		printf("QLM%d: Timeout waiting for GSERX_QLM_STAT[rst_rdy]\n", qlm);
		return -1;
	}

	/* Configure the PLLs: CPRI/SDL use the RMAC tables, others the common path */
	if (is_rmac)
		__rmac_pll_config(baud_mhz, qlm, mode);
	else if (!(is_pcie || is_srio))
		__qlm_setup_pll_cn78xx(0, qlm);

	/* Wait for the QLM PLL to lock */
	if (CVMX_WAIT_FOR_FIELD64(CVMX_GSERX_PLL_STAT(qlm), cvmx_gserx_pll_stat_t,
				  pll_lock, ==, 1, 10000)) {
		printf("QLM%d: Timeout waiting for GSERX_PLL_STAT[pll_lock]\n", qlm);
		return -1;
	}

	/*
	 * Errata (GSER-27140): apply the RX equalizer temperature workaround
	 * for 10.3125Gbps links.
	 */
	if (baud_mhz == 103125)
		__qlm_rx_eq_temp_gser27140(0, qlm);

	return 0;
}
5766
5767
5768
5769
5770
5771
5772
5773
5774
5775
5776
5777
5778
5779
5780
5781
5782
5783
5784
5785
5786
5787
5788
5789
5790
5791
5792
5793int octeon_configure_qlm(int qlm, int speed, int mode, int rc, int pcie_mode, int ref_clk_sel,
5794 int ref_clk_input)
5795{
5796 int node = 0;
5797
5798 debug("%s(%d, %d, %d, %d, %d, %d, %d)\n", __func__, qlm, speed, mode, rc, pcie_mode,
5799 ref_clk_sel, ref_clk_input);
5800 if (OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
5801 return octeon_configure_qlm_cn61xx(qlm, speed, mode, rc, pcie_mode);
5802 else if (OCTEON_IS_MODEL(OCTEON_CN70XX))
5803 return octeon_configure_qlm_cn70xx(qlm, speed, mode, rc, pcie_mode, ref_clk_sel,
5804 ref_clk_input);
5805 else if (OCTEON_IS_MODEL(OCTEON_CN78XX))
5806 return octeon_configure_qlm_cn78xx(node, qlm, speed, mode, rc, pcie_mode,
5807 ref_clk_sel, ref_clk_input);
5808 else if (OCTEON_IS_MODEL(OCTEON_CN73XX))
5809 return octeon_configure_qlm_cn73xx(qlm, speed, mode, rc, pcie_mode, ref_clk_sel,
5810 ref_clk_input);
5811 else if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
5812 return octeon_configure_qlm_cnf75xx(qlm, speed, mode, rc, pcie_mode, ref_clk_sel,
5813 ref_clk_input);
5814 else
5815 return -1;
5816}
5817
5818void octeon_init_qlm(int node)
5819{
5820 int qlm;
5821 cvmx_gserx_phy_ctl_t phy_ctl;
5822 cvmx_gserx_cfg_t cfg;
5823 int baud_mhz;
5824 int pem;
5825
5826 if (!OCTEON_IS_MODEL(OCTEON_CN78XX))
5827 return;
5828
5829 for (qlm = 0; qlm < 8; qlm++) {
5830 phy_ctl.u64 = csr_rd_node(node, CVMX_GSERX_PHY_CTL(qlm));
5831 if (phy_ctl.s.phy_reset == 0) {
5832 cfg.u64 = csr_rd_node(node, CVMX_GSERX_CFG(qlm));
5833 if (cfg.s.pcie)
5834 __cvmx_qlm_pcie_errata_cn78xx(node, qlm);
5835 else
5836 __qlm_init_errata_20844(node, qlm);
5837
5838 baud_mhz = cvmx_qlm_get_gbaud_mhz_node(node, qlm);
5839 if (baud_mhz == 6250 || baud_mhz == 6316)
5840 octeon_qlm_tune_v3(node, qlm, baud_mhz, 0xa, 0xa0, -1, -1);
5841 else if (baud_mhz == 103125)
5842 octeon_qlm_tune_v3(node, qlm, baud_mhz, 0xd, 0xd0, -1, -1);
5843 }
5844 }
5845
5846
5847 for (pem = 0; pem < 4; pem++) {
5848 cvmx_rst_ctlx_t rst_ctl;
5849
5850 rst_ctl.u64 = csr_rd_node(node, CVMX_RST_CTLX(pem));
5851 __setup_pem_reset(node, pem, !rst_ctl.s.host_mode);
5852 }
5853}
5854