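/*
 * Helper functions for XAUI/RXAUI interface initialization, configuration
 * and monitoring on Octeon GMX/PCSX hardware.
 */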
#include <time.h>
#include <log.h>
#include <linux/delay.h>

#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>

#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-xcv-defs.h>

#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>

int __cvmx_helper_xaui_enumerate(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int interface = xi.interface;
	union cvmx_gmxx_hg2_control gmx_hg2_control;

	if (OCTEON_IS_MODEL(OCTEON_CN70XX)) {
		enum cvmx_qlm_mode qlm_mode =
			cvmx_qlm_get_dlm_mode(0, interface);

		if (qlm_mode == CVMX_QLM_MODE_RXAUI)
			return 1;
		return 0;
	}

	/* With HiGig2 TX enabled the XAUI carries 16 virtual ports, otherwise 1 */
	gmx_hg2_control.u64 = csr_rd(CVMX_GMXX_HG2_CONTROL(interface));
	if (gmx_hg2_control.s.hg2tx_en)
		return 16;
	else
		return 1;
}
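/*
 * Probe a XAUI interface and determine the number of ports connected to it.
 * The interface is left down after this call; only enumeration and
 * chip-specific SerDes setup are performed here.
 *
 * Returns the number of ports on the interface, or zero to disable it.
 */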
int __cvmx_helper_xaui_probe(int xiface)
{
	int i, ports;
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int interface = xi.interface;
	union cvmx_gmxx_inf_mode mode;

	/*
	 * CN63XX pass 1.0 requires the QLM2 TX de-emphasis and margin to be
	 * programmed by software (SerDes errata workaround).
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0)) {
		union cvmx_ciu_qlm2 ciu_qlm;

		ciu_qlm.u64 = csr_rd(CVMX_CIU_QLM2);
		ciu_qlm.s.txbypass = 1;
		ciu_qlm.s.txdeemph = 0x5;
		ciu_qlm.s.txmargin = 0x1a;
		csr_wr(CVMX_CIU_QLM2, ciu_qlm.u64);
	}

	/*
	 * On CN63XX pass 2.x, read the QLM2 speed straps and, for the 0xb
	 * speed setting, program alternate TX de-emphasis and margin values
	 * (SerDes errata workaround).
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_X)) {
		union cvmx_mio_rst_boot mio_rst_boot;

		mio_rst_boot.u64 = csr_rd(CVMX_MIO_RST_BOOT);

		if (mio_rst_boot.cn63xx.qlm2_spd == 0xb) {
			union cvmx_ciu_qlm2 ciu_qlm;

			ciu_qlm.u64 = csr_rd(CVMX_CIU_QLM2);
			ciu_qlm.s.txbypass = 1;
			ciu_qlm.s.txdeemph = 0xa;
			ciu_qlm.s.txmargin = 0x1f;
			csr_wr(CVMX_CIU_QLM2, ciu_qlm.u64);
		}
	}

	/*
	 * Verify the QLM is actually configured for XAUI or RXAUI before
	 * claiming the interface.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		int qlm = cvmx_qlm_interface(xiface);
		enum cvmx_qlm_mode mode = cvmx_qlm_get_mode(qlm);

		if (mode != CVMX_QLM_MODE_XAUI && mode != CVMX_QLM_MODE_RXAUI)
			return 0;
	}

	ports = __cvmx_helper_xaui_enumerate(xiface);

	if (ports <= 0)
		return 0;

	/*
	 * Enable the interface in GMX before IPD is brought up so that
	 * per-port backpressure works correctly.
	 */
	mode.u64 = csr_rd(CVMX_GMXX_INF_MODE(interface));
	mode.s.en = 1;
	csr_wr(CVMX_GMXX_INF_MODE(interface), mode.u64);

	if (!OCTEON_IS_MODEL(OCTEON_CN68XX) &&
	    !OCTEON_IS_MODEL(OCTEON_CN70XX)) {
		/*
		 * Set up PKO to support 16 ports so HiGig2 virtual ports can
		 * be used: point all 16 PKO packet ports for this interface
		 * at the XAUI, which allows per-port HiGig2 backpressure.
		 */
		for (i = 0; i < 16; i++) {
			union cvmx_pko_mem_port_ptrs pko_mem_port_ptrs;

			pko_mem_port_ptrs.u64 = 0;
			/*
			 * Give every PKO port equal priority in a
			 * round-robin fashion.
			 */
			pko_mem_port_ptrs.s.static_p = 0;
			pko_mem_port_ptrs.s.qos_mask = 0xff;
			/* Set the engine, port ID and backpressure port */
			pko_mem_port_ptrs.s.eid = interface * 4;
			pko_mem_port_ptrs.s.pid = interface * 16 + i;
			pko_mem_port_ptrs.s.bp_port = interface * 16 + i;
			csr_wr(CVMX_PKO_MEM_PORT_PTRS, pko_mem_port_ptrs.u64);
		}
	}

	return ports;
}
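/*
 * Bring up the PCS and GMX for one XAUI interface and wait for the link.
 *
 * Returns zero on success or -1 if any of the reset, alignment or link
 * status polls time out.
 */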
int __cvmx_helper_xaui_link_init(int interface)
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	union cvmx_pcsxx_control1_reg xaui_ctl;
	union cvmx_pcsxx_misc_ctl_reg misc_ctl;
	union cvmx_gmxx_tx_xaui_ctl tx_ctl;

	/* Keep GMX disabled while the PCS is reconfigured */
	misc_ctl.u64 = csr_rd(CVMX_PCSXX_MISC_CTL_REG(interface));
	misc_ctl.s.gmxeno = 1;
	csr_wr(CVMX_PCSXX_MISC_CTL_REG(interface), misc_ctl.u64);

	/* Mask GMX and PCS interrupts during bring-up */
	csr_wr(CVMX_GMXX_RXX_INT_EN(0, interface), 0x0);
	csr_wr(CVMX_GMXX_TX_INT_EN(interface), 0x0);
	csr_wr(CVMX_PCSXX_INT_EN_REG(interface), 0x0);

	/*
	 * Enable the deficit idle count on TX and make sure unidirectional
	 * mode is off.
	 */
	tx_ctl.u64 = csr_rd(CVMX_GMXX_TX_XAUI_CTL(interface));
	tx_ctl.s.dic_en = 1;
	tx_ctl.s.uni_en = 0;
	csr_wr(CVMX_GMXX_TX_XAUI_CTL(interface), tx_ctl.u64);

	/* Take the PCS out of low power mode */
	xaui_ctl.u64 = csr_rd(CVMX_PCSXX_CONTROL1_REG(interface));
	xaui_ctl.s.lo_pwr = 0;

	/*
	 * Issue a PCS soft reset, except on the models where the reset must
	 * be skipped.
	 */
	if (!OCTEON_IS_MODEL(OCTEON_CN63XX) &&
	    !OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_X) &&
	    !OCTEON_IS_MODEL(OCTEON_CN68XX))
		xaui_ctl.s.reset = 1;
	csr_wr(CVMX_PCSXX_CONTROL1_REG(interface), xaui_ctl.u64);

	if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS2_X) && interface != 1) {
		/*
		 * Apply the errata G-16467 workaround to the QLM used by
		 * this XAUI interface.
		 */
		int qlm = interface;
#ifdef CVMX_QLM_DUMP_STATE
		debug("%s:%d: XAUI%d: Applying workaround for Errata G-16467\n",
		      __func__, __LINE__, qlm);
		cvmx_qlm_display_registers(qlm);
		debug("\n");
#endif
		/*
		 * Only apply the workaround when the QLM runs at 6.25 Gbaud
		 * and clkf_byp has not already been programmed to 20.
		 */
		if ((cvmx_qlm_get_gbaud_mhz(qlm) == 6250) &&
		    (cvmx_qlm_jtag_get(qlm, 0, "clkf_byp") != 20)) {
			udelay(100);
			cvmx_qlm_jtag_set(qlm, -1, "clkf_byp", 20);
			cvmx_qlm_jtag_set(qlm, -1, "cfg_rst_n_clr", 0);
			udelay(100);
			cvmx_qlm_jtag_set(qlm, -1, "cfg_tx_idle_set", 0);
		}
#ifdef CVMX_QLM_DUMP_STATE
		debug("%s:%d: XAUI%d: Done applying workaround for Errata G-16467\n",
		      __func__, __LINE__, qlm);
		cvmx_qlm_display_registers(qlm);
		debug("\n\n");
#endif
	}

	/* Wait for the PCS to come out of reset */
	if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSXX_CONTROL1_REG(interface),
				  cvmx_pcsxx_control1_reg_t, reset, ==, 0,
				  10000))
		return -1;

	/* Wait for the PCS lanes to align */
	if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSXX_10GBX_STATUS_REG(interface),
				  cvmx_pcsxx_10gbx_status_reg_t, alignd, ==, 1,
				  10000))
		return -1;

	/* Wait for the GMX RX XAUI status to clear */
	if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_RX_XAUI_CTL(interface),
				  cvmx_gmxx_rx_xaui_ctl_t, status, ==, 0,
				  10000))
		return -1;

	/* Wait for the GMX RX and TX paths to be idle */
	if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(0, interface),
				  cvmx_gmxx_prtx_cfg_t, rx_idle, ==, 1, 10000))
		return -1;

	if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(0, interface),
				  cvmx_gmxx_prtx_cfg_t, tx_idle, ==, 1, 10000))
		return -1;

	/* Configure GMX for the XAUI port */
	gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(0, interface));
	gmx_cfg.s.speed = 1;
	gmx_cfg.s.speed_msb = 0;
	gmx_cfg.s.slottime = 1;
	csr_wr(CVMX_GMXX_TX_PRTS(interface), 1);
	csr_wr(CVMX_GMXX_TXX_SLOT(0, interface), 512);
	csr_wr(CVMX_GMXX_TXX_BURST(0, interface), 8192);
	csr_wr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);

	/* Wait for a receive link and for the TX/RX fault bits to clear */
	if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSXX_STATUS1_REG(interface),
				  cvmx_pcsxx_status1_reg_t, rcv_lnk, ==, 1,
				  10000))
		return -1;
	if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSXX_STATUS2_REG(interface),
				  cvmx_pcsxx_status2_reg_t, xmtflt, ==, 0,
				  10000))
		return -1;
	if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSXX_STATUS2_REG(interface),
				  cvmx_pcsxx_status2_reg_t, rcvflt, ==, 0,
				  10000))
		return -1;

	/* Allow GMX traffic again now that the PCS is up */
	misc_ctl.s.gmxeno = 0;
	csr_wr(CVMX_PCSXX_MISC_CTL_REG(interface), misc_ctl.u64);

	/* Clear any interrupt bits latched during bring-up */
	csr_wr(CVMX_GMXX_RXX_INT_REG(0, interface), ~0x0ull);
	csr_wr(CVMX_GMXX_TX_INT_REG(interface), ~0x0ull);
	csr_wr(CVMX_PCSXX_INT_REG(interface), ~0x0ull);

	/* Enable the GMX port */
	gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(0, interface));
	gmx_cfg.s.en = 1;
	csr_wr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);

	return 0;
}
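/*
 * Bring up the packet interface: configure GMX, program PKND/BPID on chips
 * that support them, and initialize the XAUI link.
 *
 * Always returns zero; link initialization failures are not propagated.
 */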
int __cvmx_helper_xaui_enable(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int interface = xi.interface;

	__cvmx_helper_setup_gmx(interface, 1);

	/* Set up PKND and BPID on chips with packet-kind support */
	if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
		union cvmx_gmxx_bpid_msk bpid_msk;
		union cvmx_gmxx_bpid_mapx bpid_map;
		union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
		union cvmx_gmxx_txx_append gmxx_txx_append_cfg;

		/* Assign the port kind (PKND) for this port */
		gmxx_prtx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(0, interface));
		gmxx_prtx_cfg.s.pknd = cvmx_helper_get_pknd(interface, 0);
		csr_wr(CVMX_GMXX_PRTX_CFG(0, interface), gmxx_prtx_cfg.u64);

		/* Set the backpressure ID (BPID) mapping and mask */
		bpid_map.u64 = csr_rd(CVMX_GMXX_BPID_MAPX(0, interface));
		bpid_map.s.val = 1;
		bpid_map.s.bpid = cvmx_helper_get_bpid(interface, 0);
		csr_wr(CVMX_GMXX_BPID_MAPX(0, interface), bpid_map.u64);

		bpid_msk.u64 = csr_rd(CVMX_GMXX_BPID_MSK(interface));
		bpid_msk.s.msk_or |= 1;
		bpid_msk.s.msk_and &= ~1;
		csr_wr(CVMX_GMXX_BPID_MSK(interface), bpid_msk.u64);

		/* Disable GMX FCS and padding insertion for this port */
		gmxx_txx_append_cfg.u64 =
			csr_rd(CVMX_GMXX_TXX_APPEND(0, interface));
		gmxx_txx_append_cfg.s.fcs = 0;
		gmxx_txx_append_cfg.s.pad = 0;
		csr_wr(CVMX_GMXX_TXX_APPEND(0, interface),
		       gmxx_txx_append_cfg.u64);
	}

	/* Enable RXAUI disparity handling on CN70XX */
	if (OCTEON_IS_MODEL(OCTEON_CN70XX)) {
		union cvmx_gmxx_rxaui_ctl rxaui_ctl;

		rxaui_ctl.u64 = csr_rd(CVMX_GMXX_RXAUI_CTL(interface));
		rxaui_ctl.s.disparity = 1;
		csr_wr(CVMX_GMXX_RXAUI_CTL(interface), rxaui_ctl.u64);
	}

	__cvmx_helper_xaui_link_init(interface);

	return 0;
}
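/*
 * Return the current link state of an XAUI port. The link is reported up
 * only when both GMX and the PCS agree; the reported speed is derived from
 * the QLM baud rate where possible.
 */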
cvmx_helper_link_info_t __cvmx_helper_xaui_link_get(int ipd_port)
{
	int interface = cvmx_helper_get_interface_num(ipd_port);
	union cvmx_gmxx_tx_xaui_ctl gmxx_tx_xaui_ctl;
	union cvmx_gmxx_rx_xaui_ctl gmxx_rx_xaui_ctl;
	union cvmx_pcsxx_status1_reg pcsxx_status1_reg;
	cvmx_helper_link_info_t result;

	gmxx_tx_xaui_ctl.u64 = csr_rd(CVMX_GMXX_TX_XAUI_CTL(interface));
	gmxx_rx_xaui_ctl.u64 = csr_rd(CVMX_GMXX_RX_XAUI_CTL(interface));
	pcsxx_status1_reg.u64 = csr_rd(CVMX_PCSXX_STATUS1_REG(interface));
	result.u64 = 0;

	/* Only report link up when GMX TX, GMX RX and the PCS all agree */
	if (gmxx_tx_xaui_ctl.s.ls == 0 && gmxx_rx_xaui_ctl.s.status == 0 &&
	    pcsxx_status1_reg.s.rcv_lnk == 1) {
		union cvmx_pcsxx_misc_ctl_reg misc_ctl;

		result.s.link_up = 1;
		result.s.full_duplex = 1;
		if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
			union cvmx_mio_qlmx_cfg qlm_cfg;
			int lanes;
			int qlm = (interface == 1) ? 0 : interface;

			/*
			 * Link speed is the per-lane baud rate scaled by the
			 * 8b/10b coding overhead, times the lane count
			 * (two lanes for RXAUI, four for XAUI).
			 */
			qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(qlm));
			result.s.speed = cvmx_qlm_get_gbaud_mhz(qlm) * 8 / 10;
			lanes = (qlm_cfg.s.qlm_cfg == 7) ? 2 : 4;
			result.s.speed *= lanes;
		} else if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			int qlm = cvmx_qlm_interface(interface);

			result.s.speed = cvmx_qlm_get_gbaud_mhz(qlm) * 8 / 10;
			result.s.speed *= 4;
		} else {
			result.s.speed = 10000;
		}
		misc_ctl.u64 = csr_rd(CVMX_PCSXX_MISC_CTL_REG(interface));
		/* If GMX is still disabled in the PCS, rerun link init */
		if (misc_ctl.s.gmxeno)
			__cvmx_helper_xaui_link_init(interface);
	} else {
		/* Link is down: mask GMX and PCS interrupts */
		csr_wr(CVMX_GMXX_RXX_INT_EN(0, interface), 0x0);
		csr_wr(CVMX_GMXX_TX_INT_EN(interface), 0x0);
		csr_wr(CVMX_PCSXX_INT_EN_REG(interface), 0x0);
	}
	return result;
}
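/*
 * Set the link state of an XAUI port. If the requested state is up and the
 * hardware does not already agree, the link is re-initialized.
 *
 * Returns zero if nothing needed to be done, otherwise the result of the
 * link re-initialization.
 */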
int __cvmx_helper_xaui_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
{
	int interface = cvmx_helper_get_interface_num(ipd_port);
	union cvmx_gmxx_tx_xaui_ctl gmxx_tx_xaui_ctl;
	union cvmx_gmxx_rx_xaui_ctl gmxx_rx_xaui_ctl;

	gmxx_tx_xaui_ctl.u64 = csr_rd(CVMX_GMXX_TX_XAUI_CTL(interface));
	gmxx_rx_xaui_ctl.u64 = csr_rd(CVMX_GMXX_RX_XAUI_CTL(interface));

	if (!link_info.s.link_up)
		return 0;

	if (gmxx_tx_xaui_ctl.s.ls == 0 && gmxx_rx_xaui_ctl.s.status == 0)
		return 0;

	return __cvmx_helper_xaui_link_init(interface);
}
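/*
 * Configure internal (PCS) and/or external (GMX) loopback for an XAUI port,
 * then re-initialize the link so the new settings take effect.
 *
 * Returns the result of the link re-initialization.
 */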
int __cvmx_helper_xaui_configure_loopback(int ipd_port, int enable_internal,
					  int enable_external)
{
	int interface = cvmx_helper_get_interface_num(ipd_port);
	union cvmx_pcsxx_control1_reg pcsxx_control1_reg;
	union cvmx_gmxx_xaui_ext_loopback gmxx_xaui_ext_loopback;

	/* Set the internal loopback in the PCS */
	pcsxx_control1_reg.u64 = csr_rd(CVMX_PCSXX_CONTROL1_REG(interface));
	pcsxx_control1_reg.s.loopbck1 = enable_internal;
	csr_wr(CVMX_PCSXX_CONTROL1_REG(interface), pcsxx_control1_reg.u64);

	/* Set the external loopback in GMX */
	gmxx_xaui_ext_loopback.u64 =
		csr_rd(CVMX_GMXX_XAUI_EXT_LOOPBACK(interface));
	gmxx_xaui_ext_loopback.s.en = enable_external;
	csr_wr(CVMX_GMXX_XAUI_EXT_LOOPBACK(interface),
	       gmxx_xaui_ext_loopback.u64);

	/* Take the link through a reset so the new settings take effect */
	return __cvmx_helper_xaui_link_init(interface);
}