uboot/arch/mips/mach-octeon/cvmx-helper-xaui.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2022 Marvell International Ltd.
 *
 * Functions for XAUI initialization, configuration,
 * and monitoring.
 */

#include <time.h>
#include <log.h>
#include <linux/delay.h>

#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>

#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-xcv-defs.h>

#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>

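/**
 * @INTERNAL
 * Enumerate the number of ports on a XAUI interface. On CN70XX a
 * single port is reported when the DLM is in RXAUI mode. On other
 * models one port is reported unless HiGig2 transmit is enabled, in
 * which case 16 virtual ports are reported.
 *
 * @param xiface Interface to enumerate
 *
 * @return Number of ports on the interface. Zero to disable.
 */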
int __cvmx_helper_xaui_enumerate(int xiface)
{
        struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
        int interface = xi.interface;
        union cvmx_gmxx_hg2_control gmx_hg2_control;

        if (OCTEON_IS_MODEL(OCTEON_CN70XX)) {
                enum cvmx_qlm_mode qlm_mode =
                        cvmx_qlm_get_dlm_mode(0, interface);

                if (qlm_mode == CVMX_QLM_MODE_RXAUI)
                        return 1;
                return 0;
        }
        /* If HiGig2 is enabled return 16 ports, otherwise return 1 port */
        gmx_hg2_control.u64 = csr_rd(CVMX_GMXX_HG2_CONTROL(interface));
        if (gmx_hg2_control.s.hg2tx_en)
                return 16;
        else
                return 1;
}

/**
 * @INTERNAL
 * Probe a XAUI interface and determine the number of ports
 * connected to it. The XAUI interface should still be down
 * after this call.
 *
 * @param xiface Interface to probe
 *
 * @return Number of ports on the interface. Zero to disable.
 */
int __cvmx_helper_xaui_probe(int xiface)
{
        int i, ports;
        struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
        int interface = xi.interface;
        union cvmx_gmxx_inf_mode mode;

        /*
         * CN63XX Pass 1.0 errata G-14395 requires that the QLM
         * de-emphasis be programmed.
         */
        if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0)) {
                union cvmx_ciu_qlm2 ciu_qlm;

                ciu_qlm.u64 = csr_rd(CVMX_CIU_QLM2);
                ciu_qlm.s.txbypass = 1;
                ciu_qlm.s.txdeemph = 0x5;
                ciu_qlm.s.txmargin = 0x1a;
                csr_wr(CVMX_CIU_QLM2, ciu_qlm.u64);
        }

        /*
         * CN63XX Pass 2.x errata G-15273 requires that the QLM
         * de-emphasis be programmed when using a 156.25 MHz
         * reference clock.
         */
        if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_X)) {
                /* Read the QLM speed pins */
                union cvmx_mio_rst_boot mio_rst_boot;

                mio_rst_boot.u64 = csr_rd(CVMX_MIO_RST_BOOT);

                if (mio_rst_boot.cn63xx.qlm2_spd == 0xb) {
                        union cvmx_ciu_qlm2 ciu_qlm;

                        ciu_qlm.u64 = csr_rd(CVMX_CIU_QLM2);
                        ciu_qlm.s.txbypass = 1;
                        ciu_qlm.s.txdeemph = 0xa;
                        ciu_qlm.s.txmargin = 0x1f;
                        csr_wr(CVMX_CIU_QLM2, ciu_qlm.u64);
                }
        }

        /*
         * Check that the QLM is configured correctly for XAUI/RXAUI;
         * verify the speed as well as the mode.
         */
        if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
                int qlm = cvmx_qlm_interface(xiface);
                enum cvmx_qlm_mode mode = cvmx_qlm_get_mode(qlm);

                if (mode != CVMX_QLM_MODE_XAUI && mode != CVMX_QLM_MODE_RXAUI)
                        return 0;
        }

        ports = __cvmx_helper_xaui_enumerate(xiface);

        if (ports <= 0)
                return 0;

        /*
         * Due to errata GMX-700 on CN56XXp1.x and CN52XXp1.x, the
         * interface needs to be enabled before IPD, otherwise per-port
         * backpressure may not work properly.
         */
        mode.u64 = csr_rd(CVMX_GMXX_INF_MODE(interface));
        mode.s.en = 1;
        csr_wr(CVMX_GMXX_INF_MODE(interface), mode.u64);

        if (!OCTEON_IS_MODEL(OCTEON_CN68XX) &&
            !OCTEON_IS_MODEL(OCTEON_CN70XX)) {
                /*
                 * Setup PKO to support 16 ports for HiGig2 virtual
                 * ports. We're pointing all of the PKO packet ports
                 * for this interface to the XAUI. This allows us to
                 * use HiGig2 backpressure per port.
                 */
                for (i = 0; i < 16; i++) {
                        union cvmx_pko_mem_port_ptrs pko_mem_port_ptrs;

                        pko_mem_port_ptrs.u64 = 0;
                        /*
                         * We set each PKO port to have equal priority
                         * in a round robin fashion.
                         */
                        pko_mem_port_ptrs.s.static_p = 0;
                        pko_mem_port_ptrs.s.qos_mask = 0xff;
                        /* All PKO ports map to the same XAUI hardware port */
                        pko_mem_port_ptrs.s.eid = interface * 4;
                        pko_mem_port_ptrs.s.pid = interface * 16 + i;
                        pko_mem_port_ptrs.s.bp_port = interface * 16 + i;
                        csr_wr(CVMX_PKO_MEM_PORT_PTRS, pko_mem_port_ptrs.u64);
                }
        }

        return ports;
}

/**
 * @INTERNAL
 * Bring up the XAUI interface. After this call packet I/O should be
 * fully functional.
 *
 * @param interface Interface to bring up
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_xaui_link_init(int interface)
{
        union cvmx_gmxx_prtx_cfg gmx_cfg;
        union cvmx_pcsxx_control1_reg xaui_ctl;
        union cvmx_pcsxx_misc_ctl_reg misc_ctl;
        union cvmx_gmxx_tx_xaui_ctl tx_ctl;

        /* (1) Interface has already been enabled. */

        /* (2) Disable GMX. */
        misc_ctl.u64 = csr_rd(CVMX_PCSXX_MISC_CTL_REG(interface));
        misc_ctl.s.gmxeno = 1;
        csr_wr(CVMX_PCSXX_MISC_CTL_REG(interface), misc_ctl.u64);

        /* (3) Disable GMX and PCSX interrupts. */
        csr_wr(CVMX_GMXX_RXX_INT_EN(0, interface), 0x0);
        csr_wr(CVMX_GMXX_TX_INT_EN(interface), 0x0);
        csr_wr(CVMX_PCSXX_INT_EN_REG(interface), 0x0);

        /* (4) Bring up the PCSX and GMX reconciliation layer. */
        /* (4)a Set polarity and lane swapping. */
        /* (4)b */
        tx_ctl.u64 = csr_rd(CVMX_GMXX_TX_XAUI_CTL(interface));
        /* Enable better IFG packing, which improves performance */
        tx_ctl.s.dic_en = 1;
        tx_ctl.s.uni_en = 0;
        csr_wr(CVMX_GMXX_TX_XAUI_CTL(interface), tx_ctl.u64);

        /* (4)c Apply reset sequence */
        xaui_ctl.u64 = csr_rd(CVMX_PCSXX_CONTROL1_REG(interface));
        xaui_ctl.s.lo_pwr = 0;

        /*
         * Errata G-15618 requires disabling PCS soft reset in some
         * OCTEON II models.
         */
        if (!OCTEON_IS_MODEL(OCTEON_CN63XX) &&
            !OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_X) &&
            !OCTEON_IS_MODEL(OCTEON_CN68XX))
                xaui_ctl.s.reset = 1;
        csr_wr(CVMX_PCSXX_CONTROL1_REG(interface), xaui_ctl.u64);

        if (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS2_X) && interface != 1) {
                /*
                 * Note that GMX 1 was skipped as GMX0 is on the same
                 * QLM and will always be done first
                 *
                 * Workaround for Errata (G-16467).
                 */
                int qlm = interface;
#ifdef CVMX_QLM_DUMP_STATE
                debug("%s:%d: XAUI%d: Applying workaround for Errata G-16467\n",
                      __func__, __LINE__, qlm);
                cvmx_qlm_display_registers(qlm);
                debug("\n");
#endif
                /*
                 * This workaround only applies to QLMs running XAUI
                 * at 6.25 GHz
                 */
                if ((cvmx_qlm_get_gbaud_mhz(qlm) == 6250) &&
                    (cvmx_qlm_jtag_get(qlm, 0, "clkf_byp") != 20)) {
                        /* Wait 100us for links to stabilize */
                        udelay(100);
                        cvmx_qlm_jtag_set(qlm, -1, "clkf_byp", 20);
                        /* Allow the QLM to exit reset */
                        cvmx_qlm_jtag_set(qlm, -1, "cfg_rst_n_clr", 0);
                        /* Wait 100us for links to stabilize */
                        udelay(100);
                        /* Allow TX on QLM */
                        cvmx_qlm_jtag_set(qlm, -1, "cfg_tx_idle_set", 0);
                }
#ifdef CVMX_QLM_DUMP_STATE
                debug("%s:%d: XAUI%d: Done applying workaround for Errata G-16467\n",
                      __func__, __LINE__, qlm);
                cvmx_qlm_display_registers(qlm);
                debug("\n\n");
#endif
        }

        /* Wait for PCS to come out of reset */
        if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSXX_CONTROL1_REG(interface),
                                  cvmx_pcsxx_control1_reg_t, reset, ==, 0,
                                  10000))
                return -1;
        /* Wait for PCS to be aligned */
        if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSXX_10GBX_STATUS_REG(interface),
                                  cvmx_pcsxx_10gbx_status_reg_t, alignd, ==, 1,
                                  10000))
                return -1;
        /* Wait for RX to be ready */
        if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_RX_XAUI_CTL(interface),
                                  cvmx_gmxx_rx_xaui_ctl_t, status, ==, 0,
                                  10000))
                return -1;

        /* (6) Configure GMX */

        /* Wait for GMX RX to be idle */
        if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(0, interface),
                                  cvmx_gmxx_prtx_cfg_t, rx_idle, ==, 1, 10000))
                return -1;
        /* Wait for GMX TX to be idle */
        if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(0, interface),
                                  cvmx_gmxx_prtx_cfg_t, tx_idle, ==, 1, 10000))
                return -1;

        /* GMX configure */
        gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(0, interface));
        gmx_cfg.s.speed = 1;
        gmx_cfg.s.speed_msb = 0;
        gmx_cfg.s.slottime = 1;
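        /*
         * A XAUI interface exposes a single physical port: tell GMX TX
         * there is one port and program the TX slot time and burst size
         * before writing back the port configuration.
         */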
        csr_wr(CVMX_GMXX_TX_PRTS(interface), 1);
        csr_wr(CVMX_GMXX_TXX_SLOT(0, interface), 512);
        csr_wr(CVMX_GMXX_TXX_BURST(0, interface), 8192);
        csr_wr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);

        /* Wait for receive link */
        if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSXX_STATUS1_REG(interface),
                                  cvmx_pcsxx_status1_reg_t, rcv_lnk, ==, 1,
                                  10000))
                return -1;
        if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSXX_STATUS2_REG(interface),
                                  cvmx_pcsxx_status2_reg_t, xmtflt, ==, 0,
                                  10000))
                return -1;
        if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSXX_STATUS2_REG(interface),
                                  cvmx_pcsxx_status2_reg_t, rcvflt, ==, 0,
                                  10000))
                return -1;

        /* (8) Enable packet reception */
        misc_ctl.s.gmxeno = 0;
        csr_wr(CVMX_PCSXX_MISC_CTL_REG(interface), misc_ctl.u64);

        /* Clear all error interrupts before enabling the interface. */
        csr_wr(CVMX_GMXX_RXX_INT_REG(0, interface), ~0x0ull);
        csr_wr(CVMX_GMXX_TX_INT_REG(interface), ~0x0ull);
        csr_wr(CVMX_PCSXX_INT_REG(interface), ~0x0ull);

        /* Enable GMX */
        gmx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(0, interface));
        gmx_cfg.s.en = 1;
        csr_wr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);

        return 0;
}

/**
 * @INTERNAL
 * Bring up and enable a XAUI interface. After this call packet
 * I/O should be fully functional. This is called with IPD
 * enabled but PKO disabled.
 *
 * @param xiface Interface to bring up
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_xaui_enable(int xiface)
{
        struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
        int interface = xi.interface;

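        /* Configure the GMX hardware for a single port on this interface */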
        __cvmx_helper_setup_gmx(interface, 1);

        /* Setup PKND and BPID */
        if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
                union cvmx_gmxx_bpid_msk bpid_msk;
                union cvmx_gmxx_bpid_mapx bpid_map;
                union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
                union cvmx_gmxx_txx_append gmxx_txx_append_cfg;

                /* Setup PKIND */
                gmxx_prtx_cfg.u64 = csr_rd(CVMX_GMXX_PRTX_CFG(0, interface));
                gmxx_prtx_cfg.s.pknd = cvmx_helper_get_pknd(interface, 0);
                csr_wr(CVMX_GMXX_PRTX_CFG(0, interface), gmxx_prtx_cfg.u64);

                /* Setup BPID */
                bpid_map.u64 = csr_rd(CVMX_GMXX_BPID_MAPX(0, interface));
                bpid_map.s.val = 1;
                bpid_map.s.bpid = cvmx_helper_get_bpid(interface, 0);
                csr_wr(CVMX_GMXX_BPID_MAPX(0, interface), bpid_map.u64);

                bpid_msk.u64 = csr_rd(CVMX_GMXX_BPID_MSK(interface));
                bpid_msk.s.msk_or |= 1;
                bpid_msk.s.msk_and &= ~1;
                csr_wr(CVMX_GMXX_BPID_MSK(interface), bpid_msk.u64);

                /* CN68XX adds the padding and FCS in PKO, not GMX */
                gmxx_txx_append_cfg.u64 =
                        csr_rd(CVMX_GMXX_TXX_APPEND(0, interface));
                gmxx_txx_append_cfg.s.fcs = 0;
                gmxx_txx_append_cfg.s.pad = 0;
                csr_wr(CVMX_GMXX_TXX_APPEND(0, interface),
                       gmxx_txx_append_cfg.u64);
        }

        /* 70XX eval boards use a Marvell PHY; set disparity accordingly. */
        if (OCTEON_IS_MODEL(OCTEON_CN70XX)) {
                union cvmx_gmxx_rxaui_ctl rxaui_ctl;

                rxaui_ctl.u64 = csr_rd(CVMX_GMXX_RXAUI_CTL(interface));
                rxaui_ctl.s.disparity = 1;
                csr_wr(CVMX_GMXX_RXAUI_CTL(interface), rxaui_ctl.u64);
        }

        __cvmx_helper_xaui_link_init(interface);

        return 0;
}

/**
 * @INTERNAL
 * Return the link state of an IPD/PKO port as returned by
 * auto-negotiation. The result of this function may not match
 * Octeon's link config if auto-negotiation has changed since
 * the last call to cvmx_helper_link_set().
 *
 * @param ipd_port IPD/PKO port to query
 *
 * @return Link state
 */
cvmx_helper_link_info_t __cvmx_helper_xaui_link_get(int ipd_port)
{
        int interface = cvmx_helper_get_interface_num(ipd_port);
        union cvmx_gmxx_tx_xaui_ctl gmxx_tx_xaui_ctl;
        union cvmx_gmxx_rx_xaui_ctl gmxx_rx_xaui_ctl;
        union cvmx_pcsxx_status1_reg pcsxx_status1_reg;
        cvmx_helper_link_info_t result;

        gmxx_tx_xaui_ctl.u64 = csr_rd(CVMX_GMXX_TX_XAUI_CTL(interface));
        gmxx_rx_xaui_ctl.u64 = csr_rd(CVMX_GMXX_RX_XAUI_CTL(interface));
        pcsxx_status1_reg.u64 = csr_rd(CVMX_PCSXX_STATUS1_REG(interface));
        result.u64 = 0;

        /* Only return a link if both RX and TX are happy */
        if (gmxx_tx_xaui_ctl.s.ls == 0 && gmxx_rx_xaui_ctl.s.status == 0 &&
            pcsxx_status1_reg.s.rcv_lnk == 1) {
                union cvmx_pcsxx_misc_ctl_reg misc_ctl;

                result.s.link_up = 1;
                result.s.full_duplex = 1;
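                /*
                 * Derive the speed from the QLM baud rate: the data rate
                 * is baud * 8/10 (8b/10b line coding) times the number of
                 * lanes, i.e. 2 lanes for RXAUI and 4 lanes for XAUI.
                 * Other models report a fixed 10 Gbps.
                 */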
                if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
                        union cvmx_mio_qlmx_cfg qlm_cfg;
                        int lanes;
                        int qlm = (interface == 1) ? 0 : interface;

                        qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(qlm));
                        result.s.speed = cvmx_qlm_get_gbaud_mhz(qlm) * 8 / 10;
                        lanes = (qlm_cfg.s.qlm_cfg == 7) ? 2 : 4;
                        result.s.speed *= lanes;
                } else if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
                        int qlm = cvmx_qlm_interface(interface);

                        result.s.speed = cvmx_qlm_get_gbaud_mhz(qlm) * 8 / 10;
                        result.s.speed *= 4;
                } else {
                        result.s.speed = 10000;
                }
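                /*
                 * If GMX is still disabled even though the link is up,
                 * the earlier bring-up did not complete; rerun the link
                 * initialization to enable it.
                 */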
                misc_ctl.u64 = csr_rd(CVMX_PCSXX_MISC_CTL_REG(interface));
                if (misc_ctl.s.gmxeno)
                        __cvmx_helper_xaui_link_init(interface);
        } else {
                /* Disable GMX and PCSX interrupts. */
                csr_wr(CVMX_GMXX_RXX_INT_EN(0, interface), 0x0);
                csr_wr(CVMX_GMXX_TX_INT_EN(interface), 0x0);
                csr_wr(CVMX_PCSXX_INT_EN_REG(interface), 0x0);
        }
        return result;
}

/**
 * @INTERNAL
 * Configure an IPD/PKO port for the specified link state. This
 * function does not influence auto-negotiation at the PHY level.
 * The passed link state must always match the link state returned
 * by cvmx_helper_link_get(). It is normally best to use
 * cvmx_helper_link_autoconf() instead.
 *
 * @param ipd_port  IPD/PKO port to configure
 * @param link_info The new link state
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_xaui_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
{
        int interface = cvmx_helper_get_interface_num(ipd_port);
        union cvmx_gmxx_tx_xaui_ctl gmxx_tx_xaui_ctl;
        union cvmx_gmxx_rx_xaui_ctl gmxx_rx_xaui_ctl;

        gmxx_tx_xaui_ctl.u64 = csr_rd(CVMX_GMXX_TX_XAUI_CTL(interface));
        gmxx_rx_xaui_ctl.u64 = csr_rd(CVMX_GMXX_RX_XAUI_CTL(interface));

        /* If the link shouldn't be up, then just return */
        if (!link_info.s.link_up)
                return 0;

        /* Do nothing if both RX and TX are happy */
        if (gmxx_tx_xaui_ctl.s.ls == 0 && gmxx_rx_xaui_ctl.s.status == 0)
                return 0;

        /* Bring the link up */
        return __cvmx_helper_xaui_link_init(interface);
}

/**
 * @INTERNAL
 * Configure a port for internal and/or external loopback. Internal loopback
 * causes packets sent by the port to be received by Octeon. External loopback
 * causes packets received from the wire to be sent out again.
 *
 * @param ipd_port IPD/PKO port to loopback.
 * @param enable_internal
 *                 Non-zero if you want internal loopback
 * @param enable_external
 *                 Non-zero if you want external loopback
 *
 * @return Zero on success, negative on failure.
 */
extern int __cvmx_helper_xaui_configure_loopback(int ipd_port,
                                                 int enable_internal,
                                                 int enable_external)
{
        int interface = cvmx_helper_get_interface_num(ipd_port);
        union cvmx_pcsxx_control1_reg pcsxx_control1_reg;
        union cvmx_gmxx_xaui_ext_loopback gmxx_xaui_ext_loopback;

        /* Set the internal loop */
        pcsxx_control1_reg.u64 = csr_rd(CVMX_PCSXX_CONTROL1_REG(interface));
        pcsxx_control1_reg.s.loopbck1 = enable_internal;
        csr_wr(CVMX_PCSXX_CONTROL1_REG(interface), pcsxx_control1_reg.u64);

        /* Set the external loop */
        gmxx_xaui_ext_loopback.u64 =
                csr_rd(CVMX_GMXX_XAUI_EXT_LOOPBACK(interface));
        gmxx_xaui_ext_loopback.s.en = enable_external;
        csr_wr(CVMX_GMXX_XAUI_EXT_LOOPBACK(interface),
               gmxx_xaui_ext_loopback.u64);

        /* Take the link through a reset */
        return __cvmx_helper_xaui_link_init(interface);
}